From 3d91383dfe199c134fddf074675c15d8a0427110 Mon Sep 17 00:00:00 2001
From: Cody Lamson
Date: Fri, 30 Jun 2023 09:01:17 +0200
Subject: [PATCH 001/722] fix: only check timestamped forks after the merge
 (#2503)

Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com>
---
 crates/primitives/src/chain/spec.rs | 54 ++++++++++++++++++++---------
 1 file changed, 37 insertions(+), 17 deletions(-)

diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs
index 4e888a5c10d1..0c84dc275399 100644
--- a/crates/primitives/src/chain/spec.rs
+++ b/crates/primitives/src/chain/spec.rs
@@ -285,29 +285,49 @@ impl ChainSpec {
         ForkFilter::new(head, self.genesis_hash(), forks)
     }
 
-    /// Compute the [`ForkId`] for the given [`Head`]
+    /// Compute the [`ForkId`] for the given [`Head`] following the EIP-6122 spec
     pub fn fork_id(&self, head: &Head) -> ForkId {
-        let mut curr_forkhash = ForkHash::from(self.genesis_hash());
-        let mut current_applied_value = 0;
+        let mut forkhash = ForkHash::from(self.genesis_hash());
+        let mut current_applied = 0;
 
+        // handle all block forks before handling timestamp based forks. see: https://eips.ethereum.org/EIPS/eip-6122
         for (_, cond) in self.forks_iter() {
-            let value = match cond {
-                ForkCondition::Block(block) => block,
-                ForkCondition::Timestamp(time) => time,
-                ForkCondition::TTD { fork_block: Some(block), .. } => block,
-                _ => continue,
-            };
-
-            if cond.active_at_head(head) {
-                if value != current_applied_value {
-                    curr_forkhash += value;
-                    current_applied_value = value;
+            // handle block based forks and the sepolia merge netsplit block edge case (TTD
+            // ForkCondition with Some(block))
+            if let ForkCondition::Block(block) |
+                ForkCondition::TTD { fork_block: Some(block), .. } = cond
+            {
+                if cond.active_at_head(head) {
+                    if block != current_applied {
+                        forkhash += block;
+                        current_applied = block;
+                    }
+                } else {
+                    // we can return here because this block fork is not active, so we set the
+                    // `next` value
+                    return ForkId { hash: forkhash, next: block }
                 }
-            } else {
-                return ForkId { hash: curr_forkhash, next: value }
             }
         }
-        ForkId { hash: curr_forkhash, next: 0 }
+
+        // timestamps are ALWAYS applied after the merge.
+ for (_, cond) in self.forks_iter() { + if let ForkCondition::Timestamp(timestamp) = cond { + if cond.active_at_head(head) { + if timestamp != current_applied { + forkhash += timestamp; + current_applied = timestamp; + } + } else { + // can safely return here because we have already handled all block forks and + // have handled all active timestamp forks, and set the next value to the + // timestamp that is known but not active yet + return ForkId { hash: forkhash, next: timestamp } + } + } + } + + ForkId { hash: forkhash, next: 0 } } /// Build a chainspec using [`ChainSpecBuilder`] From 50f4e5de9a69a983da7ebf8de839c6266e39c2c8 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 30 Jun 2023 10:51:48 +0200 Subject: [PATCH 002/722] chore(deps): move proc-macro and syn to workspace deps (#3492) --- Cargo.lock | 134 +++++++++++------------ Cargo.toml | 3 + crates/metrics/metrics-derive/Cargo.toml | 4 +- crates/rlp/rlp-derive/Cargo.toml | 4 +- crates/storage/codecs/derive/Cargo.toml | 4 +- 5 files changed, 76 insertions(+), 73 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9d3d294f7747..3e4dd33a5274 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -165,7 +165,7 @@ checksum = "759d98a5db12e9c9d98ef2b92f794ae5c7ded6ec18d21c3fa485c9c65bec237d" dependencies = [ "itertools", "proc-macro-error", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -222,7 +222,7 @@ version = "0.1.68" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.18", ] @@ -286,7 +286,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fee3da8ef1276b0bee5dd1c7258010d8fffd31801447323115a25560e1327b89" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -399,7 +399,7 @@ dependencies = [ "lazy_static", "lazycell", "peeking_take_while", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "regex", "rustc-hash", @@ -420,7 +420,7 @@ dependencies = [ "lazycell", "peeking_take_while", "prettyplease", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "regex", "rustc-hash", @@ -599,7 +599,7 @@ name = "boa_macros" version = "0.16.0" source = "git+https://github.com/boa-dev/boa#0e1b32a232109fc0e192c1297a7274091af2ac61" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.18", "synstructure 0.13.0", @@ -887,7 +887,7 @@ checksum = "44bec8e5c9d09e439c4335b1af0abaab56dcf3b94999a936e1bb47b9134288f0" dependencies = [ "heck", "proc-macro-error", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -922,7 +922,7 @@ version = "0.1.0-alpha.1" dependencies = [ "convert_case 0.6.0", "parity-scale-codec", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "serde", "syn 2.0.18", @@ -1312,7 +1312,7 @@ dependencies = [ "cc", "codespan-reporting", "once_cell", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "scratch", "syn 1.0.109", @@ -1330,7 +1330,7 @@ version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "086c685979a698443656e5cf7856c95c642295a38599f12fb1ff76fb28d19892" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -1373,7 +1373,7 @@ checksum = "f0c960ae2da4de88a91b2d920c2a7233b400bc33cb28453a2987822d8392519b" dependencies = [ 
"fnv", "ident_case", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "strsim 0.9.3", "syn 1.0.109", @@ -1387,7 +1387,7 @@ checksum = "001d80444f28e193f30c2f293455da62dcf9a6b29918a4253152ae2b1de592cb" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "strsim 0.10.0", "syn 1.0.109", @@ -1401,7 +1401,7 @@ checksum = "ab8bfa2e259f8ee1ce5e97824a3c55ec4404a0d772ca7fa96bf19f0752a046eb" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "strsim 0.10.0", "syn 2.0.18", @@ -1494,7 +1494,7 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3cdeb9ec472d588e539a818b2dee436825730da08ad0017c4b1a17676bdc8b7" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -1507,7 +1507,7 @@ checksum = "a2658621297f2cf68762a6f7dc0bb7e1ff2cfd6583daef8ee0fed6f7ec468ec0" dependencies = [ "darling 0.10.2", "derive_builder_core", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -1519,7 +1519,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2791ea3e372c8495c0bc2033991d76b512cd799d07491fbd6890124db9458bef" dependencies = [ "darling 0.10.2", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -1531,7 +1531,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case 0.4.0", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "rustc_version", "syn 1.0.109", @@ -1656,7 +1656,7 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.18", ] @@ -1735,7 +1735,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0188e3c3ba8df5753894d54461f0e39bc91741dc5b22e1c46999ec2c71f4e4" dependencies = [ "enum-ordinalize", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -1832,7 +1832,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "570d109b813e904becc80d8d5da38376818a143348413f7149f1340fe04754d4" dependencies = [ "heck", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -1844,7 +1844,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" dependencies = [ "heck", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -1857,7 +1857,7 @@ checksum = "a62bb1df8b45ecb7ffa78dca1c17a438fb193eb083db0b1b494d2a61bcb5096a" dependencies = [ "num-bigint", "num-traits", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "rustc_version", "syn 1.0.109", @@ -1869,7 +1869,7 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48016319042fb7c87b78d2993084a831793a897a5cd1a2a67cab9d1eeb4b7d76" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.18", ] @@ -2013,7 +2013,7 @@ dependencies = [ "eyre", "hex", "prettyplease", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "regex", "serde", @@ -2033,7 +2033,7 @@ dependencies = [ "ethers-contract-abigen", "ethers-core", "hex", - 
"proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "serde_json", "syn 2.0.18", @@ -2363,7 +2363,7 @@ version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.18", ] @@ -2970,7 +2970,7 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd8b728b9421e93eff1d9f8681101b78fa745e0748c95c655c83f337044a7e10" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -3058,7 +3058,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -3336,7 +3336,7 @@ checksum = "c6027ac0b197ce9543097d02a290f550ce1d9432bf301524b013053c0b75cc94" dependencies = [ "heck", "proc-macro-crate", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -3685,7 +3685,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "731f8ecebd9f3a4aa847dfe75455e4757a45da40a7793d2f0b1f9b6ed18b23f3" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -3791,7 +3791,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "832663583d5fa284ca8810bf7015e46c9fff9622d3cf34bd1eea5003fec06dd0" dependencies = [ "cfg-if", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -3812,7 +3812,7 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a7d5f7076603ebc68de2dc6a650ec331a062a13abaa346975be747bbfa4b789" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -3984,7 +3984,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.18", ] @@ -4040,7 +4040,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "003b2be5c6c53c1cfeb0a238b8a1c3915cd410feb684457a36c10038f764bb1c" dependencies = [ "bytes", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -4123,7 +4123,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86b26a931f824dd4eca30b3e43bb4f31cd5f0d3a403c5f5ff27106b805bfde7b" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -4274,7 +4274,7 @@ checksum = "92aacdc5f16768709a569e913f7451034034178b05bdc8acda226659a3dccc66" dependencies = [ "phf_generator", "phf_shared", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -4303,7 +4303,7 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.18", ] @@ -4490,7 +4490,7 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ - 
"proc-macro2 1.0.60", + "proc-macro2 1.0.63", "syn 2.0.18", ] @@ -4525,7 +4525,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", "version_check", @@ -4537,7 +4537,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "version_check", ] @@ -4553,9 +4553,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.60" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dec2b086b7a862cf4de201096214fa870344cf922b2b30c167badb3af3195406" +checksum = "7b368fba921b0dce7e60f5e04ec15e565b3303972b42bcfde1d0713b881959eb" dependencies = [ "unicode-ident", ] @@ -4678,7 +4678,7 @@ version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", ] [[package]] @@ -5385,7 +5385,7 @@ version = "0.1.0-alpha.1" dependencies = [ "metrics", "once_cell", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "regex", "serial_test 0.10.0", @@ -5628,7 +5628,7 @@ dependencies = [ name = "reth-rlp-derive" version = "0.1.0-alpha.1" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.18", ] @@ -6041,7 +6041,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -6232,7 +6232,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61fa974aea2d63dd18a4ec3a49d59af9f34178c73a4f56d2f18205628d00681e" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -6393,7 +6393,7 @@ version = "1.0.164" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.18", ] @@ -6453,7 +6453,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1966009f3c05f095697c537312f5415d1e3ed31ce0a56942bac4c771c5c335e" dependencies = [ "darling 0.14.3", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -6492,7 +6492,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b64f9e531ce97c88b4778aad0ceee079216071cffec6ac9b904277f8f92e7fe3" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -6503,7 +6503,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.18", ] @@ -6818,7 +6818,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck", - "proc-macro2 
1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "rustversion", "syn 1.0.109", @@ -6902,7 +6902,7 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "unicode-ident", ] @@ -6913,7 +6913,7 @@ version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32d41677bcbe24c20c52e7c70b0d8db04134c5d1066bf98662e2871ad200ea3e" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "unicode-ident", ] @@ -6924,7 +6924,7 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", "unicode-xid 0.2.4", @@ -6936,7 +6936,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "285ba80e733fac80aa4270fbcdf83772a79b80aa35c97075320abfee4a915b06" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.18", "unicode-xid 0.2.4", @@ -6995,7 +6995,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "385624eb0031d550fe1bf99c08af79b838605fc4fcec2c4d55e229a2c342fdd0" dependencies = [ "cargo_metadata", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "serde", "strum_macros", @@ -7011,7 +7011,7 @@ dependencies = [ "if_chain", "itertools", "lazy_static", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "subprocess", "syn 2.0.18", @@ -7060,7 +7060,7 @@ version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.18", ] @@ -7171,7 +7171,7 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.18", ] @@ -7375,7 +7375,7 @@ version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -7849,7 +7849,7 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", "wasm-bindgen-shared", @@ -7883,7 +7883,7 @@ version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", "wasm-bindgen-backend", @@ -8211,7 +8211,7 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af46c169923ed7516eef0aa32b56d2651b229f57458ebe46b49ddd6efef5b7a2" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", "synstructure 0.12.6", @@ -8232,7 +8232,7 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4eae7c1f7d4b8eafce526bc0771449ddc2f250881ae31c50d22c032b5a1c499" dependencies = [ - "proc-macro2 1.0.60", 
+ "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", "synstructure 0.12.6", @@ -8253,7 +8253,7 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.18", ] @@ -8275,7 +8275,7 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "486558732d5dde10d0f8cb2936507c1bb21bc539d924c949baf5f36a58e51bac" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", "synstructure 0.12.6", diff --git a/Cargo.toml b/Cargo.toml index 6b94e8db06f0..8bfa8df771fd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -107,6 +107,9 @@ serde_json = "1.0.94" serde = { version = "1.0", default-features = false } rand = "0.8.5" +### proc-macros +proc-macro2 = "1.0" +quote = "1.0" ## tokio tokio-stream = "0.1.11" diff --git a/crates/metrics/metrics-derive/Cargo.toml b/crates/metrics/metrics-derive/Cargo.toml index c78a48db9f83..23e35c3954b7 100644 --- a/crates/metrics/metrics-derive/Cargo.toml +++ b/crates/metrics/metrics-derive/Cargo.toml @@ -11,9 +11,9 @@ repository.workspace = true proc-macro = true [dependencies] -proc-macro2 = "1.0" +proc-macro2.workspace = true syn = { version = "2.0", features = ["extra-traits"] } -quote = "1.0" +quote.workspace = true regex = "1.6.0" once_cell = "1.17.0" diff --git a/crates/rlp/rlp-derive/Cargo.toml b/crates/rlp/rlp-derive/Cargo.toml index 299cb0993e7d..72760af83d96 100644 --- a/crates/rlp/rlp-derive/Cargo.toml +++ b/crates/rlp/rlp-derive/Cargo.toml @@ -13,5 +13,5 @@ proc-macro = true [dependencies] syn = "2" -quote = "1" -proc-macro2 = "1" +quote.workspace = true +proc-macro2.workspace = true diff --git a/crates/storage/codecs/derive/Cargo.toml b/crates/storage/codecs/derive/Cargo.toml index a59b26c578e1..12d14d51ab7c 100644 --- a/crates/storage/codecs/derive/Cargo.toml +++ b/crates/storage/codecs/derive/Cargo.toml @@ -20,8 +20,8 @@ normal = [ proc-macro = true [dependencies] -proc-macro2 = "1.0.47" -quote = "1.0" +proc-macro2.workspace = true +quote.workspace = true syn = { version = "2.0", features = ["full"] } convert_case = "0.6.0" From 84875e90df940d6d7e7737e6e47d94eefaff44df Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 30 Jun 2023 10:52:04 +0200 Subject: [PATCH 003/722] feat(txpool): add transaction_event_listener function (#3493) --- crates/transaction-pool/src/lib.rs | 4 ++++ crates/transaction-pool/src/pool/listener.rs | 2 +- crates/transaction-pool/src/pool/mod.rs | 14 ++++++++++++++ crates/transaction-pool/src/traits.rs | 5 +++++ 4 files changed, 24 insertions(+), 1 deletion(-) diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index ccafbe97781f..65e2e0e958a1 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -280,6 +280,10 @@ where Ok(transactions) } + fn transaction_event_listener(&self, tx_hash: TxHash) -> Option { + self.pool.add_transaction_event_listener(tx_hash) + } + fn pending_transactions_listener(&self) -> Receiver { self.pool.add_pending_listener() } diff --git a/crates/transaction-pool/src/pool/listener.rs b/crates/transaction-pool/src/pool/listener.rs index 39950f7a4c4f..b2197e432822 100644 --- a/crates/transaction-pool/src/pool/listener.rs +++ b/crates/transaction-pool/src/pool/listener.rs @@ -9,7 +9,7 @@ use std::{ }; use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; -/// A Stream 
that receives [TransactionEvent] for the transactions +/// A Stream that receives [TransactionEvent] for the transaction with the given hash. #[derive(Debug)] #[must_use = "streams do nothing unless polled"] pub struct TransactionEvents { diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 8832ed644f60..30cd3daffc4a 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -204,6 +204,20 @@ where rx } + /// If the pool contains the transaction, this adds a new listener that gets notified about + /// transaction events. + pub(crate) fn add_transaction_event_listener( + &self, + tx_hash: TxHash, + ) -> Option { + let pool = self.pool.read(); + if pool.contains(&tx_hash) { + Some(self.event_listener.write().subscribe(tx_hash)) + } else { + None + } + } + /// Returns hashes of _all_ transactions in the pool. pub(crate) fn pooled_transactions_hashes(&self) -> Vec { let pool = self.pool.read(); diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 05c027a804fe..121889bb18b2 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -89,6 +89,11 @@ pub trait TransactionPool: Send + Sync + Clone { transactions: Vec, ) -> PoolResult>>; + /// Returns a new transaction change event stream for the given transaction. + /// + /// Returns `None` if the transaction is not in the pool. + fn transaction_event_listener(&self, tx_hash: TxHash) -> Option; + /// Returns a new Stream that yields transactions hashes for new ready transactions. /// /// Consumer: RPC From b225b4790c98c040599f4a62f7b1e5b082278383 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 30 Jun 2023 12:36:01 +0300 Subject: [PATCH 004/722] fix(engine): logs for new payload v2 (#3494) --- crates/rpc/rpc-engine-api/src/engine_api.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 55e631328c5d..4803c4d2d715 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -314,10 +314,10 @@ where Ok(EngineApi::new_payload_v1(self, payload).await?) } - /// Handler for `engine_newPayloadV1` + /// Handler for `engine_newPayloadV2` /// See also async fn new_payload_v2(&self, payload: ExecutionPayload) -> RpcResult { - trace!(target: "rpc::engine", "Serving engine_newPayloadV1"); + trace!(target: "rpc::engine", "Serving engine_newPayloadV2"); Ok(EngineApi::new_payload_v2(self, payload).await?) 
} From 20ed70b1c3386fdaa73883a3ec32bb457e66452c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 30 Jun 2023 13:40:43 +0200 Subject: [PATCH 005/722] fix: always return logs of single block range query (#3497) --- crates/rpc/rpc/src/eth/filter.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 47bddaa98eeb..e7b397307998 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -369,6 +369,8 @@ where let address_filter = FilteredParams::address_filter(&filter.address); let topics_filter = FilteredParams::topics_filter(&topics); + let is_multi_block_range = from_block != to_block; + // loop over the range of new blocks and check logs if the filter matches the log's bloom // filter for (from, to) in @@ -401,8 +403,9 @@ where false, ); - // size check - if all_logs.len() > self.max_logs_per_response { + // size check but only if range is multiple blocks, so we always return all + // logs of a single block + if is_multi_block_range && all_logs.len() > self.max_logs_per_response { return Err(FilterError::QueryExceedsMaxResults( self.max_logs_per_response, )) From 07e81c0e7e21f558f7239c6d5c3de406856fc0e2 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 30 Jun 2023 13:42:03 +0200 Subject: [PATCH 006/722] chore: bump default max logs (#3498) --- crates/rpc/rpc-builder/src/auth.rs | 4 ++-- crates/rpc/rpc-builder/src/eth.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index e44911d4bbf2..567f10bfca4f 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -1,7 +1,7 @@ use crate::{ constants, error::{RpcError, ServerKind}, - eth::DEFAULT_MAX_LOGS_IN_RESPONSE, + eth::DEFAULT_MAX_LOGS_PER_RESPONSE, }; use hyper::header::AUTHORIZATION; pub use jsonrpsee::server::ServerBuilder; @@ -67,7 +67,7 @@ where provider, pool, eth_cache.clone(), - DEFAULT_MAX_LOGS_IN_RESPONSE, + DEFAULT_MAX_LOGS_PER_RESPONSE, Box::new(executor.clone()), ); launch_with_eth_api(eth_api, eth_filter, engine_api, socket_addr, secret).await diff --git a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs index 907740b76958..3968eef6f83e 100644 --- a/crates/rpc/rpc-builder/src/eth.rs +++ b/crates/rpc/rpc-builder/src/eth.rs @@ -8,7 +8,7 @@ use reth_rpc::{ use serde::{Deserialize, Serialize}; /// The default maximum of logs in a single response. 
-pub(crate) const DEFAULT_MAX_LOGS_IN_RESPONSE: usize = 10_000; +pub(crate) const DEFAULT_MAX_LOGS_PER_RESPONSE: usize = 20_000; /// The default maximum number of concurrently executed tracing calls pub(crate) const DEFAULT_MAX_TRACING_REQUESTS: u32 = 25; @@ -45,7 +45,7 @@ impl Default for EthConfig { cache: EthStateCacheConfig::default(), gas_oracle: GasPriceOracleConfig::default(), max_tracing_requests: DEFAULT_MAX_TRACING_REQUESTS, - max_logs_per_response: DEFAULT_MAX_LOGS_IN_RESPONSE, + max_logs_per_response: DEFAULT_MAX_LOGS_PER_RESPONSE, } } } From 5c7d5a3b3e81431a75eb5b910ea93a70904ab7bb Mon Sep 17 00:00:00 2001 From: int88 <106391185+int88@users.noreply.github.com> Date: Fri, 30 Jun 2023 20:01:30 +0800 Subject: [PATCH 007/722] feat: make chain canonical if new payload is the missing block for current sync target (#3459) Co-authored-by: Matthias Seitz --- crates/consensus/beacon/src/engine/forkchoice.rs | 3 ++- crates/consensus/beacon/src/engine/mod.rs | 8 ++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/crates/consensus/beacon/src/engine/forkchoice.rs b/crates/consensus/beacon/src/engine/forkchoice.rs index 8332ea030841..f41200e39615 100644 --- a/crates/consensus/beacon/src/engine/forkchoice.rs +++ b/crates/consensus/beacon/src/engine/forkchoice.rs @@ -18,7 +18,8 @@ pub(crate) struct ForkchoiceStateTracker { impl ForkchoiceStateTracker { /// Sets the latest forkchoice state that we received. /// - /// If the status is valid, we also update the last valid forkchoice state. + /// If the status is `VALID`, we also update the last valid forkchoice state and set the + /// `sync_target` to `None`, since we're now fully synced. pub(crate) fn set_latest(&mut self, state: ForkchoiceState, status: ForkchoiceStatus) { if status.is_valid() { self.set_valid(state); diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 7f6c23bcf99e..d815d91b319d 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -830,6 +830,7 @@ where Err(status) => return Ok(status), }; let block_hash = block.hash(); + let block_num_hash = block.num_hash(); // now check the block itself if let Some(status) = self.check_invalid_ancestor_with_head(block.parent_hash, block.hash) { @@ -847,6 +848,13 @@ where let status = match res { Ok(status) => { if status.is_valid() { + if let Some(target) = self.forkchoice_state_tracker.sync_target_state() { + // if we're currently syncing and the inserted block is the targeted FCU + // head block, we can try to make it canonical. 
+                    if block_hash == target.head_block_hash {
+                        self.try_make_sync_target_canonical(block_num_hash);
+                    }
+                }
                 // block was successfully inserted, so we can cancel the full block request, if
                 // any exists
                 self.sync.cancel_full_block_request(block_hash);

From 886828bad9a02cf40a5490036ab816f3c3e79804 Mon Sep 17 00:00:00 2001
From: Roman Krasiuk
Date: Fri, 30 Jun 2023 15:35:01 +0300
Subject: [PATCH 008/722] feat(rpc): cache metrics (#3499)

---
 Cargo.lock                      |  1 +
 crates/rpc/rpc/Cargo.toml       |  1 +
 crates/rpc/rpc/src/eth/cache.rs | 62 +++++++++++++++++++++++++++------
 3 files changed, 54 insertions(+), 10 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 3e4dd33a5274..dc6fb0b6568d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5651,6 +5651,7 @@ dependencies = [
  "pin-project",
  "rand 0.8.5",
  "reth-interfaces",
+ "reth-metrics",
  "reth-network-api",
  "reth-primitives",
  "reth-provider",

diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml
index 796c5ded0ef8..2b3d1631ab17 100644
--- a/crates/rpc/rpc/Cargo.toml
+++ b/crates/rpc/rpc/Cargo.toml
@@ -22,6 +22,7 @@ reth-network-api = { workspace = true, features = ["test-utils"] }
 reth-rpc-engine-api = { path = "../rpc-engine-api" }
 reth-revm = { path = "../../revm" }
 reth-tasks = { workspace = true }
+reth-metrics = { workspace = true }
 
 # eth
 revm = { workspace = true, features = [

diff --git a/crates/rpc/rpc/src/eth/cache.rs b/crates/rpc/rpc/src/eth/cache.rs
index 5e84fcd6934e..af0858cb4b11 100644
--- a/crates/rpc/rpc/src/eth/cache.rs
+++ b/crates/rpc/rpc/src/eth/cache.rs
@@ -2,6 +2,10 @@
 
 use futures::{future::Either, Stream, StreamExt};
 use reth_interfaces::{provider::ProviderError, Result};
+use reth_metrics::{
+    metrics::{self, Gauge},
+    Metrics,
+};
 use reth_primitives::{Block, Receipt, SealedBlock, TransactionSigned, H256};
 use reth_provider::{BlockReader, CanonStateNotification, EvmEnvProvider, StateProviderFactory};
 use reth_tasks::{TaskSpawner, TokioTaskExecutor};
@@ -104,9 +108,9 @@ impl EthStateCache {
         let (to_service, rx) = unbounded_channel();
         let service = EthStateCacheService {
             provider,
-            full_block_cache: BlockLruCache::with_memory_budget(max_block_bytes),
-            receipts_cache: ReceiptsLruCache::with_memory_budget(max_receipt_bytes),
-            evm_env_cache: EnvLruCache::with_memory_budget(max_env_bytes),
+            full_block_cache: BlockLruCache::new(max_block_bytes, "blocks"),
+            receipts_cache: ReceiptsLruCache::new(max_receipt_bytes, "receipts"),
+            evm_env_cache: EnvLruCache::new(max_env_bytes, "evm_env"),
             action_tx: to_service.clone(),
             action_rx: UnboundedReceiverStream::new(rx),
             action_task_spawner,
@@ -274,7 +278,7 @@ where
     Tasks: TaskSpawner + Clone + 'static,
 {
     fn on_new_block(&mut self, block_hash: H256, res: Result<Option<Block>>) {
-        if let Some(queued) = self.full_block_cache.queued.remove(&block_hash) {
+        if let Some(queued) = self.full_block_cache.remove(&block_hash) {
             // send the response to queued senders
             for tx in queued {
                 match tx {
@@ -297,7 +301,7 @@ where
     }
 
     fn on_new_receipts(&mut self, block_hash: H256, res: Result<Option<Vec<Receipt>>>) {
-        if let Some(queued) = self.receipts_cache.queued.remove(&block_hash) {
+        if let Some(queued) = self.receipts_cache.remove(&block_hash) {
             // send the response to queued senders
             for tx in queued {
                 let _ = tx.send(res.clone());
@@ -309,6 +313,12 @@ where
             self.receipts_cache.cache.insert(block_hash, receipts);
         }
     }
+
+    fn update_cached_metrics(&self) {
+        self.full_block_cache.update_cached_count();
+        self.receipts_cache.update_cached_count();
+        self.evm_env_cache.update_cached_count();
+    }
 }
 
 impl Future for EthStateCacheService<Provider, Tasks>
@@ -419,7 +429,7 @@ where
                     }
                     CacheAction::EnvResult { block_hash, res } => {
                         let res = *res;
-                        if let Some(queued) = this.evm_env_cache.queued.remove(&block_hash) {
+                        if let Some(queued) = this.evm_env_cache.remove(&block_hash) {
                             // send the response to queued senders
                             for tx in queued {
                                 let _ = tx.send(res.clone());
@@ -443,7 +453,8 @@ where
                             );
                         }
                     }
-                }
+                };
+                this.update_cached_metrics();
            }
        }
    }
@@ -459,6 +470,8 @@ where
    cache: LruMap<K, V, ByMemoryUsage>,
    /// All queued consumers
    queued: HashMap<K, Vec<S>>,
+    /// Cache metrics
+    metrics: CacheMetrics,
 }
 
 impl<K, V, S> MultiConsumerLruCache<K, V, S>
@@ -470,6 +483,7 @@ where
     /// Returns true if this is the first queued sender for the key
     fn queue(&mut self, key: K, sender: S) -> bool {
+        self.metrics.queued_consumers_count.increment(1.0);
         match self.queued.entry(key) {
             Entry::Occupied(mut entry) => {
                 entry.get_mut().push(sender);
@@ -481,17 +495,36 @@ where
             }
         }
     }
+
+    /// Remove consumers for a given key.
+    fn remove(&mut self, key: &K) -> Option<Vec<S>> {
+        match self.queued.remove(key) {
+            Some(removed) => {
+                self.metrics.queued_consumers_count.decrement(removed.len() as f64);
+                Some(removed)
+            }
+            None => None,
+        }
+    }
+
+    fn update_cached_count(&self) {
+        self.metrics.cached_count.set(self.cache.len() as f64);
+    }
 }
 
 impl<K, V, S> MultiConsumerLruCache<K, V, S>
 where
     K: Hash + Eq,
 {
-    /// Creates a new empty map with a given `memory_budget`.
+    /// Creates a new empty map with a given `memory_budget` and metric label.
     ///
     /// See also [LruMap::with_memory_budget]
-    fn with_memory_budget(memory_budget: usize) -> Self {
-        Self { cache: LruMap::with_memory_budget(memory_budget), queued: Default::default() }
+    fn new(memory_budget: usize, cache_id: &str) -> Self {
+        Self {
+            cache: LruMap::with_memory_budget(memory_budget),
+            queued: Default::default(),
+            metrics: CacheMetrics::new_with_labels(&[("cache", cache_id.to_string())]),
+        }
     }
 }
@@ -541,3 +574,12 @@ where
         }
     }
 }
+
+#[derive(Metrics)]
+#[metrics(scope = "rpc.eth_cache")]
+struct CacheMetrics {
+    /// The number of entities in the cache.
+    cached_count: Gauge,
+    /// The number of queued consumers.
+ queued_consumers_count: Gauge, +} From f08a863e124003ec941bf117b1d7f19a9aa87bcc Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 30 Jun 2023 14:57:46 +0200 Subject: [PATCH 009/722] chore: add total-transactions metric (#3500) --- crates/transaction-pool/src/metrics.rs | 3 +++ crates/transaction-pool/src/pool/txpool.rs | 2 ++ crates/transaction-pool/src/traits.rs | 4 ++++ 3 files changed, 9 insertions(+) diff --git a/crates/transaction-pool/src/metrics.rs b/crates/transaction-pool/src/metrics.rs index cb6ba58fc335..91e31992bf2f 100644 --- a/crates/transaction-pool/src/metrics.rs +++ b/crates/transaction-pool/src/metrics.rs @@ -30,4 +30,7 @@ pub struct TxPoolMetrics { pub(crate) queued_pool_transactions: Gauge, /// Total amount of memory used by the transactions in the queued sub-pool in bytes pub(crate) queued_pool_size_bytes: Gauge, + + /// Number of all transactions of all sub-pools: pending + basefee + queued + pub(crate) total_transactions: Gauge, } diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 3b4b03e70475..aba3e1e698d4 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -120,6 +120,7 @@ impl TxPool { basefee_size: self.basefee_pool.size(), queued: self.queued_pool.len(), queued_size: self.queued_pool.size(), + total: self.all_transactions.len(), } } @@ -272,6 +273,7 @@ impl TxPool { self.metrics.basefee_pool_size_bytes.set(stats.basefee_size as f64); self.metrics.queued_pool_transactions.set(stats.queued as f64); self.metrics.queued_pool_size_bytes.set(stats.queued_size as f64); + self.metrics.total_transactions.set(stats.total as f64); } /// Adds the transaction into the pool. diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 121889bb18b2..1781e8f5e98f 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -591,6 +591,10 @@ pub struct PoolSize { pub queued: usize, /// Reported size of transactions in the _queued_ sub-pool. pub queued_size: usize, + /// Number of all transactions of all sub-pools + /// + /// Note: this is the sum of ```pending + basefee + queued``` + pub total: usize, } /// Represents the current status of the pool. From 10db78618fb070dfab3e891804a5d04de6168f74 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 30 Jun 2023 16:48:37 +0300 Subject: [PATCH 010/722] chore(rpc): add cached bytes metric (#3502) --- crates/rpc/rpc/src/eth/cache.rs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/crates/rpc/rpc/src/eth/cache.rs b/crates/rpc/rpc/src/eth/cache.rs index af0858cb4b11..ffa0e4ec441e 100644 --- a/crates/rpc/rpc/src/eth/cache.rs +++ b/crates/rpc/rpc/src/eth/cache.rs @@ -315,9 +315,9 @@ where } fn update_cached_metrics(&self) { - self.full_block_cache.update_cached_count(); - self.receipts_cache.update_cached_count(); - self.evm_env_cache.update_cached_count(); + self.full_block_cache.update_cached_metrics(); + self.receipts_cache.update_cached_metrics(); + self.evm_env_cache.update_cached_metrics(); } } @@ -507,8 +507,9 @@ where } } - fn update_cached_count(&self) { + fn update_cached_metrics(&self) { self.metrics.cached_count.set(self.cache.len() as f64); + self.metrics.cached_bytes.set(self.cache.memory_usage() as f64); } } @@ -580,6 +581,8 @@ where struct CacheMetrics { /// The number of entities in the cache. cached_count: Gauge, + /// The memory usage of the cache in bytes. 
+    cached_bytes: Gauge,
     /// The number of queued consumers.
     queued_consumers_count: Gauge,
 }

From 83b14cb611fe4460873c944d9f65da3675799ba8 Mon Sep 17 00:00:00 2001
From: Alexey Shekhirin
Date: Fri, 30 Jun 2023 15:27:01 +0100
Subject: [PATCH 011/722] feat(bin, stages): metrics listener (#3483)

---
 bin/reth/src/node/mod.rs                   | 12 +++
 crates/metrics/Cargo.toml                  |  2 +-
 crates/stages/src/lib.rs                   |  2 +
 crates/stages/src/metrics/listener.rs      | 85 ++++++++++++++++++++++
 crates/stages/src/metrics/mod.rs           |  5 ++
 crates/stages/src/metrics/sync_metrics.rs  | 22 ++++++
 crates/stages/src/pipeline/builder.rs      | 15 +++-
 crates/stages/src/pipeline/mod.rs          | 41 +++++++----
 crates/stages/src/pipeline/sync_metrics.rs | 63 ----------------
 9 files changed, 164 insertions(+), 83 deletions(-)
 create mode 100644 crates/stages/src/metrics/listener.rs
 create mode 100644 crates/stages/src/metrics/mod.rs
 create mode 100644 crates/stages/src/metrics/sync_metrics.rs
 delete mode 100644 crates/stages/src/pipeline/sync_metrics.rs

diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs
index aaecdda514eb..d8c3cbf5a0ce 100644
--- a/bin/reth/src/node/mod.rs
+++ b/bin/reth/src/node/mod.rs
@@ -51,6 +51,7 @@ use reth_stages::{
         ExecutionStage, ExecutionStageThresholds, HeaderSyncMode, SenderRecoveryStage,
         TotalDifficultyStage,
     },
+    MetricEventsSender, MetricsListener,
 };
 use reth_tasks::TaskExecutor;
 use reth_transaction_pool::{EthTransactionValidator, TransactionPool};
@@ -275,6 +276,11 @@ impl Command {
         debug!(target: "reth::cli", "Spawning payload builder service");
         ctx.task_executor.spawn_critical("payload builder service", payload_service);
 
+        debug!(target: "reth::cli", "Spawning metrics listener task");
+        let (metrics_tx, metrics_rx) = unbounded_channel();
+        let metrics_listener = MetricsListener::new(metrics_rx);
+        ctx.task_executor.spawn_critical("metrics listener task", metrics_listener);
+
         // Configure the pipeline
         let (mut pipeline, client) = if self.auto_mine {
             let (_, client, mut task) = AutoSealBuilder::new(
@@ -293,6 +299,7 @@ impl Command {
                 Arc::clone(&consensus),
                 db.clone(),
                 &ctx.task_executor,
+                metrics_tx,
             )
             .await?;
 
@@ -310,6 +317,7 @@ impl Command {
                 Arc::clone(&consensus),
                 db.clone(),
                 &ctx.task_executor,
+                metrics_tx,
             )
             .await?;
 
@@ -421,6 +429,7 @@ impl Command {
         consensus: Arc<dyn Consensus>,
         db: DB,
         task_executor: &TaskExecutor,
+        metrics_tx: MetricEventsSender,
     ) -> eyre::Result<Pipeline<DB>>
     where
         DB: Database + Unpin + Clone + 'static,
@@ -452,6 +461,7 @@ impl Command {
             consensus,
             max_block,
             self.debug.continuous,
+            metrics_tx,
         )
         .await?;
 
@@ -632,6 +642,7 @@ impl Command {
         consensus: Arc<dyn Consensus>,
         max_block: Option<BlockNumber>,
         continuous: bool,
+        metrics_tx: MetricEventsSender,
     ) -> eyre::Result<Pipeline<DB>>
     where
         DB: Database + Clone + 'static,
@@ -670,6 +681,7 @@ impl Command {
             if continuous { HeaderSyncMode::Continuous } else { HeaderSyncMode::Tip(tip_rx) };
         let pipeline = builder
             .with_tip_sender(tip_tx)
+            .with_metric_events(metrics_tx)
             .add_stages(
                 DefaultStages::new(
                     header_mode,

diff --git a/crates/metrics/Cargo.toml b/crates/metrics/Cargo.toml
index adbd26cb8029..63a14cc8344d 100644
--- a/crates/metrics/Cargo.toml
+++ b/crates/metrics/Cargo.toml
@@ -9,7 +9,7 @@ repository.workspace = true
 description = "reth metrics utilities"
 
 [dependencies]
-# reth 
+# reth
 reth-metrics-derive = { path = "./metrics-derive" }
 
 # metrics

diff --git a/crates/stages/src/lib.rs b/crates/stages/src/lib.rs
index a1694a6b7426..ebfbac6f7934 100644
--- a/crates/stages/src/lib.rs
+++ b/crates/stages/src/lib.rs
@@ -64,6 +64,7 @@
 //!
 //! - `test-utils`: Export utilities for testing
 mod error;
+mod metrics;
 mod pipeline;
 mod stage;
 mod util;
@@ -81,5 +82,6 @@ pub mod stages;
 pub mod sets;
 
 pub use error::*;
+pub use metrics::*;
 pub use pipeline::*;
 pub use stage::*;

diff --git a/crates/stages/src/metrics/listener.rs b/crates/stages/src/metrics/listener.rs
new file mode 100644
index 000000000000..f6672a4e68dc
--- /dev/null
+++ b/crates/stages/src/metrics/listener.rs
@@ -0,0 +1,85 @@
+use crate::metrics::{StageMetrics, SyncMetrics};
+use reth_primitives::{
+    stage::{StageCheckpoint, StageId},
+    BlockNumber,
+};
+use std::{
+    future::Future,
+    pin::Pin,
+    task::{ready, Context, Poll},
+};
+use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender};
+
+/// Alias type for metric producers to use.
+pub type MetricEventsSender = UnboundedSender<MetricEvent>;
+
+/// Collection of metric events.
+#[derive(Clone, Copy, Debug)]
+pub enum MetricEvent {
+    /// Stage reached new checkpoint.
+    StageCheckpoint {
+        /// Stage ID.
+        stage_id: StageId,
+        /// Stage checkpoint.
+        checkpoint: StageCheckpoint,
+        /// Maximum known block number reachable by this stage.
+        /// If specified, `entities_total` metric is updated.
+        max_block_number: Option<BlockNumber>,
+    },
+}
+
+/// Metrics routine that listens to new metric events on the `events_rx` receiver.
+/// Upon receiving new event, related metrics are updated.
+#[derive(Debug)]
+pub struct MetricsListener {
+    events_rx: UnboundedReceiver<MetricEvent>,
+    pub(crate) sync_metrics: SyncMetrics,
+}
+
+impl MetricsListener {
+    /// Creates a new [MetricsListener] with the provided receiver of [MetricEvent].
+    pub fn new(events_rx: UnboundedReceiver<MetricEvent>) -> Self {
+        Self { events_rx, sync_metrics: SyncMetrics::default() }
+    }
+
+    fn handle_event(&mut self, event: MetricEvent) {
+        match event {
+            MetricEvent::StageCheckpoint { stage_id, checkpoint, max_block_number } => {
+                let stage_metrics = self.sync_metrics.stages.entry(stage_id).or_insert_with(|| {
+                    StageMetrics::new_with_labels(&[("stage", stage_id.to_string())])
+                });
+
+                stage_metrics.checkpoint.set(checkpoint.block_number as f64);
+
+                let (processed, total) = match checkpoint.entities() {
+                    Some(entities) => (entities.processed, Some(entities.total)),
+                    None => (checkpoint.block_number, max_block_number),
+                };
+
+                stage_metrics.entities_processed.set(processed as f64);
+
+                if let Some(total) = total {
+                    stage_metrics.entities_total.set(total as f64);
+                }
+            }
+        }
+    }
+}
+
+impl Future for MetricsListener {
+    type Output = ();
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        let this = self.get_mut();
+
+        // Loop until we drain the `events_rx` channel
+        loop {
+            let Some(event) = ready!(this.events_rx.poll_recv(cx)) else {
+                // Channel has closed
+                return Poll::Ready(())
+            };
+
+            this.handle_event(event);
+        }
+    }
+}

diff --git a/crates/stages/src/metrics/mod.rs b/crates/stages/src/metrics/mod.rs
new file mode 100644
index 000000000000..bed2742c25fc
--- /dev/null
+++ b/crates/stages/src/metrics/mod.rs
@@ -0,0 +1,5 @@
+mod listener;
+mod sync_metrics;
+
+pub use listener::{MetricEvent, MetricEventsSender, MetricsListener};
+use sync_metrics::*;

diff --git a/crates/stages/src/metrics/sync_metrics.rs b/crates/stages/src/metrics/sync_metrics.rs
new file mode 100644
index 000000000000..859a7e6d778a
--- /dev/null
+++ b/crates/stages/src/metrics/sync_metrics.rs
@@ -0,0 +1,22 @@
+use reth_metrics::{
+    metrics::{self, Gauge},
+    Metrics,
+};
+use reth_primitives::stage::StageId;
+use std::collections::HashMap;
+
+#[derive(Debug, Default)]
+pub(crate) struct SyncMetrics {
+    pub(crate) stages: HashMap<StageId, StageMetrics>,
+}
+
+#[derive(Metrics)]
+#[metrics(scope = "sync")]
+pub(crate) struct StageMetrics {
+    /// The block number of the last commit for a stage.
+    pub(crate) checkpoint: Gauge,
+    /// The number of processed entities of the last commit for a stage, if applicable.
+    pub(crate) entities_processed: Gauge,
+    /// The number of total entities of the last commit for a stage, if applicable.
+    pub(crate) entities_total: Gauge,
+}

diff --git a/crates/stages/src/pipeline/builder.rs b/crates/stages/src/pipeline/builder.rs
index 3cedb35b0ad2..5e72da6b0301 100644
--- a/crates/stages/src/pipeline/builder.rs
+++ b/crates/stages/src/pipeline/builder.rs
@@ -1,6 +1,6 @@
 use std::sync::Arc;
 
-use crate::{pipeline::BoxedStage, Pipeline, Stage, StageSet};
+use crate::{pipeline::BoxedStage, MetricEventsSender, Pipeline, Stage, StageSet};
 use reth_db::database::Database;
 use reth_primitives::{stage::StageId, BlockNumber, ChainSpec, H256};
 use tokio::sync::watch;
@@ -17,6 +17,7 @@ where
     max_block: Option<BlockNumber>,
     /// A receiver for the current chain tip to sync to.
     tip_tx: Option<watch::Sender<H256>>,
+    metrics_tx: Option<MetricEventsSender>,
 }
 
 impl<DB> PipelineBuilder<DB>
@@ -60,11 +61,17 @@ where
         self
     }
 
+    /// Set the metric events sender.
+    pub fn with_metric_events(mut self, metrics_tx: MetricEventsSender) -> Self {
+        self.metrics_tx = Some(metrics_tx);
+        self
+    }
+
     /// Builds the final [`Pipeline`] using the given database.
     ///
     /// Note: it's expected that this is either an [Arc](std::sync::Arc) or an Arc wrapper type.
     pub fn build(self, db: DB, chain_spec: Arc<ChainSpec>) -> Pipeline<DB> {
-        let Self { stages, max_block, tip_tx } = self;
+        let Self { stages, max_block, tip_tx, metrics_tx } = self;
         Pipeline {
             db,
             chain_spec,
@@ -73,14 +80,14 @@ where
             tip_tx,
             listeners: Default::default(),
             progress: Default::default(),
-            metrics: Default::default(),
+            metrics_tx,
         }
     }
 }
 
 impl<DB: Database> Default for PipelineBuilder<DB> {
     fn default() -> Self {
-        Self { stages: Vec::new(), max_block: None, tip_tx: None }
+        Self { stages: Vec::new(), max_block: None, tip_tx: None, metrics_tx: None }
     }
 }

diff --git a/crates/stages/src/pipeline/mod.rs b/crates/stages/src/pipeline/mod.rs
index 454fa377b22a..fa2ec285e0ad 100644
--- a/crates/stages/src/pipeline/mod.rs
+++ b/crates/stages/src/pipeline/mod.rs
@@ -1,4 +1,7 @@
-use crate::{error::*, ExecInput, ExecOutput, Stage, StageError, UnwindInput};
+use crate::{
+    error::*, ExecInput, ExecOutput, MetricEvent, MetricEventsSender, Stage, StageError,
+    UnwindInput,
+};
 use futures_util::Future;
 use reth_db::database::Database;
 use reth_interfaces::executor::BlockExecutionError;
@@ -17,14 +20,12 @@ mod ctrl;
 mod event;
 mod progress;
 mod set;
-mod sync_metrics;
 
 pub use crate::pipeline::ctrl::ControlFlow;
 pub use builder::*;
 pub use event::*;
 use progress::*;
 pub use set::*;
-use sync_metrics::*;
 
 /// A container for a queued stage.
 pub(crate) type BoxedStage<DB> = Box<dyn Stage<DB>>;
@@ -105,7 +106,7 @@ pub struct Pipeline<DB: Database> {
     progress: PipelineProgress,
     /// A receiver for the current chain tip to sync to.
     tip_tx: Option<watch::Sender<H256>>,
-    metrics: Metrics,
+    metrics_tx: Option<MetricEventsSender>,
 }
 
 impl<DB> Pipeline<DB>
@@ -138,16 +139,17 @@ where
     /// Registers progress metrics for each registered stage
     pub fn register_metrics(&mut self) -> Result<(), PipelineError> {
+        let Some(metrics_tx) = &mut self.metrics_tx else { return Ok(()) };
         let factory = ProviderFactory::new(&self.db, self.chain_spec.clone());
         let provider = factory.provider()?;
 
         for stage in &self.stages {
             let stage_id = stage.id();
-            self.metrics.stage_checkpoint(
+            let _ = metrics_tx.send(MetricEvent::StageCheckpoint {
                 stage_id,
-                provider.get_stage_checkpoint(stage_id)?.unwrap_or_default(),
-                None,
-            );
+                checkpoint: provider.get_stage_checkpoint(stage_id)?.unwrap_or_default(),
+                max_block_number: None,
+            });
         }
         Ok(())
     }
@@ -288,12 +290,15 @@ where
                         done = checkpoint.block_number == to,
                         "Stage unwound"
                     );
-                    self.metrics.stage_checkpoint(
-                        stage_id, checkpoint,
-                        // We assume it was set in the previous execute iteration, so it
-                        // doesn't change when we unwind.
-                        None,
-                    );
+                    if let Some(metrics_tx) = &mut self.metrics_tx {
+                        let _ = metrics_tx.send(MetricEvent::StageCheckpoint {
+                            stage_id,
+                            checkpoint,
+                            // We assume it was set in the previous execute iteration, so it
+                            // doesn't change when we unwind.
+                            max_block_number: None,
+                        });
+                    }
                     provider_rw.save_stage_checkpoint(stage_id, checkpoint)?;
 
                     self.listeners
@@ -372,7 +377,13 @@ where
                     %done,
                     "Stage committed progress"
                 );
-                self.metrics.stage_checkpoint(stage_id, checkpoint, target);
+                if let Some(metrics_tx) = &mut self.metrics_tx {
+                    let _ = metrics_tx.send(MetricEvent::StageCheckpoint {
+                        stage_id,
+                        checkpoint,
+                        max_block_number: target,
+                    });
+                }
                 provider_rw.save_stage_checkpoint(stage_id, checkpoint)?;
 
                 self.listeners.notify(PipelineEvent::Ran {

diff --git a/crates/stages/src/pipeline/sync_metrics.rs b/crates/stages/src/pipeline/sync_metrics.rs
deleted file mode 100644
index 04a7d63585fe..000000000000
--- a/crates/stages/src/pipeline/sync_metrics.rs
+++ /dev/null
@@ -1,63 +0,0 @@
-use reth_metrics::{
-    metrics::{self, Gauge},
-    Metrics,
-};
-use reth_primitives::{
-    stage::{
-        AccountHashingCheckpoint, EntitiesCheckpoint, ExecutionCheckpoint, HeadersCheckpoint,
-        IndexHistoryCheckpoint, StageCheckpoint, StageId, StageUnitCheckpoint,
-        StorageHashingCheckpoint,
-    },
-    BlockNumber,
-};
-use std::collections::HashMap;
-
-#[derive(Metrics)]
-#[metrics(scope = "sync")]
-pub(crate) struct StageMetrics {
-    /// The block number of the last commit for a stage.
-    checkpoint: Gauge,
-    /// The number of processed entities of the last commit for a stage, if applicable.
-    entities_processed: Gauge,
-    /// The number of total entities of the last commit for a stage, if applicable.
-    entities_total: Gauge,
-}
-
-#[derive(Default)]
-pub(crate) struct Metrics {
-    stages: HashMap<StageId, StageMetrics>,
-}
-
-impl Metrics {
-    pub(crate) fn stage_checkpoint(
-        &mut self,
-        stage_id: StageId,
-        checkpoint: StageCheckpoint,
-        max_block_number: Option<BlockNumber>,
-    ) {
-        let stage_metrics = self
-            .stages
-            .entry(stage_id)
-            .or_insert_with(|| StageMetrics::new_with_labels(&[("stage", stage_id.to_string())]));
-
-        stage_metrics.checkpoint.set(checkpoint.block_number as f64);
-
-        let (processed, total) = match checkpoint.stage_checkpoint {
-            Some(
-                StageUnitCheckpoint::Account(AccountHashingCheckpoint { progress, .. }) |
-                StageUnitCheckpoint::Storage(StorageHashingCheckpoint { progress, .. }) |
-                StageUnitCheckpoint::Entities(progress @ EntitiesCheckpoint { .. }) |
-                StageUnitCheckpoint::Execution(ExecutionCheckpoint { progress, ..
}) | - StageUnitCheckpoint::Headers(HeadersCheckpoint { progress, .. }) | - StageUnitCheckpoint::IndexHistory(IndexHistoryCheckpoint { progress, .. }), - ) => (progress.processed, Some(progress.total)), - None => (checkpoint.block_number, max_block_number), - }; - - stage_metrics.entities_processed.set(processed as f64); - - if let Some(total) = total { - stage_metrics.entities_total.set(total as f64); - } - } -} From 7e7f5e63dbccbbfd54d94481c911d9380cfdf89b Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 30 Jun 2023 18:54:26 +0300 Subject: [PATCH 012/722] chore: fix docs for bodies downloader task (#3506) --- crates/net/downloaders/src/bodies/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/net/downloaders/src/bodies/mod.rs b/crates/net/downloaders/src/bodies/mod.rs index 147bf2525237..f8931ea81a42 100644 --- a/crates/net/downloaders/src/bodies/mod.rs +++ b/crates/net/downloaders/src/bodies/mod.rs @@ -2,7 +2,7 @@ #[allow(clippy::module_inception)] pub mod bodies; -/// TODO: +/// A downloader implementation that spawns a downloader to a task pub mod task; mod queue; From 777fce5ba9d170808a30d70d5c6d73ea11d2b9a9 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Fri, 30 Jun 2023 10:40:55 -0700 Subject: [PATCH 013/722] docs: add note on hardware + latitude discount code --- book/installation/installation.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/book/installation/installation.md b/book/installation/installation.md index 7f6dc4decde8..c27ed8ce1f53 100644 --- a/book/installation/installation.md +++ b/book/installation/installation.md @@ -53,3 +53,9 @@ A stable and dependable internet connection is crucial for both syncing a node f Note that due to Reth's staged sync, you only need an internet connection for the Headers and Bodies stages. This means that the first 1-3 hours (depending on your internet connection) will be online, downloading all necessary data, and the rest will be done offline and does not require an internet connection. Once you're synced to the tip you will need a reliable connection, especially if you're operating a validator. A 24Mbps connection is recommended, but you can probably get away with less. Make sure your ISP does not cap your bandwidth. + +## What hardware can I get? + +If you are buying your own NVMe SSD, please consult [this hardware comparison](https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038) which is being actively maintained. We recommend against buying DRAM-less or QLC devices as these are noticeably slower. + +All our benchmarks have been produced on [Latitude](https://www.latitude.sh/), a bare metal provider. We use `c3.large.x86` boxes. So far our experience has been smooth with some users reporting that the NVMEs there outperform AWS NVMEs by 3x or more. We're excited for more Reth nodes on Latitude, so we are for a limited time you can use `RETH200` for a $200 discount. 
[Run a node now!](https://metal.new/reth) From d448bf4f6317b35b1d8945f55332ae74fff4db95 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Fri, 30 Jun 2023 10:44:23 -0700 Subject: [PATCH 014/722] chore: docs typo --- book/installation/installation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/book/installation/installation.md b/book/installation/installation.md index c27ed8ce1f53..fc735cb6ea9c 100644 --- a/book/installation/installation.md +++ b/book/installation/installation.md @@ -58,4 +58,4 @@ Once you're synced to the tip you will need a reliable connection, especially if If you are buying your own NVMe SSD, please consult [this hardware comparison](https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038) which is being actively maintained. We recommend against buying DRAM-less or QLC devices as these are noticeably slower. -All our benchmarks have been produced on [Latitude](https://www.latitude.sh/), a bare metal provider. We use `c3.large.x86` boxes. So far our experience has been smooth with some users reporting that the NVMEs there outperform AWS NVMEs by 3x or more. We're excited for more Reth nodes on Latitude, so we are for a limited time you can use `RETH200` for a $200 discount. [Run a node now!](https://metal.new/reth) +All our benchmarks have been produced on [Latitude](https://www.latitude.sh/), a bare metal provider. We use `c3.large.x86` boxes. So far our experience has been smooth with some users reporting that the NVMEs there outperform AWS NVMEs by 3x or more. We're excited for more Reth nodes on Latitude, so for a limited time you can use `RETH200` for a $200 discount. [Run a node now!](https://metal.new/reth) From 927b47e65598b0c777922345067143141e9269c9 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Fri, 30 Jun 2023 10:51:58 -0700 Subject: [PATCH 015/722] chore: docs typo (again) --- book/installation/installation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/book/installation/installation.md b/book/installation/installation.md index fc735cb6ea9c..0e0d2feb131a 100644 --- a/book/installation/installation.md +++ b/book/installation/installation.md @@ -58,4 +58,4 @@ Once you're synced to the tip you will need a reliable connection, especially if If you are buying your own NVMe SSD, please consult [this hardware comparison](https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038) which is being actively maintained. We recommend against buying DRAM-less or QLC devices as these are noticeably slower. -All our benchmarks have been produced on [Latitude](https://www.latitude.sh/), a bare metal provider. We use `c3.large.x86` boxes. So far our experience has been smooth with some users reporting that the NVMEs there outperform AWS NVMEs by 3x or more. We're excited for more Reth nodes on Latitude, so for a limited time you can use `RETH200` for a $200 discount. [Run a node now!](https://metal.new/reth) +All our benchmarks have been produced on [Latitude.sh](https://www.latitude.sh/), a bare metal provider. We use `c3.large.x86` boxes. So far our experience has been smooth with some users reporting that the NVMEs there outperform AWS NVMEs by 3x or more. We're excited for more Reth nodes on Latitude.sh, so for a limited time you can use `RETH200` for a $200 discount. 
[Run a node now!](https://metal.new/reth) From 8be06843e468a5389e20082ffcfc09e8edafa208 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 30 Jun 2023 21:01:50 +0300 Subject: [PATCH 016/722] fix(rpc): limit cache by length (#3505) --- bin/reth/src/args/rpc_server_args.rs | 36 ++++---------- crates/rpc/rpc/src/eth/cache.rs | 71 +++++++++++++++------------- 2 files changed, 47 insertions(+), 60 deletions(-) diff --git a/bin/reth/src/args/rpc_server_args.rs b/bin/reth/src/args/rpc_server_args.rs index c08aebe0e54d..bcf10cd037d0 100644 --- a/bin/reth/src/args/rpc_server_args.rs +++ b/bin/reth/src/args/rpc_server_args.rs @@ -13,8 +13,7 @@ use reth_provider::{ use reth_rpc::{ eth::{ cache::{ - DEFAULT_BLOCK_CACHE_SIZE_BYTES_MB, DEFAULT_ENV_CACHE_SIZE_BYTES_MB, - DEFAULT_RECEIPT_CACHE_SIZE_BYTES_MB, + DEFAULT_BLOCK_CACHE_MAX_LEN, DEFAULT_ENV_CACHE_MAX_LEN, DEFAULT_RECEIPT_CACHE_MAX_LEN, }, gas_oracle::GasPriceOracleConfig, }, @@ -136,17 +135,17 @@ pub struct RpcServerArgs { #[clap(flatten)] pub gas_price_oracle: GasPriceOracleArgs, - /// Max size for cached block data in megabytes. - #[arg(long, default_value_t = DEFAULT_BLOCK_CACHE_SIZE_BYTES_MB)] - pub block_cache_size: usize, + /// Maximum number of block cache entries. + #[arg(long, default_value_t = DEFAULT_BLOCK_CACHE_MAX_LEN)] + pub block_cache_len: u32, - /// Max size for cached receipt data in megabytes. - #[arg(long, default_value_t = DEFAULT_RECEIPT_CACHE_SIZE_BYTES_MB)] - pub receipt_cache_size: usize, + /// Maximum number of receipt cache entries. + #[arg(long, default_value_t = DEFAULT_RECEIPT_CACHE_MAX_LEN)] + pub receipt_cache_len: u32, - /// Max size for cached evm env data in megabytes. - #[arg(long, default_value_t = DEFAULT_ENV_CACHE_SIZE_BYTES_MB)] - pub env_cache_size: usize, + /// Maximum number of env cache entries. + #[arg(long, default_value_t = DEFAULT_ENV_CACHE_MAX_LEN)] + pub env_cache_len: u32, } impl RpcServerArgs { @@ -160,21 +159,6 @@ impl RpcServerArgs { self.rpc_max_response_size * 1024 * 1024 } - /// Returns the max number of bytes for cached block data in bytes - pub fn block_cache_size_bytes(&self) -> usize { - self.block_cache_size * 1024 * 1024 - } - - /// Returns the max number of bytes for cached receipt data in bytes - pub fn receipt_cache_size_bytes(&self) -> usize { - self.receipt_cache_size * 1024 * 1024 - } - - /// Returns the max number of bytes for cached evm env data in bytes - pub fn env_cache_size_bytes(&self) -> usize { - self.env_cache_size * 1024 * 1024 - } - /// Extracts the gas price oracle config from the args. 
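
For readers less familiar with clap, the new length-based flags above rely on `default_value_t`, which accepts any expression of the field's type. A trimmed, hypothetical one-field version of the struct (assuming clap v4 with the `derive` feature, which the crate already uses):

    use clap::Parser;

    const DEFAULT_BLOCK_CACHE_MAX_LEN: u32 = 5000;

    #[derive(Parser, Debug)]
    struct CacheArgs {
        /// Maximum number of block cache entries.
        #[arg(long, default_value_t = DEFAULT_BLOCK_CACHE_MAX_LEN)]
        block_cache_len: u32,
    }

    fn main() {
        // `--block-cache-len 2000` overrides the 5000-entry default.
        let args = CacheArgs::parse();
        println!("block cache capacity: {} entries", args.block_cache_len);
    }

Counting entries instead of megabytes is also what makes the three `*_cache_size_bytes()` conversion helpers deleted above unnecessary: the value passes straight through to the cache.
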
pub fn gas_price_oracle_config(&self) -> GasPriceOracleConfig { GasPriceOracleConfig::new( diff --git a/crates/rpc/rpc/src/eth/cache.rs b/crates/rpc/rpc/src/eth/cache.rs index ffa0e4ec441e..b23a677cf55f 100644 --- a/crates/rpc/rpc/src/eth/cache.rs +++ b/crates/rpc/rpc/src/eth/cache.rs @@ -10,7 +10,7 @@ use reth_primitives::{Block, Receipt, SealedBlock, TransactionSigned, H256}; use reth_provider::{BlockReader, CanonStateNotification, EvmEnvProvider, StateProviderFactory}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use revm::primitives::{BlockEnv, CfgEnv}; -use schnellru::{ByMemoryUsage, Limiter, LruMap}; +use schnellru::{ByLength, Limiter, LruMap}; use serde::{Deserialize, Serialize}; use std::{ collections::{hash_map::Entry, HashMap}, @@ -25,6 +25,7 @@ use tokio::sync::{ }; use tokio_stream::wrappers::UnboundedReceiverStream; +// TODO: memory based limiter is currently disabled pending /// Default cache size for the block cache: 500MB /// /// With an average block size of ~100kb this should be able to cache ~5000 blocks. @@ -36,6 +37,15 @@ pub const DEFAULT_RECEIPT_CACHE_SIZE_BYTES_MB: usize = 500; /// Default cache size for the env cache: 1MB pub const DEFAULT_ENV_CACHE_SIZE_BYTES_MB: usize = 1; +/// Default cache size for the block cache: 5000 blocks. +pub const DEFAULT_BLOCK_CACHE_MAX_LEN: u32 = 5000; + +/// Default cache size for the receipts cache: 2000 receipts. +pub const DEFAULT_RECEIPT_CACHE_MAX_LEN: u32 = 2000; + +/// Default cache size for the env cache: 1000 envs. +pub const DEFAULT_ENV_CACHE_MAX_LEN: u32 = 1000; + /// The type that can send the response to a requested [Block] type BlockResponseSender = oneshot::Sender>>; @@ -63,26 +73,26 @@ type EnvLruCache = MultiConsumerLruCache Self { Self { - max_block_bytes: DEFAULT_BLOCK_CACHE_SIZE_BYTES_MB * 1024 * 1024, - max_receipt_bytes: DEFAULT_RECEIPT_CACHE_SIZE_BYTES_MB * 1024 * 1024, - max_env_bytes: DEFAULT_ENV_CACHE_SIZE_BYTES_MB * 1024 * 1024, + max_blocks: DEFAULT_BLOCK_CACHE_MAX_LEN, + max_receipts: DEFAULT_RECEIPT_CACHE_MAX_LEN, + max_envs: DEFAULT_ENV_CACHE_MAX_LEN, } } } @@ -101,16 +111,16 @@ impl EthStateCache { fn create( provider: Provider, action_task_spawner: Tasks, - max_block_bytes: usize, - max_receipt_bytes: usize, - max_env_bytes: usize, + max_blocks: u32, + max_receipts: u32, + max_envs: u32, ) -> (Self, EthStateCacheService) { let (to_service, rx) = unbounded_channel(); let service = EthStateCacheService { provider, - full_block_cache: BlockLruCache::new(max_block_bytes, "blocks"), - receipts_cache: ReceiptsLruCache::new(max_receipt_bytes, "receipts"), - evm_env_cache: EnvLruCache::new(max_env_bytes, "evm_env"), + full_block_cache: BlockLruCache::new(max_blocks, "blocks"), + receipts_cache: ReceiptsLruCache::new(max_receipts, "receipts"), + evm_env_cache: EnvLruCache::new(max_envs, "evm_env"), action_tx: to_service.clone(), action_rx: UnboundedReceiverStream::new(rx), action_task_spawner, @@ -143,14 +153,9 @@ impl EthStateCache { Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, { - let EthStateCacheConfig { max_block_bytes, max_receipt_bytes, max_env_bytes } = config; - let (this, service) = Self::create( - provider, - executor.clone(), - max_block_bytes, - max_receipt_bytes, - max_env_bytes, - ); + let EthStateCacheConfig { max_blocks, max_receipts, max_envs } = config; + let (this, service) = + Self::create(provider, executor.clone(), max_blocks, max_receipts, max_envs); executor.spawn_critical("eth state cache", 
Box::pin(service)); this } @@ -248,9 +253,9 @@ impl EthStateCache { pub(crate) struct EthStateCacheService< Provider, Tasks, - LimitBlocks = ByMemoryUsage, - LimitReceipts = ByMemoryUsage, - LimitEnvs = ByMemoryUsage, + LimitBlocks = ByLength, + LimitReceipts = ByLength, + LimitEnvs = ByLength, > where LimitBlocks: Limiter, LimitReceipts: Limiter>, @@ -513,16 +518,14 @@ where } } -impl MultiConsumerLruCache +impl MultiConsumerLruCache where K: Hash + Eq, { /// Creates a new empty map with a given `memory_budget` and metric label. - /// - /// See also [LruMap::with_memory_budget] - fn new(memory_budget: usize, cache_id: &str) -> Self { + fn new(max_len: u32, cache_id: &str) -> Self { Self { - cache: LruMap::with_memory_budget(memory_budget), + cache: LruMap::new(ByLength::new(max_len)), queued: Default::default(), metrics: CacheMetrics::new_with_labels(&[("cache", cache_id.to_string())]), } From 02492daac9bea3a31b21712e406e392160baf8a4 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 30 Jun 2023 21:26:14 +0300 Subject: [PATCH 017/722] fix(cli): consistent max block across pipeline and engine (#3510) Co-authored-by: Matthias Seitz --- bin/reth/src/node/mod.rs | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index d8c3cbf5a0ce..7a67b006e4f0 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -36,7 +36,9 @@ use reth_interfaces::{ }; use reth_network::{error::NetworkError, NetworkConfig, NetworkHandle, NetworkManager}; use reth_network_api::NetworkInfo; -use reth_primitives::{stage::StageId, BlockHashOrNumber, ChainSpec, Head, SealedHeader, H256}; +use reth_primitives::{ + stage::StageId, BlockHashOrNumber, BlockNumber, ChainSpec, Head, SealedHeader, H256, +}; use reth_provider::{ BlockHashReader, BlockReader, CanonStateSubscriptions, HeaderProvider, ProviderFactory, StageCheckpointReader, @@ -281,6 +283,14 @@ impl Command { let metrics_listener = MetricsListener::new(metrics_rx); ctx.task_executor.spawn_critical("metrics listener task", metrics_listener); + let max_block = if let Some(block) = self.debug.max_block { + Some(block) + } else if let Some(tip) = self.debug.tip { + Some(self.lookup_or_fetch_tip(&db, &network_client, tip).await?) + } else { + None + }; + // Configure the pipeline let (mut pipeline, client) = if self.auto_mine { let (_, client, mut task) = AutoSealBuilder::new( @@ -300,6 +310,7 @@ impl Command { db.clone(), &ctx.task_executor, metrics_tx, + max_block, ) .await?; @@ -318,6 +329,7 @@ impl Command { db.clone(), &ctx.task_executor, metrics_tx, + max_block, ) .await?; @@ -346,7 +358,7 @@ impl Command { blockchain_db.clone(), Box::new(ctx.task_executor.clone()), Box::new(network.clone()), - self.debug.max_block, + max_block, self.debug.continuous, payload_builder.clone(), initial_target, @@ -422,6 +434,7 @@ impl Command { } /// Constructs a [Pipeline] that's wired to the network + #[allow(clippy::too_many_arguments)] async fn build_networked_pipeline( &self, config: &mut Config, @@ -430,19 +443,12 @@ impl Command { db: DB, task_executor: &TaskExecutor, metrics_tx: MetricEventsSender, + max_block: Option, ) -> eyre::Result> where DB: Database + Unpin + Clone + 'static, Client: HeadersClient + BodiesClient + Clone + 'static, { - let max_block = if let Some(block) = self.debug.max_block { - Some(block) - } else if let Some(tip) = self.debug.tip { - Some(self.lookup_or_fetch_tip(&db, &client, tip).await?) 
- } else { - None - }; - // building network downloaders using the fetch client let header_downloader = ReverseHeadersDownloaderBuilder::from(config.stages.headers) .build(client.clone(), Arc::clone(&consensus)) From 7cb4a71cf6d530b342ca3ab641d5297ba643e8d8 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 1 Jul 2023 00:27:17 +0200 Subject: [PATCH 018/722] chore: rm cached bytes gauge (#3513) --- crates/rpc/rpc/src/eth/cache.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/crates/rpc/rpc/src/eth/cache.rs b/crates/rpc/rpc/src/eth/cache.rs index b23a677cf55f..9a8c3a7ee83b 100644 --- a/crates/rpc/rpc/src/eth/cache.rs +++ b/crates/rpc/rpc/src/eth/cache.rs @@ -512,9 +512,9 @@ where } } + #[inline] fn update_cached_metrics(&self) { self.metrics.cached_count.set(self.cache.len() as f64); - self.metrics.cached_bytes.set(self.cache.memory_usage() as f64); } } @@ -584,8 +584,6 @@ where struct CacheMetrics { /// The number of entities in the cache. cached_count: Gauge, - /// The memory usage of the cache in bytes. - cached_bytes: Gauge, /// The number of queued consumers. queued_consumers_count: Gauge, } From 56674ade06fb81247b4ef51fcdae875280550207 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Sat, 1 Jul 2023 08:39:52 +0300 Subject: [PATCH 019/722] chore(download): bodies downloader size limit (#3508) --- bin/reth/src/stage/run.rs | 4 +- crates/config/src/config.rs | 9 ++-- crates/net/downloaders/src/bodies/bodies.rs | 53 +++++++++++---------- 3 files changed, 35 insertions(+), 31 deletions(-) diff --git a/bin/reth/src/stage/run.rs b/bin/reth/src/stage/run.rs index 95760bd7294c..aea66ec0d841 100644 --- a/bin/reth/src/stage/run.rs +++ b/bin/reth/src/stage/run.rs @@ -176,8 +176,8 @@ impl Command { downloader: BodiesDownloaderBuilder::default() .with_stream_batch_size(batch_size as usize) .with_request_limit(config.stages.bodies.downloader_request_limit) - .with_max_buffered_blocks( - config.stages.bodies.downloader_max_buffered_blocks, + .with_max_buffered_blocks_size_bytes( + config.stages.bodies.downloader_max_buffered_blocks_size_bytes, ) .with_concurrent_requests_range( config.stages.bodies.downloader_min_concurrent_requests..= diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index 4593dedd75df..246078759dd6 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -151,8 +151,8 @@ pub struct BodiesConfig { /// Maximum amount of received bodies to buffer internally. /// The response contains multiple bodies. /// - /// Default: ~43_000 or 4GB with block size of 100kb - pub downloader_max_buffered_blocks: usize, + /// Default: 4GB + pub downloader_max_buffered_blocks_size_bytes: usize, /// The minimum number of requests to send concurrently. 
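
One detail of the max-block change (#3510) worth spelling out: the resolution order is explicit argument first, then a one-time lookup of the configured tip hash, then unbounded. A sketch with a stubbed lookup, since the real `lookup_or_fetch_tip` needs the database and a network client:

    /// Stub for illustration only: the real function checks the database first
    /// and otherwise fetches the header for the given tip hash from the network.
    async fn lookup_or_fetch_tip(_tip: [u8; 32]) -> eyre::Result<u64> {
        Ok(17_500_000)
    }

    async fn resolve_max_block(
        max_block: Option<u64>,
        tip: Option<[u8; 32]>,
    ) -> eyre::Result<Option<u64>> {
        if let Some(block) = max_block {
            Ok(Some(block)) // an explicit `debug.max_block` wins
        } else if let Some(tip) = tip {
            Ok(Some(lookup_or_fetch_tip(tip).await?)) // resolve the hash once, up front
        } else {
            Ok(None) // no bound: sync to the tip
        }
    }

Hoisting this out of `build_networked_pipeline` is what lets the pipeline and the consensus engine agree on the same bound.
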
/// /// Default: 5 @@ -169,8 +169,7 @@ impl Default for BodiesConfig { Self { downloader_request_limit: 200, downloader_stream_batch_size: 10_000, - // With high block sizes at around 100kb this will be ~4GB of buffered blocks: ~43k - downloader_max_buffered_blocks: 4 * 1024 * 1024 * 1024 / 100_000, + downloader_max_buffered_blocks_size_bytes: 4 * 1024 * 1024 * 1024, // ~4GB downloader_min_concurrent_requests: 5, downloader_max_concurrent_requests: 100, } @@ -182,7 +181,7 @@ impl From for BodiesDownloaderBuilder { BodiesDownloaderBuilder::default() .with_stream_batch_size(config.downloader_stream_batch_size) .with_request_limit(config.downloader_request_limit) - .with_max_buffered_blocks(config.downloader_max_buffered_blocks) + .with_max_buffered_blocks_size_bytes(config.downloader_max_buffered_blocks_size_bytes) .with_concurrent_requests_range( config.downloader_min_concurrent_requests..= config.downloader_max_concurrent_requests, diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index e2a108ba9f96..d26523119282 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -48,10 +48,10 @@ pub struct BodiesDownloader { stream_batch_size: usize, /// The allowed range for number of concurrent requests. concurrent_requests_range: RangeInclusive, - /// Maximum amount of received blocks to buffer internally. - max_buffered_blocks: usize, - /// Current number of buffered blocks - num_buffered_blocks: usize, + /// Maximum number of bytes of received blocks to buffer internally. + max_buffered_blocks_size_bytes: usize, + /// Current estimated size of buffered blocks in bytes. + buffered_blocks_size_bytes: usize, /// The range of block numbers for body download. download_range: RangeInclusive, /// The latest block number returned. 
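
The bookkeeping behind the new fields reduces to a few lines; a cut-down sketch, not the real downloader types:

    /// Byte-budgeted buffer in miniature: admission is gated on a running
    /// byte total rather than on the number of buffered items.
    struct SizedBuffer<T> {
        items: Vec<(T, usize)>,
        size_bytes: usize,
        max_size_bytes: usize,
    }

    impl<T> SizedBuffer<T> {
        fn new(max_size_bytes: usize) -> Self {
            Self { items: Vec::new(), size_bytes: 0, max_size_bytes }
        }

        /// Mirrors `has_buffer_capacity`: strictly below the budget.
        fn has_capacity(&self) -> bool {
            self.size_bytes < self.max_size_bytes
        }

        fn push(&mut self, item: T, size: usize) {
            self.size_bytes += size;
            self.items.push((item, size));
        }

        fn pop(&mut self) -> Option<T> {
            let (item, size) = self.items.pop()?;
            self.size_bytes -= size; // symmetric with push, as in pop_buffered_response
            Some(item)
        }
    }

The payoff of #3508 is that a batch of near-empty blocks and a batch of full ones now consume budget proportional to their actual size, instead of each counting as one block.
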
@@ -172,9 +172,9 @@ where max_requests.min(*self.concurrent_requests_range.end()) } - /// Returns true if the number of buffered blocks is lower than the configured maximum + /// Returns true if the size of buffered blocks is lower than the configured maximum fn has_buffer_capacity(&self) -> bool { - self.num_buffered_blocks < self.max_buffered_blocks + self.buffered_blocks_size_bytes < self.max_buffered_blocks_size_bytes } // Check if the stream is terminated @@ -202,7 +202,7 @@ where self.in_progress_queue.clear(); self.queued_bodies = Vec::new(); self.buffered_responses = BinaryHeap::new(); - self.num_buffered_blocks = 0; + self.buffered_blocks_size_bytes = 0; // reset metrics self.metrics.in_flight_requests.set(0.); @@ -223,21 +223,24 @@ where fn pop_buffered_response(&mut self) -> Option { let resp = self.buffered_responses.pop()?; self.metrics.buffered_responses.decrement(1.); - self.num_buffered_blocks -= resp.len(); - self.metrics.buffered_blocks.set(self.num_buffered_blocks as f64); - self.metrics.buffered_blocks_size_bytes.decrement(resp.size() as f64); + self.buffered_blocks_size_bytes -= resp.size(); + self.metrics.buffered_blocks.decrement(resp.len() as f64); + self.metrics.buffered_blocks_size_bytes.set(resp.size() as f64); Some(resp) } /// Adds a new response to the internal buffer fn buffer_bodies_response(&mut self, response: Vec) { - self.num_buffered_blocks += response.len(); - self.metrics.buffered_blocks.set(self.num_buffered_blocks as f64); let size = response.iter().map(|b| b.size()).sum::(); let response = OrderedBodiesResponse { resp: response, size }; + let response_len = response.len(); + + self.buffered_blocks_size_bytes += size; self.buffered_responses.push(response); + + self.metrics.buffered_blocks.increment(response_len as f64); + self.metrics.buffered_blocks_size_bytes.set(self.buffered_blocks_size_bytes as f64); self.metrics.buffered_responses.set(self.buffered_responses.len() as f64); - self.metrics.buffered_blocks_size_bytes.increment(size as f64); } /// Returns a response if it's first block number matches the next expected. @@ -502,8 +505,8 @@ pub struct BodiesDownloaderBuilder { pub request_limit: u64, /// The maximum number of block bodies returned at once from the stream pub stream_batch_size: usize, - /// Maximum amount of received bodies to buffer internally. - pub max_buffered_blocks: usize, + /// Maximum number of bytes of received bodies to buffer internally. + pub max_buffered_blocks_size_bytes: usize, /// The maximum number of requests to send concurrently. pub concurrent_requests_range: RangeInclusive, } @@ -513,8 +516,7 @@ impl Default for BodiesDownloaderBuilder { Self { request_limit: 200, stream_batch_size: 10_000, - // With high block sizes at around 100kb this will be ~4GB of buffered blocks: ~43k - max_buffered_blocks: 4 * 1024 * 1024 * 1024 / 100_000, + max_buffered_blocks_size_bytes: 4 * 1024 * 1024 * 1024, // ~4GB concurrent_requests_range: 5..=100, } } @@ -533,7 +535,7 @@ impl BodiesDownloaderBuilder { self } - /// Set on the downloader. + /// Set concurrent requests range on the downloader. pub fn with_concurrent_requests_range( mut self, concurrent_requests_range: RangeInclusive, @@ -542,9 +544,12 @@ impl BodiesDownloaderBuilder { self } - /// Set on the downloader. - pub fn with_max_buffered_blocks(mut self, max_buffered_responses: usize) -> Self { - self.max_buffered_blocks = max_buffered_responses; + /// Set max buffered block bytes on the downloader. 
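
Put together, configuring the downloader after this patch reads as below. The builder methods are taken from the diff; the import path is an assumption for illustration:

    use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder; // path assumed

    /// The post-#3508 defaults: a byte budget instead of a block count.
    fn bodies_downloader_defaults() -> BodiesDownloaderBuilder {
        BodiesDownloaderBuilder::default()
            .with_request_limit(200)
            .with_stream_batch_size(10_000)
            .with_max_buffered_blocks_size_bytes(4 * 1024 * 1024 * 1024) // ~4GB
            .with_concurrent_requests_range(5..=100)
    }
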
+ pub fn with_max_buffered_blocks_size_bytes( + mut self, + max_buffered_blocks_size_bytes: usize, + ) -> Self { + self.max_buffered_blocks_size_bytes = max_buffered_blocks_size_bytes; self } @@ -563,7 +568,7 @@ impl BodiesDownloaderBuilder { request_limit, stream_batch_size, concurrent_requests_range, - max_buffered_blocks: max_buffered_responses, + max_buffered_blocks_size_bytes, } = self; let metrics = BodyDownloaderMetrics::default(); let in_progress_queue = BodiesRequestQueue::new(metrics.clone()); @@ -573,7 +578,7 @@ impl BodiesDownloaderBuilder { db, request_limit, stream_batch_size, - max_buffered_blocks: max_buffered_responses, + max_buffered_blocks_size_bytes, concurrent_requests_range, in_progress_queue, metrics, @@ -581,7 +586,7 @@ impl BodiesDownloaderBuilder { latest_queued_block_number: None, buffered_responses: Default::default(), queued_bodies: Default::default(), - num_buffered_blocks: 0, + buffered_blocks_size_bytes: 0, } } } From de1323921d1b501abe7705f9dd103b04a9f747b6 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Sat, 1 Jul 2023 14:16:22 +0300 Subject: [PATCH 020/722] chore(rpc): split cache into multiple files (#3519) Co-authored-by: Matthias Seitz --- crates/rpc/rpc/src/eth/cache/config.rs | 50 ++++++ crates/rpc/rpc/src/eth/cache/metrics.rs | 13 ++ .../rpc/src/eth/{cache.rs => cache/mod.rs} | 153 ++---------------- .../rpc/rpc/src/eth/cache/multi_consumer.rs | 107 ++++++++++++ 4 files changed, 183 insertions(+), 140 deletions(-) create mode 100644 crates/rpc/rpc/src/eth/cache/config.rs create mode 100644 crates/rpc/rpc/src/eth/cache/metrics.rs rename crates/rpc/rpc/src/eth/{cache.rs => cache/mod.rs} (81%) create mode 100644 crates/rpc/rpc/src/eth/cache/multi_consumer.rs diff --git a/crates/rpc/rpc/src/eth/cache/config.rs b/crates/rpc/rpc/src/eth/cache/config.rs new file mode 100644 index 000000000000..da7037e72a77 --- /dev/null +++ b/crates/rpc/rpc/src/eth/cache/config.rs @@ -0,0 +1,50 @@ +use serde::{Deserialize, Serialize}; + +// TODO: memory based limiter is currently disabled pending +/// Default cache size for the block cache: 500MB +/// +/// With an average block size of ~100kb this should be able to cache ~5000 blocks. +pub const DEFAULT_BLOCK_CACHE_SIZE_BYTES_MB: usize = 500; + +/// Default cache size for the receipts cache: 500MB +pub const DEFAULT_RECEIPT_CACHE_SIZE_BYTES_MB: usize = 500; + +/// Default cache size for the env cache: 1MB +pub const DEFAULT_ENV_CACHE_SIZE_BYTES_MB: usize = 1; + +/// Default cache size for the block cache: 5000 blocks. +pub const DEFAULT_BLOCK_CACHE_MAX_LEN: u32 = 5000; + +/// Default cache size for the receipts cache: 2000 receipts. +pub const DEFAULT_RECEIPT_CACHE_MAX_LEN: u32 = 2000; + +/// Default cache size for the env cache: 1000 envs. +pub const DEFAULT_ENV_CACHE_MAX_LEN: u32 = 1000; + +/// Settings for the [EthStateCache](crate::eth::cache::EthStateCache). +#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct EthStateCacheConfig { + /// Max number of blocks in cache. + /// + /// Default is 5000. + pub max_blocks: u32, + /// Max number receipts in cache. + /// + /// Default is 2000. + pub max_receipts: u32, + /// Max number of bytes for cached env data. + /// + /// Default is 1000. 
+ pub max_envs: u32, +} + +impl Default for EthStateCacheConfig { + fn default() -> Self { + Self { + max_blocks: DEFAULT_BLOCK_CACHE_MAX_LEN, + max_receipts: DEFAULT_RECEIPT_CACHE_MAX_LEN, + max_envs: DEFAULT_ENV_CACHE_MAX_LEN, + } + } +} diff --git a/crates/rpc/rpc/src/eth/cache/metrics.rs b/crates/rpc/rpc/src/eth/cache/metrics.rs new file mode 100644 index 000000000000..eb1f092e8094 --- /dev/null +++ b/crates/rpc/rpc/src/eth/cache/metrics.rs @@ -0,0 +1,13 @@ +use reth_metrics::{ + metrics::{self, Gauge}, + Metrics, +}; + +#[derive(Metrics)] +#[metrics(scope = "rpc.eth_cache")] +pub(crate) struct CacheMetrics { + /// The number of entities in the cache. + pub(crate) cached_count: Gauge, + /// The number of queued consumers. + pub(crate) queued_consumers_count: Gauge, +} diff --git a/crates/rpc/rpc/src/eth/cache.rs b/crates/rpc/rpc/src/eth/cache/mod.rs similarity index 81% rename from crates/rpc/rpc/src/eth/cache.rs rename to crates/rpc/rpc/src/eth/cache/mod.rs index 9a8c3a7ee83b..f5e758f86b70 100644 --- a/crates/rpc/rpc/src/eth/cache.rs +++ b/crates/rpc/rpc/src/eth/cache/mod.rs @@ -2,20 +2,13 @@ use futures::{future::Either, Stream, StreamExt}; use reth_interfaces::{provider::ProviderError, Result}; -use reth_metrics::{ - metrics::{self, Gauge}, - Metrics, -}; use reth_primitives::{Block, Receipt, SealedBlock, TransactionSigned, H256}; use reth_provider::{BlockReader, CanonStateNotification, EvmEnvProvider, StateProviderFactory}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use revm::primitives::{BlockEnv, CfgEnv}; -use schnellru::{ByLength, Limiter, LruMap}; -use serde::{Deserialize, Serialize}; +use schnellru::{ByLength, Limiter}; use std::{ - collections::{hash_map::Entry, HashMap}, future::Future, - hash::Hash, pin::Pin, task::{ready, Context, Poll}, }; @@ -25,26 +18,13 @@ use tokio::sync::{ }; use tokio_stream::wrappers::UnboundedReceiverStream; -// TODO: memory based limiter is currently disabled pending -/// Default cache size for the block cache: 500MB -/// -/// With an average block size of ~100kb this should be able to cache ~5000 blocks. -pub const DEFAULT_BLOCK_CACHE_SIZE_BYTES_MB: usize = 500; - -/// Default cache size for the receipts cache: 500MB -pub const DEFAULT_RECEIPT_CACHE_SIZE_BYTES_MB: usize = 500; +mod config; +pub use config::*; -/// Default cache size for the env cache: 1MB -pub const DEFAULT_ENV_CACHE_SIZE_BYTES_MB: usize = 1; +mod metrics; -/// Default cache size for the block cache: 5000 blocks. -pub const DEFAULT_BLOCK_CACHE_MAX_LEN: u32 = 5000; - -/// Default cache size for the receipts cache: 2000 receipts. -pub const DEFAULT_RECEIPT_CACHE_MAX_LEN: u32 = 2000; - -/// Default cache size for the env cache: 1000 envs. -pub const DEFAULT_ENV_CACHE_MAX_LEN: u32 = 1000; +mod multi_consumer; +pub use multi_consumer::MultiConsumerLruCache; /// The type that can send the response to a requested [Block] type BlockResponseSender = oneshot::Sender>>; @@ -69,34 +49,6 @@ type ReceiptsLruCache = MultiConsumerLruCache, L, Receipts type EnvLruCache = MultiConsumerLruCache; -/// Settings for the [EthStateCache] -#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct EthStateCacheConfig { - /// Max number of blocks in cache. - /// - /// Default is 5000. - pub max_blocks: u32, - /// Max number receipts in cache. - /// - /// Default is 2000. - pub max_receipts: u32, - /// Max number of bytes for cached env data. - /// - /// Default is 1000. 
- pub max_envs: u32, -} - -impl Default for EthStateCacheConfig { - fn default() -> Self { - Self { - max_blocks: DEFAULT_BLOCK_CACHE_MAX_LEN, - max_receipts: DEFAULT_RECEIPT_CACHE_MAX_LEN, - max_envs: DEFAULT_ENV_CACHE_MAX_LEN, - } - } -} - /// Provides async access to cached eth data /// /// This is the frontend for the async caching service which manages cached data on a different @@ -301,7 +253,7 @@ where // cache good block if let Ok(Some(block)) = res { - self.full_block_cache.cache.insert(block_hash, block); + self.full_block_cache.insert(block_hash, block); } } @@ -315,7 +267,7 @@ where // cache good receipts if let Ok(Some(receipts)) = res { - self.receipts_cache.cache.insert(block_hash, receipts); + self.receipts_cache.insert(block_hash, receipts); } } @@ -345,9 +297,7 @@ where match action { CacheAction::GetBlock { block_hash, response_tx } => { // check if block is cached - if let Some(block) = - this.full_block_cache.cache.get(&block_hash).cloned() - { + if let Some(block) = this.full_block_cache.get(&block_hash).cloned() { let _ = response_tx.send(Ok(Some(block))); continue } @@ -365,7 +315,7 @@ where } CacheAction::GetBlockTransactions { block_hash, response_tx } => { // check if block is cached - if let Some(block) = this.full_block_cache.cache.get(&block_hash) { + if let Some(block) = this.full_block_cache.get(&block_hash) { let _ = response_tx.send(Ok(Some(block.body.clone()))); continue } @@ -383,9 +333,7 @@ where } CacheAction::GetReceipts { block_hash, response_tx } => { // check if block is cached - if let Some(receipts) = - this.receipts_cache.cache.get(&block_hash).cloned() - { + if let Some(receipts) = this.receipts_cache.get(&block_hash).cloned() { let _ = response_tx.send(Ok(Some(receipts))); continue } @@ -403,7 +351,7 @@ where } CacheAction::GetEnv { block_hash, response_tx } => { // check if env data is cached - if let Some(env) = this.evm_env_cache.cache.get(&block_hash).cloned() { + if let Some(env) = this.evm_env_cache.get(&block_hash).cloned() { let _ = response_tx.send(Ok(env)); continue } @@ -443,7 +391,7 @@ where // cache good env data if let Ok(data) = res { - this.evm_env_cache.cache.insert(block_hash, data); + this.evm_env_cache.insert(block_hash, data); } } CacheAction::CacheNewCanonicalChain { blocks, receipts } => { @@ -466,72 +414,6 @@ where } } -struct MultiConsumerLruCache -where - K: Hash + Eq, - L: Limiter, -{ - /// The LRU cache for the - cache: LruMap, - /// All queued consumers - queued: HashMap>, - /// Cache metrics - metrics: CacheMetrics, -} - -impl MultiConsumerLruCache -where - K: Hash + Eq, - L: Limiter, -{ - /// Adds the sender to the queue for the given key. - /// - /// Returns true if this is the first queued sender for the key - fn queue(&mut self, key: K, sender: S) -> bool { - self.metrics.queued_consumers_count.increment(1.0); - match self.queued.entry(key) { - Entry::Occupied(mut entry) => { - entry.get_mut().push(sender); - false - } - Entry::Vacant(entry) => { - entry.insert(vec![sender]); - true - } - } - } - - /// Remove consumers for a given key. - fn remove(&mut self, key: &K) -> Option> { - match self.queued.remove(key) { - Some(removed) => { - self.metrics.queued_consumers_count.decrement(removed.len() as f64); - Some(removed) - } - None => None, - } - } - - #[inline] - fn update_cached_metrics(&self) { - self.metrics.cached_count.set(self.cache.len() as f64); - } -} - -impl MultiConsumerLruCache -where - K: Hash + Eq, -{ - /// Creates a new empty map with a given `memory_budget` and metric label. 
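
The service loop above is a single-task actor: every request carries a `oneshot` reply channel, the task exclusively owns the caches, and responses are fire-and-forget. Its skeleton, with toy key and value types standing in for hashes and blocks:

    use std::collections::HashMap;
    use tokio::sync::{mpsc, oneshot};

    enum CacheAction {
        Get { key: u64, response_tx: oneshot::Sender<Option<String>> },
        Insert { key: u64, value: String },
    }

    // The actor owns all state, so callers never take a lock.
    async fn cache_service(mut rx: mpsc::UnboundedReceiver<CacheAction>) {
        let mut store: HashMap<u64, String> = HashMap::new();
        while let Some(action) = rx.recv().await {
            match action {
                CacheAction::Get { key, response_tx } => {
                    // As in the real loop, a dropped requester is not an error.
                    let _ = response_tx.send(store.get(&key).cloned());
                }
                CacheAction::Insert { key, value } => {
                    store.insert(key, value);
                }
            }
        }
    }
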
- fn new(max_len: u32, cache_id: &str) -> Self { - Self { - cache: LruMap::new(ByLength::new(max_len)), - queued: Default::default(), - metrics: CacheMetrics::new_with_labels(&[("cache", cache_id.to_string())]), - } - } -} - /// All message variants sent through the channel enum CacheAction { GetBlock { block_hash: H256, response_tx: BlockResponseSender }, @@ -578,12 +460,3 @@ where } } } - -#[derive(Metrics)] -#[metrics(scope = "rpc.eth_cache")] -struct CacheMetrics { - /// The number of entities in the cache. - cached_count: Gauge, - /// The number of queued consumers. - queued_consumers_count: Gauge, -} diff --git a/crates/rpc/rpc/src/eth/cache/multi_consumer.rs b/crates/rpc/rpc/src/eth/cache/multi_consumer.rs new file mode 100644 index 000000000000..32840dc8e33b --- /dev/null +++ b/crates/rpc/rpc/src/eth/cache/multi_consumer.rs @@ -0,0 +1,107 @@ +use super::metrics::CacheMetrics; +use schnellru::{ByLength, Limiter, LruMap}; +use std::{ + collections::{hash_map::Entry, HashMap}, + fmt::{self, Debug, Formatter}, + hash::Hash, +}; + +/// A multi-consumer LRU cache. +pub struct MultiConsumerLruCache +where + K: Hash + Eq, + L: Limiter, +{ + /// The LRU cache for the + cache: LruMap, + /// All queued consumers + queued: HashMap>, + /// Cache metrics + metrics: CacheMetrics, +} + +impl Debug for MultiConsumerLruCache +where + K: Hash + Eq, + L: Limiter, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("MultiConsumerLruCache") + .field("cache_length", &self.cache.len()) + .field("cache_memory_usage", &self.cache.memory_usage()) + .field("queued_length", &self.queued.len()) + .finish() + } +} + +impl MultiConsumerLruCache +where + K: Hash + Eq + Debug, + L: Limiter, +{ + /// Adds the sender to the queue for the given key. + /// + /// Returns true if this is the first queued sender for the key + pub fn queue(&mut self, key: K, sender: S) -> bool { + self.metrics.queued_consumers_count.increment(1.0); + match self.queued.entry(key) { + Entry::Occupied(mut entry) => { + entry.get_mut().push(sender); + false + } + Entry::Vacant(entry) => { + entry.insert(vec![sender]); + true + } + } + } + + /// Remove consumers for a given key. + pub fn remove(&mut self, key: &K) -> Option> { + match self.queued.remove(key) { + Some(removed) => { + self.metrics.queued_consumers_count.decrement(removed.len() as f64); + Some(removed) + } + None => None, + } + } + + /// Returns a reference to the value for a given key and promotes that element to be the most + /// recently used. + pub fn get(&mut self, key: &K) -> Option<&mut V> { + self.cache.get(key) + } + + /// Inserts a new element into the map. + /// + /// Can fail if the element is rejected by the limiter or if we fail to grow an empty map. + /// + /// See [Schnellru::insert](LruMap::insert) for more info. + pub fn insert<'a>(&mut self, key: L::KeyToInsert<'a>, value: V) -> bool + where + L::KeyToInsert<'a>: Hash + PartialEq, + { + self.cache.insert(key, value) + } + + /// Update metrics for the inner cache. + #[inline] + pub fn update_cached_metrics(&self) { + self.metrics.cached_count.set(self.cache.len() as f64); + } +} + +impl MultiConsumerLruCache +where + K: Hash + Eq, +{ + /// Creates a new empty map with a given `max_len` and metric label. 
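
Because `get` promotes and `ByLength` caps the entry count, the eviction behavior the cache now relies on can be demonstrated in a few lines (schnellru is already in the dependency tree):

    use schnellru::{ByLength, LruMap};

    fn main() {
        let mut cache = LruMap::new(ByLength::new(2));
        cache.insert("a", 1);
        cache.insert("b", 2);
        cache.get(&"a"); // promotes "a" to most recently used
        cache.insert("c", 3); // at capacity 2 this evicts "b", the LRU entry
        assert!(cache.get(&"b").is_none());
        assert!(cache.get(&"a").is_some() && cache.get(&"c").is_some());
    }
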
+ pub fn new(max_len: u32, cache_id: &str) -> Self { + Self { + cache: LruMap::new(ByLength::new(max_len)), + queued: Default::default(), + metrics: CacheMetrics::new_with_labels(&[("cache", cache_id.to_string())]), + } + } +} From e468e15e9c408cdce980de1bdc9d720ee73b6461 Mon Sep 17 00:00:00 2001 From: Pia <76558220+rkdud007@users.noreply.github.com> Date: Sat, 1 Jul 2023 14:37:11 +0300 Subject: [PATCH 021/722] fix(book): typo (#3521) --- book/intro.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/book/intro.md b/book/intro.md index 143a8360e66b..3d739ba1789b 100644 --- a/book/intro.md +++ b/book/intro.md @@ -65,7 +65,7 @@ As a data engineer/analyst, or as a data indexer, you'll want to use Archive mod ## Is this secure? -Reth implements the specification of Ethereum as defined in the [ethereum/execution-specs](https://github.com/ethereum/execution-specs/) repository. To make sure the node is built securelty, we run the following tests: +Reth implements the specification of Ethereum as defined in the [ethereum/execution-specs](https://github.com/ethereum/execution-specs/) repository. To make sure the node is built securely, we run the following tests: 1. EVM state tests are run on every [Revm Pull Request](https://github.com/bluealloy/revm/blob/main/.github/workflows/ethereum-tests.yml) 1. Hive tests are [run every 24 hours](https://github.com/paradigmxyz/reth/blob/main/.github/workflows/hive.yml) in the main Reth repository. From 2126c01a4248abb8140f236f3ba6c44bc78f3f56 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Sat, 1 Jul 2023 13:49:26 +0200 Subject: [PATCH 022/722] feat(error): add wrappers for `std::fs` methods to track path for errors (#3367) Co-authored-by: Matthias Seitz --- bin/reth/src/args/secret_key.rs | 31 ++----- bin/reth/src/args/utils.rs | 8 +- bin/reth/src/debug_cmd/execution.rs | 4 +- bin/reth/src/debug_cmd/merkle.rs | 3 +- bin/reth/src/stage/drop.rs | 4 +- bin/reth/src/test_vectors/tables.rs | 3 +- bin/reth/src/utils.rs | 6 +- crates/primitives/src/fs.rs | 110 ++++++++++++++++++++++++ crates/primitives/src/lib.rs | 1 + crates/rpc/rpc/Cargo.toml | 2 +- crates/rpc/rpc/src/layers/jwt_secret.rs | 24 +++--- crates/storage/db/benches/criterion.rs | 8 +- crates/storage/db/benches/hash_keys.rs | 2 +- crates/storage/db/benches/utils.rs | 3 +- 14 files changed, 155 insertions(+), 54 deletions(-) create mode 100644 crates/primitives/src/fs.rs diff --git a/bin/reth/src/args/secret_key.rs b/bin/reth/src/args/secret_key.rs index 8547b2416f72..644513c3439f 100644 --- a/bin/reth/src/args/secret_key.rs +++ b/bin/reth/src/args/secret_key.rs @@ -1,8 +1,8 @@ use hex::encode as hex_encode; use reth_network::config::rng_secret_key; +use reth_primitives::{fs, fs::FsPathError}; use secp256k1::{Error as SecretKeyBaseError, SecretKey}; use std::{ - fs::read_to_string, io, path::{Path, PathBuf}, }; @@ -14,12 +14,8 @@ use thiserror::Error; pub enum SecretKeyError { #[error(transparent)] SecretKeyDecodeError(#[from] SecretKeyBaseError), - #[error("Failed to create parent directory {dir:?} for secret key: {error}")] - FailedToCreateSecretParentDir { error: io::Error, dir: PathBuf }, - #[error("Failed to write secret key file {secret_file:?}: {error}")] - FailedToWriteSecretKeyFile { error: io::Error, secret_file: PathBuf }, - #[error("Failed to read secret key file {secret_file:?}: {error}")] - FailedToReadSecretKeyFile { error: io::Error, secret_file: PathBuf }, + #[error(transparent)] + SecretKeyFsPathError(#[from] 
FsPathError), #[error("Failed to access key file {secret_file:?}: {error}")] FailedToAccessKeyFile { error: io::Error, secret_file: PathBuf }, } @@ -32,30 +28,19 @@ pub fn get_secret_key(secret_key_path: &Path) -> Result { - let contents = read_to_string(secret_key_path).map_err(|error| { - SecretKeyError::FailedToReadSecretKeyFile { - error, - secret_file: secret_key_path.to_path_buf(), - } - })?; - (contents.as_str().parse::()).map_err(SecretKeyError::SecretKeyDecodeError) + let contents = fs::read_to_string(secret_key_path)?; + Ok((contents.as_str().parse::()) + .map_err(SecretKeyError::SecretKeyDecodeError)?) } Ok(false) => { if let Some(dir) = secret_key_path.parent() { // Create parent directory - std::fs::create_dir_all(dir).map_err(|error| { - SecretKeyError::FailedToCreateSecretParentDir { error, dir: dir.to_path_buf() } - })?; + fs::create_dir_all(dir)?; } let secret = rng_secret_key(); let hex = hex_encode(secret.as_ref()); - std::fs::write(secret_key_path, hex).map_err(|error| { - SecretKeyError::FailedToWriteSecretKeyFile { - error, - secret_file: secret_key_path.to_path_buf(), - } - })?; + fs::write(secret_key_path, hex)?; Ok(secret) } Err(error) => Err(SecretKeyError::FailedToAccessKeyFile { diff --git a/bin/reth/src/args/utils.rs b/bin/reth/src/args/utils.rs index 031b457d2ef4..4a11339450ed 100644 --- a/bin/reth/src/args/utils.rs +++ b/bin/reth/src/args/utils.rs @@ -1,6 +1,8 @@ //! Clap parser utilities -use reth_primitives::{AllGenesisFormats, BlockHashOrNumber, ChainSpec, GOERLI, MAINNET, SEPOLIA}; +use reth_primitives::{ + fs, AllGenesisFormats, BlockHashOrNumber, ChainSpec, GOERLI, MAINNET, SEPOLIA, +}; use reth_revm::primitives::B256 as H256; use std::{ net::{IpAddr, Ipv4Addr, SocketAddr, ToSocketAddrs}, @@ -24,7 +26,7 @@ pub fn chain_spec_value_parser(s: &str) -> eyre::Result, eyre::Er "goerli" => GOERLI.clone(), "sepolia" => SEPOLIA.clone(), _ => { - let raw = std::fs::read_to_string(PathBuf::from(shellexpand::full(s)?.into_owned()))?; + let raw = fs::read_to_string(PathBuf::from(shellexpand::full(s)?.into_owned()))?; serde_json::from_str(&raw)? 
} }) @@ -38,7 +40,7 @@ pub fn genesis_value_parser(s: &str) -> eyre::Result, eyre::Error "goerli" => GOERLI.clone(), "sepolia" => SEPOLIA.clone(), _ => { - let raw = std::fs::read_to_string(PathBuf::from(shellexpand::full(s)?.into_owned()))?; + let raw = fs::read_to_string(PathBuf::from(shellexpand::full(s)?.into_owned()))?; let genesis: AllGenesisFormats = serde_json::from_str(&raw)?; Arc::new(genesis.into()) } diff --git a/bin/reth/src/debug_cmd/execution.rs b/bin/reth/src/debug_cmd/execution.rs index fa3b8502a1ae..d6fc51add656 100644 --- a/bin/reth/src/debug_cmd/execution.rs +++ b/bin/reth/src/debug_cmd/execution.rs @@ -22,7 +22,7 @@ use reth_interfaces::{ }; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; -use reth_primitives::{stage::StageId, BlockHashOrNumber, BlockNumber, ChainSpec, H256}; +use reth_primitives::{fs, stage::StageId, BlockHashOrNumber, BlockNumber, ChainSpec, H256}; use reth_provider::{BlockExecutionWriter, ProviderFactory, StageCheckpointReader}; use reth_staged_sync::utils::init::init_genesis; use reth_stages::{ @@ -200,7 +200,7 @@ impl Command { let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); let db_path = data_dir.db_path(); - std::fs::create_dir_all(&db_path)?; + fs::create_dir_all(&db_path)?; let db = Arc::new(init_db(db_path)?); debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis"); diff --git a/bin/reth/src/debug_cmd/merkle.rs b/bin/reth/src/debug_cmd/merkle.rs index 9b186506b1ff..5cfbe54fa3af 100644 --- a/bin/reth/src/debug_cmd/merkle.rs +++ b/bin/reth/src/debug_cmd/merkle.rs @@ -6,6 +6,7 @@ use crate::{ use clap::Parser; use reth_db::{cursor::DbCursorRO, init_db, tables, transaction::DbTx}; use reth_primitives::{ + fs, stage::{StageCheckpoint, StageId}, ChainSpec, }; @@ -64,7 +65,7 @@ impl Command { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); let db_path = data_dir.db_path(); - std::fs::create_dir_all(&db_path)?; + fs::create_dir_all(&db_path)?; let db = Arc::new(init_db(db_path)?); let factory = ProviderFactory::new(&db, self.chain.clone()); diff --git a/bin/reth/src/stage/drop.rs b/bin/reth/src/stage/drop.rs index cd6e94436c6c..efdbaa7b86b6 100644 --- a/bin/reth/src/stage/drop.rs +++ b/bin/reth/src/stage/drop.rs @@ -6,7 +6,7 @@ use crate::{ }; use clap::Parser; use reth_db::{database::Database, open_db, tables, transaction::DbTxMut, DatabaseEnv}; -use reth_primitives::{stage::StageId, ChainSpec}; +use reth_primitives::{fs, stage::StageId, ChainSpec}; use reth_staged_sync::utils::init::{insert_genesis_header, insert_genesis_state}; use std::sync::Arc; use tracing::info; @@ -50,7 +50,7 @@ impl Command { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); let db_path = data_dir.db_path(); - std::fs::create_dir_all(&db_path)?; + fs::create_dir_all(&db_path)?; let db = open_db(db_path.as_ref())?; diff --git a/bin/reth/src/test_vectors/tables.rs b/bin/reth/src/test_vectors/tables.rs index bf447456c420..0b4cde9501f3 100644 --- a/bin/reth/src/test_vectors/tables.rs +++ b/bin/reth/src/test_vectors/tables.rs @@ -11,6 +11,7 @@ use reth_db::{ table::{DupSort, Table}, tables, }; +use reth_primitives::fs; use tracing::error; const VECTORS_FOLDER: &str = "testdata/micro/db"; @@ -19,7 +20,7 @@ const PER_TABLE: usize = 1000; /// Generates test vectors for specified `tables`. If list is empty, then generate for all tables. 
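
Every call-site change in this patch buys the same thing: an error that names the offending path. The pattern in miniature, as a standalone sketch of what `reth_primitives::fs` does with `thiserror`:

    use std::{fs, io, path::{Path, PathBuf}};

    #[derive(Debug)]
    struct PathIoError {
        source: io::Error,
        path: PathBuf,
    }

    /// Same shape as the wrappers: delegate to std, attach the path on error.
    fn read_to_string(path: impl AsRef<Path>) -> Result<String, PathIoError> {
        let path = path.as_ref();
        fs::read_to_string(path)
            .map_err(|source| PathIoError { source, path: path.to_path_buf() })
    }

    fn main() {
        if let Err(e) = read_to_string("/nonexistent/reth.toml") {
            // The path travels with the io::Error instead of being lost.
            eprintln!("failed to read {:?}: {}", e.path, e.source);
        }
    }
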
pub(crate) fn generate_vectors(mut tables: Vec) -> Result<()> { let mut runner = TestRunner::new(ProptestConfig::default()); - std::fs::create_dir_all(VECTORS_FOLDER)?; + fs::create_dir_all(VECTORS_FOLDER)?; macro_rules! generate_vector { ($table_type:ident, $per_table:expr, TABLE) => { diff --git a/bin/reth/src/utils.rs b/bin/reth/src/utils.rs index a6c1b6e0fac3..1ac4f4d427fb 100644 --- a/bin/reth/src/utils.rs +++ b/bin/reth/src/utils.rs @@ -1,6 +1,6 @@ //! Common CLI utility functions. -use eyre::{Result, WrapErr}; +use eyre::Result; use reth_db::{ cursor::DbCursorRO, database::Database, @@ -11,7 +11,7 @@ use reth_interfaces::p2p::{ headers::client::{HeadersClient, HeadersRequest}, priority::Priority, }; -use reth_primitives::{BlockHashOrNumber, ChainSpec, HeadersDirection, SealedHeader}; +use reth_primitives::{fs, BlockHashOrNumber, ChainSpec, HeadersDirection, SealedHeader}; use std::{ env::VarError, path::{Path, PathBuf}, @@ -98,7 +98,7 @@ impl<'a, DB: Database> DbTool<'a, DB> { pub fn drop(&mut self, path: impl AsRef) -> Result<()> { let path = path.as_ref(); info!(target: "reth::cli", "Dropping database at {:?}", path); - std::fs::remove_dir_all(path).wrap_err("Dropping the database failed")?; + fs::remove_dir_all(path)?; Ok(()) } diff --git a/crates/primitives/src/fs.rs b/crates/primitives/src/fs.rs new file mode 100644 index 000000000000..f31b279c5b58 --- /dev/null +++ b/crates/primitives/src/fs.rs @@ -0,0 +1,110 @@ +//! Wrapper for `std::fs` methods +use std::{ + fs, io, + path::{Path, PathBuf}, +}; + +/// Various error variants for `std::fs` operations that serve as an addition to the io::Error which +/// does not provide any information about the path. +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum FsPathError { + /// Provides additional path context for [`std::fs::write`]. + #[error("failed to write to {path:?}: {source}")] + Write { source: io::Error, path: PathBuf }, + /// Provides additional path context for [`std::fs::read`]. + #[error("failed to read from {path:?}: {source}")] + Read { source: io::Error, path: PathBuf }, + /// Provides additional path context for [`std::fs::read_link`]. + #[error("failed to read from {path:?}: {source}")] + ReadLink { source: io::Error, path: PathBuf }, + /// Provides additional path context for [`std::fs::File::create`]. + #[error("failed to create file {path:?}: {source}")] + CreateFile { source: io::Error, path: PathBuf }, + /// Provides additional path context for [`std::fs::remove_file`]. + #[error("failed to remove file {path:?}: {source}")] + RemoveFile { source: io::Error, path: PathBuf }, + /// Provides additional path context for [`std::fs::create_dir`]. + #[error("failed to create dir {path:?}: {source}")] + CreateDir { source: io::Error, path: PathBuf }, + /// Provides additional path context for [`std::fs::remove_dir`]. + #[error("failed to remove dir {path:?}: {source}")] + RemoveDir { source: io::Error, path: PathBuf }, + /// Provides additional path context for [`std::fs::File::open`]. + #[error("failed to open file {path:?}: {source}")] + Open { source: io::Error, path: PathBuf }, + /// Provides additional path context for the file whose contents should be parsed as JSON. + #[error("failed to parse json file: {path:?}: {source}")] + ReadJson { source: serde_json::Error, path: PathBuf }, + /// Provides additional path context for the new JSON file. 
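
From the caller's side the wrappers are drop-in, and because they already return `FsPathError` no manual mapping is needed; the path can be recovered by matching on the variant. A short usage sketch, assuming `reth-primitives` as a dependency:

    use reth_primitives::fs::{self, FsPathError};

    fn load_config(path: &str) -> Result<String, FsPathError> {
        Ok(fs::read_to_string(path)?)
    }

    fn main() {
        if let Err(FsPathError::Read { path, source }) = load_config("missing.toml") {
            eprintln!("could not read {path:?}: {source}");
        }
    }
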
+ #[error("failed to write to json file: {path:?}: {source}")] + WriteJson { source: serde_json::Error, path: PathBuf }, +} + +impl FsPathError { + /// Returns the complementary error variant for [`std::fs::write`]. + pub fn write(source: io::Error, path: impl Into) -> Self { + FsPathError::Write { source, path: path.into() } + } + + /// Returns the complementary error variant for [`std::fs::read`]. + pub fn read(source: io::Error, path: impl Into) -> Self { + FsPathError::Read { source, path: path.into() } + } + + /// Returns the complementary error variant for [`std::fs::read_link`]. + pub fn read_link(source: io::Error, path: impl Into) -> Self { + FsPathError::ReadLink { source, path: path.into() } + } + + /// Returns the complementary error variant for [`std::fs::File::create`]. + pub fn create_file(source: io::Error, path: impl Into) -> Self { + FsPathError::CreateFile { source, path: path.into() } + } + + /// Returns the complementary error variant for [`std::fs::remove_file`]. + pub fn remove_file(source: io::Error, path: impl Into) -> Self { + FsPathError::RemoveFile { source, path: path.into() } + } + + /// Returns the complementary error variant for [`std::fs::create_dir`]. + pub fn create_dir(source: io::Error, path: impl Into) -> Self { + FsPathError::CreateDir { source, path: path.into() } + } + + /// Returns the complementary error variant for [`std::fs::remove_dir`]. + pub fn remove_dir(source: io::Error, path: impl Into) -> Self { + FsPathError::RemoveDir { source, path: path.into() } + } + + /// Returns the complementary error variant for [`std::fs::File::open`]. + pub fn open(source: io::Error, path: impl Into) -> Self { + FsPathError::Open { source, path: path.into() } + } +} + +type Result = std::result::Result; + +/// Wrapper for `std::fs::read_to_string` +pub fn read_to_string(path: impl AsRef) -> Result { + let path = path.as_ref(); + fs::read_to_string(path).map_err(|err| FsPathError::read(err, path)) +} + +/// Wrapper for `std::fs::write` +pub fn write(path: impl AsRef, contents: impl AsRef<[u8]>) -> Result<()> { + let path = path.as_ref(); + fs::write(path, contents).map_err(|err| FsPathError::write(err, path)) +} + +/// Wrapper for `std::fs::remove_dir_all` +pub fn remove_dir_all(path: impl AsRef) -> Result<()> { + let path = path.as_ref(); + fs::remove_dir_all(path).map_err(|err| FsPathError::remove_dir(err, path)) +} + +/// Wrapper for `std::fs::create_dir_all` +pub fn create_dir_all(path: impl AsRef) -> Result<()> { + let path = path.as_ref(); + fs::create_dir_all(path).map_err(|err| FsPathError::create_dir(err, path)) +} diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 59c12624794a..eb3bcf1942ee 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -30,6 +30,7 @@ mod compression; pub mod constants; pub mod contract; mod forkid; +pub mod fs; mod genesis; mod hardfork; mod header; diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 2b3d1631ab17..c121887924f0 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -64,4 +64,4 @@ futures = { workspace = true } jsonrpsee = { version = "0.18", features = ["client"] } assert_matches = "1.5.0" tempfile = "3.5.0" -reth-interfaces = { workspace = true, features = ["test-utils"] } \ No newline at end of file +reth-interfaces = { workspace = true, features = ["test-utils"] } diff --git a/crates/rpc/rpc/src/layers/jwt_secret.rs b/crates/rpc/rpc/src/layers/jwt_secret.rs index 589308e9d30c..0af6251438cb 100644 --- 
a/crates/rpc/rpc/src/layers/jwt_secret.rs +++ b/crates/rpc/rpc/src/layers/jwt_secret.rs @@ -1,9 +1,10 @@ use hex::encode as hex_encode; use jsonwebtoken::{decode, errors::ErrorKind, Algorithm, DecodingKey, Validation}; use rand::Rng; +use reth_primitives::{fs, fs::FsPathError}; use serde::{Deserialize, Serialize}; use std::{ - path::{Path, PathBuf}, + path::Path, time::{Duration, SystemTime, UNIX_EPOCH}, }; use thiserror::Error; @@ -26,10 +27,8 @@ pub enum JwtError { MissingOrInvalidAuthorizationHeader, #[error("JWT decoding error {0}")] JwtDecodingError(String), - #[error("IO error occurred while reading {path}: {err}")] - IORead { err: std::io::Error, path: PathBuf }, - #[error("IO error occurred while writing {path}: {err}")] - IOWrite { err: std::io::Error, path: PathBuf }, + #[error(transparent)] + JwtFsPathError(#[from] FsPathError), #[error("An I/O error occurred: {0}")] IOError(#[from] std::io::Error), } @@ -80,8 +79,7 @@ impl JwtSecret { /// I/O or secret validation errors might occur during read operations in the form of /// a [`JwtError`]. pub fn from_file(fpath: &Path) -> Result { - let hex = std::fs::read_to_string(fpath) - .map_err(|err| JwtError::IORead { err, path: fpath.to_path_buf() })?; + let hex = fs::read_to_string(fpath)?; let secret = JwtSecret::from_hex(hex)?; Ok(secret) } @@ -91,14 +89,13 @@ impl JwtSecret { pub fn try_create(fpath: &Path) -> Result { if let Some(dir) = fpath.parent() { // Create parent directory - std::fs::create_dir_all(dir)? + fs::create_dir_all(dir)? } let secret = JwtSecret::random(); let bytes = &secret.0; let hex = hex::encode(bytes); - std::fs::write(fpath, hex) - .map_err(|err| JwtError::IOWrite { err, path: fpath.to_path_buf() })?; + fs::write(fpath, hex)?; Ok(secret) } } @@ -204,6 +201,7 @@ mod tests { use assert_matches::assert_matches; use hex::encode as hex_encode; use jsonwebtoken::{encode, Algorithm, EncodingKey, Header}; + use reth_primitives::fs::FsPathError; use std::{ path::Path, time::{Duration, SystemTime, UNIX_EPOCH}, @@ -375,7 +373,9 @@ mod tests { fn provided_file_not_exists() { let fpath = Path::new("secret3.hex"); let result = JwtSecret::from_file(fpath); - assert_matches!(result, Err(JwtError::IORead {err: _, path}) if path == fpath.to_path_buf()); + assert_matches!(result, + Err(JwtError::JwtFsPathError(FsPathError::Read { source: _, path })) if path == fpath.to_path_buf() + ); assert!(!exists(fpath)); } @@ -383,7 +383,7 @@ mod tests { fn provided_file_is_a_directory() { let dir = tempdir().unwrap(); let result = JwtSecret::from_file(dir.path()); - assert_matches!(result, Err(JwtError::IORead {err: _, path}) if path == dir.into_path()); + assert_matches!(result, Err(JwtError::JwtFsPathError(FsPathError::Read { source: _, path })) if path == dir.into_path()); } fn hex(secret: &JwtSecret) -> String { diff --git a/crates/storage/db/benches/criterion.rs b/crates/storage/db/benches/criterion.rs index c1a078f2ceb4..0fb51a583762 100644 --- a/crates/storage/db/benches/criterion.rs +++ b/crates/storage/db/benches/criterion.rs @@ -130,7 +130,7 @@ where b.iter_with_setup( || { // Reset DB - let _ = std::fs::remove_dir_all(bench_db_path); + let _ = fs::remove_dir_all(bench_db_path); ( input.clone(), Arc::try_unwrap(create_test_rw_db_with_path(bench_db_path)).unwrap(), @@ -156,7 +156,7 @@ where b.iter_with_setup( || { // Reset DB - let _ = std::fs::remove_dir_all(bench_db_path); + let _ = fs::remove_dir_all(bench_db_path); (input, Arc::try_unwrap(create_test_rw_db_with_path(bench_db_path)).unwrap()) }, |(input, db)| { @@ -227,7 
+227,7 @@ where b.iter_with_setup( || { // Reset DB - let _ = std::fs::remove_dir_all(bench_db_path); + let _ = fs::remove_dir_all(bench_db_path); ( input.clone(), Arc::try_unwrap(create_test_rw_db_with_path(bench_db_path)).unwrap(), @@ -253,7 +253,7 @@ where b.iter_with_setup( || { // Reset DB - let _ = std::fs::remove_dir_all(bench_db_path); + let _ = fs::remove_dir_all(bench_db_path); (input, Arc::try_unwrap(create_test_rw_db_with_path(bench_db_path)).unwrap()) }, diff --git a/crates/storage/db/benches/hash_keys.rs b/crates/storage/db/benches/hash_keys.rs index a08547f94a63..49440da7cc40 100644 --- a/crates/storage/db/benches/hash_keys.rs +++ b/crates/storage/db/benches/hash_keys.rs @@ -84,7 +84,7 @@ where // Setup phase before each benchmark iteration let setup = || { // Reset DB - let _ = std::fs::remove_dir_all(bench_db_path); + let _ = fs::remove_dir_all(bench_db_path); let db = Arc::try_unwrap(create_test_rw_db_with_path(bench_db_path)).unwrap(); let mut unsorted_input = unsorted_input.clone(); diff --git a/crates/storage/db/benches/utils.rs b/crates/storage/db/benches/utils.rs index 362e48eda8d2..ea330295bdca 100644 --- a/crates/storage/db/benches/utils.rs +++ b/crates/storage/db/benches/utils.rs @@ -6,6 +6,7 @@ use reth_db::{ transaction::{DbTx, DbTxMut}, DatabaseEnv, }; +use reth_primitives::fs; use std::{path::Path, sync::Arc}; /// Path where the DB is initialized for benchmarks. @@ -59,7 +60,7 @@ where T::Value: Default + Clone, { // Reset DB - let _ = std::fs::remove_dir_all(bench_db_path); + let _ = fs::remove_dir_all(bench_db_path); let db = Arc::try_unwrap(create_test_rw_db_with_path(bench_db_path)).unwrap(); { From 419a35e9c97a6ec9fd7d2ab2dbe2193fa3a0b13d Mon Sep 17 00:00:00 2001 From: Lakshman Sankar Date: Sat, 1 Jul 2023 14:57:17 -0700 Subject: [PATCH 023/722] some docs nits on installation (#3528) --- book/installation/binaries.md | 6 +++--- book/run/mainnet.md | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/book/installation/binaries.md b/book/installation/binaries.md index 20348a0901a8..0adbf634e2fa 100644 --- a/book/installation/binaries.md +++ b/book/installation/binaries.md @@ -20,9 +20,9 @@ As an example, you could install the Linux x86_64 version like so: For example, to obtain the binary file for v0.0.1-alpha, you can run the following commands in a Linux terminal: ```bash cd ~ - curl -LO https://github.com/paradigmxyz/reth/releases/download/v0.0.1-alpha/reth-v0.0.1-alpha-x86_64-unknown-linux-gnu.tar.gz - tar -xvf reth-v0.0.1-alpha-x86_64-unknown-linux-gnu.tar.gz + curl -LO https://github.com/paradigmxyz/reth/releases/download/v0.1.0-alpha.1/reth-v0.1.0-alpha.1-x86_64-unknown-linux-gnu.tar.gz + tar -xvf reth-v0.1.0-alpha.1-x86_64-unknown-linux-gnu.tar.gz ``` 1. Test the binary with `./reth --version` (it should print the version). 2. (Optional) Move the `reth` binary to a location in your `PATH`, so the `reth` command can be called from anywhere. - For most Linux distros, you can move the binary to `/usr/local/bin`: `sudo cp ./reth /usr/local/bin`. \ No newline at end of file + For most Linux distros, you can move the binary to `/usr/local/bin`: `sudo cp ./reth /usr/local/bin`. 
diff --git a/book/run/mainnet.md b/book/run/mainnet.md index 7aca9aa4eaee..f140947a3d9f 100644 --- a/book/run/mainnet.md +++ b/book/run/mainnet.md @@ -50,7 +50,7 @@ Assuming you have done that, run: RUST_LOG=info lighthouse bn \ --checkpoint-sync-url https://mainnet.checkpoint.sigp.io \ --execution-endpoint http://localhost:8551 \ - --execution-jwt ~/.local/share/reth/mainnet/jwt.hex + --execution-jwt /path/to/secret ``` If you don't intend on running validators on your node you can add : From 1c796f24fc132c6dc937fddf845dfe72259f2350 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 2 Jul 2023 12:51:43 +0200 Subject: [PATCH 024/722] chore: rustfmt (#3532) --- crates/blockchain-tree/src/block_indices.rs | 4 ++-- crates/blockchain-tree/src/blockchain_tree.rs | 4 +++- crates/net/eth-wire/src/p2pstream.rs | 4 +++- crates/net/nat/src/lib.rs | 6 ++++-- crates/net/network/src/eth_requests.rs | 4 +++- crates/net/network/src/peers/manager.rs | 10 ++++++---- crates/rpc/rpc/src/eth/api/fees.rs | 3 ++- crates/storage/provider/src/chain.rs | 4 +++- crates/trie/src/walker.rs | 6 ++---- 9 files changed, 28 insertions(+), 17 deletions(-) diff --git a/crates/blockchain-tree/src/block_indices.rs b/crates/blockchain-tree/src/block_indices.rs index 54076974a7a6..b4c8e7548c4e 100644 --- a/crates/blockchain-tree/src/block_indices.rs +++ b/crates/blockchain-tree/src/block_indices.rs @@ -177,7 +177,7 @@ impl BlockIndices { } break }; - let Some(new_block_value) = new_hash else { + let Some(new_block_value) = new_hash else { // Old canonical chain had more block than new chain. // remove all present block. // this is mostly not going to happen as reorg should make new chain in Tree. @@ -185,7 +185,7 @@ impl BlockIndices { removed.push(rem); old_hash = old_hashes.next(); } - break; + break }; // compare old and new canonical block number match new_block_value.0.cmp(&old_block_value.0) { diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 31c1e4c56898..70cd1cb22161 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -903,7 +903,9 @@ impl BlockchainTree let Some(chain_id) = self.block_indices.get_blocks_chain_id(block_hash) else { warn!(target: "blockchain_tree", ?block_hash, "Block hash not found in block indices"); // TODO: better error - return Err(BlockExecutionError::BlockHashNotFoundInChain { block_hash: *block_hash }.into()) + return Err( + BlockExecutionError::BlockHashNotFoundInChain { block_hash: *block_hash }.into() + ) }; let chain = self.chains.remove(&chain_id).expect("To be present"); diff --git a/crates/net/eth-wire/src/p2pstream.rs b/crates/net/eth-wire/src/p2pstream.rs index 55b70877f51c..bc1e34a48381 100644 --- a/crates/net/eth-wire/src/p2pstream.rs +++ b/crates/net/eth-wire/src/p2pstream.rs @@ -533,7 +533,9 @@ where match ready!(this.inner.as_mut().poll_flush(cx)) { Err(err) => return Poll::Ready(Err(err.into())), Ok(()) => { - let Some(message) = this.outgoing_messages.pop_front() else { return Poll::Ready(Ok(())) }; + let Some(message) = this.outgoing_messages.pop_front() else { + return Poll::Ready(Ok(())) + }; if let Err(err) = this.inner.as_mut().start_send(message) { return Poll::Ready(Err(err.into())) } diff --git a/crates/net/nat/src/lib.rs b/crates/net/nat/src/lib.rs index 1822dc7c7322..06e2d4daa043 100644 --- a/crates/net/nat/src/lib.rs +++ b/crates/net/nat/src/lib.rs @@ -92,9 +92,11 @@ impl FromStr for NatResolver { "none" => NatResolver::None, "publicip" | 
"public-ip" => NatResolver::PublicIp, s => { - let Some(ip) = s.strip_prefix("extip:") else { return Err(ParseNatResolverError::UnknownVariant(format!( + let Some(ip) = s.strip_prefix("extip:") else { + return Err(ParseNatResolverError::UnknownVariant(format!( "Unknown Nat Resolver: {s}" - ))) }; + ))) + }; NatResolver::ExternalIp(ip.parse::()?) } }; diff --git a/crates/net/network/src/eth_requests.rs b/crates/net/network/src/eth_requests.rs index 09fc31129c31..7cb4db72b37b 100644 --- a/crates/net/network/src/eth_requests.rs +++ b/crates/net/network/src/eth_requests.rs @@ -81,7 +81,9 @@ where let mut block: BlockHashOrNumber = match start_block { BlockHashOrNumber::Hash(start) => start.into(), BlockHashOrNumber::Number(num) => { - let Some(hash) = self.client.block_hash(num).unwrap_or_default() else { return headers }; + let Some(hash) = self.client.block_hash(num).unwrap_or_default() else { + return headers + }; hash.into() } }; diff --git a/crates/net/network/src/peers/manager.rs b/crates/net/network/src/peers/manager.rs index 0b6bb854b819..63a89a24682c 100644 --- a/crates/net/network/src/peers/manager.rs +++ b/crates/net/network/src/peers/manager.rs @@ -1139,9 +1139,7 @@ impl PeersConfig { self, optional_file: Option>, ) -> Result { - let Some(file_path) = optional_file else { - return Ok(self) - }; + let Some(file_path) = optional_file else { return Ok(self) }; let reader = match std::fs::File::open(file_path.as_ref()) { Ok(file) => io::BufReader::new(file), Err(e) if e.kind() == ErrorKind::NotFound => return Ok(self), @@ -1875,7 +1873,11 @@ mod test { let mut peer_manager = PeersManager::new(config); peer_manager.on_incoming_session_established(given_peer_id, socket_addr); - let Some(PeerAction::DisconnectBannedIncoming { peer_id }) = peer_manager.queued_actions.pop_front() else { panic!() }; + let Some(PeerAction::DisconnectBannedIncoming { peer_id }) = + peer_manager.queued_actions.pop_front() + else { + panic!() + }; assert_eq!(peer_id, given_peer_id) } diff --git a/crates/rpc/rpc/src/eth/api/fees.rs b/crates/rpc/rpc/src/eth/api/fees.rs index 5122fb0627fb..bdd30a0ac3fb 100644 --- a/crates/rpc/rpc/src/eth/api/fees.rs +++ b/crates/rpc/rpc/src/eth/api/fees.rs @@ -64,7 +64,8 @@ where } let Some(end_block) = self.provider().block_number_for_id(newest_block.into())? else { - return Err(EthApiError::UnknownBlockNumber) }; + return Err(EthApiError::UnknownBlockNumber) + }; // Check that we would not be querying outside of genesis if end_block < block_count { diff --git a/crates/storage/provider/src/chain.rs b/crates/storage/provider/src/chain.rs index 605caadedd90..02ca47be6809 100644 --- a/crates/storage/provider/src/chain.rs +++ b/crates/storage/provider/src/chain.rs @@ -192,7 +192,9 @@ impl Chain { let chain_tip = *self.blocks.last_entry().expect("chain is never empty").key(); let block_number = match split_at { SplitAt::Hash(block_hash) => { - let Some(block_number) = self.block_number(block_hash) else { return ChainSplit::NoSplitPending(self)}; + let Some(block_number) = self.block_number(block_hash) else { + return ChainSplit::NoSplitPending(self) + }; // If block number is same as tip whole chain is becoming canonical. if block_number == chain_tip { return ChainSplit::NoSplitCanonical(self) diff --git a/crates/trie/src/walker.rs b/crates/trie/src/walker.rs index b8290109c444..b876f0d93c1e 100644 --- a/crates/trie/src/walker.rs +++ b/crates/trie/src/walker.rs @@ -146,7 +146,7 @@ impl<'a, K: Key + From>, C: TrieCursor> TrieWalker<'a, K, C> { let Some((key, node)) = self.node(false)? 
else { // If no next node is found, clear the stack. self.stack.clear(); - return Ok(()); + return Ok(()) }; // Overwrite the root node's first nibble @@ -178,9 +178,7 @@ impl<'a, K: Key + From>, C: TrieCursor> TrieWalker<'a, K, C> { &mut self, allow_root_to_child_nibble: bool, ) -> Result<(), DatabaseError> { - let Some(subnode) = self.stack.last_mut() else { - return Ok(()); - }; + let Some(subnode) = self.stack.last_mut() else { return Ok(()) }; // Check if the walker needs to backtrack to the previous level in the trie during its // traversal. From 7c3c8ab97713a1d8bd9d856f3d730e48cf82f610 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 2 Jul 2023 12:52:02 +0200 Subject: [PATCH 025/722] docs: add additional context to internal tracing error (#3531) --- crates/rpc/rpc/src/eth/error.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index f7bf25700753..1818f4aea87c 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -68,6 +68,9 @@ pub enum EthApiError { #[error("invalid reward percentiles")] InvalidRewardPercentiles, /// Error thrown when a spawned tracing task failed to deliver an anticipated response. + /// + /// This only happens if the tracing task panics and is aborted before it can return a response + /// back to the request handler. #[error("internal error while tracing")] InternalTracingError, /// Error thrown when a spawned blocking task failed to deliver an anticipated response. From 1e3092b770d20101e3ba1f5e1f6091c86227ea76 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 2 Jul 2023 12:57:33 +0200 Subject: [PATCH 026/722] fix: precompile condition (#3526) --- .../revm/revm-inspectors/src/tracing/mod.rs | 34 +++++++++++++------ 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/crates/revm/revm-inspectors/src/tracing/mod.rs b/crates/revm/revm-inspectors/src/tracing/mod.rs index 51bbc46f7ae5..0dfee063a492 100644 --- a/crates/revm/revm-inspectors/src/tracing/mod.rs +++ b/crates/revm/revm-inspectors/src/tracing/mod.rs @@ -85,6 +85,29 @@ impl TracingInspector { GethTraceBuilder::new(self.traces.arena, self.config) } + /// Returns true if we're no longer in the context of the root call. + fn is_deep(&self) -> bool { + // the root call will always be the first entry in the trace stack + !self.trace_stack.is_empty() + } + + /// Returns true if this a call to a precompile contract. + /// + /// Returns true if the `to` address is a precompile contract and the value is zero. + #[inline] + fn is_precompile_call( + &self, + data: &EVMData<'_, DB>, + to: &Address, + value: U256, + ) -> bool { + if data.precompiles.contains(to) { + // only if this is _not_ the root call + return self.is_deep() && value == U256::ZERO + } + false + } + /// Returns the currently active call trace. /// /// This will be the last call trace pushed to the stack: the call we entered most recently. @@ -387,7 +410,7 @@ where // if calls to precompiles should be excluded, check whether this is a call to a precompile let maybe_precompile = - self.config.exclude_precompile_calls.then(|| is_precompile_call(data, &to, value)); + self.config.exclude_precompile_calls.then(|| self.is_precompile_call(data, &to, value)); self.start_trace_on_call( data.journaled_state.depth() as usize, @@ -486,12 +509,3 @@ struct StackStep { trace_idx: usize, step_idx: usize, } - -/// Returns true if this a call to a precompile contract with `depth > 0 && value == 0`. 
-#[inline] -fn is_precompile_call(data: &EVMData<'_, DB>, to: &Address, value: U256) -> bool { - if data.precompiles.contains(to) { - return data.journaled_state.depth() > 0 && value == U256::ZERO - } - false -} From 9fc950697d4d5a1f19f3aeb9c0d0097cc97c8bac Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 2 Jul 2023 12:58:07 +0200 Subject: [PATCH 027/722] chore: add TaskManager must_use annotation (#3525) --- crates/tasks/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/tasks/src/lib.rs b/crates/tasks/src/lib.rs index 2d045906fa58..1d27e1477db6 100644 --- a/crates/tasks/src/lib.rs +++ b/crates/tasks/src/lib.rs @@ -138,6 +138,7 @@ impl TaskSpawner for TokioTaskExecutor { /// diagnostic purposes, since tokio task essentially fail silently. Therefore, this type is a /// Stream that yields the name of panicked task, See [`TaskExecutor::spawn_critical`]. In order to /// execute Tasks use the [`TaskExecutor`] type [`TaskManager::executor`]. +#[must_use = "TaskManager must be polled to monitor critical tasks"] pub struct TaskManager { /// Handle to the tokio runtime this task manager is associated with. /// From d14f995e1a94c0e205a409610d84ddcb068cfb65 Mon Sep 17 00:00:00 2001 From: Bjerg Date: Sun, 2 Jul 2023 12:46:16 +0200 Subject: [PATCH 028/722] test: improve slow tests (#3487) Co-authored-by: Matthias Seitz --- crates/net/eth-wire/src/types/blocks.rs | 8 +++++++- crates/net/eth-wire/src/types/receipts.rs | 8 +++++++- crates/primitives/src/block.rs | 18 ++++++++++++++++++ crates/primitives/src/log.rs | 6 ++++++ crates/primitives/src/receipt.rs | 6 ++++++ .../primitives/src/transaction/access_list.rs | 19 ++++++++++++++++--- crates/trie/src/trie.rs | 5 +++-- 7 files changed, 63 insertions(+), 7 deletions(-) diff --git a/crates/net/eth-wire/src/types/blocks.rs b/crates/net/eth-wire/src/types/blocks.rs index c0db52f2d530..47777cd71b12 100644 --- a/crates/net/eth-wire/src/types/blocks.rs +++ b/crates/net/eth-wire/src/types/blocks.rs @@ -68,11 +68,17 @@ impl From> for GetBlockBodies { /// The response to [`GetBlockBodies`], containing the block bodies that the peer knows about if /// any were found. -#[derive_arbitrary(rlp, 1)] +#[derive_arbitrary(rlp, 16)] #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct BlockBodies( /// The requested block bodies, each of which should correspond to a hash in the request. + #[cfg_attr( + any(test, feature = "arbitrary"), + proptest( + strategy = "proptest::collection::vec(proptest::arbitrary::any::(), 0..=20)" + ) + )] pub Vec, ); diff --git a/crates/net/eth-wire/src/types/receipts.rs b/crates/net/eth-wire/src/types/receipts.rs index 87190aa6882b..f1bb3bc1209f 100644 --- a/crates/net/eth-wire/src/types/receipts.rs +++ b/crates/net/eth-wire/src/types/receipts.rs @@ -17,11 +17,17 @@ pub struct GetReceipts( /// The response to [`GetReceipts`], containing receipt lists that correspond to each block /// requested. -#[derive_arbitrary(rlp, 1)] +#[derive_arbitrary(rlp)] #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct Receipts( /// Each receipt hash should correspond to a block hash in the request. 
+ #[cfg_attr( + any(test, feature = "arbitrary"), + proptest( + strategy = "proptest::collection::vec(proptest::collection::vec(proptest::arbitrary::any::(), 0..=50), 0..=5)" + ) + )] pub Vec>, ); diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index f5a2b10b20ec..22bf8e71a1be 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -739,10 +739,28 @@ impl From<(BlockHash, BlockNumber)> for BlockNumHash { #[rlp(trailing)] pub struct BlockBody { /// Transactions in the block + #[cfg_attr( + any(test, feature = "arbitrary"), + proptest( + strategy = "proptest::collection::vec(proptest::arbitrary::any::(), 0..=100)" + ) + )] pub transactions: Vec, /// Uncle headers for the given block + #[cfg_attr( + any(test, feature = "arbitrary"), + proptest( + strategy = "proptest::collection::vec(proptest::arbitrary::any::
(), 0..=2)" + ) + )] pub ommers: Vec
, /// Withdrawals in the block. + #[cfg_attr( + any(test, feature = "arbitrary"), + proptest( + strategy = "proptest::option::of(proptest::collection::vec(proptest::arbitrary::any::(), 0..=16))" + ) + )] pub withdrawals: Option>, } diff --git a/crates/primitives/src/log.rs b/crates/primitives/src/log.rs index 2521428fd752..3b72967b686b 100644 --- a/crates/primitives/src/log.rs +++ b/crates/primitives/src/log.rs @@ -9,6 +9,12 @@ pub struct Log { /// Contract that emitted this log. pub address: Address, /// Topics of the log. The number of logs depend on what `LOG` opcode is used. + #[cfg_attr( + any(test, feature = "arbitrary"), + proptest( + strategy = "proptest::collection::vec(proptest::arbitrary::any::(), 0..=5)" + ) + )] pub topics: Vec, /// Arbitrary length data. pub data: Bytes, diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 75f8e6eb44e8..15a5a2308dfb 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -21,6 +21,12 @@ pub struct Receipt { /// Gas used pub cumulative_gas_used: u64, /// Log send from contracts. + #[cfg_attr( + any(test, feature = "arbitrary"), + proptest( + strategy = "proptest::collection::vec(proptest::arbitrary::any::(), 0..=20)" + ) + )] pub logs: Vec, } diff --git a/crates/primitives/src/transaction/access_list.rs b/crates/primitives/src/transaction/access_list.rs index 0964e0be4588..eaa60b2603f1 100644 --- a/crates/primitives/src/transaction/access_list.rs +++ b/crates/primitives/src/transaction/access_list.rs @@ -1,8 +1,7 @@ use crate::{Address, H256}; -use revm_primitives::U256; - use reth_codecs::{main_codec, Compact}; use reth_rlp::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper}; +use revm_primitives::U256; use serde::{Deserialize, Serialize}; /// A list of addresses and storage keys that the transaction plans to access. @@ -14,13 +13,27 @@ pub struct AccessListItem { /// Account addresses that would be loaded at the start of execution pub address: Address, /// Keys of storage that would be loaded at the start of execution + #[cfg_attr( + any(test, feature = "arbitrary"), + proptest( + strategy = "proptest::collection::vec(proptest::arbitrary::any::(), 0..=20)" + ) + )] pub storage_keys: Vec, } /// AccessList as defined in EIP-2930 #[main_codec(rlp)] #[derive(Clone, Debug, PartialEq, Eq, Hash, Default, RlpDecodableWrapper, RlpEncodableWrapper)] -pub struct AccessList(pub Vec); +pub struct AccessList( + #[cfg_attr( + any(test, feature = "arbitrary"), + proptest( + strategy = "proptest::collection::vec(proptest::arbitrary::any::(), 0..=20)" + ) + )] + pub Vec, +); impl AccessList { /// Converts the list into a vec, expected by revm diff --git a/crates/trie/src/trie.rs b/crates/trie/src/trie.rs index 9ec5ff4cfde7..5359f0949709 100644 --- a/crates/trie/src/trie.rs +++ b/crates/trie/src/trie.rs @@ -1212,12 +1212,13 @@ mod tests { assert_trie_updates(&account_updates); } - // TODO: limit the thumber of test cases? proptest! 
{ + #![proptest_config(ProptestConfig { + cases: 128, ..ProptestConfig::default() + })] #[test] fn fuzz_state_root_incremental(account_changes: [BTreeMap; 5]) { tokio::runtime::Runtime::new().unwrap().block_on(async { - let db = create_test_rw_db(); let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone()); let tx = factory.provider_rw().unwrap(); From e0d5735672af43e10927608bd0ce3c006043940d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 2 Jul 2023 13:55:27 +0200 Subject: [PATCH 029/722] feat: add another distance check (#3501) --- crates/blockchain-tree/src/blockchain_tree.rs | 2 +- crates/blockchain-tree/src/shareable.rs | 8 ++++++++ crates/consensus/beacon/src/engine/mod.rs | 9 +++++++++ crates/interfaces/src/blockchain_tree/mod.rs | 17 ++++++++++++++++- crates/storage/provider/src/providers/mod.rs | 8 ++++++++ 5 files changed, 42 insertions(+), 2 deletions(-) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 70cd1cb22161..ce10bbd90761 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -592,7 +592,7 @@ impl BlockchainTree } /// Checks the block buffer for the given block. - pub fn get_buffered_block(&mut self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> { + pub fn get_buffered_block(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> { self.buffered_blocks.block_by_hash(hash) } diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index d23dff99544e..ef0ea0323df3 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -109,6 +109,14 @@ impl BlockchainTreeViewer self.tree.read().block_by_hash(block_hash).cloned() } + fn buffered_block_by_hash(&self, block_hash: BlockHash) -> Option { + self.tree.read().get_buffered_block(&block_hash).map(|b| b.block.clone()) + } + + fn buffered_header_by_hash(&self, block_hash: BlockHash) -> Option { + self.tree.read().get_buffered_block(&block_hash).map(|b| b.header.clone()) + } + fn canonical_blocks(&self) -> BTreeMap { trace!(target: "blockchain_tree", "Returning canonical blocks in tree"); self.tree.read().block_indices().canonical_chain().inner().clone() diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index d815d91b319d..57028c941044 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1082,6 +1082,15 @@ where canonical_tip_num, downloaded_num_hash.number, ); + } else if let Some(buffered_finalized) = + self.blockchain.buffered_header_by_hash(state.finalized_block_hash) + { + // if we have buffered the finalized block, we should check how far + // we're off + requires_pipeline = self.exceeds_pipeline_run_threshold( + canonical_tip_num, + buffered_finalized.number, + ); } } diff --git a/crates/interfaces/src/blockchain_tree/mod.rs b/crates/interfaces/src/blockchain_tree/mod.rs index 8b1eda0a3a61..63f628c49df7 100644 --- a/crates/interfaces/src/blockchain_tree/mod.rs +++ b/crates/interfaces/src/blockchain_tree/mod.rs @@ -172,9 +172,24 @@ pub trait BlockchainTreeViewer: Send + Sync { /// Returns the block with matching hash from the tree, if it exists. /// - /// Caution: This will not return blocks from the canonical chain. + /// Caution: This will not return blocks from the canonical chain or buffered blocks that are + /// disconnected from the canonical chain. 
fn block_by_hash(&self, hash: BlockHash) -> Option; + /// Returns the _buffered_ (disconnected) block with matching hash from the internal buffer if + /// it exists. + /// + /// Caution: Unlike [Self::block_by_hash] this will only return blocks that are currently + /// disconnected from the canonical chain. + fn buffered_block_by_hash(&self, block_hash: BlockHash) -> Option; + + /// Returns the _buffered_ (disconnected) header with matching hash from the internal buffer if + /// it exists. + /// + /// Caution: Unlike [Self::block_by_hash] this will only return headers that are currently + /// disconnected from the canonical chain. + fn buffered_header_by_hash(&self, block_hash: BlockHash) -> Option; + /// Returns true if the tree contains the block with matching hash. fn contains(&self, hash: BlockHash) -> bool { self.block_by_hash(hash).is_some() diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 295d9fcc7f3d..df1c04b6633e 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -608,6 +608,14 @@ where self.tree.block_by_hash(block_hash) } + fn buffered_block_by_hash(&self, block_hash: BlockHash) -> Option { + self.tree.buffered_block_by_hash(block_hash) + } + + fn buffered_header_by_hash(&self, block_hash: BlockHash) -> Option { + self.tree.buffered_header_by_hash(block_hash) + } + fn canonical_blocks(&self) -> BTreeMap { self.tree.canonical_blocks() } From 951fd0ae0cf644664a76e34bf9b28f288aad978f Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Sun, 2 Jul 2023 14:21:06 +0100 Subject: [PATCH 030/722] feat(stages, tree): update sync metrics from blockchain tree (#3507) Co-authored-by: Georgios Konstantopoulos --- Cargo.lock | 1 + bin/reth/src/node/mod.rs | 25 +++++++++++-------- crates/blockchain-tree/Cargo.toml | 1 + crates/blockchain-tree/src/blockchain_tree.rs | 21 +++++++++++++--- crates/blockchain-tree/src/shareable.rs | 12 ++++----- crates/stages/src/metrics/listener.rs | 17 ++++++++++--- crates/stages/src/metrics/sync_metrics.rs | 9 +++++++ crates/stages/src/pipeline/builder.rs | 2 +- 8 files changed, 63 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dc6fb0b6568d..04d8ee92c41b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5069,6 +5069,7 @@ dependencies = [ "reth-metrics", "reth-primitives", "reth-provider", + "reth-stages", "tokio", "tracing", ] diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 7a67b006e4f0..905bf32d0449 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -183,6 +183,11 @@ impl Command { self.init_trusted_nodes(&mut config); + debug!(target: "reth::cli", "Spawning metrics listener task"); + let (metrics_tx, metrics_rx) = unbounded_channel(); + let metrics_listener = MetricsListener::new(metrics_rx); + ctx.task_executor.spawn_critical("metrics listener task", metrics_listener); + // configure blockchain tree let tree_externals = TreeExternals::new( db.clone(), @@ -195,11 +200,14 @@ impl Command { // depth at least N blocks must be sent at once. let (canon_state_notification_sender, _receiver) = tokio::sync::broadcast::channel(tree_config.max_reorg_depth() as usize * 2); - let blockchain_tree = ShareableBlockchainTree::new(BlockchainTree::new( - tree_externals, - canon_state_notification_sender.clone(), - tree_config, - )?); + let blockchain_tree = ShareableBlockchainTree::new( + BlockchainTree::new( + tree_externals, + canon_state_notification_sender.clone(), + tree_config, + )? 
+ .with_sync_metrics_tx(metrics_tx.clone()), + ); // setup the blockchain provider let factory = ProviderFactory::new(Arc::clone(&db), Arc::clone(&self.chain)); @@ -278,11 +286,6 @@ impl Command { debug!(target: "reth::cli", "Spawning payload builder service"); ctx.task_executor.spawn_critical("payload builder service", payload_service); - debug!(target: "reth::cli", "Spawning metrics listener task"); - let (metrics_tx, metrics_rx) = unbounded_channel(); - let metrics_listener = MetricsListener::new(metrics_rx); - ctx.task_executor.spawn_critical("metrics listener task", metrics_listener); - let max_block = if let Some(block) = self.debug.max_block { Some(block) } else if let Some(tip) = self.debug.tip { @@ -687,7 +690,7 @@ impl Command { if continuous { HeaderSyncMode::Continuous } else { HeaderSyncMode::Tip(tip_rx) }; let pipeline = builder .with_tip_sender(tip_tx) - .with_metric_events(metrics_tx) + .with_metrics_tx(metrics_tx) .add_stages( DefaultStages::new( header_mode, diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index 0a4bf9559037..f7c0316480ed 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -20,6 +20,7 @@ reth-interfaces = { workspace = true } reth-db = { path = "../storage/db" } reth-metrics = { workspace = true, features = ["common"] } reth-provider = { workspace = true } +reth-stages = { path = "../stages" } # common parking_lot = { version = "0.12" } diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index ce10bbd90761..22d3e32e6239 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -26,6 +26,7 @@ use reth_provider::{ CanonStateNotificationSender, CanonStateNotifications, Chain, DatabaseProvider, DisplayBlocksChain, ExecutorFactory, HeaderProvider, }; +use reth_stages::{MetricEvent, MetricEventsSender}; use std::{ collections::{BTreeMap, HashMap}, sync::Arc, @@ -90,6 +91,8 @@ pub struct BlockchainTree { canon_state_notification_sender: CanonStateNotificationSender, /// Metrics for the blockchain tree. metrics: TreeMetrics, + /// Metrics for sync stages. + sync_metrics_tx: Option, } /// A container that wraps chains and block indices to allow searching for block hashes across all @@ -141,9 +144,16 @@ impl BlockchainTree config, canon_state_notification_sender, metrics: Default::default(), + sync_metrics_tx: None, }) } + /// Set the sync metric events sender. + pub fn with_sync_metrics_tx(mut self, metrics_tx: MetricEventsSender) -> Self { + self.sync_metrics_tx = Some(metrics_tx); + self + } + /// Check if then block is known to blockchain tree or database and return its status. 
/// /// Function will check: @@ -1074,10 +1084,15 @@ impl BlockchainTree } } - /// Update blockchain tree metrics - pub(crate) fn update_tree_metrics(&self) { + /// Update blockchain tree and sync metrics + pub(crate) fn update_metrics(&mut self) { + let height = self.canonical_chain().tip().number; + self.metrics.sidechains.set(self.chains.len() as f64); - self.metrics.canonical_chain_height.set(self.canonical_chain().tip().number as f64); + self.metrics.canonical_chain_height.set(height as f64); + if let Some(metrics_tx) = self.sync_metrics_tx.as_mut() { + let _ = metrics_tx.send(MetricEvent::SyncHeight { height }); + } } } diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index ef0ea0323df3..663c2e03bae9 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -44,7 +44,7 @@ impl BlockchainTreeEngine fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { let mut tree = self.tree.write(); let res = tree.buffer_block(block); - tree.update_tree_metrics(); + tree.update_metrics(); res } @@ -55,7 +55,7 @@ impl BlockchainTreeEngine trace!(target: "blockchain_tree", hash=?block.hash, number=block.number, parent_hash=?block.parent_hash, "Inserting block"); let mut tree = self.tree.write(); let res = tree.insert_block(block); - tree.update_tree_metrics(); + tree.update_metrics(); res } @@ -63,14 +63,14 @@ impl BlockchainTreeEngine trace!(target: "blockchain_tree", ?finalized_block, "Finalizing block"); let mut tree = self.tree.write(); tree.finalize_block(finalized_block); - tree.update_tree_metrics(); + tree.update_metrics(); } fn restore_canonical_hashes(&self, last_finalized_block: BlockNumber) -> Result<(), Error> { trace!(target: "blockchain_tree", ?last_finalized_block, "Restoring canonical hashes for last finalized block"); let mut tree = self.tree.write(); let res = tree.restore_canonical_hashes(last_finalized_block); - tree.update_tree_metrics(); + tree.update_metrics(); res } @@ -78,7 +78,7 @@ impl BlockchainTreeEngine trace!(target: "blockchain_tree", ?block_hash, "Making block canonical"); let mut tree = self.tree.write(); let res = tree.make_canonical(block_hash); - tree.update_tree_metrics(); + tree.update_metrics(); res } @@ -86,7 +86,7 @@ impl BlockchainTreeEngine trace!(target: "blockchain_tree", ?unwind_to, "Unwinding to block number"); let mut tree = self.tree.write(); let res = tree.unwind(unwind_to); - tree.update_tree_metrics(); + tree.update_metrics(); res } } diff --git a/crates/stages/src/metrics/listener.rs b/crates/stages/src/metrics/listener.rs index f6672a4e68dc..d05560c76f5e 100644 --- a/crates/stages/src/metrics/listener.rs +++ b/crates/stages/src/metrics/listener.rs @@ -1,4 +1,4 @@ -use crate::metrics::{StageMetrics, SyncMetrics}; +use crate::metrics::SyncMetrics; use reth_primitives::{ stage::{StageCheckpoint, StageId}, BlockNumber, @@ -16,6 +16,11 @@ pub type MetricEventsSender = UnboundedSender; /// Collection of metric events. #[derive(Clone, Copy, Debug)] pub enum MetricEvent { + /// Sync reached new height. All stage checkpoints are updated. + SyncHeight { + /// Maximum height measured in block number that sync reached. + height: BlockNumber, + }, /// Stage reached new checkpoint. StageCheckpoint { /// Stage ID. 
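Between the two hunks above, it helps to see the consumer side: `MetricEventsSender` is the sending half of a tokio unbounded channel, and the listener drains it and updates gauges. Below is a minimal sketch of that loop, assuming only the `SyncHeight` variant; reth's actual `MetricsListener` is spawned as a future and also handles `StageCheckpoint`:

```rust
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};

/// Stand-in for the event type above, reduced to the variant we exercise.
enum MetricEvent {
    SyncHeight { height: u64 },
}

/// Drain events until every sender is dropped.
async fn run_listener(mut events: UnboundedReceiver<MetricEvent>) {
    while let Some(event) = events.recv().await {
        match event {
            MetricEvent::SyncHeight { height } => {
                // reth sets the checkpoint gauge of every stage to this height.
                println!("sync reached height {height}");
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx): (UnboundedSender<MetricEvent>, _) = unbounded_channel();
    tx.send(MetricEvent::SyncHeight { height: 17_000_000 }).unwrap();
    drop(tx); // close the channel so the listener terminates
    run_listener(rx).await;
}
```

This is also why the node diff above moves the listener spawn before the blockchain tree is built: the tree now needs a clone of the sender.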
@@ -44,10 +49,14 @@ impl MetricsListener { fn handle_event(&mut self, event: MetricEvent) { match event { + MetricEvent::SyncHeight { height } => { + for stage_id in StageId::ALL { + let stage_metrics = self.sync_metrics.get_stage_metrics(stage_id); + stage_metrics.checkpoint.set(height as f64); + } + } MetricEvent::StageCheckpoint { stage_id, checkpoint, max_block_number } => { - let stage_metrics = self.sync_metrics.stages.entry(stage_id).or_insert_with(|| { - StageMetrics::new_with_labels(&[("stage", stage_id.to_string())]) - }); + let stage_metrics = self.sync_metrics.get_stage_metrics(stage_id); stage_metrics.checkpoint.set(checkpoint.block_number as f64); diff --git a/crates/stages/src/metrics/sync_metrics.rs b/crates/stages/src/metrics/sync_metrics.rs index 859a7e6d778a..ba440cb2a3b6 100644 --- a/crates/stages/src/metrics/sync_metrics.rs +++ b/crates/stages/src/metrics/sync_metrics.rs @@ -10,6 +10,15 @@ pub(crate) struct SyncMetrics { pub(crate) stages: HashMap, } +impl SyncMetrics { + /// Returns existing or initializes a new instance of [StageMetrics] for the provided [StageId]. + pub(crate) fn get_stage_metrics(&mut self, stage_id: StageId) -> &mut StageMetrics { + self.stages + .entry(stage_id) + .or_insert_with(|| StageMetrics::new_with_labels(&[("stage", stage_id.to_string())])) + } +} + #[derive(Metrics)] #[metrics(scope = "sync")] pub(crate) struct StageMetrics { diff --git a/crates/stages/src/pipeline/builder.rs b/crates/stages/src/pipeline/builder.rs index 5e72da6b0301..7679361c839f 100644 --- a/crates/stages/src/pipeline/builder.rs +++ b/crates/stages/src/pipeline/builder.rs @@ -62,7 +62,7 @@ where } /// Set the metric events sender. - pub fn with_metric_events(mut self, metrics_tx: MetricEventsSender) -> Self { + pub fn with_metrics_tx(mut self, metrics_tx: MetricEventsSender) -> Self { self.metrics_tx = Some(metrics_tx); self } From 4f32f5627c5db42f2921dbe58c4cfc446955e15e Mon Sep 17 00:00:00 2001 From: Bjerg Date: Sun, 2 Jul 2023 17:06:52 +0200 Subject: [PATCH 031/722] fix: add block rewards to `trace_block` (#3491) Co-authored-by: Georgios Konstantopoulos --- Cargo.lock | 1 + bin/reth/src/args/rpc_server_args.rs | 5 +- crates/rpc/rpc-builder/src/lib.rs | 44 +++++++++--- crates/rpc/rpc/Cargo.toml | 1 + crates/rpc/rpc/src/trace.rs | 67 +++++++++++++++++-- crates/storage/provider/src/lib.rs | 11 +-- .../provider/src/providers/database/mod.rs | 14 +++- crates/storage/provider/src/providers/mod.rs | 20 ++++-- .../storage/provider/src/test_utils/noop.rs | 18 +++-- crates/storage/provider/src/traits/mod.rs | 3 + .../storage/provider/src/traits/receipts.rs | 2 +- crates/storage/provider/src/traits/spec.rs | 8 +++ 12 files changed, 160 insertions(+), 34 deletions(-) create mode 100644 crates/storage/provider/src/traits/spec.rs diff --git a/Cargo.lock b/Cargo.lock index 04d8ee92c41b..3e33d1e1b269 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5651,6 +5651,7 @@ dependencies = [ "jsonwebtoken", "pin-project", "rand 0.8.5", + "reth-consensus-common", "reth-interfaces", "reth-metrics", "reth-network-api", diff --git a/bin/reth/src/args/rpc_server_args.rs b/bin/reth/src/args/rpc_server_args.rs index bcf10cd037d0..9e7b890e0cad 100644 --- a/bin/reth/src/args/rpc_server_args.rs +++ b/bin/reth/src/args/rpc_server_args.rs @@ -8,7 +8,8 @@ use clap::{ use futures::TryFutureExt; use reth_network_api::{NetworkInfo, Peers}; use reth_provider::{ - BlockReaderIdExt, CanonStateSubscriptions, EvmEnvProvider, HeaderProvider, StateProviderFactory, + BlockReaderIdExt, CanonStateSubscriptions, 
ChainSpecProvider, EvmEnvProvider, HeaderProvider, + StateProviderFactory, }; use reth_rpc::{ eth::{ @@ -235,6 +236,7 @@ impl RpcServerArgs { + HeaderProvider + StateProviderFactory + EvmEnvProvider + + ChainSpecProvider + Clone + Unpin + 'static, @@ -295,6 +297,7 @@ impl RpcServerArgs { + HeaderProvider + StateProviderFactory + EvmEnvProvider + + ChainSpecProvider + Clone + Unpin + 'static, diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 38fbb76f4f51..9a15440239b9 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -31,13 +31,13 @@ //! //! ``` //! use reth_network_api::{NetworkInfo, Peers}; -//! use reth_provider::{BlockReaderIdExt, CanonStateSubscriptions, StateProviderFactory, EvmEnvProvider}; +//! use reth_provider::{BlockReaderIdExt, ChainSpecProvider, CanonStateSubscriptions, StateProviderFactory, EvmEnvProvider}; //! use reth_rpc_builder::{RethRpcModule, RpcModuleBuilder, RpcServerConfig, ServerBuilder, TransportRpcModuleConfig}; //! use reth_tasks::TokioTaskExecutor; //! use reth_transaction_pool::TransactionPool; //! pub async fn launch(provider: Provider, pool: Pool, network: Network, events: Events) //! where -//! Provider: BlockReaderIdExt + StateProviderFactory + EvmEnvProvider + Clone + Unpin + 'static, +//! Provider: BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + Clone + Unpin + 'static, //! Pool: TransactionPool + Clone + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, //! Events: CanonStateSubscriptions + Clone + 'static, @@ -64,7 +64,7 @@ //! ``` //! use tokio::try_join; //! use reth_network_api::{NetworkInfo, Peers}; -//! use reth_provider::{BlockReaderIdExt, CanonStateSubscriptions, StateProviderFactory, EvmEnvProvider}; +//! use reth_provider::{BlockReaderIdExt, ChainSpecProvider, CanonStateSubscriptions, StateProviderFactory, EvmEnvProvider}; //! use reth_rpc::JwtSecret; //! use reth_rpc_builder::{RethRpcModule, RpcModuleBuilder, RpcServerConfig, TransportRpcModuleConfig}; //! use reth_tasks::TokioTaskExecutor; @@ -73,7 +73,7 @@ //! use reth_rpc_builder::auth::AuthServerConfig; //! pub async fn launch(provider: Provider, pool: Pool, network: Network, events: Events, engine_api: EngineApi) //! where -//! Provider: BlockReaderIdExt + StateProviderFactory + EvmEnvProvider + Clone + Unpin + 'static, +//! Provider: BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + Clone + Unpin + 'static, //! Pool: TransactionPool + Clone + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, //! 
Events: CanonStateSubscriptions + Clone + 'static, @@ -113,7 +113,8 @@ use jsonrpsee::{ use reth_ipc::server::IpcServer; use reth_network_api::{NetworkInfo, Peers}; use reth_provider::{ - BlockReader, BlockReaderIdExt, CanonStateSubscriptions, EvmEnvProvider, StateProviderFactory, + BlockReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, EvmEnvProvider, + StateProviderFactory, }; use reth_rpc::{ eth::{ @@ -169,7 +170,13 @@ pub async fn launch( events: Events, ) -> Result where - Provider: BlockReaderIdExt + StateProviderFactory + EvmEnvProvider + Clone + Unpin + 'static, + Provider: BlockReaderIdExt + + StateProviderFactory + + EvmEnvProvider + + ChainSpecProvider + + Clone + + Unpin + + 'static, Pool: TransactionPool + Clone + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, @@ -268,7 +275,13 @@ impl impl RpcModuleBuilder where - Provider: BlockReaderIdExt + StateProviderFactory + EvmEnvProvider + Clone + Unpin + 'static, + Provider: BlockReaderIdExt + + StateProviderFactory + + EvmEnvProvider + + ChainSpecProvider + + Clone + + Unpin + + 'static, Pool: TransactionPool + Clone + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, @@ -485,8 +498,13 @@ impl RpcModuleSelection { config: RpcModuleConfig, ) -> RpcModule<()> where - Provider: - BlockReaderIdExt + StateProviderFactory + EvmEnvProvider + Clone + Unpin + 'static, + Provider: BlockReaderIdExt + + StateProviderFactory + + EvmEnvProvider + + ChainSpecProvider + + Clone + + Unpin + + 'static, Pool: TransactionPool + Clone + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, @@ -692,7 +710,13 @@ where impl RethModuleRegistry where - Provider: BlockReaderIdExt + StateProviderFactory + EvmEnvProvider + Clone + Unpin + 'static, + Provider: BlockReaderIdExt + + StateProviderFactory + + EvmEnvProvider + + ChainSpecProvider + + Clone + + Unpin + + 'static, Pool: TransactionPool + Clone + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index c121887924f0..c3003e496aec 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -23,6 +23,7 @@ reth-rpc-engine-api = { path = "../rpc-engine-api" } reth-revm = { path = "../../revm" } reth-tasks = { workspace = true } reth-metrics = { workspace = true } +reth-consensus-common = { path = "../../consensus/common" } # eth revm = { workspace = true, features = [ diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index bfa45863c398..fa1372a505ed 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -11,8 +11,11 @@ use crate::{ }; use async_trait::async_trait; use jsonrpsee::core::RpcResult as Result; -use reth_primitives::{BlockId, BlockNumberOrTag, Bytes, H256}; -use reth_provider::{BlockReader, EvmEnvProvider, StateProviderBox, StateProviderFactory}; +use reth_consensus_common::calc::{base_block_reward, block_reward}; +use reth_primitives::{BlockId, BlockNumberOrTag, Bytes, SealedHeader, H256, U256}; +use reth_provider::{ + BlockReader, ChainSpecProvider, EvmEnvProvider, StateProviderBox, StateProviderFactory, +}; use reth_revm::{ database::{State, SubState}, env::tx_env_with_recovered, @@ -77,7 +80,7 @@ impl TraceApi { impl TraceApi where - Provider: BlockReader + StateProviderFactory + EvmEnvProvider + 'static, + Provider: BlockReader + StateProviderFactory + EvmEnvProvider + 
ChainSpecProvider + 'static, Eth: EthTransactions + 'static, { /// Executes the future on a new blocking task. @@ -396,7 +399,7 @@ where &self, block_id: BlockId, ) -> EthResult>> { - let traces = self + let mut traces: Option> = self .trace_block_with( block_id, TracingInspectorConfig::default_parity(), @@ -408,6 +411,43 @@ where ) .await? .map(|traces| traces.into_iter().flatten().collect()); + + // Add block reward traces + // TODO: We only really need the header and ommers here to determine the reward + if let (Some(block), Some(traces)) = + (self.inner.eth_api.block_by_id(block_id).await?, traces.as_mut()) + { + if let Some(header_td) = self.provider().header_td(&block.header.hash)? { + if let Some(base_block_reward) = base_block_reward( + self.provider().chain_spec().as_ref(), + block.header.number, + block.header.difficulty, + header_td, + ) { + traces.push(reward_trace( + &block.header, + RewardAction { + author: block.header.beneficiary, + reward_type: RewardType::Block, + value: U256::from(base_block_reward), + }, + )); + + if !block.ommers.is_empty() { + traces.push(reward_trace( + &block.header, + RewardAction { + author: block.header.beneficiary, + reward_type: RewardType::Uncle, + value: block_reward(base_block_reward, block.ommers.len()) - + U256::from(base_block_reward), + }, + )); + } + } + } + } + Ok(traces) } @@ -448,7 +488,7 @@ where #[async_trait] impl TraceApiServer for TraceApi where - Provider: BlockReader + StateProviderFactory + EvmEnvProvider + 'static, + Provider: BlockReader + StateProviderFactory + EvmEnvProvider + ChainSpecProvider + 'static, Eth: EthTransactions + 'static, { /// Executes the given call and returns a number of possible traces for it. @@ -581,3 +621,20 @@ fn tracing_config(trace_types: &HashSet) -> TracingInspectorConfig { .set_state_diffs(trace_types.contains(&TraceType::StateDiff)) .set_steps(trace_types.contains(&TraceType::VmTrace)) } + +/// Helper to construct a [`LocalizedTransactionTrace`] that describes a reward to the block +/// beneficiary. 
+fn reward_trace(header: &SealedHeader, reward: RewardAction) -> LocalizedTransactionTrace { + LocalizedTransactionTrace { + block_hash: Some(header.hash), + block_number: Some(header.number), + transaction_hash: None, + transaction_position: None, + trace: TransactionTrace { + trace_address: vec![], + subtraces: 0, + action: Action::Reward(reward), + result: None, + }, + } +} diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index 3d00040c0dec..a6a1f6bf2980 100644 --- a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -24,11 +24,12 @@ pub use traits::{ AccountExtReader, AccountReader, BlockExecutionWriter, BlockExecutor, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, BlockWriter, BlockchainTreePendingStateProvider, CanonChainTracker, CanonStateNotification, - CanonStateNotificationSender, CanonStateNotifications, CanonStateSubscriptions, EvmEnvProvider, - ExecutorFactory, HashingWriter, HeaderProvider, HistoryWriter, PostStateDataProvider, - ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StageCheckpointWriter, - StateProvider, StateProviderBox, StateProviderFactory, StateRootProvider, StorageReader, - TransactionsProvider, WithdrawalsProvider, + CanonStateNotificationSender, CanonStateNotifications, CanonStateSubscriptions, + ChainSpecProvider, EvmEnvProvider, ExecutorFactory, HashingWriter, HeaderProvider, + HistoryWriter, PostStateDataProvider, ReceiptProvider, ReceiptProviderIdExt, + StageCheckpointReader, StageCheckpointWriter, StateProvider, StateProviderBox, + StateProviderFactory, StateRootProvider, StorageReader, TransactionsProvider, + WithdrawalsProvider, }; /// Provider trait implementations. diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 5170f4b21dc5..e1ec92d5c08e 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -1,8 +1,9 @@ use crate::{ providers::state::{historical::HistoricalStateProvider, latest::LatestStateProvider}, traits::{BlockSource, ReceiptProvider}, - BlockHashReader, BlockNumReader, BlockReader, EvmEnvProvider, HeaderProvider, ProviderError, - StageCheckpointReader, StateProviderBox, TransactionsProvider, WithdrawalsProvider, + BlockHashReader, BlockNumReader, BlockReader, ChainSpecProvider, EvmEnvProvider, + HeaderProvider, ProviderError, StageCheckpointReader, StateProviderBox, TransactionsProvider, + WithdrawalsProvider, }; use reth_db::{database::Database, init_db, models::StoredBlockBodyIndices, DatabaseEnv}; use reth_interfaces::Result; @@ -343,6 +344,15 @@ impl EvmEnvProvider for ProviderFactory { } } +impl ChainSpecProvider for ProviderFactory +where + DB: Send + Sync, +{ + fn chain_spec(&self) -> Arc { + self.chain_spec.clone() + } +} + #[cfg(test)] mod tests { use super::ProviderFactory; diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index df1c04b6633e..7dfeae75f21d 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -1,9 +1,10 @@ use crate::{ BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockchainTreePendingStateProvider, CanonChainTracker, CanonStateNotifications, - CanonStateSubscriptions, EvmEnvProvider, HeaderProvider, PostStateDataProvider, ProviderError, - ReceiptProvider, ReceiptProviderIdExt, 
StageCheckpointReader, StateProviderBox, - StateProviderFactory, TransactionsProvider, WithdrawalsProvider, + CanonStateSubscriptions, ChainSpecProvider, EvmEnvProvider, HeaderProvider, + PostStateDataProvider, ProviderError, ReceiptProvider, ReceiptProviderIdExt, + StageCheckpointReader, StateProviderBox, StateProviderFactory, TransactionsProvider, + WithdrawalsProvider, }; use reth_db::{database::Database, models::StoredBlockBodyIndices}; use reth_interfaces::{ @@ -14,7 +15,7 @@ use reth_interfaces::{ use reth_primitives::{ stage::{StageCheckpoint, StageId}, Address, Block, BlockHash, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumber, - BlockNumberOrTag, BlockWithSenders, ChainInfo, Header, Receipt, SealedBlock, + BlockNumberOrTag, BlockWithSenders, ChainInfo, ChainSpec, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, H256, U256, }; @@ -26,6 +27,7 @@ pub use state::{ use std::{ collections::{BTreeMap, HashSet}, ops::RangeBounds, + sync::Arc, time::Instant, }; use tracing::trace; @@ -431,6 +433,16 @@ where } } +impl ChainSpecProvider for BlockchainProvider +where + DB: Send + Sync, + Tree: Send + Sync, +{ + fn chain_spec(&self) -> Arc { + self.database.chain_spec() + } +} + impl StateProviderFactory for BlockchainProvider where DB: Database, diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 31ece7b38b42..b330697b8f84 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -1,26 +1,32 @@ use crate::{ traits::{BlockSource, ReceiptProvider}, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, - EvmEnvProvider, HeaderProvider, PostState, ReceiptProviderIdExt, StageCheckpointReader, - StateProvider, StateProviderBox, StateProviderFactory, StateRootProvider, TransactionsProvider, - WithdrawalsProvider, + ChainSpecProvider, EvmEnvProvider, HeaderProvider, PostState, ReceiptProviderIdExt, + StageCheckpointReader, StateProvider, StateProviderBox, StateProviderFactory, + StateRootProvider, TransactionsProvider, WithdrawalsProvider, }; use reth_db::models::StoredBlockBodyIndices; use reth_interfaces::Result; use reth_primitives::{ stage::{StageCheckpoint, StageId}, Account, Address, Block, BlockHash, BlockHashOrNumber, BlockId, BlockNumber, Bytecode, Bytes, - ChainInfo, Header, Receipt, SealedBlock, SealedHeader, StorageKey, StorageValue, - TransactionMeta, TransactionSigned, TxHash, TxNumber, H256, KECCAK_EMPTY, U256, + ChainInfo, ChainSpec, Header, Receipt, SealedBlock, SealedHeader, StorageKey, StorageValue, + TransactionMeta, TransactionSigned, TxHash, TxNumber, H256, KECCAK_EMPTY, MAINNET, U256, }; use reth_revm_primitives::primitives::{BlockEnv, CfgEnv}; -use std::ops::RangeBounds; +use std::{ops::RangeBounds, sync::Arc}; /// Supports various api interfaces for testing purposes. 
#[derive(Debug, Clone, Default, Copy)] #[non_exhaustive] pub struct NoopProvider; +impl ChainSpecProvider for NoopProvider { + fn chain_spec(&self) -> Arc { + MAINNET.clone() + } +} + /// Noop implementation for testing purposes impl BlockHashReader for NoopProvider { fn block_hash(&self, _number: u64) -> Result> { diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index 503162758757..0411c995fcea 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -48,6 +48,9 @@ pub use chain::{ CanonStateSubscriptions, }; +mod spec; +pub use spec::ChainSpecProvider; + mod stage_checkpoint; pub use stage_checkpoint::{StageCheckpointReader, StageCheckpointWriter}; diff --git a/crates/storage/provider/src/traits/receipts.rs b/crates/storage/provider/src/traits/receipts.rs index 3166b620804a..7b3e4640fecb 100644 --- a/crates/storage/provider/src/traits/receipts.rs +++ b/crates/storage/provider/src/traits/receipts.rs @@ -3,7 +3,7 @@ use reth_primitives::{BlockHashOrNumber, BlockId, BlockNumberOrTag, Receipt, TxH use crate::BlockIdReader; -/// Client trait for fetching [Receipt] data . +/// Client trait for fetching [Receipt] data . #[auto_impl::auto_impl(&, Arc)] pub trait ReceiptProvider: Send + Sync { /// Get receipt by transaction number diff --git a/crates/storage/provider/src/traits/spec.rs b/crates/storage/provider/src/traits/spec.rs new file mode 100644 index 000000000000..47d95fbd586b --- /dev/null +++ b/crates/storage/provider/src/traits/spec.rs @@ -0,0 +1,8 @@ +use reth_primitives::ChainSpec; +use std::sync::Arc; + +/// A trait for reading the current chainspec. +pub trait ChainSpecProvider: Send + Sync { + /// Get an [`Arc`] to the chainspec. + fn chain_spec(&self) -> Arc; +} From f2f3425f1c33a260d6e32090d52074c614dd14a0 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Mon, 3 Jul 2023 14:26:17 +0300 Subject: [PATCH 032/722] chore: fix clippy (#3540) --- .github/workflows/ci.yml | 2 +- Cargo.lock | 2 +- crates/blockchain-tree/src/block_buffer.rs | 4 ++-- crates/blockchain-tree/src/blockchain_tree.rs | 4 ++-- crates/net/downloaders/src/bodies/bodies.rs | 2 +- .../revm/revm-inspectors/src/access_list.rs | 2 +- crates/revm/src/executor.rs | 2 +- crates/rpc/rpc/src/eth/api/fees.rs | 2 +- crates/rpc/rpc/src/eth/pubsub.rs | 2 +- crates/stages/src/stages/hashing_account.rs | 2 +- crates/stages/src/stages/hashing_storage.rs | 2 +- crates/storage/codecs/derive/src/lib.rs | 2 +- crates/storage/libmdbx-rs/Cargo.toml | 2 +- crates/storage/libmdbx-rs/src/cursor.rs | 4 ++-- crates/storage/libmdbx-rs/src/transaction.rs | 14 +++++-------- crates/storage/provider/src/chain.rs | 2 +- .../src/providers/database/provider.rs | 21 ++++++++----------- .../provider/src/test_utils/executor.rs | 2 +- crates/trie/src/updates.rs | 2 +- 19 files changed, 34 insertions(+), 41 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2cfb7e690395..9707365066eb 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -35,7 +35,7 @@ jobs: - name: cargo clippy uses: actions-rs/clippy-check@v1 with: - args: --all --all-features + args: --all --all-features -- -A clippy::incorrect_clone_impl_on_copy_type -A clippy::arc_with_non_send_sync token: ${{ secrets.GITHUB_TOKEN }} doc-lint: diff --git a/Cargo.lock b/Cargo.lock index 3e33d1e1b269..014ef5eacb59 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5345,7 +5345,7 @@ dependencies = [ name = "reth-libmdbx" version = "0.1.0-alpha.1" dependencies = [ 
- "bitflags 1.3.2", + "bitflags 2.3.2", "byteorder", "criterion", "derive_more", diff --git a/crates/blockchain-tree/src/block_buffer.rs b/crates/blockchain-tree/src/block_buffer.rs index 9ef9870d960d..8d8bbddeaa10 100644 --- a/crates/blockchain-tree/src/block_buffer.rs +++ b/crates/blockchain-tree/src/block_buffer.rs @@ -86,7 +86,7 @@ impl BlockBuffer { taken.push(block); } - taken.extend(self.remove_children(vec![parent]).into_iter()); + taken.extend(self.remove_children(vec![parent])); self.metrics.blocks.set(self.len() as f64); taken } @@ -206,7 +206,7 @@ impl BlockBuffer { removed_blocks.push(block); } } - remove_parent_children.extend(parent_childrens.into_iter()); + remove_parent_children.extend(parent_childrens); } } removed_blocks diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 22d3e32e6239..0fad7079917a 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -139,7 +139,7 @@ impl BlockchainTree chains: Default::default(), block_indices: BlockIndices::new( last_finalized_block_number, - BTreeMap::from_iter(last_canonical_hashes.into_iter()), + BTreeMap::from_iter(last_canonical_hashes), ), config, canon_state_notification_sender, @@ -274,7 +274,7 @@ impl BlockchainTree .iter() .filter(|&(key, _)| key < first_pending_block_number) .collect::>(); - parent_block_hashed.extend(canonical_chain.into_iter()); + parent_block_hashed.extend(canonical_chain); // get canonical fork. let canonical_fork = self.canonical_fork(chain_id)?; diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index d26523119282..7a0c3aa4b742 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -215,7 +215,7 @@ where /// Queues bodies and sets the latest queued block number fn queue_bodies(&mut self, bodies: Vec) { self.latest_queued_block_number = Some(bodies.last().expect("is not empty").block_number()); - self.queued_bodies.extend(bodies.into_iter()); + self.queued_bodies.extend(bodies); self.metrics.queued_blocks.set(self.queued_bodies.len() as f64); } diff --git a/crates/revm/revm-inspectors/src/access_list.rs b/crates/revm/revm-inspectors/src/access_list.rs index 873ab3ebc47e..41faf88a2b0f 100644 --- a/crates/revm/revm-inspectors/src/access_list.rs +++ b/crates/revm/revm-inspectors/src/access_list.rs @@ -28,7 +28,7 @@ impl AccessListInspector { precompiles: Vec
, ) -> Self { AccessListInspector { - excluded: vec![from, to].iter().chain(precompiles.iter()).copied().collect(), + excluded: [from, to].iter().chain(precompiles.iter()).copied().collect(), access_list: access_list .0 .iter() diff --git a/crates/revm/src/executor.rs b/crates/revm/src/executor.rs index dc456d61cfe4..5ce714521fba 100644 --- a/crates/revm/src/executor.rs +++ b/crates/revm/src/executor.rs @@ -229,7 +229,7 @@ where let mut cumulative_gas_used = 0; let mut post_state = PostState::with_tx_capacity(block.number, block.body.len()); - for (transaction, sender) in block.body.iter().zip(senders.into_iter()) { + for (transaction, sender) in block.body.iter().zip(senders) { // The sum of the transaction’s gas limit, Tg, and the gas utilised in this block prior, // must be no greater than the block’s gasLimit. let block_available_gas = block.header.gas_limit - cumulative_gas_used; diff --git a/crates/rpc/rpc/src/eth/api/fees.rs b/crates/rpc/rpc/src/eth/api/fees.rs index bdd30a0ac3fb..c9a1513535ac 100644 --- a/crates/rpc/rpc/src/eth/api/fees.rs +++ b/crates/rpc/rpc/src/eth/api/fees.rs @@ -140,7 +140,7 @@ where let mut transactions = transactions .into_iter() - .zip(receipts.into_iter()) + .zip(receipts) .scan(0, |previous_gas, (tx, receipt)| { // Convert the cumulative gas used in the receipts // to the gas usage by the transaction diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index a3b5e0a03d03..f474426037e9 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -284,7 +284,7 @@ where let all_logs = logs_utils::matching_block_logs( &filter, block_receipts.block, - block_receipts.tx_receipts.into_iter(), + block_receipts.tx_receipts, removed, ); futures::stream::iter(all_logs) diff --git a/crates/stages/src/stages/hashing_account.rs b/crates/stages/src/stages/hashing_account.rs index 8ef401571946..ccb6fb960fd0 100644 --- a/crates/stages/src/stages/hashing_account.rs +++ b/crates/stages/src/stages/hashing_account.rs @@ -250,7 +250,7 @@ impl Stage for AccountHashingStage { // `previous_stage_progress` state. let accounts = provider.basic_accounts(lists)?; // Insert and hash accounts to hashing table - provider.insert_account_for_hashing(accounts.into_iter())?; + provider.insert_account_for_hashing(accounts)?; } // We finished the hashing stage, no future iterations is expected for the same block range, diff --git a/crates/stages/src/stages/hashing_storage.rs b/crates/stages/src/stages/hashing_storage.rs index 88a0043dccd2..040b6375bd02 100644 --- a/crates/stages/src/stages/hashing_storage.rs +++ b/crates/stages/src/stages/hashing_storage.rs @@ -176,7 +176,7 @@ impl Stage for StorageHashingStage { // Assumption we are okay with is that plain state represent // `previous_stage_progress` state. 
let storages = provider.plainstate_storages(lists)?; - provider.insert_storage_for_hashing(storages.into_iter())?; + provider.insert_storage_for_hashing(storages)?; } // We finished the hashing stage, no future iterations is expected for the same block range, diff --git a/crates/storage/codecs/derive/src/lib.rs b/crates/storage/codecs/derive/src/lib.rs index a46081c5748e..6d2c88c90734 100644 --- a/crates/storage/codecs/derive/src/lib.rs +++ b/crates/storage/codecs/derive/src/lib.rs @@ -117,7 +117,7 @@ pub fn use_compact(args: TokenStream, input: TokenStream) -> TokenStream { let mut args = args.into_iter().collect::>(); args.push(TokenTree::Ident(proc_macro::Ident::new("compact", proc_macro::Span::call_site()))); - derive_arbitrary(TokenStream::from_iter(args.into_iter()), compact) + derive_arbitrary(TokenStream::from_iter(args), compact) } /// Adds `Arbitrary` and `proptest::Arbitrary` imports into scope and derives the struct/enum. diff --git a/crates/storage/libmdbx-rs/Cargo.toml b/crates/storage/libmdbx-rs/Cargo.toml index bac79ec882fb..bf67c037f968 100644 --- a/crates/storage/libmdbx-rs/Cargo.toml +++ b/crates/storage/libmdbx-rs/Cargo.toml @@ -12,7 +12,7 @@ repository.workspace = true name = "reth_libmdbx" [dependencies] -bitflags = "1" +bitflags = "2" byteorder = "1" derive_more = "0.99" indexmap = "1" diff --git a/crates/storage/libmdbx-rs/src/cursor.rs b/crates/storage/libmdbx-rs/src/cursor.rs index 1e68bf648535..5aa4946d5681 100644 --- a/crates/storage/libmdbx-rs/src/cursor.rs +++ b/crates/storage/libmdbx-rs/src/cursor.rs @@ -13,14 +13,14 @@ use ffi::{ }; use libc::{c_uint, c_void}; use parking_lot::Mutex; -use std::{borrow::Cow, fmt, marker::PhantomData, mem, ptr, result, sync::Arc}; +use std::{borrow::Cow, fmt, marker::PhantomData, mem, ptr, rc::Rc, result}; /// A cursor for navigating the items within a database. pub struct Cursor<'txn, K> where K: TransactionKind, { - txn: Arc>, + txn: Rc>, cursor: *mut ffi::MDBX_cursor, _marker: PhantomData, } diff --git a/crates/storage/libmdbx-rs/src/transaction.rs b/crates/storage/libmdbx-rs/src/transaction.rs index 7059a2beb9cd..210f8f15fcfa 100644 --- a/crates/storage/libmdbx-rs/src/transaction.rs +++ b/crates/storage/libmdbx-rs/src/transaction.rs @@ -10,12 +10,8 @@ use indexmap::IndexSet; use libc::{c_uint, c_void}; use parking_lot::Mutex; use std::{ - fmt, - fmt::Debug, - marker::PhantomData, - mem::size_of, - ptr, result, slice, - sync::{mpsc::sync_channel, Arc}, + fmt, fmt::Debug, marker::PhantomData, mem::size_of, ptr, rc::Rc, result, slice, + sync::mpsc::sync_channel, }; mod private { @@ -57,7 +53,7 @@ where K: TransactionKind, E: EnvironmentKind, { - txn: Arc>, + txn: Rc>, primed_dbis: Mutex>, committed: bool, env: &'env Environment, @@ -85,7 +81,7 @@ where pub(crate) fn new_from_ptr(env: &'env Environment, txn: *mut ffi::MDBX_txn) -> Self { Self { - txn: Arc::new(Mutex::new(txn)), + txn: Rc::new(Mutex::new(txn)), primed_dbis: Mutex::new(IndexSet::new()), committed: false, env, @@ -97,7 +93,7 @@ where /// /// The caller **must** ensure that the pointer is not used after the /// lifetime of the transaction. 
- pub(crate) fn txn_mutex(&self) -> Arc> { + pub(crate) fn txn_mutex(&self) -> Rc> { self.txn.clone() } diff --git a/crates/storage/provider/src/chain.rs b/crates/storage/provider/src/chain.rs index 02ca47be6809..13ba859d05da 100644 --- a/crates/storage/provider/src/chain.rs +++ b/crates/storage/provider/src/chain.rs @@ -165,7 +165,7 @@ impl Chain { } // Insert blocks from other chain - self.blocks.extend(chain.blocks.into_iter()); + self.blocks.extend(chain.blocks); self.state.extend(chain.state); Ok(()) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 8ce2855e965a..5650b85e3dde 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -518,11 +518,9 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> DatabaseProvider<'this, TX> { let mut block_withdrawals = block_withdrawals_iter.next(); let mut blocks = Vec::new(); - for ((main_block_number, header), (_, header_hash), (_, tx)) in izip!( - block_header_iter.into_iter(), - block_header_hashes_iter.into_iter(), - block_tx_iter.into_iter() - ) { + for ((main_block_number, header), (_, header_hash), (_, tx)) in + izip!(block_header_iter.into_iter(), block_header_hashes_iter, block_tx_iter) + { let header = header.seal(header_hash); let (body, senders) = tx.into_iter().map(|tx| tx.to_components()).unzip(); @@ -1303,15 +1301,15 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> HashingWriter for DatabaseProvider // storage hashing stage { let lists = self.changed_storages_with_range(range.clone())?; - let storages = self.plainstate_storages(lists.into_iter())?; - self.insert_storage_for_hashing(storages.into_iter())?; + let storages = self.plainstate_storages(lists)?; + self.insert_storage_for_hashing(storages)?; } // account hashing stage { let lists = self.changed_accounts_with_range(range.clone())?; - let accounts = self.basic_accounts(lists.into_iter())?; - self.insert_account_for_hashing(accounts.into_iter())?; + let accounts = self.basic_accounts(lists)?; + self.insert_account_for_hashing(accounts)?; } // merkle tree @@ -1649,8 +1647,7 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> BlockExecutionWriter for DatabaseP // get execution res let execution_res = self.get_take_block_execution_result_range::(range.clone())?; // combine them - let blocks_with_exec_result: Vec<_> = - blocks.into_iter().zip(execution_res.into_iter()).collect(); + let blocks_with_exec_result: Vec<_> = blocks.into_iter().zip(execution_res).collect(); // remove block bodies it is needed for both get block range and get block execution results // that is why it is deleted afterwards. 
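// [Editorial sketch] A minimal, standalone illustration of the pattern this cleanup patch
// applies throughout: `Extend::extend`, `Iterator::zip` and `FromIterator::from_iter` are
// all generic over `I: IntoIterator`, so the explicit `.into_iter()` calls being removed
// were redundant (clippy reports them as `useless_conversion`). Names are illustrative only.
fn redundant_into_iter_demo() {
    let mut queued_bodies: Vec<u64> = vec![1, 2];
    let new_bodies: Vec<u64> = vec![3, 4];
    // Equivalent to `queued_bodies.extend(new_bodies.into_iter())`, minus the noise.
    queued_bodies.extend(new_bodies);

    let senders = vec![10u64, 20, 30, 40];
    // `zip` likewise accepts any `IntoIterator`, so no `.into_iter()` on `senders` is needed.
    let paired: Vec<(u64, u64)> = queued_bodies.into_iter().zip(senders).collect();
    assert_eq!(paired, [(1, 10), (2, 20), (3, 30), (4, 40)]);
}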
@@ -1712,7 +1709,7 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> BlockWriter for DatabaseProvider<' let senders_len = senders.as_ref().map(|s| s.len()); let tx_iter = if Some(block.body.len()) == senders_len { - block.body.into_iter().zip(senders.unwrap().into_iter()).collect::>() + block.body.into_iter().zip(senders.unwrap()).collect::>() } else { block .body diff --git a/crates/storage/provider/src/test_utils/executor.rs b/crates/storage/provider/src/test_utils/executor.rs index 50d732e06ed2..58bca889e5a7 100644 --- a/crates/storage/provider/src/test_utils/executor.rs +++ b/crates/storage/provider/src/test_utils/executor.rs @@ -41,7 +41,7 @@ impl TestExecutorFactory { /// Extend the mocked execution results pub fn extend(&self, results: Vec) { - self.exec_results.lock().extend(results.into_iter()); + self.exec_results.lock().extend(results); } } diff --git a/crates/trie/src/updates.rs b/crates/trie/src/updates.rs index 49ab105f007f..b60df4aa78a7 100644 --- a/crates/trie/src/updates.rs +++ b/crates/trie/src/updates.rs @@ -111,7 +111,7 @@ impl TrieUpdates { let mut account_trie_cursor = tx.cursor_write::()?; let mut storage_trie_cursor = tx.cursor_dup_write::()?; - let mut trie_operations = Vec::from_iter(self.trie_operations.into_iter()); + let mut trie_operations = Vec::from_iter(self.trie_operations); trie_operations.sort_unstable_by(|a, b| a.0.cmp(&b.0)); for (key, operation) in trie_operations { match key { From 73eeca0e29dda19cc9b481020c5b669fdaaff5e1 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 3 Jul 2023 12:29:17 +0100 Subject: [PATCH 033/722] fix(codec): fix last field compilation check (#3543) --- .../codecs/derive/src/compact/generator.rs | 7 +++++- .../codecs/derive/src/compact/structs.rs | 25 +++++++++++++------ crates/storage/codecs/derive/src/lib.rs | 6 +++-- 3 files changed, 27 insertions(+), 11 deletions(-) diff --git a/crates/storage/codecs/derive/src/compact/generator.rs b/crates/storage/codecs/derive/src/compact/generator.rs index 2b408ec03bf3..bdb81893b47c 100644 --- a/crates/storage/codecs/derive/src/compact/generator.rs +++ b/crates/storage/codecs/derive/src/compact/generator.rs @@ -58,7 +58,12 @@ fn generate_from_compact(fields: &FieldList, ident: &Ident, is_zstd: bool) -> To // it's hard to figure out with derive_macro which types have bytes::Bytes fields. // // This removes the requirement of the field to be placed last in the struct. - known_types.append(&mut vec!["TransactionKind", "AccessList", "Signature"]); + known_types.append(&mut vec![ + "TransactionKind", + "AccessList", + "Signature", + "CheckpointBlockRange", + ]); // let mut handle = FieldListHandler::new(fields); let is_enum = fields.iter().any(|field| matches!(field, FieldTypes::EnumVariant(_))); diff --git a/crates/storage/codecs/derive/src/compact/structs.rs b/crates/storage/codecs/derive/src/compact/structs.rs index aec1d7c66e44..1cbdd9045748 100644 --- a/crates/storage/codecs/derive/src/compact/structs.rs +++ b/crates/storage/codecs/derive/src/compact/structs.rs @@ -116,17 +116,26 @@ impl<'a> StructHandler<'a> { format_ident!("specialized_from_compact") }; + // ! Be careful before changing the following assert ! Especially if the type does not + // implement proptest tests. + // + // The limitation of the last placed field applies to fields with potentially large sizes, + // like the `Transaction` field. These fields may have inner "Bytes" fields, sometimes even + // nested further, making it impossible to check with `proc_macro`. 
The goal is to place + // such fields as the last ones, so we don't need to store their length separately. Instead, + // we can simply read them until the end of the buffer. + // + // However, certain types don't require this approach because they don't contain inner + // "Bytes" fields. For these types, we can add them to a "known_types" list so it doesn't + // trigger this error. These types can handle their own deserialization without + // relying on the length provided by the higher-level deserializer. For example, a + // type "T" with two "u64" fields doesn't need the length parameter from + // "T::from_compact(buf, len)" since the length of "u64" is known internally (bitpacked). assert!( known_types.contains(&ftype.as_str()) || is_flag_type(ftype) || - self.fields_iterator.peek().map_or(true, |ftypes| { - if let FieldTypes::StructField((_, ftype, _, _)) = ftypes { - !known_types.contains(&ftype.as_str()) - } else { - false - } - }), - "`{ftype}` field should be placed as the last one since it's not known. + self.fields_iterator.peek().is_none(), + "`{ftype}` field should be placed as the last one since it's not known. If it's an alias type (which are not supported by proc_macro), be sure to add it to either `known_types` or `get_bit_size` lists in the derive crate." ); diff --git a/crates/storage/codecs/derive/src/lib.rs b/crates/storage/codecs/derive/src/lib.rs index 6d2c88c90734..071f7499aebc 100644 --- a/crates/storage/codecs/derive/src/lib.rs +++ b/crates/storage/codecs/derive/src/lib.rs @@ -23,11 +23,13 @@ pub fn derive_zstd(input: TokenStream) -> TokenStream { compact::derive(input, is_zstd) } -/// Implements the main codec. If the codec supports it, it will call `derive_arbitrary(..)`. +/// This code implements the main codec. If the codec supports it, it will also provide the [derive_arbitrary()] function, which automatically implements arbitrary traits and roundtrip fuzz tests. +/// +/// If you prefer to manually implement the arbitrary traits, you can still use the [add_arbitrary_tests()] function to add arbitrary fuzz tests. /// /// Example usage: /// * `#[main_codec(rlp)]`: will implement `derive_arbitrary(rlp)` or `derive_arbitrary(compact, rlp)`, if `compact` is the `main_codec`. -/// * `#[main_codec(no_arbitrary)]`: will skip `derive_arbitrary` +/// * `#[main_codec(no_arbitrary)]`: will skip `derive_arbitrary` (both trait implementations and tests) #[proc_macro_attribute] #[rustfmt::skip] #[allow(unreachable_code)] From e0748f7415721729b6fbc4d1cc8383fdb3700b21 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 3 Jul 2023 13:29:35 +0200 Subject: [PATCH 034/722] perf: join futures in tracing (#3541) --- crates/rpc/rpc/src/trace.rs | 37 ++++++++++++++++++------------------- 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index fa1372a505ed..37047e21f569 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -399,24 +399,23 @@ where &self, block_id: BlockId, ) -> EthResult>> { - let mut traces: Option> = self - .trace_block_with( - block_id, - TracingInspectorConfig::default_parity(), - |tx_info, inspector, _, _, _| { - let traces = - inspector.into_parity_builder().into_localized_transaction_traces(tx_info); - Ok(traces) - }, - ) - .await? 
- .map(|traces| traces.into_iter().flatten().collect()); - - // Add block reward traces - // TODO: We only really need the header and ommers here to determine the reward - if let (Some(block), Some(traces)) = - (self.inner.eth_api.block_by_id(block_id).await?, traces.as_mut()) - { + let traces = self.trace_block_with( + block_id, + TracingInspectorConfig::default_parity(), + |tx_info, inspector, _, _, _| { + let traces = + inspector.into_parity_builder().into_localized_transaction_traces(tx_info); + Ok(traces) + }, + ); + + let block = self.inner.eth_api.block_by_id(block_id); + let (maybe_traces, maybe_block) = futures::try_join!(traces, block)?; + + let mut maybe_traces = + maybe_traces.map(|traces| traces.into_iter().flatten().collect::>()); + + if let (Some(block), Some(traces)) = (maybe_block, maybe_traces.as_mut()) { if let Some(header_td) = self.provider().header_td(&block.header.hash)? { if let Some(base_block_reward) = base_block_reward( self.provider().chain_spec().as_ref(), @@ -448,7 +447,7 @@ where } } - Ok(traces) + Ok(maybe_traces) } /// Replays all transactions in a block From d80c8a7b5be8d05d0004963000882b0f573f15d5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 3 Jul 2023 13:31:47 +0200 Subject: [PATCH 035/722] feat: add NoopTransactionPool impl (#3536) --- crates/transaction-pool/src/lib.rs | 12 +- crates/transaction-pool/src/noop.rs | 195 ++++++++++++++++++ crates/transaction-pool/src/test_utils/mod.rs | 31 +-- crates/transaction-pool/src/traits.rs | 10 +- 4 files changed, 212 insertions(+), 36 deletions(-) create mode 100644 crates/transaction-pool/src/noop.rs diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 65e2e0e958a1..b56bb2042d04 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -115,18 +115,20 @@ pub use crate::{ }, }; -mod config; pub mod error; -mod identifier; pub mod maintain; pub mod metrics; -mod ordering; +pub mod noop; pub mod pool; -mod traits; pub mod validate; +mod config; +mod identifier; +mod ordering; +mod traits; + #[cfg(any(test, feature = "test-utils"))] -/// Common test helpers for mocking A pool +/// Common test helpers for mocking a pool pub mod test_utils; // TX_SLOT_SIZE is used to calculate how many data slots a single transaction diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs new file mode 100644 index 000000000000..e3b7a3e81a9c --- /dev/null +++ b/crates/transaction-pool/src/noop.rs @@ -0,0 +1,195 @@ +//! A transaction pool implementation that does nothing. +//! +//! This is useful for wiring components together that don't require an actual pool but still need +//! to be generic over it. + +use crate::{ + error::PoolError, AllPoolTransactions, BestTransactions, BlockInfo, NewTransactionEvent, + PoolResult, PoolSize, PoolTransaction, PooledTransaction, PropagatedTransactions, + TransactionEvents, TransactionOrigin, TransactionPool, TransactionValidationOutcome, + TransactionValidator, ValidPoolTransaction, +}; +use reth_primitives::{Address, TxHash}; +use std::{marker::PhantomData, sync::Arc}; +use tokio::sync::{mpsc, mpsc::Receiver}; + +/// A [`TransactionPool`] implementation that does nothing. +/// +/// All transactions are rejected and no events are emitted. +/// This type will never hold any transactions and is only useful for wiring components together. 
+#[derive(Debug, Clone, Default)] +#[non_exhaustive] +pub struct NoopTransactionPool; + +#[async_trait::async_trait] +impl TransactionPool for NoopTransactionPool { + type Transaction = PooledTransaction; + + fn pool_size(&self) -> PoolSize { + Default::default() + } + + fn block_info(&self) -> BlockInfo { + BlockInfo { + last_seen_block_hash: Default::default(), + last_seen_block_number: 0, + pending_basefee: 0, + } + } + + async fn add_transaction_and_subscribe( + &self, + _origin: TransactionOrigin, + transaction: Self::Transaction, + ) -> PoolResult { + let hash = *transaction.hash(); + Err(PoolError::Other(hash, Box::new(NoopInsertError::new(transaction)))) + } + + async fn add_transaction( + &self, + _origin: TransactionOrigin, + transaction: Self::Transaction, + ) -> PoolResult { + let hash = *transaction.hash(); + Err(PoolError::Other(hash, Box::new(NoopInsertError::new(transaction)))) + } + + async fn add_transactions( + &self, + _origin: TransactionOrigin, + transactions: Vec, + ) -> PoolResult>> { + Ok(transactions + .into_iter() + .map(|transaction| { + let hash = *transaction.hash(); + Err(PoolError::Other(hash, Box::new(NoopInsertError::new(transaction)))) + }) + .collect()) + } + + fn transaction_event_listener(&self, _tx_hash: TxHash) -> Option { + None + } + + fn pending_transactions_listener(&self) -> Receiver { + mpsc::channel(1).1 + } + + fn transactions_listener(&self) -> Receiver> { + mpsc::channel(1).1 + } + + fn pooled_transaction_hashes(&self) -> Vec { + vec![] + } + + fn pooled_transaction_hashes_max(&self, _max: usize) -> Vec { + vec![] + } + + fn pooled_transactions(&self) -> Vec>> { + vec![] + } + + fn pooled_transactions_max( + &self, + _max: usize, + ) -> Vec>> { + vec![] + } + + fn best_transactions( + &self, + ) -> Box>>> { + Box::new(std::iter::empty()) + } + + fn pending_transactions(&self) -> Vec>> { + vec![] + } + + fn queued_transactions(&self) -> Vec>> { + vec![] + } + + fn all_transactions(&self) -> AllPoolTransactions { + AllPoolTransactions::default() + } + + fn remove_transactions( + &self, + _hashes: impl IntoIterator, + ) -> Vec>> { + vec![] + } + + fn retain_unknown(&self, _hashes: &mut Vec) {} + + fn get(&self, _tx_hash: &TxHash) -> Option>> { + None + } + + fn get_all( + &self, + _txs: impl IntoIterator, + ) -> Vec>> { + vec![] + } + + fn on_propagated(&self, _txs: PropagatedTransactions) {} + + fn get_transactions_by_sender( + &self, + _sender: Address, + ) -> Vec>> { + vec![] + } +} + +/// A [`TransactionValidator`] that does nothing. +#[derive(Debug, Clone)] +#[non_exhaustive] +pub struct NoopTransactionValidator(PhantomData); + +#[async_trait::async_trait] +impl TransactionValidator for NoopTransactionValidator { + type Transaction = T; + + async fn validate_transaction( + &self, + _origin: TransactionOrigin, + transaction: Self::Transaction, + ) -> TransactionValidationOutcome { + TransactionValidationOutcome::Valid { + balance: Default::default(), + state_nonce: 0, + transaction, + } + } +} + +impl Default for NoopTransactionValidator { + fn default() -> Self { + NoopTransactionValidator(PhantomData) + } +} + +/// An error that contains the transaction that failed to be inserted into the noop pool. +#[derive(Debug, Clone, thiserror::Error)] +#[error("Can't insert transaction into the noop pool that does nothing.")] +pub struct NoopInsertError { + tx: PooledTransaction, +} + +impl NoopInsertError { + fn new(tx: PooledTransaction) -> Self { + Self { tx } + } + + /// Returns the transaction that failed to be inserted. 
+ pub fn into_inner(self) -> PooledTransaction { + self.tx + } +} diff --git a/crates/transaction-pool/src/test_utils/mod.rs b/crates/transaction-pool/src/test_utils/mod.rs index a68b9feb25a6..1a77eb763c4a 100644 --- a/crates/transaction-pool/src/test_utils/mod.rs +++ b/crates/transaction-pool/src/test_utils/mod.rs @@ -5,7 +5,8 @@ mod mock; mod pool; use crate::{ - Pool, PoolTransaction, TransactionOrigin, TransactionValidationOutcome, TransactionValidator, + noop::NoopTransactionValidator, Pool, PoolTransaction, TransactionOrigin, + TransactionValidationOutcome, TransactionValidator, }; use async_trait::async_trait; pub use mock::*; @@ -18,31 +19,3 @@ pub type TestPool = Pool, MockOrdering pub fn testing_pool() -> TestPool { Pool::new(NoopTransactionValidator::default(), MockOrdering::default(), Default::default()) } - -// A [`TransactionValidator`] that does nothing. -#[derive(Debug, Clone)] -#[non_exhaustive] -pub struct NoopTransactionValidator(PhantomData); - -#[async_trait::async_trait] -impl TransactionValidator for NoopTransactionValidator { - type Transaction = T; - - async fn validate_transaction( - &self, - origin: TransactionOrigin, - transaction: Self::Transaction, - ) -> TransactionValidationOutcome { - TransactionValidationOutcome::Valid { - balance: Default::default(), - state_nonce: 0, - transaction, - } - } -} - -impl Default for NoopTransactionValidator { - fn default() -> Self { - NoopTransactionValidator(PhantomData) - } -} diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 1781e8f5e98f..256afe996410 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -220,7 +220,7 @@ pub trait TransactionPoolExt: TransactionPool { } /// A Helper type that bundles all transactions in the pool. -#[derive(Debug, Clone, Default)] +#[derive(Debug, Clone)] pub struct AllPoolTransactions { /// Transactions that are ready for inclusion in the next block. pub pending: Vec>>, @@ -244,6 +244,12 @@ impl AllPoolTransactions { } } +impl Default for AllPoolTransactions { + fn default() -> Self { + Self { pending: Default::default(), queued: Default::default() } + } +} + /// Represents a transaction that was propagated over the network. #[derive(Debug, Clone, Eq, PartialEq, Default)] pub struct PropagatedTransactions(pub HashMap>); @@ -577,7 +583,7 @@ impl IntoRecoveredTransaction for PooledTransaction { } /// Represents the current status of the pool. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Default)] pub struct PoolSize { /// Number of transactions in the _pending_ sub-pool. pub pending: usize, From 0aeffe96b058ec94c13dfad725d1d0051cd119bf Mon Sep 17 00:00:00 2001 From: ts0yu <120932697+ts0yu@users.noreply.github.com> Date: Mon, 3 Jul 2023 12:32:09 +0100 Subject: [PATCH 036/722] nit: spelling (#3539) --- docs/design/database.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/design/database.md b/docs/design/database.md index 3a2953e85f99..b6878c84954c 100644 --- a/docs/design/database.md +++ b/docs/design/database.md @@ -8,7 +8,7 @@ ## Codecs * We want Reth's serialized format to be able to trade off read/write speed for size, depending on who the user is. -* To achieve that, we created the [Encode/Decode/Compress/Decompress trais](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/interfaces/src/db/table.rs#L9-L36) to make the (de)serialization of database `Table::Key` and `Table::Values` generic. 
+* To achieve that, we created the [Encode/Decode/Compress/Decompress traits](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/interfaces/src/db/table.rs#L9-L36) to make the (de)serialization of database `Table::Key` and `Table::Values` generic. * This allows for [out-of-the-box benchmarking](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/db/benches/encoding_iai.rs#L5) (using [Criterion](https://github.com/bheisler/criterion.rs) and [Iai](https://github.com/bheisler/iai)) * It also enables [out-of-the-box fuzzing](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/interfaces/src/db/codecs/fuzz/mod.rs) using [trailofbits/test-fuzz](https://github.com/trailofbits/test-fuzz). * We implemented that trait for the following encoding formats: From 766f520c1739242d834fdb0c1202ad511967a8b9 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Mon, 3 Jul 2023 13:47:43 +0200 Subject: [PATCH 037/722] examples: add examples of using reth-provider and instantiating an RPC on top of the DB (#3533) --- Cargo.lock | 20 ++++ Cargo.toml | 10 ++ examples/Cargo.toml | 35 +++++++ examples/README.md | 17 ++++ examples/db-access.rs | 222 ++++++++++++++++++++++++++++++++++++++++++ examples/rpc-db.rs | 78 +++++++++++++++ 6 files changed, 382 insertions(+) create mode 100644 examples/Cargo.toml create mode 100644 examples/README.md create mode 100644 examples/db-access.rs create mode 100644 examples/rpc-db.rs diff --git a/Cargo.lock b/Cargo.lock index 014ef5eacb59..4383cd628021 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2179,6 +2179,26 @@ version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" +[[package]] +name = "examples" +version = "0.0.0" +dependencies = [ + "eyre", + "futures", + "reth-beacon-consensus", + "reth-blockchain-tree", + "reth-db", + "reth-network-api", + "reth-primitives", + "reth-provider", + "reth-revm", + "reth-rpc-builder", + "reth-rpc-types", + "reth-tasks", + "reth-transaction-pool", + "tokio", +] + [[package]] name = "eyre" version = "0.6.8" diff --git a/Cargo.toml b/Cargo.toml index 8bfa8df771fd..52268e8ec0f5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -45,6 +45,8 @@ members = [ "crates/transaction-pool", "crates/trie", "testing/ef-tests", + + "examples", ] default-members = ["bin/reth"] @@ -59,6 +61,9 @@ rust-version = "1.70" # Remember to update .clippy.toml and README.md license = "MIT OR Apache-2.0" homepage = "https://paradigmxyz.github.io/reth" repository = "https://github.com/paradigmxyz/reth" +exclude = [ + ".github/", +] # Like release, but with full debug symbols. Useful for e.g. `perf`. 
[profile.debug-fast] @@ -85,9 +90,14 @@ revm-primitives = "1.1" reth-primitives = { path = "./crates/primitives" } reth-interfaces = { path = "./crates/interfaces" } reth-provider = { path = "./crates/storage/provider" } +reth-db = { path = "./crates/storage/db" } reth-rlp = { path = "./crates/rlp" } reth-rpc-types = { path = "./crates/rpc/rpc-types" } +reth-rpc-builder = { path = "./crates/rpc/rpc-builder" } +reth-blockchain-tree = { path = "./crates/blockchain-tree" } +reth-beacon-consensus = { path = "./crates/consensus/beacon" } reth-metrics = { path = "./crates/metrics" } +reth-revm = { path = "./crates/revm" } reth-payload-builder = { path = "./crates/payload/builder" } reth-transaction-pool = { path = "./crates/transaction-pool" } reth-tasks = { path = "./crates/tasks" } diff --git a/examples/Cargo.toml b/examples/Cargo.toml new file mode 100644 index 000000000000..ed1a18eb6351 --- /dev/null +++ b/examples/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "examples" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + +[dev-dependencies] +reth-primitives = { workspace = true } + +reth-db = { workspace = true } +reth-provider = { workspace = true } + +reth-rpc-builder = { workspace = true } +reth-rpc-types = { workspace = true } + +reth-revm = { workspace = true } +reth-blockchain-tree = { workspace = true } +reth-beacon-consensus = { workspace = true } +reth-network-api = { workspace = true, features = ["test-utils"] } +reth-transaction-pool = { workspace = true, features = ["test-utils"] } +reth-tasks = { workspace = true } + + +eyre = "0.6.8" +futures = "0.3.0" +tokio = { workspace = true } + +[[example]] +name = "rpc-db" +path = "rpc-db.rs" + +[[example]] +name = "db-access" +path = "db-access.rs" diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 000000000000..270c15fdb09a --- /dev/null +++ b/examples/README.md @@ -0,0 +1,17 @@ +## Examples of how to use the Reth SDK + +This directory contains a number of examples showcasing various capabilities of +the `reth-*` crates. + +All examples can be executed with: + +``` +cargo run --example $name +``` + +A good starting point for the examples would be [`db-access`](db-access.rs) +and [`rpc-db`](rpc-db.rs). + +If you've got an example you'd like to see here, please feel free to open an +issue. Otherwise if you've got an example you'd like to add, please feel free +to make a PR! diff --git a/examples/db-access.rs b/examples/db-access.rs new file mode 100644 index 000000000000..23913acde96b --- /dev/null +++ b/examples/db-access.rs @@ -0,0 +1,222 @@ +use reth_db::open_db_read_only; +use reth_primitives::{Address, ChainSpecBuilder, H256, U256}; +use reth_provider::{ + AccountReader, BlockReader, BlockSource, HeaderProvider, ProviderFactory, ReceiptProvider, + StateProvider, TransactionsProvider, +}; +use reth_rpc_types::{Filter, FilteredParams}; + +use std::path::Path; + +// Providers are zero cost abstractions on top of an opened MDBX Transaction +// exposing a familiar API to query the chain's information without requiring knowledge +// of the inner tables. +// +// These abstractions do not include any caching and the user is responsible for doing that. +// Other parts of the code which include caching are parts of the `EthApi` abstraction. +fn main() -> eyre::Result<()> { + // Opens a RO handle to the database file. + // TODO: Should be able to do `ProviderFactory::new_with_db_path_ro(...)` instead of + // doing in 2 steps. 
+    let db = open_db_read_only(&Path::new(&std::env::var("RETH_DB_PATH")?))?;
+
+    // Instantiate a provider factory for Ethereum mainnet using the provided DB.
+    // TODO: Should the DB version include the spec so that you do not need to specify it here?
+    let spec = ChainSpecBuilder::mainnet().build();
+    let factory = ProviderFactory::new(db, spec.into());
+
+    // This call opens a RO transaction on the database. To write to the DB you'd need to call
+    // the `provider_rw` function and look for the `Writer` variants of the traits.
+    let provider = factory.provider()?;
+
+    // Run basic queries against the DB
+    let block_num = 100;
+    header_provider_example(&provider, block_num)?;
+    block_provider_example(&provider, block_num)?;
+    txs_provider_example(&provider)?;
+    receipts_provider_example(&provider)?;
+
+    // Closes the RO transaction opened in the `factory.provider()` call. This is optional and
+    // would happen anyway at the end of the function scope.
+    drop(provider);
+
+    // Run the example against latest state
+    state_provider_example(factory.latest()?)?;
+
+    // Run it with historical state
+    state_provider_example(factory.history_by_block_number(block_num)?)?;
+
+    Ok(())
+}
+
+/// The `HeaderProvider` allows querying the headers-related tables.
+fn header_provider_example<T: HeaderProvider>(provider: T, number: u64) -> eyre::Result<()> {
+    // Can query the header by number
+    let header = provider.header_by_number(number)?.ok_or(eyre::eyre!("header not found"))?;
+
+    // We can convert a header to a sealed header which contains the hash w/o needing to re-compute
+    // it every time.
+    let sealed_header = header.seal_slow();
+
+    // Can also query the header by hash!
+    let header_by_hash =
+        provider.header(&sealed_header.hash)?.ok_or(eyre::eyre!("header by hash not found"))?;
+    assert_eq!(sealed_header.header, header_by_hash);
+
+    // The header's total difficulty is stored in a separate table, so we have a separate call for
+    // it. This is not needed for post-PoS-transition chains.
+    let td = provider.header_td_by_number(number)?.ok_or(eyre::eyre!("header td not found"))?;
+    assert_ne!(td, U256::ZERO);
+
+    // Can query headers by range as well, already sealed!
+    let headers = provider.sealed_headers_range(100..200)?;
+    assert_eq!(headers.len(), 100);
+
+    Ok(())
+}
+
+/// The `TransactionsProvider` allows querying transaction-related information.
+fn txs_provider_example<T: TransactionsProvider>(provider: T) -> eyre::Result<()> {
+    // Try the 5th tx
+    let txid = 5;
+
+    // Query a transaction by its primary ordered key in the db
+    let tx = provider.transaction_by_id(txid)?.ok_or(eyre::eyre!("transaction not found"))?;
+
+    // Can query the tx by hash
+    let tx_by_hash =
+        provider.transaction_by_hash(tx.hash)?.ok_or(eyre::eyre!("txhash not found"))?;
+    assert_eq!(tx, tx_by_hash);
+
+    // Can query the tx by hash with info about the block it was included in
+    let (tx, meta) =
+        provider.transaction_by_hash_with_meta(tx.hash)?.ok_or(eyre::eyre!("txhash not found"))?;
+    assert_eq!(tx.hash, meta.tx_hash);
+
+    // Can reverse lookup the key too
+    let id = provider.transaction_id(tx.hash)?.ok_or(eyre::eyre!("txhash not found"))?;
+    assert_eq!(id, txid);
+
+    // Can find the block of a transaction given its key
+    let _block = provider.transaction_block(txid)?;
+
+    // Can query the txs in the range [100, 200)
+    let _txs_by_tx_range = provider.transactions_by_tx_range(100..200)?;
+    // Can query the txs in the _block_ range [100, 200)
+    let _txs_by_block_range = provider.transactions_by_block_range(100..200)?;
+
+    Ok(())
+}
+
+/// The `BlockReader` allows querying the block-related tables.
+fn block_provider_example<T: BlockReader>(provider: T, number: u64) -> eyre::Result<()> {
+    // Can query a block by number
+    let block = provider.block(number.into())?.ok_or(eyre::eyre!("block num not found"))?;
+    assert_eq!(block.number, number);
+
+    // Can query a block with its senders; this is useful when you'd want to execute a block and do
+    // not want to manually recover the senders for each transaction (as each transaction is
+    // stored on disk with its v, r, s but not its `from` field).
+    let block = provider.block(number.into())?.ok_or(eyre::eyre!("block num not found"))?;
+
+    // Can seal the block to cache the hash, like the Header above.
+    let sealed_block = block.clone().seal_slow();
+
+    // Can also query the block by hash directly
+    let block_by_hash =
+        provider.block_by_hash(sealed_block.hash)?.ok_or(eyre::eyre!("block by hash not found"))?;
+    assert_eq!(block, block_by_hash);
+
+    // Or by relying on the internal conversion
+    let block_by_hash2 =
+        provider.block(sealed_block.hash.into())?.ok_or(eyre::eyre!("block by hash not found"))?;
+    assert_eq!(block, block_by_hash2);
+
+    // Or you can also specify the data source. For this provider this always returns `None`, but
+    // the blockchain tree is also able to access pending state not available in the db yet.
+    let block_by_hash3 = provider
+        .find_block_by_hash(sealed_block.hash, BlockSource::Any)?
+        .ok_or(eyre::eyre!("block hash not found"))?;
+    assert_eq!(block, block_by_hash3);
+
+    // Can query the block's ommers/uncles
+    let _ommers = provider.ommers(number.into())?;
+
+    // Can query the block's withdrawals (via the `WithdrawalsProvider`)
+    let _withdrawals =
+        provider.withdrawals_by_block(sealed_block.hash.into(), sealed_block.timestamp)?;
+
+    Ok(())
+}
+
+/// The `ReceiptProvider` allows querying the receipts tables.
+fn receipts_provider_example<T: ReceiptProvider + TransactionsProvider + HeaderProvider>(
+    provider: T,
+) -> eyre::Result<()> {
+    let txid = 5;
+    let header_num = 100;
+
+    // Query a receipt by txid
+    let receipt = provider.receipt(txid)?.ok_or(eyre::eyre!("tx receipt not found"))?;
+
+    // Can query receipt by txhash too
+    let tx = provider.transaction_by_id(txid)?.unwrap();
+    let receipt_by_hash =
+        provider.receipt_by_hash(tx.hash)?.ok_or(eyre::eyre!("tx receipt by hash not found"))?;
+    assert_eq!(receipt, receipt_by_hash);
+
+    // Can query all the receipts in a block
+    let _receipts = provider
+        .receipts_by_block(100.into())?
+        .ok_or(eyre::eyre!("no receipts found for block"))?;
+
+    // Can check if an address/topic filter is present in a header; if it is, we query the block
+    // and receipts and do something with the data
+    // 1. get the bloom from the header
+    let header = provider.header_by_number(header_num)?.unwrap();
+    let bloom = header.logs_bloom;
+
+    // 2. Construct the address/topics filters
+    // For a hypothetical address, we'll want to filter down for a specific indexed topic (e.g.
+    // `from`).
+    let addr = Address::random();
+    let topic = H256::random();
+
+    // TODO: Make it clearer how to choose between topic0 (event name) and the other 3 indexed
+    // topics. This API is a bit clunky and not obvious to use at the moment.
+    let filter = Filter::new().address(addr).topic0(topic);
+    let filter_params = FilteredParams::new(Some(filter));
+    let address_filter = FilteredParams::address_filter(&Some(addr.into()));
+    let topics_filter = FilteredParams::topics_filter(&Some(vec![topic.into()]));
+
+    // 3. If the address & topics filters match do something. We use the outer check against the
+    // bloom filter stored in the header to avoid having to query the receipts table when there
+    // is no instance of any event that matches the filter in the header.
+    if FilteredParams::matches_address(bloom, &address_filter) &&
+        FilteredParams::matches_topics(bloom, &topics_filter)
+    {
+        let receipts = provider.receipt(header_num)?.ok_or(eyre::eyre!("receipt not found"))?;
+        for log in &receipts.logs {
+            if filter_params.filter_address(log) && filter_params.filter_topics(&log) {
+                // Do something with the log e.g. decode it.
+                println!("Matching log found! {log:?}")
+            }
+        }
+    }
+
+    Ok(())
+}
+
+fn state_provider_example<T: StateProvider>(provider: T) -> eyre::Result<()> {
+    let address = Address::random();
+    let storage_key = H256::random();
+
+    // Can get account / storage state with simple point queries
+    let _account = provider.basic_account(address)?;
+    let _code = provider.account_code(address)?;
+    let _storage = provider.storage(address, storage_key)?;
+    // TODO: unimplemented.
+    // let _proof = provider.proof(address, &[])?;
+
+    Ok(())
+}
diff --git a/examples/rpc-db.rs b/examples/rpc-db.rs
new file mode 100644
index 000000000000..d705fa6c4fcf
--- /dev/null
+++ b/examples/rpc-db.rs
@@ -0,0 +1,78 @@
+// Talking to the DB
+use reth_db::open_db_read_only;
+use reth_primitives::ChainSpecBuilder;
+use reth_provider::{providers::BlockchainProvider, ProviderFactory};
+
+// Bringing up the RPC
+use reth_rpc_builder::{
+    RethRpcModule, RpcModuleBuilder, RpcServerConfig, TransportRpcModuleConfig,
+};
+
+// Code which we'd ideally like to not need to import if you're only spinning up
+// read-only parts of the API and do not require access to pending state or to
+// EVM sims
+use reth_beacon_consensus::BeaconConsensus;
+use reth_blockchain_tree::{
+    BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals,
+};
+use reth_revm::Factory as ExecutionFactory;
+// Configuring the network parts; ideally we also wouldn't need to think about this.
+use reth_network_api::test_utils::NoopNetwork;
+use reth_provider::test_utils::TestCanonStateSubscriptions;
+use reth_tasks::TokioTaskExecutor;
+use reth_transaction_pool::test_utils::testing_pool;
+
+use std::{path::Path, sync::Arc};
+
+// Example illustrating how to run the ETH JSON RPC API as standalone over a DB file.
+// TODO: Add example showing how to spin up your own custom RPC namespace alongside
+// the other default namespaces.
+#[tokio::main]
+async fn main() -> eyre::Result<()> {
+    // 1. Setup the DB
+    let db = Arc::new(open_db_read_only(&Path::new(&std::env::var("RETH_DB_PATH")?))?);
+    let spec = Arc::new(ChainSpecBuilder::mainnet().build());
+    let factory = ProviderFactory::new(db.clone(), spec.clone());
+
+    // 2. Setup the blockchain tree to be able to receive live notifs
+    // TODO: Make this easier to configure
+    let provider = {
+        let consensus = Arc::new(BeaconConsensus::new(spec.clone()));
+        let exec_factory = ExecutionFactory::new(spec.clone());
+
+        let externals = TreeExternals::new(db.clone(), consensus, exec_factory, spec.clone());
+        let tree_config = BlockchainTreeConfig::default();
+        let (canon_state_notification_sender, _receiver) =
+            tokio::sync::broadcast::channel(tree_config.max_reorg_depth() as usize * 2);
+
+        let tree = ShareableBlockchainTree::new(BlockchainTree::new(
+            externals,
+            canon_state_notification_sender.clone(),
+            tree_config,
+        )?);
+
+        BlockchainProvider::new(factory, tree)?
+    };
+
+    let noop_pool = testing_pool();
+    let rpc_builder = RpcModuleBuilder::default()
+        .with_provider(provider)
+        // Rest is just defaults
+        // TODO: How do we make this easier to configure?
+        .with_pool(noop_pool)
+        .with_network(NoopNetwork)
+        .with_executor(TokioTaskExecutor::default())
+        .with_events(TestCanonStateSubscriptions::default());
+
+    // Pick which namespaces to expose.
+    let config = TransportRpcModuleConfig::default().with_http([RethRpcModule::Eth]);
+    let server = rpc_builder.build(config);
+
+    // Start the server & keep it alive
+    let server_args =
+        RpcServerConfig::http(Default::default()).with_http_address("0.0.0.0:8545".parse()?);
+    let _handle = server_args.start(server).await?;
+    futures::future::pending::<()>().await;
+
+    Ok(())
+}

From 6919b3dea647c67ee3372bbafdb0bec83be3a60b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?L=C3=A9o=20Vincent?= <28714795+leovct@users.noreply.github.com>
Date: Mon, 3 Jul 2023 14:11:23 +0200
Subject: [PATCH 038/722] chore: run tests with coverage in `Makefile` (#3418)

---
 .gitignore |  5 ++++-
 Makefile   | 17 +++++++++++++++++
 2 files changed, 21 insertions(+), 1 deletion(-)

diff --git a/.gitignore b/.gitignore
index dd772483c51c..c2c144f4a1c6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -35,4 +35,7 @@ dist/
 db-tools/
 
 # VSCode
-.vscode
\ No newline at end of file
+.vscode
+
+# Coverage report
+lcov.info

diff --git a/Makefile b/Makefile
index f024ccf556f0..d49d427b9e87 100644
--- a/Makefile
+++ b/Makefile
@@ -105,6 +105,23 @@ build-release-tarballs: ## Create a series of `.tar.gz` files in the BIN_DIR dir
 
 ##@ Test
 
+UNIT_TEST_ARGS := --locked --workspace --all-features -E 'kind(lib)' -E 'kind(bin)' -E 'kind(proc-macro)'
+COV_FILE := lcov.info
+
+.PHONY: test-unit
+test-unit: ## Run unit tests.
+	cargo nextest run $(UNIT_TEST_ARGS)
+
+.PHONY: cov-unit
+cov-unit: ## Run unit tests with coverage.
+	rm -f $(COV_FILE)
+	cargo llvm-cov nextest --lcov --output-path $(COV_FILE) $(UNIT_TEST_ARGS)
+
+.PHONY: cov-report-html
+cov-report-html: cov-unit ## Generate an HTML coverage report and open it in the browser.
+	cargo llvm-cov report --html
+	open target/llvm-cov/html/index.html
+
 # Downloads and unpacks Ethereum Foundation tests in the `$(EF_TESTS_DIR)` directory.
 #
 # Requires `wget` and `tar`

From 764e58d65d12c4f982529921d007b4beb13ffb5e Mon Sep 17 00:00:00 2001
From: pistomat
Date: Mon, 3 Jul 2023 14:21:36 +0200
Subject: [PATCH 039/722] fix: create a test db with path (#3482)

---
 crates/stages/src/test_utils/test_db.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/crates/stages/src/test_utils/test_db.rs b/crates/stages/src/test_utils/test_db.rs
index d5358c302713..e3ad10d15c30 100644
--- a/crates/stages/src/test_utils/test_db.rs
+++ b/crates/stages/src/test_utils/test_db.rs
@@ -5,7 +5,7 @@ use reth_db::{
     models::{AccountBeforeTx, StoredBlockBodyIndices},
     table::Table,
     tables,
-    test_utils::create_test_rw_db,
+    test_utils::{create_test_rw_db, create_test_rw_db_with_path},
     transaction::{DbTx, DbTxGAT, DbTxMut, DbTxMutGAT},
     DatabaseEnv, DatabaseError as DbError,
 };
@@ -47,7 +47,7 @@ impl Default for TestTransaction {
 
 impl TestTransaction {
     pub fn new(path: &Path) -> Self {
-        let tx = create_test_rw_db();
+        let tx = create_test_rw_db_with_path(path);
         Self {
             tx: tx.clone(),
             path: Some(path.to_path_buf()),

From 4d3ce3490160500ca93f8ee098b7e94a0a6a26bd Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Mon, 3 Jul 2023 14:40:15 +0200
Subject: [PATCH 040/722] docs: add payload builder example (#3545)

---
 crates/payload/builder/src/lib.rs | 82 +++++++++++++++++++++++++++++--
 1 file changed, 79 insertions(+), 3 deletions(-)

diff --git a/crates/payload/builder/src/lib.rs b/crates/payload/builder/src/lib.rs
index 8bc35c469c5e..fdc57c913b97 100644
--- a/crates/payload/builder/src/lib.rs
+++ b/crates/payload/builder/src/lib.rs
@@ -16,13 +16,89 @@
     attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables))
 ))]
 
-//! This trait implements the [PayloadBuilderService] responsible for managing payload jobs.
-//!
-//! It Defines the abstractions to create and update payloads:
+//! This crate defines the abstractions to create and update payloads:
 //! - [PayloadJobGenerator]: a type that knows how to create new jobs for creating payloads based
 //!   on [PayloadAttributes](reth_rpc_types::engine::PayloadAttributes).
 //! - [PayloadJob]: a type that can yield (better) payloads over time.
 //!
+//! This crate comes with the generic [PayloadBuilderService] responsible for managing payload jobs.
+//!
+//! ## Node integration
+//!
+//! In a standard node the [PayloadBuilderService] sits downstream of the engine API or rather the
+//! component that handles requests from the Beacon Node like `engine_forkchoiceUpdatedV1`.
+//! Payload building is enabled if the forkchoice update request contains payload attributes.
+//! See also
+//! If the forkchoice update request is VALID and contains payload attributes the
+//! [PayloadBuilderService] will create a new [PayloadJob] via the [PayloadJobGenerator] and start
+//! polling it until the payload is requested by the CL and the payload job is resolved:
+//! [PayloadJob::resolve]
+//!
+//! ## Example
+//!
+//! A simple example of a [PayloadJobGenerator] that creates empty blocks:
+//!
+//! ```
+//! use std::future::Future;
+//! use std::pin::Pin;
+//! use std::sync::Arc;
+//! use std::task::{Context, Poll};
+//! use reth_payload_builder::{BuiltPayload, KeepPayloadJobAlive, PayloadBuilderAttributes, PayloadJob, PayloadJobGenerator};
+//! use reth_payload_builder::error::PayloadBuilderError;
+//! use reth_primitives::{Block, Header, U256};
+//!
+//! /// The generator type that creates new jobs that build empty blocks.
+//! pub struct EmptyBlockPayloadJobGenerator;
+//!
+//! impl PayloadJobGenerator for EmptyBlockPayloadJobGenerator {
+//!     type Job = EmptyBlockPayloadJob;
+//!
+//!     /// This is invoked when the node receives payload attributes from the beacon node via `engine_forkchoiceUpdatedV1`
+//!     fn new_payload_job(&self, attr: PayloadBuilderAttributes) -> Result<Self::Job, PayloadBuilderError> {
+//!         Ok(EmptyBlockPayloadJob { attributes: attr })
+//!     }
+//! }
+//!
+//! /// A [PayloadJob] that builds empty blocks.
+//! pub struct EmptyBlockPayloadJob {
+//!     attributes: PayloadBuilderAttributes,
+//! }
+//!
+//! impl PayloadJob for EmptyBlockPayloadJob {
+//!     type ResolvePayloadFuture = futures_util::future::Ready<Result<Arc<BuiltPayload>, PayloadBuilderError>>;
+//!
+//!     fn best_payload(&self) -> Result<Arc<BuiltPayload>, PayloadBuilderError> {
+//!         // NOTE: some fields are omitted here for brevity
+//!         let payload = Block {
+//!             header: Header {
+//!                 parent_hash: self.attributes.parent,
+//!                 timestamp: self.attributes.timestamp,
+//!                 beneficiary: self.attributes.suggested_fee_recipient,
+//!                 ..Default::default()
+//!             },
+//!             ..Default::default()
+//!         };
+//!         let payload = BuiltPayload::new(self.attributes.id, payload.seal_slow(), U256::ZERO);
+//!         Ok(Arc::new(payload))
+//!     }
+//!
+//!     fn resolve(&mut self) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) {
+//!         let payload = self.best_payload();
+//!         (futures_util::future::ready(payload), KeepPayloadJobAlive::No)
+//!     }
+//! }
+//!
+//! /// A [PayloadJob] is a future that's being polled by the `PayloadBuilderService`
+//! impl Future for EmptyBlockPayloadJob {
+//!     type Output = Result<(), PayloadBuilderError>;
+//!
+//!     fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+//!         Poll::Pending
+//!     }
+//! }
+//! ```
+//!
 //! ## Feature Flags
 //!
 //! - `test-utils`: Export utilities for testing

From 770652a787b78b4965110ed29d2612c7dbdc0cae Mon Sep 17 00:00:00 2001
From: Alexey Shekhirin
Date: Mon, 3 Jul 2023 13:45:32 +0100
Subject: [PATCH 041/722] feat(bin, storage): configurable MDBX log level
 (#3524)

Co-authored-by: Georgios Konstantopoulos
---
 Cargo.lock                                    |  1 +
 bin/reth/Cargo.toml                           |  2 +-
 bin/reth/src/args/database_args.rs            | 13 +++
 bin/reth/src/args/debug_args.rs               |  2 +-
 bin/reth/src/args/gas_price_oracle_args.rs    |  2 +-
 bin/reth/src/args/mod.rs                      |  4 +
 bin/reth/src/chain/import.rs                  |  7 +-
 bin/reth/src/chain/init.rs                    |  7 +-
 bin/reth/src/db/mod.rs                        | 13 +--
 bin/reth/src/debug_cmd/execution.rs           |  7 +-
 bin/reth/src/debug_cmd/merkle.rs              |  7 +-
 bin/reth/src/node/mod.rs                      |  7 +-
 bin/reth/src/p2p/mod.rs                       | 13 +--
 bin/reth/src/stage/drop.rs                    | 17 ++--
 bin/reth/src/stage/dump/mod.rs                |  9 +-
 bin/reth/src/stage/run.rs                     |  7 +-
 bin/reth/src/stage/unwind.rs                  |  7 +-
 crates/interfaces/Cargo.toml                  |  2 +
 crates/interfaces/src/db.rs                   | 25 ++++++
 .../storage/db/src/implementation/mdbx/mod.rs | 88 +++++++++++++------
 .../storage/db/src/implementation/mdbx/tx.rs  |  2 +-
 crates/storage/db/src/lib.rs                  | 29 +++---
 crates/storage/libmdbx-rs/benches/cursor.rs   |  2 +-
 crates/storage/libmdbx-rs/src/environment.rs  | 14 +++
 crates/storage/libmdbx-rs/src/lib.rs          |  4 +-
 .../provider/src/providers/database/mod.rs    |  6 +-
 examples/db-access.rs                         |  2 +-
 examples/rpc-db.rs                            |  2 +-
 28 files changed, 215 insertions(+), 86 deletions(-)
 create mode 100644 bin/reth/src/args/database_args.rs

diff --git a/Cargo.lock b/Cargo.lock
index 4383cd628021..1fa48941cc26 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5321,6 +5321,7 @@ dependencies = [
  "arbitrary",
  "async-trait",
  "auto_impl",
+ "clap 4.1.8",
  "futures",
  "hex-literal 0.3.4",
  "modular-bitfield",
diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml
index
d9955fa8910d..0c1d7f881a74 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -18,7 +18,7 @@ reth-revm = { path = "../../crates/revm" } reth-revm-inspectors = { path = "../../crates/revm/revm-inspectors" } reth-staged-sync = { path = "../../crates/staged-sync" } reth-stages = { path = "../../crates/stages" } -reth-interfaces = { workspace = true, features = ["test-utils"] } +reth-interfaces = { workspace = true, features = ["test-utils", "clap"] } reth-transaction-pool = { workspace = true } reth-beacon-consensus = { path = "../../crates/consensus/beacon" } reth-auto-seal-consensus = { path = "../../crates/consensus/auto-seal" } diff --git a/bin/reth/src/args/database_args.rs b/bin/reth/src/args/database_args.rs new file mode 100644 index 000000000000..b94c36d7eeb7 --- /dev/null +++ b/bin/reth/src/args/database_args.rs @@ -0,0 +1,13 @@ +//! clap [Args](clap::Args) for database configuration + +use clap::Args; +use reth_interfaces::db::LogLevel; + +/// Parameters for database configuration +#[derive(Debug, Args, PartialEq, Default, Clone, Copy)] +#[command(next_help_heading = "Database")] +pub struct DatabaseArgs { + /// Database logging level. Levels higher than "notice" require a debug build. + #[arg(long = "db.log-level", value_enum)] + pub log_level: Option, +} diff --git a/bin/reth/src/args/debug_args.rs b/bin/reth/src/args/debug_args.rs index fd820271b161..aecb693b060b 100644 --- a/bin/reth/src/args/debug_args.rs +++ b/bin/reth/src/args/debug_args.rs @@ -5,7 +5,7 @@ use reth_primitives::{TxHash, H256}; /// Parameters for debugging purposes #[derive(Debug, Args, PartialEq, Default)] -#[command(next_help_heading = "Rpc")] +#[command(next_help_heading = "Debug")] pub struct DebugArgs { /// Prompt the downloader to download blocks one at a time. /// diff --git a/bin/reth/src/args/gas_price_oracle_args.rs b/bin/reth/src/args/gas_price_oracle_args.rs index 4c09903637eb..d56c8ec8cd7f 100644 --- a/bin/reth/src/args/gas_price_oracle_args.rs +++ b/bin/reth/src/args/gas_price_oracle_args.rs @@ -2,7 +2,7 @@ use clap::Args; /// Parameters to configure Gas Price Oracle #[derive(Debug, Args, PartialEq, Eq, Default)] -#[command(next_help_heading = "GAS PRICE ORACLE")] +#[command(next_help_heading = "Gas Price Oracle")] pub struct GasPriceOracleArgs { /// Number of recent blocks to check for gas price #[arg(long = "gpo.blocks", default_value = "20")] diff --git a/bin/reth/src/args/mod.rs b/bin/reth/src/args/mod.rs index ba26ce04bbc4..a732c5c7cc7c 100644 --- a/bin/reth/src/args/mod.rs +++ b/bin/reth/src/args/mod.rs @@ -12,6 +12,10 @@ pub use rpc_server_args::RpcServerArgs; mod debug_args; pub use debug_args::DebugArgs; +/// DatabaseArgs struct for configuring the database +mod database_args; +pub use database_args::DatabaseArgs; + mod secret_key; pub use secret_key::{get_secret_key, SecretKeyError}; diff --git a/bin/reth/src/chain/import.rs b/bin/reth/src/chain/import.rs index ef430a81d8c4..e18e7716a65a 100644 --- a/bin/reth/src/chain/import.rs +++ b/bin/reth/src/chain/import.rs @@ -9,7 +9,7 @@ use futures::{Stream, StreamExt}; use reth_beacon_consensus::BeaconConsensus; use reth_provider::{ProviderFactory, StageCheckpointReader}; -use crate::args::utils::genesis_value_parser; +use crate::args::{utils::genesis_value_parser, DatabaseArgs}; use reth_config::Config; use reth_db::{database::Database, init_db}; use reth_downloaders::{ @@ -64,6 +64,9 @@ pub struct ImportCommand { )] chain: Arc, + #[clap(flatten)] + db: DatabaseArgs, + /// The path to a block file for import. 
/// /// The online stages (headers and bodies) are replaced by a file import, after which the @@ -87,7 +90,7 @@ impl ImportCommand { let db_path = data_dir.db_path(); info!(target: "reth::cli", path = ?db_path, "Opening database"); - let db = Arc::new(init_db(db_path)?); + let db = Arc::new(init_db(db_path, self.db.log_level)?); info!(target: "reth::cli", "Database opened"); debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis"); diff --git a/bin/reth/src/chain/init.rs b/bin/reth/src/chain/init.rs index 1a920f78084d..3c2c73015a02 100644 --- a/bin/reth/src/chain/init.rs +++ b/bin/reth/src/chain/init.rs @@ -1,5 +1,5 @@ use crate::{ - args::utils::genesis_value_parser, + args::{utils::genesis_value_parser, DatabaseArgs}, dirs::{DataDirPath, MaybePlatformPath}, }; use clap::Parser; @@ -38,6 +38,9 @@ pub struct InitCommand { value_parser = genesis_value_parser )] chain: Arc, + + #[clap(flatten)] + db: DatabaseArgs, } impl InitCommand { @@ -49,7 +52,7 @@ impl InitCommand { let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); let db_path = data_dir.db_path(); info!(target: "reth::cli", path = ?db_path, "Opening database"); - let db = Arc::new(init_db(&db_path)?); + let db = Arc::new(init_db(&db_path, self.db.log_level)?); info!(target: "reth::cli", "Database opened"); info!(target: "reth::cli", "Writing genesis block"); diff --git a/bin/reth/src/db/mod.rs b/bin/reth/src/db/mod.rs index 1ce1bc3262de..ee2e0d12ab53 100644 --- a/bin/reth/src/db/mod.rs +++ b/bin/reth/src/db/mod.rs @@ -1,6 +1,6 @@ //! Database debugging tool use crate::{ - args::utils::genesis_value_parser, + args::{utils::genesis_value_parser, DatabaseArgs}, dirs::{DataDirPath, MaybePlatformPath}, utils::DbTool, }; @@ -53,6 +53,9 @@ pub struct Command { )] chain: Arc, + #[clap(flatten)] + db: DatabaseArgs, + #[clap(subcommand)] command: Subcommands, } @@ -84,7 +87,7 @@ impl Command { match self.command { // TODO: We'll need to add this on the DB trait. Subcommands::Stats { .. } => { - let db = open_db_read_only(&db_path)?; + let db = open_db_read_only(&db_path, self.db.log_level)?; let tool = DbTool::new(&db, self.chain.clone())?; let mut stats_table = ComfyTable::new(); stats_table.load_preset(comfy_table::presets::ASCII_MARKDOWN); @@ -135,17 +138,17 @@ impl Command { println!("{stats_table}"); } Subcommands::List(command) => { - let db = open_db_read_only(&db_path)?; + let db = open_db_read_only(&db_path, self.db.log_level)?; let tool = DbTool::new(&db, self.chain.clone())?; command.execute(&tool)?; } Subcommands::Get(command) => { - let db = open_db_read_only(&db_path)?; + let db = open_db_read_only(&db_path, self.db.log_level)?; let tool = DbTool::new(&db, self.chain.clone())?; command.execute(&tool)?; } Subcommands::Drop => { - let db = open_db(&db_path)?; + let db = open_db(&db_path, self.db.log_level)?; let mut tool = DbTool::new(&db, self.chain.clone())?; tool.drop(db_path)?; } diff --git a/bin/reth/src/debug_cmd/execution.rs b/bin/reth/src/debug_cmd/execution.rs index d6fc51add656..fda560715130 100644 --- a/bin/reth/src/debug_cmd/execution.rs +++ b/bin/reth/src/debug_cmd/execution.rs @@ -1,6 +1,6 @@ //! Command for debugging execution. 
use crate::{ - args::{get_secret_key, utils::genesis_value_parser, NetworkArgs}, + args::{get_secret_key, utils::genesis_value_parser, DatabaseArgs, NetworkArgs}, dirs::{DataDirPath, MaybePlatformPath}, node::events, runner::CliContext, @@ -75,6 +75,9 @@ pub struct Command { #[clap(flatten)] network: NetworkArgs, + #[clap(flatten)] + db: DatabaseArgs, + /// Set the chain tip manually for testing purposes. /// /// NOTE: This is a temporary flag @@ -201,7 +204,7 @@ impl Command { let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); let db_path = data_dir.db_path(); fs::create_dir_all(&db_path)?; - let db = Arc::new(init_db(db_path)?); + let db = Arc::new(init_db(db_path, self.db.log_level)?); debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis"); init_genesis(db.clone(), self.chain.clone())?; diff --git a/bin/reth/src/debug_cmd/merkle.rs b/bin/reth/src/debug_cmd/merkle.rs index 5cfbe54fa3af..1188dbfcb675 100644 --- a/bin/reth/src/debug_cmd/merkle.rs +++ b/bin/reth/src/debug_cmd/merkle.rs @@ -1,6 +1,6 @@ //! Command for debugging merkle trie calculation. use crate::{ - args::utils::genesis_value_parser, + args::{utils::genesis_value_parser, DatabaseArgs}, dirs::{DataDirPath, MaybePlatformPath}, }; use clap::Parser; @@ -50,6 +50,9 @@ pub struct Command { )] chain: Arc, + #[clap(flatten)] + db: DatabaseArgs, + /// The height to finish at #[arg(long)] to: u64, @@ -67,7 +70,7 @@ impl Command { let db_path = data_dir.db_path(); fs::create_dir_all(&db_path)?; - let db = Arc::new(init_db(db_path)?); + let db = Arc::new(init_db(db_path, self.db.log_level)?); let factory = ProviderFactory::new(&db, self.chain.clone()); let provider_rw = factory.provider_rw().map_err(PipelineError::Interface)?; diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 905bf32d0449..18e13ce2cfb0 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -69,7 +69,7 @@ use tracing::*; use crate::{ args::{ utils::{genesis_value_parser, parse_socket_address}, - PayloadBuilderArgs, + DatabaseArgs, PayloadBuilderArgs, }, dirs::MaybePlatformPath, node::cl_events::ConsensusLayerHealthEvents, @@ -138,6 +138,9 @@ pub struct Command { #[clap(flatten)] debug: DebugArgs, + #[clap(flatten)] + db: DatabaseArgs, + /// Automatically mine blocks for new transactions #[arg(long)] auto_mine: bool, @@ -163,7 +166,7 @@ impl Command { let db_path = data_dir.db_path(); info!(target: "reth::cli", path = ?db_path, "Opening database"); - let db = Arc::new(init_db(&db_path)?); + let db = Arc::new(init_db(&db_path, self.db.log_level)?); info!(target: "reth::cli", "Database opened"); self.start_metrics_endpoint(Arc::clone(&db)).await?; diff --git a/bin/reth/src/p2p/mod.rs b/bin/reth/src/p2p/mod.rs index c853bedafa92..4fc87b294cac 100644 --- a/bin/reth/src/p2p/mod.rs +++ b/bin/reth/src/p2p/mod.rs @@ -3,7 +3,7 @@ use crate::{ args::{ get_secret_key, utils::{chain_spec_value_parser, hash_or_num_value_parser}, - DiscoveryArgs, + DatabaseArgs, DiscoveryArgs, }, dirs::{DataDirPath, MaybePlatformPath}, utils::get_single_header, @@ -74,11 +74,14 @@ pub struct Command { #[arg(long, default_value = "5")] retries: usize, - #[clap(subcommand)] - command: Subcommands, - #[arg(long, default_value = "any")] nat: NatResolver, + + #[clap(flatten)] + db: DatabaseArgs, + + #[clap(subcommand)] + command: Subcommands, } #[derive(Subcommand, Debug)] @@ -101,7 +104,7 @@ impl Command { /// Execute `p2p` command pub async fn execute(&self) -> eyre::Result<()> { let 
tempdir = tempfile::TempDir::new()?; - let noop_db = Arc::new(open_db(&tempdir.into_path())?); + let noop_db = Arc::new(open_db(&tempdir.into_path(), self.db.log_level)?); // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); diff --git a/bin/reth/src/stage/drop.rs b/bin/reth/src/stage/drop.rs index efdbaa7b86b6..2963c643867f 100644 --- a/bin/reth/src/stage/drop.rs +++ b/bin/reth/src/stage/drop.rs @@ -1,6 +1,6 @@ //! Database debugging tool use crate::{ - args::{utils::genesis_value_parser, StageEnum}, + args::{utils::genesis_value_parser, DatabaseArgs, StageEnum}, dirs::{DataDirPath, MaybePlatformPath}, utils::DbTool, }; @@ -33,14 +33,17 @@ pub struct Command { /// - goerli /// - sepolia #[arg( - long, - value_name = "CHAIN_OR_PATH", - verbatim_doc_comment, - default_value = "mainnet", - value_parser = genesis_value_parser + long, + value_name = "CHAIN_OR_PATH", + verbatim_doc_comment, + default_value = "mainnet", + value_parser = genesis_value_parser )] chain: Arc, + #[clap(flatten)] + db: DatabaseArgs, + stage: StageEnum, } @@ -52,7 +55,7 @@ impl Command { let db_path = data_dir.db_path(); fs::create_dir_all(&db_path)?; - let db = open_db(db_path.as_ref())?; + let db = open_db(db_path.as_ref(), self.db.log_level)?; let tool = DbTool::new(&db, self.chain.clone())?; diff --git a/bin/reth/src/stage/dump/mod.rs b/bin/reth/src/stage/dump/mod.rs index d9a33ccc6138..14c5fd259002 100644 --- a/bin/reth/src/stage/dump/mod.rs +++ b/bin/reth/src/stage/dump/mod.rs @@ -22,7 +22,7 @@ mod execution; use execution::dump_execution_stage; mod merkle; -use crate::args::utils::genesis_value_parser; +use crate::args::{utils::genesis_value_parser, DatabaseArgs}; use merkle::dump_merkle_stage; /// `reth dump-stage` command @@ -55,6 +55,9 @@ pub struct Command { )] chain: Arc, + #[clap(flatten)] + db: DatabaseArgs, + #[clap(subcommand)] command: Stages, } @@ -98,7 +101,7 @@ impl Command { let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); let db_path = data_dir.db_path(); info!(target: "reth::cli", path = ?db_path, "Opening database"); - let db = Arc::new(init_db(db_path)?); + let db = Arc::new(init_db(db_path, self.db.log_level)?); info!(target: "reth::cli", "Database opened"); let mut tool = DbTool::new(&db, self.chain.clone())?; @@ -134,7 +137,7 @@ pub(crate) fn setup( info!(target: "reth::cli", ?output_db, "Creating separate db"); - let output_db = init_db(output_db)?; + let output_db = init_db(output_db, None)?; output_db.update(|tx| { tx.import_table_with_range::( diff --git a/bin/reth/src/stage/run.rs b/bin/reth/src/stage/run.rs index aea66ec0d841..23166e595a28 100644 --- a/bin/reth/src/stage/run.rs +++ b/bin/reth/src/stage/run.rs @@ -2,7 +2,7 @@ //! //! Stage debugging tool use crate::{ - args::{get_secret_key, utils::chain_spec_value_parser, NetworkArgs, StageEnum}, + args::{get_secret_key, utils::chain_spec_value_parser, DatabaseArgs, NetworkArgs, StageEnum}, dirs::{DataDirPath, MaybePlatformPath}, prometheus_exporter, version::SHORT_VERSION, @@ -92,6 +92,9 @@ pub struct Command { #[clap(flatten)] network: NetworkArgs, + #[clap(flatten)] + db: DatabaseArgs, + /// Commits the changes in the database. WARNING: potentially destructive. /// /// Useful when you want to run diagnostics on the database. 
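For reference, a minimal sketch of the `DatabaseArgs` group that these commands flatten in. The group itself is defined outside this patch series, so apart from the `log_level` field referenced by the `self.db.log_level` call sites above, the flag name and doc text here are assumptions:

```rust
use clap::Args;
use reth_interfaces::db::LogLevel;

/// Hypothetical sketch of the flattened database options shared by the commands above.
#[derive(Debug, Args, Default)]
pub struct DatabaseArgs {
    /// Database logging level. Levels above `notice` are assumed to require a
    /// libmdbx build with `MDBX_DEBUG` (see the mdbx changes later in this patch).
    #[arg(long = "db.log-level", value_enum)]
    pub log_level: Option<LogLevel>,
}
```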
@@ -119,7 +122,7 @@ impl Command { let db_path = data_dir.db_path(); info!(target: "reth::cli", path = ?db_path, "Opening database"); - let db = Arc::new(init_db(db_path)?); + let db = Arc::new(init_db(db_path, self.db.log_level)?); info!(target: "reth::cli", "Database opened"); let factory = ProviderFactory::new(&db, self.chain.clone()); diff --git a/bin/reth/src/stage/unwind.rs b/bin/reth/src/stage/unwind.rs index 9300f598af51..d88346bc714c 100644 --- a/bin/reth/src/stage/unwind.rs +++ b/bin/reth/src/stage/unwind.rs @@ -1,7 +1,7 @@ //! Unwinding a certain block range use crate::{ - args::utils::genesis_value_parser, + args::{utils::genesis_value_parser, DatabaseArgs}, dirs::{DataDirPath, MaybePlatformPath}, }; use clap::{Parser, Subcommand}; @@ -41,6 +41,9 @@ pub struct Command { )] chain: Arc, + #[clap(flatten)] + db: DatabaseArgs, + #[clap(subcommand)] command: Subcommands, } @@ -55,7 +58,7 @@ impl Command { eyre::bail!("Database {db_path:?} does not exist.") } - let db = open_db(db_path.as_ref())?; + let db = open_db(db_path.as_ref(), self.db.log_level)?; let range = self.command.unwind_range(&db)?; diff --git a/crates/interfaces/Cargo.toml b/crates/interfaces/Cargo.toml index d358cdef0074..be012c7aaf5d 100644 --- a/crates/interfaces/Cargo.toml +++ b/crates/interfaces/Cargo.toml @@ -38,6 +38,7 @@ secp256k1 = { workspace = true, default-features = false, features = [ ], optional = true } modular-bitfield = "0.11.2" parking_lot = "0.12.1" +clap = { version = "4", features = ["derive"], optional = true } [dev-dependencies] reth-db = { path = "../storage/db", features = ["test-utils"] } @@ -53,3 +54,4 @@ secp256k1 = { workspace = true, features = [ [features] test-utils = ["tokio-stream/sync", "secp256k1", "rand/std_rng"] +cli = ["clap"] \ No newline at end of file diff --git a/crates/interfaces/src/db.rs b/crates/interfaces/src/db.rs index 3b814c0598a1..5249a0ed5cc3 100644 --- a/crates/interfaces/src/db.rs +++ b/crates/interfaces/src/db.rs @@ -31,4 +31,29 @@ pub enum DatabaseError { /// Failed to get database stats. #[error("Database stats error code: {0:?}")] Stats(i32), + /// Failed to use the specified log level, as it's not available. + #[error("Log level is not available: {0:?}")] + LogLevelUnavailable(LogLevel), +} + +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +#[cfg_attr(feature = "clap", derive(clap::ValueEnum))] +/// Database log level. +pub enum LogLevel { + /// Enables logging for critical conditions, i.e. assertion failures. + Fatal, + /// Enables logging for error conditions. + Error, + /// Enables logging for warning conditions. + Warn, + /// Enables logging for normal but significant condition. + Notice, + /// Enables logging for verbose informational. + Verbose, + /// Enables logging for debug-level messages. + Debug, + /// Enables logging for trace debug-level messages. + Trace, + /// Enables logging for extra debug-level messages. + Extra, } diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 4bf66c8b4209..9bf544279437 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -6,6 +6,7 @@ use crate::{ utils::default_page_size, DatabaseError, }; +use reth_interfaces::db::LogLevel; use reth_libmdbx::{ DatabaseFlags, Environment, EnvironmentFlags, EnvironmentKind, Geometry, Mode, PageSize, SyncMode, RO, RW, @@ -61,37 +62,66 @@ impl Env { /// Opens the database at the specified path with the given `EnvKind`. 
/// /// It does not create the tables, for that call [`Env::create_tables`]. - pub fn open(path: &Path, kind: EnvKind) -> Result, DatabaseError> { + pub fn open( + path: &Path, + kind: EnvKind, + log_level: Option, + ) -> Result, DatabaseError> { let mode = match kind { EnvKind::RO => Mode::ReadOnly, EnvKind::RW => Mode::ReadWrite { sync_mode: SyncMode::Durable }, }; - let env = Env { - inner: Environment::new() - .set_max_dbs(Tables::ALL.len()) - .set_geometry(Geometry { - // Maximum database size of 4 terabytes - size: Some(0..(4 * TERABYTE)), - // We grow the database in increments of 4 gigabytes - growth_step: Some(4 * GIGABYTE as isize), - // The database never shrinks - shrink_threshold: None, - page_size: Some(PageSize::Set(default_page_size())), - }) - .set_flags(EnvironmentFlags { - mode, - // We disable readahead because it improves performance for linear scans, but - // worsens it for random access (which is our access pattern outside of sync) - no_rdahead: true, - coalesce: true, - ..Default::default() - }) - // configure more readers - .set_max_readers(DEFAULT_MAX_READERS) - .open(path) - .map_err(|e| DatabaseError::FailedToOpen(e.into()))?, - }; + let mut inner_env = Environment::new(); + inner_env.set_max_dbs(Tables::ALL.len()); + inner_env.set_geometry(Geometry { + // Maximum database size of 4 terabytes + size: Some(0..(4 * TERABYTE)), + // We grow the database in increments of 4 gigabytes + growth_step: Some(4 * GIGABYTE as isize), + // The database never shrinks + shrink_threshold: None, + page_size: Some(PageSize::Set(default_page_size())), + }); + inner_env.set_flags(EnvironmentFlags { + mode, + // We disable readahead because it improves performance for linear scans, but + // worsens it for random access (which is our access pattern outside of sync) + no_rdahead: true, + coalesce: true, + ..Default::default() + }); + // configure more readers + inner_env.set_max_readers(DEFAULT_MAX_READERS); + + if let Some(log_level) = log_level { + // Levels higher than [LogLevel::Notice] require libmdbx built with `MDBX_DEBUG` option. + let is_log_level_available = if cfg!(debug_assertions) { + true + } else { + matches!( + log_level, + LogLevel::Fatal | LogLevel::Error | LogLevel::Warn | LogLevel::Notice + ) + }; + if is_log_level_available { + inner_env.set_log_level(match log_level { + LogLevel::Fatal => 0, + LogLevel::Error => 1, + LogLevel::Warn => 2, + LogLevel::Notice => 3, + LogLevel::Verbose => 4, + LogLevel::Debug => 5, + LogLevel::Trace => 6, + LogLevel::Extra => 7, + }); + } else { + return Err(DatabaseError::LogLevelUnavailable(log_level)) + } + } + + let env = + Env { inner: inner_env.open(path).map_err(|e| DatabaseError::FailedToOpen(e.into()))? 
}; Ok(env) } @@ -117,7 +147,7 @@ impl Env { } impl Deref for Env { - type Target = reth_libmdbx::Environment; + type Target = Environment; fn deref(&self) -> &Self::Target { &self.inner @@ -151,7 +181,7 @@ mod tests { /// Create database for testing with specified path fn create_test_db_with_path(kind: EnvKind, path: &Path) -> Env { - let env = Env::::open(path, kind).expect(ERROR_DB_CREATION); + let env = Env::::open(path, kind, None).expect(ERROR_DB_CREATION); env.create_tables().expect(ERROR_TABLE_CREATION); env } @@ -746,7 +776,7 @@ mod tests { assert!(result.expect(ERROR_RETURN_VALUE) == 200); } - let env = Env::::open(&path, EnvKind::RO).expect(ERROR_DB_CREATION); + let env = Env::::open(&path, EnvKind::RO, None).expect(ERROR_DB_CREATION); // GET let result = diff --git a/crates/storage/db/src/implementation/mdbx/tx.rs b/crates/storage/db/src/implementation/mdbx/tx.rs index 5644723b9d65..3a49cc04a97a 100644 --- a/crates/storage/db/src/implementation/mdbx/tx.rs +++ b/crates/storage/db/src/implementation/mdbx/tx.rs @@ -8,7 +8,7 @@ use crate::{ DatabaseError, }; use parking_lot::RwLock; -use reth_libmdbx::{EnvironmentKind, Transaction, TransactionKind, WriteFlags, DBI, RW}; +use reth_libmdbx::{ffi::DBI, EnvironmentKind, Transaction, TransactionKind, WriteFlags, RW}; use reth_metrics::metrics::{self, histogram}; use std::{marker::PhantomData, str::FromStr, sync::Arc, time::Instant}; diff --git a/crates/storage/db/src/lib.rs b/crates/storage/db/src/lib.rs index 511d5d2ac1c0..76650de0385c 100644 --- a/crates/storage/db/src/lib.rs +++ b/crates/storage/db/src/lib.rs @@ -100,11 +100,12 @@ pub type DatabaseEnv = Env; pub type DatabaseEnvRO = Env; use eyre::WrapErr; +use reth_interfaces::db::LogLevel; use std::path::Path; /// Opens up an existing database or creates a new one at the specified path. Creates tables if /// necessary. Read/Write mode. -pub fn init_db>(path: P) -> eyre::Result { +pub fn init_db>(path: P, log_level: Option) -> eyre::Result { use crate::version::{check_db_version_file, create_db_version_file, DatabaseVersionError}; let rpath = path.as_ref(); @@ -121,7 +122,7 @@ pub fn init_db>(path: P) -> eyre::Result { } #[cfg(feature = "mdbx")] { - let db = DatabaseEnv::open(rpath, EnvKind::RW)?; + let db = DatabaseEnv::open(rpath, EnvKind::RW, log_level)?; db.create_tables()?; Ok(db) } @@ -132,10 +133,10 @@ pub fn init_db>(path: P) -> eyre::Result { } /// Opens up an existing database. Read only mode. It doesn't create it or create tables if missing. -pub fn open_db_read_only(path: &Path) -> eyre::Result { +pub fn open_db_read_only(path: &Path, log_level: Option) -> eyre::Result { #[cfg(feature = "mdbx")] { - Env::::open(path, mdbx::EnvKind::RO) + Env::::open(path, EnvKind::RO, log_level) .with_context(|| format!("Could not open database at path: {}", path.display())) } #[cfg(not(feature = "mdbx"))] @@ -146,10 +147,10 @@ pub fn open_db_read_only(path: &Path) -> eyre::Result { /// Opens up an existing database. Read/Write mode. It doesn't create it or create tables if /// missing. 
-pub fn open_db(path: &Path) -> eyre::Result { +pub fn open_db(path: &Path, log_level: Option) -> eyre::Result { #[cfg(feature = "mdbx")] { - Env::::open(path, mdbx::EnvKind::RW) + Env::::open(path, EnvKind::RW, log_level) .with_context(|| format!("Could not open database at path: {}", path.display())) } #[cfg(not(feature = "mdbx"))] @@ -176,23 +177,23 @@ pub mod test_utils { /// Create read/write database for testing pub fn create_test_rw_db() -> Arc { Arc::new( - init_db(tempfile::TempDir::new().expect(ERROR_TEMPDIR).into_path()) + init_db(tempfile::TempDir::new().expect(ERROR_TEMPDIR).into_path(), None) .expect(ERROR_DB_CREATION), ) } /// Create read/write database for testing pub fn create_test_rw_db_with_path>(path: P) -> Arc { - Arc::new(init_db(path.as_ref()).expect(ERROR_DB_CREATION)) + Arc::new(init_db(path.as_ref(), None).expect(ERROR_DB_CREATION)) } /// Create read only database for testing pub fn create_test_ro_db() -> Arc { let path = tempfile::TempDir::new().expect(ERROR_TEMPDIR).into_path(); { - init_db(path.as_path()).expect(ERROR_DB_CREATION); + init_db(path.as_path(), None).expect(ERROR_DB_CREATION); } - Arc::new(open_db_read_only(path.as_path()).expect(ERROR_DB_OPEN)) + Arc::new(open_db_read_only(path.as_path(), None).expect(ERROR_DB_OPEN)) } } @@ -211,13 +212,13 @@ mod tests { // Database is empty { - let db = init_db(&path); + let db = init_db(&path, None); assert_matches!(db, Ok(_)); } // Database is not empty, current version is the same as in the file { - let db = init_db(&path); + let db = init_db(&path, None); assert_matches!(db, Ok(_)); } @@ -225,7 +226,7 @@ mod tests { { std::fs::write(path.path().join(db_version_file_path(&path)), "invalid-version") .unwrap(); - let db = init_db(&path); + let db = init_db(&path, None); assert!(db.is_err()); assert_matches!( db.unwrap_err().downcast_ref::(), @@ -236,7 +237,7 @@ mod tests { // Database is not empty, version file contains not matching version { std::fs::write(path.path().join(db_version_file_path(&path)), "0").unwrap(); - let db = init_db(&path); + let db = init_db(&path, None); assert!(db.is_err()); assert_matches!( db.unwrap_err().downcast_ref::(), diff --git a/crates/storage/libmdbx-rs/benches/cursor.rs b/crates/storage/libmdbx-rs/benches/cursor.rs index 0a6a72b7b503..78044e45b9fe 100644 --- a/crates/storage/libmdbx-rs/benches/cursor.rs +++ b/crates/storage/libmdbx-rs/benches/cursor.rs @@ -1,7 +1,7 @@ mod utils; +use ::ffi::*; use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use ffi::*; use pprof::criterion::{Output, PProfProfiler}; use reth_libmdbx::*; use std::ptr; diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index b37285a4d622..fb89beecefde 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -90,6 +90,7 @@ where spill_max_denominator: None, spill_min_denominator: None, geometry: None, + log_level: None, _marker: PhantomData, } } @@ -384,6 +385,7 @@ where spill_max_denominator: Option, spill_min_denominator: Option, geometry: Option, Option)>>, + log_level: Option, _marker: PhantomData, } @@ -408,7 +410,14 @@ where ) -> Result> { let mut env: *mut ffi::MDBX_env = ptr::null_mut(); unsafe { + if let Some(log_level) = self.log_level { + // Returns the previously debug_flags in the 0-15 bits and log_level in the + // 16-31 bits, no need to use `mdbx_result`. 
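+                // (Assumption: `MDBX_DBG_DONTCHANGE` leaves the current debug flags
+                //  unchanged, so this call only updates the global log level.)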
+ ffi::mdbx_setup_debug(log_level, ffi::MDBX_DBG_DONTCHANGE, None); + } + mdbx_result(ffi::mdbx_env_create(&mut env))?; + if let Err(e) = (|| { if let Some(geometry) = &self.geometry { let mut min_size = -1; @@ -618,4 +627,9 @@ where }); self } + + pub fn set_log_level(&mut self, log_level: ffi::MDBX_log_level_t) -> &mut Self { + self.log_level = Some(log_level); + self + } } diff --git a/crates/storage/libmdbx-rs/src/lib.rs b/crates/storage/libmdbx-rs/src/lib.rs index 2e8c1002b740..e64d868b8790 100644 --- a/crates/storage/libmdbx-rs/src/lib.rs +++ b/crates/storage/libmdbx-rs/src/lib.rs @@ -19,7 +19,9 @@ pub use crate::{ flags::*, transaction::{Transaction, TransactionKind, RO, RW}, }; -pub use ffi::MDBX_dbi as DBI; +pub mod ffi { + pub use ffi::{MDBX_dbi as DBI, MDBX_log_level_t as LogLevel}; +} mod codec; mod cursor; diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index e1ec92d5c08e..686257c10368 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -19,6 +19,7 @@ use tracing::trace; mod provider; pub use provider::{DatabaseProvider, DatabaseProviderRO, DatabaseProviderRW}; +use reth_interfaces::db::LogLevel; /// A common provider that fetches data from a database. /// @@ -61,9 +62,11 @@ impl ProviderFactory { pub fn new_with_database_path>( path: P, chain_spec: Arc, + log_level: Option, ) -> Result> { Ok(ProviderFactory:: { - db: init_db(path).map_err(|e| reth_interfaces::Error::Custom(e.to_string()))?, + db: init_db(path, log_level) + .map_err(|e| reth_interfaces::Error::Custom(e.to_string()))?, chain_spec, }) } @@ -402,6 +405,7 @@ mod tests { let factory = ProviderFactory::::new_with_database_path( tempfile::TempDir::new().expect(ERROR_TEMPDIR).into_path(), Arc::new(chain_spec), + None, ) .unwrap(); diff --git a/examples/db-access.rs b/examples/db-access.rs index 23913acde96b..fee4239a1661 100644 --- a/examples/db-access.rs +++ b/examples/db-access.rs @@ -18,7 +18,7 @@ fn main() -> eyre::Result<()> { // Opens a RO handle to the database file. // TODO: Should be able to do `ProviderFactory::new_with_db_path_ro(...)` instead of // doing in 2 steps. - let db = open_db_read_only(&Path::new(&std::env::var("RETH_DB_PATH")?))?; + let db = open_db_read_only(&Path::new(&std::env::var("RETH_DB_PATH")?), None)?; // Instantiate a provider factory for Ethereum mainnet using the provided DB. // TODO: Should the DB version include the spec so that you do not need to specify it here? diff --git a/examples/rpc-db.rs b/examples/rpc-db.rs index d705fa6c4fcf..57145106ebc2 100644 --- a/examples/rpc-db.rs +++ b/examples/rpc-db.rs @@ -30,7 +30,7 @@ use std::{path::Path, sync::Arc}; #[tokio::main] async fn main() -> eyre::Result<()> { // 1. 
Setup the DB - let db = Arc::new(open_db_read_only(&Path::new(&std::env::var("RETH_DB_PATH")?))?); + let db = Arc::new(open_db_read_only(&Path::new(&std::env::var("RETH_DB_PATH")?), None)?); let spec = Arc::new(ChainSpecBuilder::mainnet().build()); let factory = ProviderFactory::new(db.clone(), spec.clone()); From da58e39c26da77fe4e8f8ffdc0c60f49555a28e7 Mon Sep 17 00:00:00 2001 From: Paolo Facchinetti <51409747+paolofacchinetti@users.noreply.github.com> Date: Mon, 3 Jul 2023 15:04:17 +0200 Subject: [PATCH 042/722] feat: add docker compose with docs (#3496) Co-authored-by: Oliver Nordbjerg --- book/installation/docker.md | 71 ++++++++++++++++++++++++++ etc/.gitignore | 1 + etc/README.md | 12 +---- etc/docker-compose.yml | 95 +++++++++++++++++++++++++++++++++++ etc/docker-monitoring.yml | 39 -------------- etc/generate-jwt.sh | 4 ++ etc/prometheus/prometheus.yml | 2 +- 7 files changed, 173 insertions(+), 51 deletions(-) create mode 100644 etc/.gitignore create mode 100644 etc/docker-compose.yml delete mode 100644 etc/docker-monitoring.yml create mode 100755 etc/generate-jwt.sh diff --git a/book/installation/docker.md b/book/installation/docker.md index 936e226c6bc7..f216aa0cfbf4 100644 --- a/book/installation/docker.md +++ b/book/installation/docker.md @@ -45,3 +45,74 @@ The build will likely take several minutes. Once it's built, test it with: ```bash docker run reth:local --version ``` + +## Using the Docker image + +There are two ways to use the Docker image: +1. [Using Docker](#using-plain-docker) +2. [Using Docker Compose](#using-docker-compose) + +### Using Plain Docker + +To run Reth with Docker, run: + +```bash +docker run \ + -v rethdata:/root/.local/share/reth/db \ + -d \ + -p 9000:9000 \ + --name reth \ + reth:local \ + node \ + --metrics 0.0.0.0:9000 +``` + +The above command will create a container named `reth` and a named volume called `rethdata` for data persistence. + +It will use the local image `reth:local`. If you want to use the GitHub Container Registry remote image, use `ghcr.io/paradigmxyz/reth` with your preferred tag. 
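+
+For example, a minimal sketch of the same invocation against the remote image (assuming the default tag is suitable for your setup):
+
+```bash
+docker run \
+    -v rethdata:/root/.local/share/reth/db \
+    -d \
+    -p 9000:9000 \
+    --name reth \
+    ghcr.io/paradigmxyz/reth \
+    node \
+    --metrics 0.0.0.0:9000
+```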
+ +### Using Docker Compose + +To run Reth with Docker Compose, run the following command from a shell inside the root directory of this repository: + +```bash +./etc/generate-jwt.sh +docker compose -f etc/docker-compose.yml up -d +``` + +To check if Reth is running correctly, run: + +```bash +docker compose logs -f reth +``` + +The default `docker-compose.yml` file will create four containers: + +- Reth +- Prometheus +- Grafana +- Lighthouse + +Grafana will be exposed on `localhost:3000` and accessible via default credentials (username and password is `admin`) + +## Interacting with Reth inside Docker + +To interact with Reth you must first open a shell inside the Reth container by running: + +```bash +docker exec -it reth bash +``` + +**If Reth is running with Docker Compose, replace `reth` with `reth-reth-1` in the above command** + +### Listing the tables + +```bash +reth db stats +``` + +### Viewing some records + +```bash +reth db list --start=1 --len=2 Headers +``` \ No newline at end of file diff --git a/etc/.gitignore b/etc/.gitignore new file mode 100644 index 000000000000..9aceb4ea3bf1 --- /dev/null +++ b/etc/.gitignore @@ -0,0 +1 @@ +jwttoken diff --git a/etc/README.md b/etc/README.md index 10df5260b8ec..13315fbe1fda 100644 --- a/etc/README.md +++ b/etc/README.md @@ -11,14 +11,4 @@ The files in this directory may undergo a lot of changes while reth is unstable, ### Docker Compose -To run Grafana dashboard with example dashboard and pre-configured Prometheus data source pointing at -the locally running Reth instance with metrics exposed on `localhost:9001`: -```sh -docker compose -p reth -f ./etc/docker-monitoring.yml up -``` - -After that, Grafana will be exposed on `localhost:3000` and accessible via default credentials: -``` -username: admin -password: admin -``` \ No newline at end of file +To run Reth, Grafana and Prometheus with Docker Compose, refer to the [docker docs](/book/installation/docker.md#using-docker-compose) \ No newline at end of file diff --git a/etc/docker-compose.yml b/etc/docker-compose.yml new file mode 100644 index 000000000000..a19e253f7c83 --- /dev/null +++ b/etc/docker-compose.yml @@ -0,0 +1,95 @@ +version: '3.9' +name: 'reth' + +services: + reth: + restart: unless-stopped + image: ghcr.io/paradigmxyz/reth + ports: + - '9001:9001' # metrics + - '30303:30303' # eth/66 peering + - '8545:8545' # rpc + volumes: + - rethdata:/root/.local/share/reth/mainnet/db + - rethlogs:/root/rethlogs + - ./jwttoken:/root/jwt:ro + command: > + node + --metrics 0.0.0.0:9001 + --log.persistent + --log.directory /root/rethlogs + --authrpc.addr 0.0.0.0 + --authrpc.port 8551 + --authrpc.jwtsecret /root/jwt/jwt.hex + --http --http.addr 0.0.0.0 --http.port 8545 + --http.api "eth,net" + + lighthouse: + restart: unless-stopped + image: sigp/lighthouse + depends_on: + - reth + ports: + - '5052:5052/tcp' + - '5053:5053/tcp' + - '5054:5054/tcp' # metrics + - '9000:9000/tcp' + - '9000:9000/udp' + volumes: + - lighthousedata:/root/.lighthouse + - ./jwttoken:/root/jwt:ro + command: > + lighthouse bn + --http --http-address 0.0.0.0 + --execution-endpoint http://reth:8551 + --metrics --metrics-address 0.0.0.0 + --execution-jwt /root/jwt/jwt.hex + --checkpoint-sync-url https://mainnet.checkpoint.sigp.io + + prometheus: + restart: unless-stopped + image: prom/prometheus + depends_on: + - reth + ports: + - 9090:9090 + volumes: + - ./prometheus/:/etc/prometheus/ + - prometheusdata:/prometheus + command: + - --config.file=/etc/prometheus/prometheus.yml + - --storage.tsdb.path=/prometheus + + 
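+  # Grafana, pre-provisioned with the Prometheus datasource and the bundled dashboards (see the entrypoint below).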
grafana: + restart: unless-stopped + image: grafana/grafana + depends_on: + - reth + - prometheus + ports: + - 3000:3000 + environment: + PROMETHEUS_URL: http://prometheus:9090 + volumes: + - grafanadata:/var/lib/grafana + - ./grafana/datasources:/etc/grafana/provisioning/datasources + - ./grafana/dashboards:/etc/grafana/provisioning_temp/dashboards + # 1. Copy dashboards from temp directory to prevent modifying original host files + # 2. Replace Prometheus datasource placeholder with the actual name + # 3. Run Grafana + entrypoint: > + sh -c "cp -r /etc/grafana/provisioning_temp/dashboards/. /etc/grafana/provisioning/dashboards && + find /etc/grafana/provisioning/dashboards/ -name '*.json' -exec sed -i 's/$${DS_PROMETHEUS}/Prometheus/g' {} \+ && + /run.sh" + +volumes: + rethdata: + driver: local + rethlogs: + driver: local + lighthousedata: + driver: local + prometheusdata: + driver: local + grafanadata: + driver: local \ No newline at end of file diff --git a/etc/docker-monitoring.yml b/etc/docker-monitoring.yml deleted file mode 100644 index d9c439d01a17..000000000000 --- a/etc/docker-monitoring.yml +++ /dev/null @@ -1,39 +0,0 @@ -version: '3' - -services: - prometheus: - image: prom/prometheus - ports: - - 9090:9090 - volumes: - - ./prometheus/:/etc/prometheus/ - - prometheus_data:/prometheus - command: - - --config.file=/etc/prometheus/prometheus.yml - - --storage.tsdb.path=/prometheus - extra_hosts: - - "host.docker.internal:host-gateway" # https://stackoverflow.com/a/43541732/5204678 - - grafana: - image: grafana/grafana - ports: - - 3000:3000 - environment: - PROMETHEUS_URL: http://prometheus:9090 - # 1. Copy dashboards from temp directory to prevent modifying original host files - # 2. Replace Prometheus datasource placeholder with the actual name - # 3. Run Grafana - entrypoint: > - sh -c "cp -r /etc/grafana/provisioning_temp/dashboards/. 
/etc/grafana/provisioning/dashboards && - find /etc/grafana/provisioning/dashboards/ -name '*.json' -exec sed -i 's/$${DS_PROMETHEUS}/Prometheus/g' {} \+ && - /run.sh" - volumes: - - grafana_data:/var/lib/grafana - - ./grafana/datasources:/etc/grafana/provisioning/datasources - - ./grafana/dashboards:/etc/grafana/provisioning_temp/dashboards - depends_on: - - prometheus - -volumes: - prometheus_data: - grafana_data: diff --git a/etc/generate-jwt.sh b/etc/generate-jwt.sh new file mode 100755 index 000000000000..711b5b55d41d --- /dev/null +++ b/etc/generate-jwt.sh @@ -0,0 +1,4 @@ +# Borrowed from EthStaker's prepare for the merge guide +# See https://github.com/remyroy/ethstaker/blob/main/prepare-for-the-merge.md#configuring-a-jwt-token-file +mkdir -p jwttoken +openssl rand -hex 32 | tr -d "\n" | tee > jwttoken/jwt.hex \ No newline at end of file diff --git a/etc/prometheus/prometheus.yml b/etc/prometheus/prometheus.yml index 6444d4d87eca..8c578af3ad7f 100644 --- a/etc/prometheus/prometheus.yml +++ b/etc/prometheus/prometheus.yml @@ -3,4 +3,4 @@ scrape_configs: metrics_path: "/" scrape_interval: 5s static_configs: - - targets: ['localhost:9001', 'host.docker.internal:9001'] + - targets: ['reth:9001'] \ No newline at end of file From 228f6479b0ef067322b4e77484b86c8c2c6484d1 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Mon, 3 Jul 2023 16:12:56 +0300 Subject: [PATCH 043/722] chore: export `TransactionPoolExt` trait (#3548) --- crates/transaction-pool/src/lib.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index b56bb2042d04..fbfe24433243 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -97,7 +97,6 @@ use reth_provider::StateProviderFactory; use std::{collections::HashMap, sync::Arc}; use tokio::sync::mpsc::Receiver; use tracing::{instrument, trace}; -use traits::TransactionPoolExt; pub use crate::{ config::PoolConfig, @@ -107,7 +106,7 @@ pub use crate::{ traits::{ AllPoolTransactions, BestTransactions, BlockInfo, CanonicalStateUpdate, ChangedAccount, NewTransactionEvent, PoolSize, PoolTransaction, PooledTransaction, PropagateKind, - PropagatedTransactions, TransactionOrigin, TransactionPool, + PropagatedTransactions, TransactionOrigin, TransactionPool, TransactionPoolExt, }, validate::{ EthTransactionValidator, TransactionValidationOutcome, TransactionValidator, From d848668fb23d2c55a57821efb6a7093a29820ec7 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 3 Jul 2023 15:21:17 +0200 Subject: [PATCH 044/722] fix(rpc): fix parity tracing config (#3549) --- .../revm-inspectors/src/tracing/config.rs | 10 ++++++ crates/rpc/rpc/src/trace.rs | 35 +++++++++++++++++-- 2 files changed, 42 insertions(+), 3 deletions(-) diff --git a/crates/revm/revm-inspectors/src/tracing/config.rs b/crates/revm/revm-inspectors/src/tracing/config.rs index 2a0cb69bd679..521786451a2c 100644 --- a/crates/revm/revm-inspectors/src/tracing/config.rs +++ b/crates/revm/revm-inspectors/src/tracing/config.rs @@ -103,6 +103,16 @@ impl TracingInspectorConfig { self } + /// Configure whether the tracer should record steps and state diffs. + /// + /// This is a convenience method for setting both [TracingInspectorConfig::set_steps] and + /// [TracingInspectorConfig::set_state_diffs] since tracking state diffs requires steps tracing. 
+ pub fn set_steps_and_state_diffs(mut self, steps_and_diffs: bool) -> Self { + self.record_steps = steps_and_diffs; + self.record_state_diff = steps_and_diffs; + self + } + /// Configure whether the tracer should record logs pub fn set_record_logs(mut self, record_logs: bool) -> Self { self.record_logs = record_logs; diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 37047e21f569..3158f6d0b2d6 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -615,10 +615,12 @@ struct TraceApiInner { } /// Returns the [TracingInspectorConfig] depending on the enabled [TraceType]s +#[inline] fn tracing_config(trace_types: &HashSet) -> TracingInspectorConfig { - TracingInspectorConfig::default_parity() - .set_state_diffs(trace_types.contains(&TraceType::StateDiff)) - .set_steps(trace_types.contains(&TraceType::VmTrace)) + let needs_diff = trace_types.contains(&TraceType::StateDiff); + let needs_vm_trace = trace_types.contains(&TraceType::VmTrace); + let needs_steps = needs_vm_trace || needs_diff; + TracingInspectorConfig::default_parity().set_steps(needs_steps).set_state_diffs(needs_diff) } /// Helper to construct a [`LocalizedTransactionTrace`] that describes a reward to the block @@ -637,3 +639,30 @@ fn reward_trace(header: &SealedHeader, reward: RewardAction) -> LocalizedTransac }, } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parity_config() { + let mut s = HashSet::new(); + s.insert(TraceType::StateDiff); + let config = tracing_config(&s); + assert!(config.record_steps); + assert!(config.record_state_diff); + + let mut s = HashSet::new(); + s.insert(TraceType::VmTrace); + let config = tracing_config(&s); + assert!(config.record_steps); + assert!(!config.record_state_diff); + + let mut s = HashSet::new(); + s.insert(TraceType::VmTrace); + s.insert(TraceType::StateDiff); + let config = tracing_config(&s); + assert!(config.record_steps); + assert!(config.record_state_diff); + } +} From 7de2846a920366c560e4d080f7de498c469ad636 Mon Sep 17 00:00:00 2001 From: Paolo Facchinetti <51409747+paolofacchinetti@users.noreply.github.com> Date: Mon, 3 Jul 2023 16:18:35 +0200 Subject: [PATCH 045/722] docs: minimum docker engine version (#3546) Co-authored-by: Bjerg --- book/installation/docker.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/book/installation/docker.md b/book/installation/docker.md index f216aa0cfbf4..ac97c710eb6b 100644 --- a/book/installation/docker.md +++ b/book/installation/docker.md @@ -8,6 +8,9 @@ There are two ways to obtain a Reth Docker image: Once you have obtained the Docker image, proceed to [Using the Docker image](#using-the-docker-image). +> **Note** +> +> Reth requires Docker Engine version 20.10.10 or higher due to [missing support](https://docs.docker.com/engine/release-notes/20.10/#201010) for the `clone3` syscall in previous versions. ## GitHub Reth docker images for both x86_64 and ARM64 machines are published with every release of reth on GitHub Container Registry. From d7259f002957fb051bb9e6ed6a937b6625f0b4bb Mon Sep 17 00:00:00 2001 From: Bjerg Date: Mon, 3 Jul 2023 16:44:17 +0200 Subject: [PATCH 046/722] docs: update rpc cache flags (#3552) --- book/cli/node.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/book/cli/node.md b/book/cli/node.md index f68b16b5d6c6..a6b53dcf05f8 100644 --- a/book/cli/node.md +++ b/book/cli/node.md @@ -159,14 +159,14 @@ Rpc: --gas-price-oracle Gas price oracle configuration. 
- --block-cache-size - Max size for cached block data in megabytes. + --block-cache-len + Maximum number of block cache entries. - --receipt-cache-size - Max size for cached receipt data in megabytes. + --receipt-cache-len + Maximum number of receipt cache entries. - --env-cache-size - Max size for cached evm env data in megabytes. + --env-cache-len + Maximum number of env cache entries. Builder: --builder.extradata From 8025b05472ede7498bb97a99430387fbf01e4332 Mon Sep 17 00:00:00 2001 From: Bjerg Date: Mon, 3 Jul 2023 17:01:09 +0200 Subject: [PATCH 047/722] docs: update bodies config (#3553) --- book/run/config.md | 6 ++++-- crates/config/src/config.rs | 3 +-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/book/run/config.md b/book/run/config.md index fe9be5bf4a4b..43319d9d2faa 100644 --- a/book/run/config.md +++ b/book/run/config.md @@ -90,14 +90,16 @@ downloader_request_limit = 200 # A lower value means more frequent disk I/O (writes), but also # lowers memory usage. downloader_stream_batch_size = 10000 -# The maximum amount of blocks to keep in the internal buffer of the downloader. +# The size of the internal block buffer in bytes. # # A bigger buffer means that bandwidth can be saturated for longer periods, # but also increases memory consumption. # # If the buffer is full, no more requests will be made to peers until # space is made for new blocks in the buffer. -downloader_max_buffered_blocks = 42949 +# +# Defaults to around 4GB. +downloader_max_buffered_blocks_size_bytes = 4294967296 # The minimum and maximum number of concurrent requests to have in flight at a time. # # The downloader uses these as best effort targets, which means that the number diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index 246078759dd6..f0dfb4ac2bbb 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -148,8 +148,7 @@ pub struct BodiesConfig { /// /// Default: 10_000 pub downloader_stream_batch_size: usize, - /// Maximum amount of received bodies to buffer internally. - /// The response contains multiple bodies. + /// The size of the internal block buffer in bytes. 
/// /// Default: 4GB pub downloader_max_buffered_blocks_size_bytes: usize, From 64554dd0f111014ef5063156e942309edd840c32 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 3 Jul 2023 19:58:50 +0200 Subject: [PATCH 048/722] fix: add missing single block body download validation (#3563) --- crates/interfaces/src/p2p/full_block.rs | 105 +++++++++++++++++++++--- crates/primitives/src/peer.rs | 7 ++ 2 files changed, 102 insertions(+), 10 deletions(-) diff --git a/crates/interfaces/src/p2p/full_block.rs b/crates/interfaces/src/p2p/full_block.rs index bb13a3e9e0c8..8da7f456f736 100644 --- a/crates/interfaces/src/p2p/full_block.rs +++ b/crates/interfaces/src/p2p/full_block.rs @@ -1,9 +1,12 @@ -use crate::p2p::{ - bodies::client::{BodiesClient, SingleBodyRequest}, - error::PeerRequestResult, - headers::client::{HeadersClient, SingleHeaderRequest}, +use crate::{ + consensus::ConsensusError, + p2p::{ + bodies::client::{BodiesClient, SingleBodyRequest}, + error::PeerRequestResult, + headers::client::{HeadersClient, SingleHeaderRequest}, + }, }; -use reth_primitives::{BlockBody, Header, SealedBlock, SealedHeader, H256}; +use reth_primitives::{BlockBody, Header, SealedBlock, SealedHeader, WithPeerId, H256}; use std::{ fmt::Debug, future::Future, @@ -60,7 +63,7 @@ where hash: H256, request: FullBlockRequest, header: Option, - body: Option, + body: Option, } impl FetchFullBlockFuture @@ -77,15 +80,41 @@ where self.header.as_ref().map(|h| h.number) } - /// Returns the [SealedBlock] if the request is complete. + /// Returns the [SealedBlock] if the request is complete and valid. fn take_block(&mut self) -> Option { if self.header.is_none() || self.body.is_none() { return None } + let header = self.header.take().unwrap(); - let body = self.body.take().unwrap(); + let resp = self.body.take().unwrap(); + match resp { + BodyResponse::Validated(body) => Some(SealedBlock::new(header, body)), + BodyResponse::PendingValidation(resp) => { + // ensure the block is valid, else retry + if let Err(err) = ensure_valid_body_response(&header, resp.data()) { + debug!(target: "downloaders", ?err, hash=?header.hash, "Received wrong body"); + self.client.report_bad_message(resp.peer_id()); + self.header = Some(header); + self.request.body = Some(self.client.get_block_body(self.hash)); + return None + } + Some(SealedBlock::new(header, resp.into_data())) + } + } + } - Some(SealedBlock::new(header, body)) + fn on_block_response(&mut self, resp: WithPeerId) { + if let Some(ref header) = self.header { + if let Err(err) = ensure_valid_body_response(header, resp.data()) { + debug!(target: "downloaders", ?err, hash=?header.hash, "Received wrong body"); + self.client.report_bad_message(resp.peer_id()); + return + } + self.body = Some(BodyResponse::Validated(resp.into_data())); + return + } + self.body = Some(BodyResponse::PendingValidation(resp)); } } @@ -128,7 +157,9 @@ where ResponseResult::Body(res) => { match res { Ok(maybe_body) => { - this.body = maybe_body.into_data(); + if let Some(body) = maybe_body.transpose() { + this.on_block_response(body); + } } Err(err) => { debug!(target: "downloaders", %err, ?this.hash, "Body download failed"); @@ -197,6 +228,60 @@ enum ResponseResult { Body(PeerRequestResult>), } +/// The response of a body request. +#[derive(Debug)] +enum BodyResponse { + /// Already validated against transaction root of header + Validated(BlockBody), + /// Still needs to be validated against header + PendingValidation(WithPeerId), +} + +/// Ensures the block response data matches the header. 
+/// +/// This ensures the body response items match the header's hashes: +/// - ommer hash +/// - transaction root +/// - withdrawals root +fn ensure_valid_body_response( + header: &SealedHeader, + block: &BlockBody, +) -> Result<(), ConsensusError> { + let ommers_hash = reth_primitives::proofs::calculate_ommers_root(&block.ommers); + if header.ommers_hash != ommers_hash { + return Err(ConsensusError::BodyOmmersHashDiff { + got: ommers_hash, + expected: header.ommers_hash, + }) + } + + let transaction_root = reth_primitives::proofs::calculate_transaction_root(&block.transactions); + if header.transactions_root != transaction_root { + return Err(ConsensusError::BodyTransactionRootDiff { + got: transaction_root, + expected: header.transactions_root, + }) + } + + let withdrawals = block.withdrawals.as_deref().unwrap_or(&[]); + if let Some(header_withdrawals_root) = header.withdrawals_root { + let withdrawals_root = reth_primitives::proofs::calculate_withdrawals_root(withdrawals); + if withdrawals_root != header_withdrawals_root { + return Err(ConsensusError::BodyWithdrawalsRootDiff { + got: withdrawals_root, + expected: header_withdrawals_root, + }) + } + return Ok(()) + } + + if !withdrawals.is_empty() { + return Err(ConsensusError::WithdrawalsRootUnexpected) + } + + Ok(()) +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/primitives/src/peer.rs b/crates/primitives/src/peer.rs index 6a5f89806976..531d16a2d3b0 100644 --- a/crates/primitives/src/peer.rs +++ b/crates/primitives/src/peer.rs @@ -52,3 +52,10 @@ impl WithPeerId { WithPeerId(self.0, op(self.1)) } } + +impl WithPeerId> { + /// returns `None` if the inner value is `None`, otherwise returns `Some(WithPeerId)`. + pub fn transpose(self) -> Option> { + self.1.map(|v| WithPeerId(self.0, v)) + } +} From 24f9147fee947a7930c9ee8ead17c8e6fcdd7f80 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 3 Jul 2023 20:39:04 +0200 Subject: [PATCH 049/722] feat(book): adding QLC and TLC NVMEs comparison (#3562) --- book/installation/installation.md | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/book/installation/installation.md b/book/installation/installation.md index 0e0d2feb131a..916f3fe634e7 100644 --- a/book/installation/installation.md +++ b/book/installation/installation.md @@ -21,11 +21,19 @@ The most important requirement is by far the disk, whereas CPU and RAM requireme | CPU | Higher clock speed over core count | Higher clock speeds over core count | | Bandwidth | Stable 24Mbps+ | Stable 24Mbps+ | +It is then crucial to understand the difference between QLC and TLC NVMe drives when considering the disk requirement. + +QLC (Quad-Level Cell) NVMe drives utilize four bits of data per cell, allowing for higher storage density and lower manufacturing costs. However, this increased density comes at the expense of performance. QLC drives have slower read and write speeds compared to TLC drives. They also have a lower endurance, meaning they may have a shorter lifespan and be less suitable for heavy workloads or constant data rewriting. + +TLC (Triple-Level Cell) NVMe drives, on the other hand, use three bits of data per cell. While they have a slightly lower storage density compared to QLC drives, TLC drives offer faster performance. They typically have higher read and write speeds, making them more suitable for demanding tasks such as data-intensive applications, gaming, and multimedia editing. 
TLC drives also tend to have a higher endurance, making them more durable and longer-lasting. + +Prior to purchasing an NVMe drive, it is advisable to research and determine whether the disk will be based on QLC or TLC technology. An overview of recommended and not-so-recommended NVMe boards can be found at the following link: https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038 + ### Disk There are multiple types of disks to sync Reth, with varying size requirements, depending on the syncing mode: -* Archive Node: At least 2TB is required to store +* Archive Node: At least 2TB is required to store * Full Node: TBD NVMe drives are recommended for the best performance, with SSDs being a cheaper alternative. HDDs are the cheapest option, but they will take the longest to sync, and are not recommended. @@ -40,7 +48,7 @@ As a result, the number of cores matters less, but in general higher clock speed ### Memory -It is recommended to use at least 8GB of RAM. +It is recommended to use at least 8GB of RAM. Most of Reth's components tend to consume a low amount of memory, unless you are under heavy RPC load, so this should matter less than the other requirements. @@ -48,9 +56,9 @@ Higher memory is generally better as it allows for better caching, resulting in ### Bandwidth -A stable and dependable internet connection is crucial for both syncing a node from genesis and for keeping up with the chain's tip. +A stable and dependable internet connection is crucial for both syncing a node from genesis and for keeping up with the chain's tip. -Note that due to Reth's staged sync, you only need an internet connection for the Headers and Bodies stages. This means that the first 1-3 hours (depending on your internet connection) will be online, downloading all necessary data, and the rest will be done offline and does not require an internet connection. +Note that due to Reth's staged sync, you only need an internet connection for the Headers and Bodies stages. This means that the first 1-3 hours (depending on your internet connection) will be online, downloading all necessary data, and the rest will be done offline and does not require an internet connection. Once you're synced to the tip you will need a reliable connection, especially if you're operating a validator. A 24Mbps connection is recommended, but you can probably get away with less. Make sure your ISP does not cap your bandwidth. From b4801f9e2cf6e1b00922ae262c2126b4ba673eca Mon Sep 17 00:00:00 2001 From: Bjerg Date: Mon, 3 Jul 2023 20:40:34 +0200 Subject: [PATCH 050/722] chore: add stale workflow (#3556) --- .github/workflows/stale.yml | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 .github/workflows/stale.yml diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 000000000000..91d99476b889 --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,24 @@ +name: stale issues +on: + workflow_dispatch: {} + schedule: + - cron: "30 1 * * *" + +jobs: + close-issues: + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + steps: + - uses: actions/stale@v8 + with: + days-before-stale: 14 + days-before-close: 7 + stale-issue-label: "S-stale" + exempt-issue-labels: "M-prevent-stale" + stale-issue-message: "This issue is stale because it has been open for 14 days with no activity." + close-issue-message: "This issue was closed because it has been inactive for 7 days since being marked as stale." 
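+          # Exempt issues that have a milestone or an assignee from going stale.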
+ exempt-all-milestones: true + exempt-all-assignees: true + repo-token: ${{ secrets.GITHUB_TOKEN }} From a7431465fa9c04c25a1cedd9553f375eb3bfa7a8 Mon Sep 17 00:00:00 2001 From: evalir Date: Tue, 4 Jul 2023 06:15:52 -0400 Subject: [PATCH 051/722] chore(`crates`): deprecate `staged-sync` (#3564) --- Cargo.lock | 40 ------- Cargo.toml | 1 - bin/reth/Cargo.toml | 1 - bin/reth/src/chain/import.rs | 2 +- bin/reth/src/chain/init.rs | 2 +- bin/reth/src/debug_cmd/execution.rs | 2 +- .../src/utils => bin/reth/src}/init.rs | 1 + bin/reth/src/lib.rs | 1 + bin/reth/src/node/mod.rs | 2 +- bin/reth/src/stage/drop.rs | 2 +- codecov.yml | 1 - crates/net/network/Cargo.toml | 2 +- .../network/tests/it/clique}/clique.rs | 0 .../tests/it/clique}/clique_middleware.rs | 10 -- .../network/tests/it/clique}/mod.rs | 2 - .../sync.rs => net/network/tests/it/geth.rs} | 2 +- crates/net/network/tests/it/main.rs | 2 + crates/staged-sync/Cargo.toml | 101 ------------------ crates/staged-sync/src/lib.rs | 24 ----- crates/staged-sync/src/utils/mod.rs | 4 - docs/repo/layout.md | 1 - 21 files changed, 11 insertions(+), 192 deletions(-) rename {crates/staged-sync/src/utils => bin/reth/src}/init.rs (99%) rename crates/{staged-sync/src/test_utils => net/network/tests/it/clique}/clique.rs (100%) rename crates/{staged-sync/src/test_utils => net/network/tests/it/clique}/clique_middleware.rs (91%) rename crates/{staged-sync/src/test_utils => net/network/tests/it/clique}/mod.rs (74%) rename crates/{staged-sync/tests/sync.rs => net/network/tests/it/geth.rs} (98%) delete mode 100644 crates/staged-sync/Cargo.toml delete mode 100644 crates/staged-sync/src/lib.rs delete mode 100644 crates/staged-sync/src/utils/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 1fa48941cc26..50a480581a06 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4996,7 +4996,6 @@ dependencies = [ "reth-rpc", "reth-rpc-builder", "reth-rpc-engine-api", - "reth-staged-sync", "reth-stages", "reth-tasks", "reth-tracing", @@ -5791,45 +5790,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "reth-staged-sync" -version = "0.1.0-alpha.1" -dependencies = [ - "assert_matches", - "async-trait", - "confy", - "enr", - "ethers-core", - "ethers-middleware", - "ethers-providers", - "ethers-signers", - "eyre", - "futures", - "hex", - "rand 0.8.5", - "reth-db", - "reth-discv4", - "reth-downloaders", - "reth-interfaces", - "reth-net-nat", - "reth-network", - "reth-network-api", - "reth-primitives", - "reth-provider", - "reth-staged-sync", - "reth-stages", - "reth-tracing", - "secp256k1", - "serde", - "serde_json", - "shellexpand", - "tempfile", - "thiserror", - "tokio", - "tracing", - "walkdir", -] - [[package]] name = "reth-stages" version = "0.1.0-alpha.1" diff --git a/Cargo.toml b/Cargo.toml index 52268e8ec0f5..11ba1a26fbb1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,7 +33,6 @@ members = [ "crates/rpc/rpc-engine-api", "crates/rpc/rpc-types", "crates/rpc/rpc-testing-util", - "crates/staged-sync", "crates/stages", "crates/storage/codecs", "crates/storage/db", diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 0c1d7f881a74..7009807f75b0 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -16,7 +16,6 @@ reth-db = { path = "../../crates/storage/db", features = ["mdbx", "test-utils"] reth-provider = { workspace = true, features = ["test-utils"] } reth-revm = { path = "../../crates/revm" } reth-revm-inspectors = { path = "../../crates/revm/revm-inspectors" } -reth-staged-sync = { path = "../../crates/staged-sync" } reth-stages = { path = "../../crates/stages" } 
reth-interfaces = { workspace = true, features = ["test-utils", "clap"] } reth-transaction-pool = { workspace = true } diff --git a/bin/reth/src/chain/import.rs b/bin/reth/src/chain/import.rs index e18e7716a65a..38bed8996f81 100644 --- a/bin/reth/src/chain/import.rs +++ b/bin/reth/src/chain/import.rs @@ -1,5 +1,6 @@ use crate::{ dirs::{DataDirPath, MaybePlatformPath}, + init::init_genesis, node::events::{handle_events, NodeEvent}, version::SHORT_VERSION, }; @@ -18,7 +19,6 @@ use reth_downloaders::{ }; use reth_interfaces::consensus::Consensus; use reth_primitives::{stage::StageId, ChainSpec, H256}; -use reth_staged_sync::utils::init::init_genesis; use reth_stages::{ prelude::*, stages::{ diff --git a/bin/reth/src/chain/init.rs b/bin/reth/src/chain/init.rs index 3c2c73015a02..e25cbf8a2cfb 100644 --- a/bin/reth/src/chain/init.rs +++ b/bin/reth/src/chain/init.rs @@ -1,11 +1,11 @@ use crate::{ args::{utils::genesis_value_parser, DatabaseArgs}, dirs::{DataDirPath, MaybePlatformPath}, + init::init_genesis, }; use clap::Parser; use reth_db::init_db; use reth_primitives::ChainSpec; -use reth_staged_sync::utils::init::init_genesis; use std::sync::Arc; use tracing::info; diff --git a/bin/reth/src/debug_cmd/execution.rs b/bin/reth/src/debug_cmd/execution.rs index fda560715130..7a6f61334626 100644 --- a/bin/reth/src/debug_cmd/execution.rs +++ b/bin/reth/src/debug_cmd/execution.rs @@ -2,6 +2,7 @@ use crate::{ args::{get_secret_key, utils::genesis_value_parser, DatabaseArgs, NetworkArgs}, dirs::{DataDirPath, MaybePlatformPath}, + init::init_genesis, node::events, runner::CliContext, utils::get_single_header, @@ -24,7 +25,6 @@ use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; use reth_primitives::{fs, stage::StageId, BlockHashOrNumber, BlockNumber, ChainSpec, H256}; use reth_provider::{BlockExecutionWriter, ProviderFactory, StageCheckpointReader}; -use reth_staged_sync::utils::init::init_genesis; use reth_stages::{ sets::DefaultStages, stages::{ diff --git a/crates/staged-sync/src/utils/init.rs b/bin/reth/src/init.rs similarity index 99% rename from crates/staged-sync/src/utils/init.rs rename to bin/reth/src/init.rs index e3c1d70b2acd..30373b3100be 100644 --- a/crates/staged-sync/src/utils/init.rs +++ b/bin/reth/src/init.rs @@ -1,3 +1,4 @@ +//! Reth genesis initialization utility functions. 
use reth_db::{ cursor::DbCursorRO, database::{Database, DatabaseGAT}, diff --git a/bin/reth/src/lib.rs b/bin/reth/src/lib.rs index d42274e28657..46d2c76a16e0 100644 --- a/bin/reth/src/lib.rs +++ b/bin/reth/src/lib.rs @@ -30,6 +30,7 @@ pub mod config; pub mod db; pub mod debug_cmd; pub mod dirs; +pub mod init; pub mod node; pub mod p2p; pub mod prometheus_exporter; diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 18e13ce2cfb0..3362bfe3e86d 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -4,6 +4,7 @@ use crate::{ args::{get_secret_key, DebugArgs, NetworkArgs, RpcServerArgs}, dirs::DataDirPath, + init::init_genesis, prometheus_exporter, runner::CliContext, utils::get_single_header, @@ -46,7 +47,6 @@ use reth_provider::{ use reth_revm::Factory; use reth_revm_inspectors::stack::Hook; use reth_rpc_engine_api::EngineApi; -use reth_staged_sync::utils::init::init_genesis; use reth_stages::{ prelude::*, stages::{ diff --git a/bin/reth/src/stage/drop.rs b/bin/reth/src/stage/drop.rs index 2963c643867f..771dff1d2049 100644 --- a/bin/reth/src/stage/drop.rs +++ b/bin/reth/src/stage/drop.rs @@ -2,12 +2,12 @@ use crate::{ args::{utils::genesis_value_parser, DatabaseArgs, StageEnum}, dirs::{DataDirPath, MaybePlatformPath}, + init::{insert_genesis_header, insert_genesis_state}, utils::DbTool, }; use clap::Parser; use reth_db::{database::Database, open_db, tables, transaction::DbTxMut, DatabaseEnv}; use reth_primitives::{fs, stage::StageId, ChainSpec}; -use reth_staged_sync::utils::init::{insert_genesis_header, insert_genesis_state}; use std::sync::Arc; use tracing::info; diff --git a/codecov.yml b/codecov.yml index 6392d83a8d60..5bd75590b504 100644 --- a/codecov.yml +++ b/codecov.yml @@ -27,7 +27,6 @@ component_management: name: pipeline paths: - crates/stages/** - - crates/staged-sync/** - component_id: storage name: storage (db) paths: diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index 8a62fec063a0..3e5195c1cc29 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -77,7 +77,7 @@ reth-tracing = { path = "../../tracing" } reth-transaction-pool = { workspace = true, features = ["test-utils"] } ethers-core = { workspace = true, default-features = false } -ethers-providers = { workspace = true, default-features = false } +ethers-providers = { workspace = true, default-features = false, features = ["ws"] } ethers-signers = { workspace = true, default-features = false } ethers-middleware = { workspace = true, default-features = false } diff --git a/crates/staged-sync/src/test_utils/clique.rs b/crates/net/network/tests/it/clique/clique.rs similarity index 100% rename from crates/staged-sync/src/test_utils/clique.rs rename to crates/net/network/tests/it/clique/clique.rs diff --git a/crates/staged-sync/src/test_utils/clique_middleware.rs b/crates/net/network/tests/it/clique/clique_middleware.rs similarity index 91% rename from crates/staged-sync/src/test_utils/clique_middleware.rs rename to crates/net/network/tests/it/clique/clique_middleware.rs index 315788c2e26c..ac06c7b9110e 100644 --- a/crates/staged-sync/src/test_utils/clique_middleware.rs +++ b/crates/net/network/tests/it/clique/clique_middleware.rs @@ -26,10 +26,6 @@ pub enum CliqueError { #[error("no genesis block returned from the provider")] NoGenesis, - /// No tip block returned from the provider - #[error("no tip block returned from the provider")] - NoTip, - /// Account was not successfully unlocked on the provider #[error("account was not 
successfully unlocked on the provider")] AccountNotUnlocked, @@ -94,12 +90,6 @@ pub trait CliqueMiddleware: Send + Sync + Middleware { Ok(()) } - /// Returns the chain tip of the [`Geth`](ethers_core::utils::Geth) instance by calling - /// geth's `eth_getBlock`. - async fn remote_tip_block(&self) -> Result, CliqueMiddlewareError> { - self.get_block(BlockNumber::Latest).await?.ok_or(CliqueError::NoTip) - } - /// Returns the genesis block of the [`Geth`](ethers_core::utils::Geth) instance by calling /// geth's `eth_getBlock`. async fn remote_genesis_block(&self) -> Result, CliqueMiddlewareError> { diff --git a/crates/staged-sync/src/test_utils/mod.rs b/crates/net/network/tests/it/clique/mod.rs similarity index 74% rename from crates/staged-sync/src/test_utils/mod.rs rename to crates/net/network/tests/it/clique/mod.rs index ff1105afade9..fd635c3cab2c 100644 --- a/crates/staged-sync/src/test_utils/mod.rs +++ b/crates/net/network/tests/it/clique/mod.rs @@ -1,5 +1,3 @@ -//! Common helpers for staged sync integration testing. - pub mod clique; pub mod clique_middleware; diff --git a/crates/staged-sync/tests/sync.rs b/crates/net/network/tests/it/geth.rs similarity index 98% rename from crates/staged-sync/tests/sync.rs rename to crates/net/network/tests/it/geth.rs index 66ef0f763bbb..3c9f8d41ff2c 100644 --- a/crates/staged-sync/tests/sync.rs +++ b/crates/net/network/tests/it/geth.rs @@ -1,3 +1,4 @@ +use crate::clique::{CliqueGethInstance, CliqueMiddleware}; use ethers_core::{ types::{transaction::eip2718::TypedTransaction, Eip1559TransactionRequest, H160, U64}, utils::Geth, @@ -10,7 +11,6 @@ use reth_network::{ use reth_network_api::Peers; use reth_primitives::{ChainSpec, Genesis, PeerId, SealedHeader}; use reth_provider::test_utils::NoopProvider; -use reth_staged_sync::test_utils::{CliqueGethInstance, CliqueMiddleware}; use secp256k1::SecretKey; use std::{net::SocketAddr, sync::Arc}; diff --git a/crates/net/network/tests/it/main.rs b/crates/net/network/tests/it/main.rs index ccba08a3a24a..106bf61301db 100644 --- a/crates/net/network/tests/it/main.rs +++ b/crates/net/network/tests/it/main.rs @@ -1,4 +1,6 @@ +mod clique; mod connect; +mod geth; mod requests; mod session; mod startup; diff --git a/crates/staged-sync/Cargo.toml b/crates/staged-sync/Cargo.toml deleted file mode 100644 index a88472583a06..000000000000 --- a/crates/staged-sync/Cargo.toml +++ /dev/null @@ -1,101 +0,0 @@ -[package] -name = "reth-staged-sync" -version.workspace = true -edition.workspace = true -rust-version.workspace = true -license.workspace = true -homepage.workspace = true -repository.workspace = true -description = "Puts together all the Reth stages in a unified abstraction" - -[dependencies] -# reth -reth-db = { path = "../../crates/storage/db", features = ["mdbx", "test-utils"] } -reth-discv4 = { path = "../../crates/net/discv4" } -reth-network-api = { workspace = true } -reth-network = { path = "../../crates/net/network", features = ["serde"] } -reth-downloaders = { path = "../../crates/net/downloaders" } -reth-primitives = { workspace = true } -reth-provider = { workspace = true, features = ["test-utils"] } -reth-net-nat = { path = "../../crates/net/nat" } -reth-stages = { path = "../stages" } -reth-interfaces = { workspace = true } - -# io -serde = "1.0" -serde_json = { workspace = true } - -# misc -walkdir = "2.3.2" -eyre = "0.6.8" -shellexpand = "3.0.0" -tracing = { workspace = true } - -# crypto -rand = { workspace = true, optional = true } -secp256k1 = { workspace = true, features = ["global-context", 
"rand-std", "recovery"] } - -# errors -thiserror = { workspace = true } - -# enr -enr = { version = "0.8.1", features = ["serde", "rust-secp256k1"], optional = true } - -# ethers -ethers-core = { workspace = true, default-features = false, optional = true } -ethers-providers = { workspace = true, features = [ - "ws", -], default-features = false, optional = true } -ethers-middleware = { workspace = true, default-features = false, optional = true } -ethers-signers = { workspace = true, default-features = false, optional = true } - -# async / futures -async-trait = { workspace = true, optional = true } -tokio = { workspace = true, features = [ - "io-util", - "net", - "macros", - "rt-multi-thread", - "time", -], optional = true } - -# misc -hex = { version = "0.4", optional = true } - -[dev-dependencies] -# HACK(onbjerg): Workaround to enable test-utils when running tests -# Source: https://github.com/rust-lang/cargo/issues/2911#issuecomment-749580481 -reth-staged-sync = { path = ".", features = ["test-utils"] } - -# reth crates -reth-tracing = { path = "../tracing" } -reth-downloaders = { path = "../net/downloaders" } - -# async/futures -futures = { workspace = true } -tokio = { workspace = true, features = ["io-util", "net", "macros", "rt-multi-thread", "time"] } - -# crypto -secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } - -confy = "0.5" - -tempfile = "3.4" -assert_matches = "1.5.0" - -[features] -test-utils = [ - "reth-network/test-utils", - "reth-network/test-utils", - "reth-provider/test-utils", - "dep:enr", - "dep:ethers-core", - "dep:hex", - "dep:rand", - "dep:tokio", - "dep:ethers-signers", - "dep:ethers-providers", - "dep:ethers-middleware", - "dep:async-trait", -] -geth-tests = [] diff --git a/crates/staged-sync/src/lib.rs b/crates/staged-sync/src/lib.rs deleted file mode 100644 index 7654d0bd393a..000000000000 --- a/crates/staged-sync/src/lib.rs +++ /dev/null @@ -1,24 +0,0 @@ -#![cfg_attr(docsrs, feature(doc_cfg))] -#![doc( - html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", - html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", - issue_tracker_base_url = "https://github.com/paradigmxzy/reth/issues/" -)] -#![warn(missing_docs, unreachable_pub)] -#![deny(unused_must_use, rust_2018_idioms)] -#![doc(test( - no_crate_inject, - attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) -))] - -//! Puts together all the Reth stages in a unified abstraction. -//! -//! ## Feature Flags -//! -//! - `test-utils`: Various utilities helpful for writing tests -//! - `geth-tests`: Runs tests that require Geth to be installed locally. -pub mod utils; - -#[cfg(any(test, feature = "test-utils"))] -/// Common helpers for integration testing. -pub mod test_utils; diff --git a/crates/staged-sync/src/utils/mod.rs b/crates/staged-sync/src/utils/mod.rs deleted file mode 100644 index 8c15ca28e8c3..000000000000 --- a/crates/staged-sync/src/utils/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -//! Utility functions. - -/// Utilities for initializing parts of the chain -pub mod init; diff --git a/docs/repo/layout.md b/docs/repo/layout.md index 01ec7bd0ff77..8418518fb4d2 100644 --- a/docs/repo/layout.md +++ b/docs/repo/layout.md @@ -86,7 +86,6 @@ These crates implement the main syncing drivers of reth. - [`blockchain-tree`](../../crates/blockchain-tree): A tree-like structure for handling multiple chains of unfinalized blocks. This is the main component during live sync (i.e. 
syncing at the tip) - [`stages`](../../crates/stages): A pipelined sync, including implementation of various stages. This is used during initial sync and is faster than the tree-like structure for longer sync ranges. -- [`staged-sync`](../../crates/staged-sync): A catch-all for various things currently, to be removed ### RPC From 698059d8b5106d73a3dc2cfb91e1f18bb10472f1 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 4 Jul 2023 14:23:46 +0300 Subject: [PATCH 052/722] feat(cli): txpool args (#3575) --- bin/reth/src/args/mod.rs | 11 +++-- bin/reth/src/args/txpool_args.rs | 57 ++++++++++++++++++++++ bin/reth/src/node/mod.rs | 7 ++- crates/transaction-pool/src/config.rs | 15 ++++-- crates/transaction-pool/src/lib.rs | 5 +- crates/transaction-pool/src/pool/txpool.rs | 4 +- 6 files changed, 88 insertions(+), 11 deletions(-) create mode 100644 bin/reth/src/args/txpool_args.rs diff --git a/bin/reth/src/args/mod.rs b/bin/reth/src/args/mod.rs index a732c5c7cc7c..05d691e8a371 100644 --- a/bin/reth/src/args/mod.rs +++ b/bin/reth/src/args/mod.rs @@ -19,7 +19,7 @@ pub use database_args::DatabaseArgs; mod secret_key; pub use secret_key::{get_secret_key, SecretKeyError}; -/// MinerArgs struct for configuring the miner +/// PayloadBuilderArgs struct for configuring the payload builder mod payload_builder_args; pub use payload_builder_args::PayloadBuilderArgs; @@ -27,7 +27,12 @@ pub use payload_builder_args::PayloadBuilderArgs; mod stage_args; pub use stage_args::StageEnum; +/// Gas price oracle related arguments mod gas_price_oracle_args; -pub mod utils; - pub use gas_price_oracle_args::GasPriceOracleArgs; + +/// TxPoolArgs for configuring the transaction pool +mod txpool_args; +pub use txpool_args::TxPoolArgs; + +pub mod utils; diff --git a/bin/reth/src/args/txpool_args.rs b/bin/reth/src/args/txpool_args.rs new file mode 100644 index 000000000000..d1bd2326b692 --- /dev/null +++ b/bin/reth/src/args/txpool_args.rs @@ -0,0 +1,57 @@ +//! Transaction pool arguments + +use clap::Args; +use reth_transaction_pool::{ + PoolConfig, SubPoolLimit, TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, + TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, +}; + +/// Parameters for configuring the transaction pool +#[derive(Debug, Args, PartialEq, Default)] +pub struct TxPoolArgs { + /// Max number of transactions in the pending sub-pool. + #[arg(long = "txpool.pending_max_count", help_heading = "TxPool", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)] + pub pending_max_count: usize, + /// Max size of the pending sub-pool in megabytes. + #[arg(long = "txpool.pending_max_size", help_heading = "TxPool", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)] + pub pending_max_size: usize, + + /// Max number of transactions in the basefee sub-pool. + #[arg(long = "txpool.basefee_max_count", help_heading = "TxPool", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)] + pub basefee_max_count: usize, + /// Max size of the basefee sub-pool in megabytes. + #[arg(long = "txpool.basefee_max_size", help_heading = "TxPool", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)] + pub basefee_max_size: usize, + + /// Max number of transactions in the queued sub-pool. + #[arg(long = "txpool.queued_max_count", help_heading = "TxPool", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)] + pub queued_max_count: usize, + /// Max size of the queued sub-pool in megabytes.
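A rough sketch of how these flags are exercised from the command line (illustrative only: `TestCommand` is a hypothetical harness, and `TxPoolArgs` plus the `TXPOOL_*` constants are assumed to be in scope from this patch):

use clap::Parser;

// Hypothetical wrapper that flattens the new args, mirroring how `Command` embeds them.
#[derive(Debug, Parser)]
struct TestCommand {
    #[clap(flatten)]
    txpool: TxPoolArgs,
}

fn main() {
    // `long = "txpool.pending_max_count"` means the flag is spelled with a dot.
    let cmd = TestCommand::parse_from(["reth", "--txpool.pending_max_count", "20000"]);
    assert_eq!(cmd.txpool.pending_max_count, 20_000);
    // Flags that are not passed fall back to the TXPOOL_* defaults.
    assert_eq!(cmd.txpool.max_account_slots, TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER);
}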
+ #[arg(long = "txpool.queued_max_size", help_heading = "TxPool", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)] + pub queued_max_size: usize, + + /// Max number of executable transaction slots guaranteed per account + #[arg(long = "txpool.max_account_slots", help_heading = "TxPool", default_value_t = TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER)] + pub max_account_slots: usize, +} + +impl TxPoolArgs { + /// Returns transaction pool configuration. + pub fn pool_config(&self) -> PoolConfig { + PoolConfig { + pending_limit: SubPoolLimit { + max_txs: self.pending_max_count, + max_size: self.pending_max_size * 1024 * 1024, + }, + basefee_limit: SubPoolLimit { + max_txs: self.basefee_max_count, + max_size: self.basefee_max_size * 1024 * 1024, + }, + queued_limit: SubPoolLimit { + max_txs: self.queued_max_count, + max_size: self.queued_max_size * 1024 * 1024, + }, + max_account_slots: self.max_account_slots, + } + } +} diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 3362bfe3e86d..a91687812610 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -2,7 +2,7 @@ //! //! Starts the client use crate::{ - args::{get_secret_key, DebugArgs, NetworkArgs, RpcServerArgs}, + args::{get_secret_key, DebugArgs, NetworkArgs, RpcServerArgs, TxPoolArgs}, dirs::DataDirPath, init::init_genesis, prometheus_exporter, @@ -132,6 +132,9 @@ pub struct Command { #[clap(flatten)] rpc: RpcServerArgs, + #[clap(flatten)] + txpool: TxPoolArgs, + #[clap(flatten)] builder: PayloadBuilderArgs, @@ -223,7 +226,7 @@ impl Command { ctx.task_executor.clone(), 1, ), - Default::default(), + self.txpool.pool_config(), ); info!(target: "reth::cli", "Transaction pool initialized"); diff --git a/crates/transaction-pool/src/config.rs b/crates/transaction-pool/src/config.rs index 285b2032c4f2..faf0c0156b41 100644 --- a/crates/transaction-pool/src/config.rs +++ b/crates/transaction-pool/src/config.rs @@ -1,5 +1,11 @@ /// Guarantees max transactions for one sender, compatible with geth/erigon -pub(crate) const MAX_ACCOUNT_SLOTS_PER_SENDER: usize = 16; +pub const TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER: usize = 16; + +/// The default maximum allowed number of transactions in the given subpool. +pub const TXPOOL_SUBPOOL_MAX_TXS_DEFAULT: usize = 10_000; + +/// The default maximum allowed size of the given subpool. +pub const TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT: usize = 20; /// Configuration options for the Transaction pool. 
#[derive(Debug, Clone)] @@ -20,7 +26,7 @@ impl Default for PoolConfig { pending_limit: Default::default(), basefee_limit: Default::default(), queued_limit: Default::default(), - max_account_slots: MAX_ACCOUNT_SLOTS_PER_SENDER, + max_account_slots: TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, } } } @@ -45,6 +51,9 @@ impl SubPoolLimit { impl Default for SubPoolLimit { fn default() -> Self { // either 10k transactions or 20MB - Self { max_txs: 10_000, max_size: 20 * 1024 * 1024 } + Self { + max_txs: TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, + max_size: TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT * 1024 * 1024, + } } } diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index fbfe24433243..797d448fac2e 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -99,7 +99,10 @@ use tokio::sync::mpsc::Receiver; use tracing::{instrument, trace}; pub use crate::{ - config::PoolConfig, + config::{ + PoolConfig, SubPoolLimit, TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, + TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, + }, error::PoolResult, ordering::{GasCostOrdering, TransactionOrdering}, pool::TransactionEvents, diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index aba3e1e698d4..4368a240f26c 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -1,6 +1,6 @@ //! The internal transaction pool implementation. use crate::{ - config::MAX_ACCOUNT_SLOTS_PER_SENDER, + config::TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, error::{InvalidPoolTransactionError, PoolError}, identifier::{SenderId, TransactionId}, metrics::TxPoolMetrics, @@ -1191,7 +1191,7 @@ impl AllTransactions { impl Default for AllTransactions { fn default() -> Self { Self { - max_account_slots: MAX_ACCOUNT_SLOTS_PER_SENDER, + max_account_slots: TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, minimal_protocol_basefee: MIN_PROTOCOL_BASE_FEE, block_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT, by_hash: Default::default(), From 9821311585f62ed868b1df2e9516f94d51527f26 Mon Sep 17 00:00:00 2001 From: Delweng Date: Tue, 4 Jul 2023 20:13:51 +0800 Subject: [PATCH 053/722] docker: don't ignore examples (#3570) Signed-off-by: jsvisa --- .dockerignore | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.dockerignore b/.dockerignore index 0fbda481bfe0..95c7af355cfb 100644 --- a/.dockerignore +++ b/.dockerignore @@ -16,4 +16,7 @@ !/dist # include licenses -!LICENSE-* \ No newline at end of file +!LICENSE-* + +# include example files +!/examples From c236521cff86077c43aa3e01821fbf39df01fa53 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 4 Jul 2023 18:30:34 +0300 Subject: [PATCH 054/722] docs(trie): hashed post state & cursors (#3572) --- crates/storage/provider/src/post_state/mod.rs | 4 +- crates/storage/provider/src/traits/state.rs | 1 + crates/trie/src/hashed_cursor/post_state.rs | 65 +++++++++++++++++-- 3 files changed, 63 insertions(+), 7 deletions(-) diff --git a/crates/storage/provider/src/post_state/mod.rs b/crates/storage/provider/src/post_state/mod.rs index d16d2db07a27..ba0ba30f3d22 100644 --- a/crates/storage/provider/src/post_state/mod.rs +++ b/crates/storage/provider/src/post_state/mod.rs @@ -215,8 +215,8 @@ impl PostState { /// Calculate the state root for this [PostState]. /// Internally, function calls [Self::hash_state_slow] to obtain the [HashedPostState]. 
- /// Afterwards, it retrieves the prefixsets from the [HashedPostState] and uses them to - /// calculate the incremental state root. + /// Afterwards, it retrieves the [PrefixSets](reth_trie::prefix_set::PrefixSet) of changed keys + /// from the [HashedPostState] and uses them to calculate the incremental state root. /// /// # Example /// diff --git a/crates/storage/provider/src/traits/state.rs b/crates/storage/provider/src/traits/state.rs index e09b135223d8..139f2b7261b0 100644 --- a/crates/storage/provider/src/traits/state.rs +++ b/crates/storage/provider/src/traits/state.rs @@ -220,5 +220,6 @@ pub trait PostStateDataProvider: Send + Sync { #[auto_impl[Box,&, Arc]] pub trait StateRootProvider: Send + Sync { /// Returns the state root of the PostState on top of the current state. + /// See [PostState::state_root_slow] for more info. fn state_root(&self, post_state: PostState) -> Result; } diff --git a/crates/trie/src/hashed_cursor/post_state.rs b/crates/trie/src/hashed_cursor/post_state.rs index ed04570df87c..8bd4527624d2 100644 --- a/crates/trie/src/hashed_cursor/post_state.rs +++ b/crates/trie/src/hashed_cursor/post_state.rs @@ -27,7 +27,9 @@ pub struct HashedPostState { } impl HashedPostState { - /// Construct prefix sets from hashed post state. + /// Construct (PrefixSets)[PrefixSet] from hashed post state. + /// The prefix sets contain the hashed account and storage keys that have been changed in the + /// post state. pub fn construct_prefix_sets(&self) -> (PrefixSet, HashMap) { // Initialize prefix sets. let mut account_prefix_set = PrefixSet::default(); @@ -89,11 +91,15 @@ where } /// The cursor to iterate over post state hashed accounts and corresponding database entries. -/// It will always give precedence to the data from the post state. +/// It will always give precedence to the data from the hashed post state. #[derive(Debug, Clone)] pub struct HashedPostStateAccountCursor<'b, C> { + /// The database cursor. cursor: C, + /// The reference to the in-memory [HashedPostState]. post_state: &'b HashedPostState, + /// The last hashed account key that was returned by the cursor. + /// De facto, this is a current cursor position. last_account: Option, } @@ -101,10 +107,19 @@ impl<'b, 'tx, C> HashedPostStateAccountCursor<'b, C> where C: DbCursorRO<'tx, tables::HashedAccount>, { + /// Returns `true` if the account has been destroyed. + /// This check is used for evicting account keys from the state trie. + /// + /// This function only checks the post state, not the database, because the latter does not + /// store destroyed accounts. fn is_account_cleared(&self, account: &H256) -> bool { matches!(self.post_state.accounts.get(account), Some(None)) } + /// Return the account with the lowest hashed account key. + /// + /// Given the next post state and database entries, return the smallest of the two. + /// If the account keys are the same, the post state entry is given precedence. fn next_account( &self, post_state_item: Option<(H256, Account)>, @@ -137,6 +152,14 @@ impl<'b, 'tx, C> HashedAccountCursor for HashedPostStateAccountCursor<'b, C> where C: DbCursorRO<'tx, tables::HashedAccount>, { + /// Seek the next entry for a given hashed account key. + /// + /// If the post state contains the exact match for the key, return it. + /// Otherwise, retrieve the next entries that are greater than or equal to the key from the + /// database and the post state. The two entries are compared and the lowest is returned. 
+ /// + /// The returned account key is memoized and the cursor remains positioned at that key until + /// [HashedAccountCursor::seek] or [HashedAccountCursor::next] are called. fn seek(&mut self, key: H256) -> Result, reth_db::DatabaseError> { self.last_account = None; @@ -171,6 +194,13 @@ Ok(result) } + /// Retrieve the next entry from the cursor. + /// + /// If the cursor is positioned at the entry, return the entry with the next greater key. + /// Returns [None] if the previously memoized entry or the next greater entry is missing. + /// + /// NOTE: This function will not return any entry unless [HashedAccountCursor::seek] has been + /// called. fn next(&mut self) -> Result, reth_db::DatabaseError> { let last_account = match self.last_account.as_ref() { Some(account) => account, @@ -203,13 +233,20 @@ /// It will always give precedence to the data from the post state. #[derive(Debug, Clone)] pub struct HashedPostStateStorageCursor<'b, C> { - post_state: &'b HashedPostState, + /// The database cursor. cursor: C, + /// The reference to the post state. + post_state: &'b HashedPostState, + /// The current hashed account key. account: Option, + /// The last slot that has been returned by the cursor. + /// De facto, this is the cursor's position for the given account key. last_slot: Option, } impl<'b, C> HashedPostStateStorageCursor<'b, C> { + /// Returns `true` if the storage for the given account was wiped. + /// The database is not checked since it already has no wiped storage entries. fn is_db_storage_wiped(&self, account: &H256) -> bool { match self.post_state.storages.get(account) { Some(storage) => storage.wiped, @@ -218,7 +255,7 @@ } /// Check if the slot was zeroed out in the post state. - /// The database is not checked since we don't insert zero valued slots. + /// The database is not checked since it already has no zero-valued slots. fn is_touched_slot_value_zero(&self, account: &H256, slot: &H256) -> bool { self.post_state .storages @@ -228,6 +265,10 @@ .unwrap_or_default() } + /// Return the storage entry with the lowest hashed storage key (hashed slot). + /// + /// Given the next post state and database entries, return the smallest of the two. + /// If the storage keys are the same, the post state entry is given precedence. fn next_slot( &self, post_state_item: Option<(&H256, &U256)>, @@ -260,16 +301,24 @@ impl<'b, 'tx, C> HashedStorageCursor for HashedPostStateStorageCursor<'b, C> where C: DbCursorRO<'tx, tables::HashedStorage> + DbDupCursorRO<'tx, tables::HashedStorage>, { + /// Returns `true` if the account has no storage entries. + /// + /// This function should be called before attempting to call [HashedStorageCursor::seek] or + /// [HashedStorageCursor::next]. fn is_storage_empty(&mut self, key: H256) -> Result { let is_empty = match self.post_state.storages.get(&key) { Some(storage) => { - storage.wiped && storage.storage.iter().all(|(_, value)| *value == U256::ZERO) + // If the storage has been wiped at any point + storage.wiped && + // and the current storage does not contain any non-zero values + storage.storage.iter().all(|(_, value)| *value == U256::ZERO) } None => self.cursor.seek_exact(key)?.is_none(), }; Ok(is_empty) } + /// Seek the next account storage entry for a given hashed key pair. fn seek( &mut self, account: H256, @@ -321,6 +370,12 @@ Ok(result) } + /// Return the next account storage entry for the current account key.
+ /// + /// # Panics + /// + /// If the account key is not set. [HashedStorageCursor::seek] must be called first in order to + /// position the cursor. fn next(&mut self) -> Result, reth_db::DatabaseError> { let account = self.account.expect("`seek` must be called first"); From 7da36e042125397af89b9b477f9292dc676e69f8 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 4 Jul 2023 22:11:11 +0300 Subject: [PATCH 055/722] feat(txpool): all transactions event listener (#3565) --- crates/transaction-pool/src/lib.rs | 10 +- crates/transaction-pool/src/noop.rs | 14 +- crates/transaction-pool/src/pool/events.rs | 39 +++++ crates/transaction-pool/src/pool/listener.rs | 160 +++++++++++-------- crates/transaction-pool/src/pool/mod.rs | 17 +- crates/transaction-pool/src/traits.rs | 6 +- 6 files changed, 162 insertions(+), 84 deletions(-) diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 797d448fac2e..917c2aecad36 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -105,7 +105,7 @@ pub use crate::{ }, error::PoolResult, ordering::{GasCostOrdering, TransactionOrdering}, - pool::TransactionEvents, + pool::{AllTransactionsEvents, PoolTransactionEvent, TransactionEvent, TransactionEvents}, traits::{ AllPoolTransactions, BestTransactions, BlockInfo, CanonicalStateUpdate, ChangedAccount, NewTransactionEvent, PoolSize, PoolTransaction, PooledTransaction, PropagateKind, @@ -288,12 +288,16 @@ where self.pool.add_transaction_event_listener(tx_hash) } + fn all_transactions_event_listener(&self) -> AllTransactionsEvents { + self.pool.add_all_transactions_event_listener() + } + fn pending_transactions_listener(&self) -> Receiver { self.pool.add_pending_listener() } - fn transactions_listener(&self) -> Receiver> { - self.pool.add_transaction_listener() + fn new_transactions_listener(&self) -> Receiver> { + self.pool.add_new_transaction_listener() } fn pooled_transaction_hashes(&self) -> Vec { diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index e3b7a3e81a9c..50be4a2bcf6d 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -4,10 +4,10 @@ //! to be generic over it. 
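Going back to the storage cursor contract documented in the previous patch, a usage sketch (assumptions: the `seek(account, subkey)`/`next()` shapes shown there, `StorageEntry` as the item type, and a caller that always seeks first, since `next` panics otherwise):

use reth_primitives::{StorageEntry, H256};
use reth_trie::hashed_cursor::HashedStorageCursor;

// Walk all storage entries for one hashed account; `C` is any `HashedStorageCursor`.
fn collect_storage<C: HashedStorageCursor>(
    mut cursor: C,
    hashed_address: H256,
) -> Result<Vec<StorageEntry>, reth_db::DatabaseError> {
    let mut entries = Vec::new();
    if cursor.is_storage_empty(hashed_address)? {
        return Ok(entries)
    }
    // Position the cursor first; calling `next` before `seek` would panic.
    let mut entry = cursor.seek(hashed_address, H256::zero())?;
    while let Some(e) = entry {
        entries.push(e);
        entry = cursor.next()?;
    }
    Ok(entries)
}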
use crate::{ - error::PoolError, AllPoolTransactions, BestTransactions, BlockInfo, NewTransactionEvent, - PoolResult, PoolSize, PoolTransaction, PooledTransaction, PropagatedTransactions, - TransactionEvents, TransactionOrigin, TransactionPool, TransactionValidationOutcome, - TransactionValidator, ValidPoolTransaction, + error::PoolError, AllPoolTransactions, AllTransactionsEvents, BestTransactions, BlockInfo, + NewTransactionEvent, PoolResult, PoolSize, PoolTransaction, PooledTransaction, + PropagatedTransactions, TransactionEvents, TransactionOrigin, TransactionPool, + TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, }; use reth_primitives::{Address, TxHash}; use std::{marker::PhantomData, sync::Arc}; @@ -73,11 +73,15 @@ impl TransactionPool for NoopTransactionPool { None } + fn all_transactions_event_listener(&self) -> AllTransactionsEvents { + AllTransactionsEvents { events: mpsc::channel(1).1 } + } + fn pending_transactions_listener(&self) -> Receiver { mpsc::channel(1).1 } - fn transactions_listener(&self) -> Receiver> { + fn new_transactions_listener(&self) -> Receiver> { mpsc::channel(1).1 } diff --git a/crates/transaction-pool/src/pool/events.rs b/crates/transaction-pool/src/pool/events.rs index 8026bdfefc83..62da13392e84 100644 --- a/crates/transaction-pool/src/pool/events.rs +++ b/crates/transaction-pool/src/pool/events.rs @@ -5,6 +5,32 @@ use std::sync::Arc; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; +/// Wrapper around a transaction hash and the event that happened to it. +#[derive(Debug)] +pub struct PoolTransactionEvent(TxHash, TransactionEvent); + +impl PoolTransactionEvent { + /// Create a new transaction event. + pub fn new(hash: TxHash, event: TransactionEvent) -> Self { + Self(hash, event) + } + + /// The hash of the transaction this event is about. + pub fn hash(&self) -> TxHash { + self.0 + } + + /// The event that happened to the transaction. + pub fn event(&self) -> &TransactionEvent { + &self.1 + } + + /// Split the event into its components. + pub fn split(self) -> (TxHash, TransactionEvent) { + (self.0, self.1) + } +} + /// Various events that describe status changes of a transaction. #[derive(Debug, Clone, Eq, PartialEq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] @@ -26,3 +52,16 @@ pub enum TransactionEvent { /// Transaction was propagated to peers. Propagated(Arc>), } + +impl TransactionEvent { + /// Returns `true` if the event is final and no more events are expected for this transaction + /// hash. + pub fn is_final(&self) -> bool { + matches!( + self, + TransactionEvent::Replaced(_) | + TransactionEvent::Mined(_) | + TransactionEvent::Discarded + ) + } +} diff --git a/crates/transaction-pool/src/pool/listener.rs b/crates/transaction-pool/src/pool/listener.rs index b2197e432822..f3ff53545397 100644 --- a/crates/transaction-pool/src/pool/listener.rs +++ b/crates/transaction-pool/src/pool/listener.rs @@ -1,20 +1,30 @@ //! 
Listeners for the transaction-pool -use crate::{pool::events::TransactionEvent, traits::PropagateKind}; +use crate::{ + pool::events::{PoolTransactionEvent, TransactionEvent}, + traits::PropagateKind, +}; use futures_util::Stream; use reth_primitives::{TxHash, H256}; use std::{ collections::{hash_map::Entry, HashMap}, + pin::Pin, sync::Arc, + task::{Context, Poll}, +}; +use tokio::sync::mpsc::{ + error::TrySendError, Receiver, Sender, UnboundedReceiver, UnboundedSender, }; -use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; -/// A Stream that receives [TransactionEvent] for the transaction with the given hash. +/// The size of the event channel used to propagate transaction events. +const TX_POOL_EVENT_CHANNEL_SIZE: usize = 1024; + +/// A Stream that receives [TransactionEvent] only for the transaction with the given hash. #[derive(Debug)] #[must_use = "streams do nothing unless polled"] pub struct TransactionEvents { hash: TxHash, - events: UnboundedReceiver, + events: UnboundedReceiver, } impl TransactionEvents { @@ -25,7 +35,7 @@ } impl Stream for TransactionEvents { - type Item = TransactionEvent; + type Item = PoolTransactionEvent; fn poll_next( self: std::pin::Pin<&mut Self>, @@ -35,133 +45,141 @@ } } -type EventBroadcast = UnboundedSender; +/// A Stream that receives [PoolTransactionEvent] for _all_ transactions. +#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct AllTransactionsEvents { + pub(crate) events: Receiver, +} + +impl Stream for AllTransactionsEvents { + type Item = PoolTransactionEvent; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.get_mut().events.poll_recv(cx) + } +} /// A type that broadcasts [`TransactionEvent`] to installed listeners. /// /// This is essentially a multi-producer, multi-consumer channel where each event is broadcast to /// all active receivers. -#[derive(Debug, Default)] +#[derive(Default, Debug)] pub(crate) struct PoolEventBroadcast { - /// All listeners for certain transaction events. - broadcasters: HashMap, + /// All listeners for all transaction events. + all_events_broadcaster: AllPoolEventsBroadcaster, + /// All listeners for events for a certain transaction hash. + broadcasters_by_hash: HashMap, } impl PoolEventBroadcast { /// Calls the broadcast callback with the `PoolEventBroadcaster` that belongs to the hash. - fn broadcast_with(&mut self, hash: &TxHash, callback: F) - where - F: FnOnce(&mut PoolEventBroadcaster), - { - let is_done = if let Some(sink) = self.broadcasters.get_mut(hash) { - callback(sink); - sink.is_done() - } else { - false - }; + fn broadcast_event(&mut self, hash: &TxHash, event: TransactionEvent) { + // Broadcast to all listeners for the transaction hash. + if let Entry::Occupied(mut sink) = self.broadcasters_by_hash.entry(*hash) { + sink.get_mut().broadcast(*hash, event.clone()); - if is_done { - self.broadcasters.remove(hash); + if sink.get().is_empty() || event.is_final() { + sink.remove(); + } } + + // Broadcast to all listeners for all transactions. + self.all_events_broadcaster.broadcast(*hash, event); } /// Create a new subscription for the given transaction hash.
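A sketch of how a consumer might drain the new all-transactions stream (assumes a tokio runtime and any `TransactionPool` implementation; the `all_transactions_event_listener` trait method is wired up later in this patch):

use futures_util::StreamExt;
use reth_transaction_pool::TransactionPool;

// Log every pool event until the pool shuts down; `pool` is any `TransactionPool`.
async fn log_pool_events<P: TransactionPool>(pool: P) {
    let mut events = pool.all_transactions_event_listener();
    while let Some(event) = events.next().await {
        // `PoolTransactionEvent` pairs the transaction hash with what happened to it.
        let (hash, event) = event.split();
        println!("tx {hash:?}: {event:?}");
    }
}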
pub(crate) fn subscribe(&mut self, tx_hash: TxHash) -> TransactionEvents { let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); - match self.broadcasters.entry(tx_hash) { + match self.broadcasters_by_hash.entry(tx_hash) { Entry::Occupied(mut entry) => { entry.get_mut().senders.push(tx); } Entry::Vacant(entry) => { - entry.insert(PoolEventBroadcaster { is_done: false, senders: vec![tx] }); + entry.insert(PoolEventBroadcaster { senders: vec![tx] }); } }; TransactionEvents { hash: tx_hash, events: rx } } + /// Create a new subscription for all transactions. + pub(crate) fn subscribe_all(&mut self) -> AllTransactionsEvents { + let (tx, rx) = tokio::sync::mpsc::channel(TX_POOL_EVENT_CHANNEL_SIZE); + self.all_events_broadcaster.senders.push(tx); + AllTransactionsEvents { events: rx } + } + /// Notify listeners about a transaction that was added to the pending queue. pub(crate) fn pending(&mut self, tx: &TxHash, replaced: Option<&TxHash>) { - self.broadcast_with(tx, |notifier| notifier.pending()); + self.broadcast_event(tx, TransactionEvent::Pending); if let Some(replaced) = replaced { // notify listeners that this transaction was replaced - self.broadcast_with(replaced, |notifier| notifier.replaced(*tx)); + self.broadcast_event(replaced, TransactionEvent::Replaced(*tx)); } } /// Notify listeners about a transaction that was added to the queued pool. pub(crate) fn queued(&mut self, tx: &TxHash) { - self.broadcast_with(tx, |notifier| notifier.queued()); + self.broadcast_event(tx, TransactionEvent::Queued); } /// Notify listeners about a transaction that was propagated. pub(crate) fn propagated(&mut self, tx: &TxHash, peers: Vec) { - self.broadcast_with(tx, |notifier| notifier.propagated(peers)); + self.broadcast_event(tx, TransactionEvent::Propagated(Arc::new(peers))); } /// Notify listeners about a transaction that was discarded. pub(crate) fn discarded(&mut self, tx: &TxHash) { - self.broadcast_with(tx, |notifier| notifier.discarded()); + self.broadcast_event(tx, TransactionEvent::Discarded); } /// Notify listeners that the transaction was mined pub(crate) fn mined(&mut self, tx: &TxHash, block_hash: H256) { - self.broadcast_with(tx, |notifier| notifier.mined(block_hash)); + self.broadcast_event(tx, TransactionEvent::Mined(block_hash)); } } -/// All Sender half(s) of the event channels for a specific transaction. +/// All Sender half(s) of the event channels for all transactions. /// /// This mimics [tokio::sync::broadcast] but uses separate channels. -#[derive(Debug)] -struct PoolEventBroadcaster { - /// Tracks whether the transaction this notifier can stop because the transaction was - /// completed, or removed. - is_done: bool, +#[derive(Default, Debug)] +struct AllPoolEventsBroadcaster { /// Corresponding sender half(s) for event listener channel - senders: Vec, + senders: Vec>, } -impl PoolEventBroadcaster { - fn broadcast(&mut self, event: TransactionEvent) { - self.senders.retain(|sender| sender.send(event.clone()).is_ok()) - } - - fn is_done(&self) -> bool { - self.senders.is_empty() || self.is_done - } - - /// Transaction was moved to the pending queue. 
- fn pending(&mut self) { - self.broadcast(TransactionEvent::Pending) - } - - /// Transaction was moved to the queued pool - fn queued(&mut self) { - self.broadcast(TransactionEvent::Queued) - } - - /// Transaction was replaced with the given transaction - fn replaced(&mut self, hash: TxHash) { - self.broadcast(TransactionEvent::Replaced(hash)); - self.is_done = true; +impl AllPoolEventsBroadcaster { + // Broadcast an event to all listeners. Dropped listeners are silently evicted. + fn broadcast(&mut self, tx_hash: TxHash, event: TransactionEvent) { + self.senders.retain(|sender| { + match sender.try_send(PoolTransactionEvent::new(tx_hash, event.clone())) { + Ok(_) | Err(TrySendError::Full(_)) => true, + Err(TrySendError::Closed(_)) => false, + } + }) } +} - /// Transaction was mined. - fn mined(&mut self, block_hash: H256) { - self.broadcast(TransactionEvent::Mined(block_hash)); - self.is_done = true; - } +/// All Sender half(s) of the event channels for a specific transaction. +/// +/// This mimics [tokio::sync::broadcast] but uses separate channels and is unbounded. +#[derive(Default, Debug)] +struct PoolEventBroadcaster { + /// Corresponding sender half(s) for event listener channel + senders: Vec>, +} - /// Transaction was propagated. - fn propagated(&mut self, peers: Vec) { - self.broadcast(TransactionEvent::Propagated(Arc::new(peers))); +impl PoolEventBroadcaster { + /// Returns `true` if there are no more listeners remaining. + fn is_empty(&self) -> bool { + self.senders.is_empty() } - /// Transaction was replaced with the given transaction - fn discarded(&mut self) { - self.broadcast(TransactionEvent::Discarded); - self.is_done = true; + // Broadcast an event to all listeners. Dropped listeners are silently evicted. + fn broadcast(&mut self, tx_hash: TxHash, event: TransactionEvent) { + self.senders + .retain(|sender| sender.send(PoolTransactionEvent::new(tx_hash, event.clone())).is_ok()) } } diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 30cd3daffc4a..0d6b0983fa09 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -81,7 +81,6 @@ use crate::{ CanonicalStateUpdate, ChangedAccount, PoolConfig, TransactionOrdering, TransactionValidator, }; use best::BestTransactions; -pub use events::TransactionEvent; use parking_lot::{Mutex, RwLock}; use reth_primitives::{Address, TxHash, H256}; use std::{ @@ -93,16 +92,19 @@ use std::{ use tokio::sync::mpsc; use tracing::debug; -mod best; mod events; +pub use events::{PoolTransactionEvent, TransactionEvent}; + mod listener; +pub use listener::{AllTransactionsEvents, TransactionEvents}; + +mod best; mod parked; pub(crate) mod pending; pub(crate) mod size; pub(crate) mod state; pub mod txpool; mod update; -pub use listener::TransactionEvents; /// Transaction pool internals. pub struct PoolInner { @@ -197,7 +199,9 @@ where } /// Adds a new transaction listener to the pool that gets notified about every new transaction - pub fn add_transaction_listener(&self) -> mpsc::Receiver> { + pub fn add_new_transaction_listener( + &self, + ) -> mpsc::Receiver> { const TX_LISTENER_BUFFER_SIZE: usize = 1024; let (tx, rx) = mpsc::channel(TX_LISTENER_BUFFER_SIZE); self.transaction_listener.lock().push(tx); @@ -218,6 +222,11 @@ where } } + /// Adds a listener for all transaction events. 
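The eviction rule in `AllPoolEventsBroadcaster::broadcast` — keep a slow (Full) receiver but drop a closed one — can be illustrated in isolation with plain tokio channels (a standalone sketch, not pool code):

use tokio::sync::mpsc::{self, error::TrySendError};

fn main() {
    let (tx, rx) = mpsc::channel::<u32>(1);
    tx.try_send(1).unwrap();
    // Channel full: this event is lost for the listener, but the sender is retained.
    assert!(matches!(tx.try_send(2), Err(TrySendError::Full(_))));
    drop(rx);
    // Receiver dropped: the sender would now be evicted from `senders`.
    assert!(matches!(tx.try_send(3), Err(TrySendError::Closed(_))));
}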
+ pub(crate) fn add_all_transactions_event_listener(&self) -> AllTransactionsEvents { + self.event_listener.write().subscribe_all() + } + /// Returns hashes of _all_ transactions in the pool. pub(crate) fn pooled_transactions_hashes(&self) -> Vec { let pool = self.pool.read(); diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 256afe996410..6e5ebe197d80 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -2,6 +2,7 @@ use crate::{ error::PoolResult, pool::{state::SubPool, TransactionEvents}, validate::ValidPoolTransaction, + AllTransactionsEvents, }; use reth_primitives::{ Address, FromRecoveredTransaction, IntoRecoveredTransaction, PeerId, Transaction, @@ -94,13 +95,16 @@ pub trait TransactionPool: Send + Sync + Clone { /// Returns `None` if the transaction is not in the pool. fn transaction_event_listener(&self, tx_hash: TxHash) -> Option; + /// Returns a new transaction change event stream for _all_ transactions in the pool. + fn all_transactions_event_listener(&self) -> AllTransactionsEvents; + /// Returns a new Stream that yields transactions hashes for new ready transactions. /// /// Consumer: RPC fn pending_transactions_listener(&self) -> Receiver; /// Returns a new stream that yields new valid transactions added to the pool. - fn transactions_listener(&self) -> Receiver>; + fn new_transactions_listener(&self) -> Receiver>; /// Returns the _hashes_ of all transactions in the pool. /// From 1e3f0c0e8615a36b2b10db1dbcb0faf1b0b8e394 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 5 Jul 2023 11:50:51 +0200 Subject: [PATCH 056/722] refactor: extract on pipeline finished (#3590) --- crates/consensus/beacon/src/engine/mod.rs | 200 ++++++++++++---------- 1 file changed, 108 insertions(+), 92 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 57028c941044..ffef3be2b1e6 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -27,7 +27,7 @@ use reth_rpc_types::engine::{ ExecutionPayload, ForkchoiceUpdated, PayloadAttributes, PayloadStatus, PayloadStatusEnum, PayloadValidationError, }; -use reth_stages::{ControlFlow, Pipeline}; +use reth_stages::{ControlFlow, Pipeline, PipelineError}; use reth_tasks::TaskSpawner; use std::{ pin::Pin, @@ -1207,107 +1207,123 @@ where return Some(Err(BeaconConsensusEngineError::PipelineChannelClosed)) } EngineSyncEvent::PipelineFinished { result, reached_max_block } => { - trace!(target: "consensus::engine", ?result, ?reached_max_block, "Pipeline finished"); - match result { - Ok(ctrl) => { - if reached_max_block { - // Terminate the sync early if it's reached the maximum user - // configured block. - return Some(Ok(())) - } + return self.on_pipeline_finished(result, reached_max_block) + } + }; - if let ControlFlow::Unwind { bad_block, .. } = ctrl { - trace!(target: "consensus::engine", hash=?bad_block.hash, "Bad block detected in unwind"); + None + } - // update the `invalid_headers` cache with the new invalid headers - self.invalid_headers.insert(bad_block); - return None - } + /// Invoked when the pipeline has finished. + /// + /// Returns an Option to indicate whether the engine future should resolve: + /// + /// Returns a result if: + /// - Ok(()) if the pipeline finished successfully + /// - Err(..) if the pipeline failed fatally + /// + /// Returns None if the pipeline finished successfully and engine should continue. 
+ fn on_pipeline_finished( + &mut self, + result: Result, + reached_max_block: bool, + ) -> Option> { + trace!(target: "consensus::engine", ?result, ?reached_max_block, "Pipeline finished"); + match result { + Ok(ctrl) => { + if reached_max_block { + // Terminate the sync early if it's reached the maximum user + // configured block. + return Some(Ok(())) + } - // update the canon chain if continuous is enabled - if self.sync.run_pipeline_continuously() { - let max_block = ctrl.progress().unwrap_or_default(); - let max_header = match self.blockchain.sealed_header(max_block) { - Ok(header) => match header { - Some(header) => header, - None => { - return Some(Err(Error::Provider( - ProviderError::HeaderNotFound(max_block.into()), - ) - .into())) - } - }, - Err(error) => { - error!(target: "consensus::engine", ?error, "Error getting canonical header for continuous sync"); - return Some(Err(error.into())) - } - }; - self.blockchain.set_canonical_head(max_header); - } + if let ControlFlow::Unwind { bad_block, .. } = ctrl { + trace!(target: "consensus::engine", hash=?bad_block.hash, "Bad block detected in unwind"); - let sync_target_state = match self - .forkchoice_state_tracker - .sync_target_state() - { - Some(current_state) => current_state, + // update the `invalid_headers` cache with the new invalid headers + self.invalid_headers.insert(bad_block); + return None + } + + // update the canon chain if continuous is enabled + if self.sync.run_pipeline_continuously() { + let max_block = ctrl.progress().unwrap_or_default(); + let max_header = match self.blockchain.sealed_header(max_block) { + Ok(header) => match header { + Some(header) => header, None => { - // This is only possible if the node was run with `debug.tip` - // argument and without CL. - warn!(target: "consensus::engine", "No fork choice state available"); - return None + return Some(Err(Error::Provider(ProviderError::HeaderNotFound( + max_block.into(), + )) + .into())) } - }; - - // Next, we check if we need to schedule another pipeline run or transition - // to live sync via tree. - // This can arise if we buffer the forkchoice head, and if the head is an - // ancestor of an invalid block. - // - // * The forkchoice head could be buffered if it were first sent as a - // `newPayload` request. - // - // In this case, we won't have the head hash in the database, so we would - // set the pipeline sync target to a known-invalid head. - // - // This is why we check the invalid header cache here. - let lowest_buffered_ancestor = - self.lowest_buffered_ancestor_or(sync_target_state.head_block_hash); - - // this inserts the head if the lowest buffered ancestor is invalid - if self - .check_invalid_ancestor_with_head( - lowest_buffered_ancestor, - sync_target_state.head_block_hash, - ) - .is_none() - { - // Update the state and hashes of the blockchain tree if possible. - match self.update_tree_on_finished_pipeline( - sync_target_state.finalized_block_hash, - ) { - Ok(synced) => { - if synced { - // we're consider this synced and transition to live sync - self.sync_state_updater.update_sync_state(SyncState::Idle); - } else { - // We don't have the finalized block in the database, so - // we need to run another pipeline. 
- self.sync.set_pipeline_sync_target( - sync_target_state.finalized_block_hash, - ); - } - } - Err(error) => { - error!(target: "consensus::engine", ?error, "Error restoring blockchain tree state"); - return Some(Err(error.into())) - } - }; + }, + Err(error) => { + error!(target: "consensus::engine", ?error, "Error getting canonical header for continuous sync"); + return Some(Err(error.into())) } + }; + self.blockchain.set_canonical_head(max_header); + } + + let sync_target_state = match self.forkchoice_state_tracker.sync_target_state() { + Some(current_state) => current_state, + None => { + // This is only possible if the node was run with `debug.tip` + // argument and without CL. + warn!(target: "consensus::engine", "No fork choice state available"); + return None } - // Any pipeline error at this point is fatal. - Err(error) => return Some(Err(error.into())), }; + + // Next, we check if we need to schedule another pipeline run or transition + // to live sync via tree. + // This can arise if we buffer the forkchoice head, and if the head is an + // ancestor of an invalid block. + // + // * The forkchoice head could be buffered if it were first sent as a `newPayload` + // request. + // + // In this case, we won't have the head hash in the database, so we would + // set the pipeline sync target to a known-invalid head. + // + // This is why we check the invalid header cache here. + let lowest_buffered_ancestor = + self.lowest_buffered_ancestor_or(sync_target_state.head_block_hash); + + // this inserts the head if the lowest buffered ancestor is invalid + if self + .check_invalid_ancestor_with_head( + lowest_buffered_ancestor, + sync_target_state.head_block_hash, + ) + .is_none() + { + // Update the state and hashes of the blockchain tree if possible. + match self + .update_tree_on_finished_pipeline(sync_target_state.finalized_block_hash) + { + Ok(synced) => { + if synced { + // we consider this synced and transition to live sync + self.sync_state_updater.update_sync_state(SyncState::Idle); + } else { + // We don't have the finalized block in the database, so + // we need to run another pipeline. + self.sync.set_pipeline_sync_target( + sync_target_state.finalized_block_hash, + ); + } + } + Err(error) => { + error!(target: "consensus::engine", ?error, "Error restoring blockchain tree state"); + return Some(Err(error.into())) + } + }; + } } // Any pipeline error at this point is fatal.
+ Err(error) => return Some(Err(error.into())), }; None From 9cd31f14874cba7d410eff82c721a48f841217a3 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 5 Jul 2023 11:58:48 +0200 Subject: [PATCH 057/722] perf: put all prefix sets in Rc (#3582) --- crates/trie/benches/prefix_set.rs | 8 ++- crates/trie/src/hashed_cursor/post_state.rs | 13 ++-- crates/trie/src/prefix_set/loader.rs | 8 +-- crates/trie/src/prefix_set/mod.rs | 69 +++++++++++++++++++-- crates/trie/src/trie.rs | 39 +++++++----- crates/trie/src/walker.rs | 9 ++- 6 files changed, 110 insertions(+), 36 deletions(-) diff --git a/crates/trie/benches/prefix_set.rs b/crates/trie/benches/prefix_set.rs index 33de35cf9f40..95782fe890f1 100644 --- a/crates/trie/benches/prefix_set.rs +++ b/crates/trie/benches/prefix_set.rs @@ -7,21 +7,23 @@ use proptest::{ test_runner::{basic_result_cache, TestRunner}, }; use reth_primitives::trie::Nibbles; -use reth_trie::prefix_set::PrefixSet; +use reth_trie::prefix_set::PrefixSetMut; use std::collections::BTreeSet; +/// Abstractions used for benching pub trait PrefixSetAbstraction: Default { fn insert(&mut self, key: Nibbles); fn contains(&mut self, key: Nibbles) -> bool; } -impl PrefixSetAbstraction for PrefixSet { +/// Abstractions used for benching +impl PrefixSetAbstraction for PrefixSetMut { fn insert(&mut self, key: Nibbles) { self.insert(key) } fn contains(&mut self, key: Nibbles) -> bool { - PrefixSet::contains(self, key) + PrefixSetMut::contains(self, key) } } diff --git a/crates/trie/src/hashed_cursor/post_state.rs b/crates/trie/src/hashed_cursor/post_state.rs index 8bd4527624d2..474fe0335247 100644 --- a/crates/trie/src/hashed_cursor/post_state.rs +++ b/crates/trie/src/hashed_cursor/post_state.rs @@ -1,5 +1,5 @@ use super::{HashedAccountCursor, HashedCursorFactory, HashedStorageCursor}; -use crate::prefix_set::PrefixSet; +use crate::prefix_set::{PrefixSet, PrefixSetMut}; use reth_db::{ cursor::{DbCursorRO, DbDupCursorRO}, tables, @@ -27,13 +27,13 @@ pub struct HashedPostState { } impl HashedPostState { - /// Construct (PrefixSets)[PrefixSet] from hashed post state. + /// Construct (PrefixSet)[PrefixSet] from hashed post state. /// The prefix sets contain the hashed account and storage keys that have been changed in the /// post state. pub fn construct_prefix_sets(&self) -> (PrefixSet, HashMap) { // Initialize prefix sets. - let mut account_prefix_set = PrefixSet::default(); - let mut storage_prefix_set: HashMap = HashMap::default(); + let mut account_prefix_set = PrefixSetMut::default(); + let mut storage_prefix_set: HashMap = HashMap::default(); for hashed_address in self.accounts.keys() { account_prefix_set.insert(Nibbles::unpack(hashed_address)); @@ -49,7 +49,10 @@ impl HashedPostState { } } - (account_prefix_set, storage_prefix_set) + ( + account_prefix_set.freeze(), + storage_prefix_set.into_iter().map(|(k, v)| (k, v.freeze())).collect(), + ) } } diff --git a/crates/trie/src/prefix_set/loader.rs b/crates/trie/src/prefix_set/loader.rs index dc9e807f3720..df6393841c6b 100644 --- a/crates/trie/src/prefix_set/loader.rs +++ b/crates/trie/src/prefix_set/loader.rs @@ -1,4 +1,4 @@ -use super::PrefixSet; +use super::PrefixSetMut; use derive_more::Deref; use reth_db::{ cursor::DbCursorRO, @@ -29,10 +29,10 @@ where pub fn load( self, range: RangeInclusive, - ) -> Result<(PrefixSet, HashMap), DatabaseError> { + ) -> Result<(PrefixSetMut, HashMap), DatabaseError> { // Initialize prefix sets. 
- let mut account_prefix_set = PrefixSet::default(); - let mut storage_prefix_set: HashMap = HashMap::default(); + let mut account_prefix_set = PrefixSetMut::default(); + let mut storage_prefix_set: HashMap = HashMap::default(); // Walk account changeset and insert account prefixes. let mut account_cursor = self.cursor_read::()?; diff --git a/crates/trie/src/prefix_set/mod.rs b/crates/trie/src/prefix_set/mod.rs index e0101db129e9..d760e236d9c9 100644 --- a/crates/trie/src/prefix_set/mod.rs +++ b/crates/trie/src/prefix_set/mod.rs @@ -1,4 +1,5 @@ use reth_primitives::trie::Nibbles; +use std::rc::Rc; mod loader; pub use loader::PrefixSetLoader; @@ -14,22 +15,22 @@ pub use loader::PrefixSetLoader; /// # Examples /// /// ``` -/// use reth_trie::prefix_set::PrefixSet; +/// use reth_trie::prefix_set::PrefixSetMut; /// -/// let mut prefix_set = PrefixSet::default(); +/// let mut prefix_set = PrefixSetMut::default(); /// prefix_set.insert(b"key1"); /// prefix_set.insert(b"key2"); /// /// assert_eq!(prefix_set.contains(b"key"), true); /// ``` #[derive(Debug, Default, Clone)] -pub struct PrefixSet { +pub struct PrefixSetMut { keys: Vec, sorted: bool, index: usize, } -impl PrefixSet { +impl PrefixSetMut { /// Returns `true` if any of the keys in the set has the given prefix or /// if the given prefix is a prefix of any key in the set. pub fn contains>(&mut self, prefix: T) -> bool { @@ -75,6 +76,64 @@ impl PrefixSet { pub fn is_empty(&self) -> bool { self.keys.is_empty() } + + /// Returns a `PrefixSet` with the same elements as this set. + /// + /// If not yet sorted, the elements will be sorted and deduplicated. + pub fn freeze(mut self) -> PrefixSet { + if !self.sorted { + self.keys.sort(); + self.keys.dedup(); + } + + PrefixSet { keys: Rc::new(self.keys), index: self.index } + } +} + +/// A sorted prefix set that has an immutable _sorted_ list of unique keys. +/// +/// See also [PrefixSetMut::freeze]. +#[derive(Debug, Default, Clone)] +pub struct PrefixSet { + keys: Rc>, + index: usize, +} + +impl PrefixSet { + /// Returns `true` if any of the keys in the set has the given prefix or + /// if the given prefix is a prefix of any key in the set. + #[inline] + pub fn contains>(&mut self, prefix: T) -> bool { + let prefix = prefix.into(); + + while self.index > 0 && self.keys[self.index] > prefix { + self.index -= 1; + } + + for (idx, key) in self.keys[self.index..].iter().enumerate() { + if key.has_prefix(&prefix) { + self.index += idx; + return true + } + + if key > &prefix { + self.index += idx; + return false + } + } + + false + } + + /// Returns the number of elements in the set. + pub fn len(&self) -> usize { + self.keys.len() + } + + /// Returns `true` if the set is empty. 
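A small usage sketch of the new mutable-then-frozen flow (assuming the `reth_trie::prefix_set` exports shown above; this mirrors the doctest style of the module):

use reth_trie::prefix_set::PrefixSetMut;

fn main() {
    let mut prefix_set = PrefixSetMut::default();
    // Inserts may arrive unsorted and duplicated; `freeze` sorts and dedups once.
    prefix_set.insert(b"key2");
    prefix_set.insert(b"key1");
    prefix_set.insert(b"key1");
    // The frozen set keeps its sorted keys behind an `Rc`, so clones are cheap.
    let mut frozen = prefix_set.freeze();
    assert!(frozen.contains(b"key"));
    assert_eq!(frozen.len(), 2);
}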
+ pub fn is_empty(&self) -> bool { + self.keys.is_empty() + } } #[cfg(test)] @@ -83,7 +142,7 @@ mod tests { #[test] fn test_contains_with_multiple_inserts_and_duplicates() { - let mut prefix_set = PrefixSet::default(); + let mut prefix_set = PrefixSetMut::default(); prefix_set.insert(b"123"); prefix_set.insert(b"124"); prefix_set.insert(b"456"); diff --git a/crates/trie/src/trie.rs b/crates/trie/src/trie.rs index 5359f0949709..c9df6c9f850b 100644 --- a/crates/trie/src/trie.rs +++ b/crates/trie/src/trie.rs @@ -1,7 +1,7 @@ use crate::{ account::EthAccount, hashed_cursor::{HashedAccountCursor, HashedCursorFactory, HashedStorageCursor}, - prefix_set::{PrefixSet, PrefixSetLoader}, + prefix_set::{PrefixSet, PrefixSetLoader, PrefixSetMut}, progress::{IntermediateStateRootState, StateRootProgress}, trie_cursor::{AccountTrieCursor, StorageTrieCursor}, updates::{TrieKey, TrieOp, TrieUpdates}, @@ -90,7 +90,7 @@ where pub fn new(tx: &'a TX) -> Self { Self { tx, - changed_account_prefixes: PrefixSet::default(), + changed_account_prefixes: PrefixSetMut::default().freeze(), changed_storage_prefixes: HashMap::default(), previous_state: None, threshold: 100_000, @@ -110,8 +110,10 @@ where ) -> Result { let (account_prefixes, storage_prefixes) = PrefixSetLoader::new(tx).load(range)?; Ok(Self::new(tx) - .with_changed_account_prefixes(account_prefixes) - .with_changed_storage_prefixes(storage_prefixes)) + .with_changed_account_prefixes(account_prefixes.freeze()) + .with_changed_storage_prefixes( + storage_prefixes.into_iter().map(|(k, v)| (k, v.freeze())).collect(), + )) } /// Computes the state root of the trie with the changed account and storage prefixes and @@ -371,7 +373,7 @@ where Self { tx, hashed_address, - changed_prefixes: PrefixSet::default(), + changed_prefixes: PrefixSetMut::default().freeze(), hashed_cursor_factory: tx, } } @@ -389,7 +391,12 @@ impl<'a, 'b, TX, H> StorageRoot<'a, 'b, TX, H> { hashed_cursor_factory: &'b H, hashed_address: H256, ) -> Self { - Self { tx, hashed_address, changed_prefixes: PrefixSet::default(), hashed_cursor_factory } + Self { + tx, + hashed_address, + changed_prefixes: PrefixSetMut::default().freeze(), + hashed_cursor_factory, + } } /// Set the changed prefixes. @@ -591,10 +598,10 @@ mod tests { trie_updates.flush(tx.tx_ref()).unwrap(); // 3. 
Calculate the incremental root - let mut storage_changes = PrefixSet::default(); + let mut storage_changes = PrefixSetMut::default(); storage_changes.insert(Nibbles::unpack(modified_key)); let loader = StorageRoot::new_hashed(tx.tx_ref(), hashed_address) - .with_changed_prefixes(storage_changes); + .with_changed_prefixes(storage_changes.freeze()); let incremental_root = loader.root().unwrap(); assert_eq!(modified_root, incremental_root); @@ -1014,7 +1021,7 @@ mod tests { Account { nonce: 0, balance: U256::from(5).mul(ether), bytecode_hash: None }; hashed_account_cursor.upsert(key4b, account4b).unwrap(); - let mut prefix_set = PrefixSet::default(); + let mut prefix_set = PrefixSetMut::default(); prefix_set.insert(Nibbles::unpack(key4b)); let expected_state_root = @@ -1022,7 +1029,7 @@ mod tests { .unwrap(); let (root, trie_updates) = StateRoot::new(tx.tx_ref()) - .with_changed_account_prefixes(prefix_set) + .with_changed_account_prefixes(prefix_set.freeze()) .root_with_updates() .unwrap(); assert_eq!(root, expected_state_root); @@ -1060,7 +1067,7 @@ mod tests { let account = hashed_account_cursor.seek_exact(key2).unwrap().unwrap(); hashed_account_cursor.delete_current().unwrap(); - let mut account_prefix_set = PrefixSet::default(); + let mut account_prefix_set = PrefixSetMut::default(); account_prefix_set.insert(Nibbles::unpack(account.0)); let computed_expected_root: H256 = triehash::trie_root::([ @@ -1074,7 +1081,7 @@ mod tests { ]); let (root, trie_updates) = StateRoot::new(tx.tx_ref()) - .with_changed_account_prefixes(account_prefix_set) + .with_changed_account_prefixes(account_prefix_set.freeze()) .root_with_updates() .unwrap(); assert_eq!(root, computed_expected_root); @@ -1116,7 +1123,7 @@ mod tests { let account3 = hashed_account_cursor.seek_exact(key3).unwrap().unwrap(); hashed_account_cursor.delete_current().unwrap(); - let mut account_prefix_set = PrefixSet::default(); + let mut account_prefix_set = PrefixSetMut::default(); account_prefix_set.insert(Nibbles::unpack(account2.0)); account_prefix_set.insert(Nibbles::unpack(account3.0)); @@ -1131,7 +1138,7 @@ mod tests { ]); let (root, trie_updates) = StateRoot::new(tx.tx_ref()) - .with_changed_account_prefixes(account_prefix_set) + .with_changed_account_prefixes(account_prefix_set.freeze()) .root_with_updates() .unwrap(); assert_eq!(root, computed_expected_root); @@ -1227,7 +1234,7 @@ mod tests { let mut state = BTreeMap::default(); for accounts in account_changes { let should_generate_changeset = !state.is_empty(); - let mut changes = PrefixSet::default(); + let mut changes = PrefixSetMut::default(); for (hashed_address, balance) in accounts.clone() { hashed_account_cursor.upsert(hashed_address, Account { balance,..Default::default() }).unwrap(); if should_generate_changeset { @@ -1236,7 +1243,7 @@ mod tests { } let (state_root, trie_updates) = StateRoot::new(tx.tx_ref()) - .with_changed_account_prefixes(changes) + .with_changed_account_prefixes(changes.freeze()) .root_with_updates() .unwrap(); diff --git a/crates/trie/src/walker.rs b/crates/trie/src/walker.rs index b876f0d93c1e..a343b0ee69f0 100644 --- a/crates/trie/src/walker.rs +++ b/crates/trie/src/walker.rs @@ -256,7 +256,10 @@ impl<'a, K: Key + From>, C: TrieCursor> TrieWalker<'a, K, C> { mod tests { use super::*; - use crate::trie_cursor::{AccountTrieCursor, StorageTrieCursor}; + use crate::{ + prefix_set::PrefixSetMut, + trie_cursor::{AccountTrieCursor, StorageTrieCursor}, + }; use reth_db::{ cursor::DbCursorRW, tables, test_utils::create_test_rw_db, transaction::DbTxMut, 
}; @@ -378,9 +381,9 @@ mod tests { assert_eq!(cursor.key(), None); // We insert something that's not part of the existing trie/prefix. - let mut changed = PrefixSet::default(); + let mut changed = PrefixSetMut::default(); changed.insert(&[0xF, 0x1]); - let mut cursor = TrieWalker::new(&mut trie, changed); + let mut cursor = TrieWalker::new(&mut trie, changed.freeze()); // Root node assert_eq!(cursor.key(), Some(Nibbles::from_hex(vec![]))); From ab2fc7e17a22ca87b9af37ae0bf6da24fb34f6b5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 5 Jul 2023 12:32:28 +0200 Subject: [PATCH 058/722] fix: use DatabaseRef impl when fetching overridden account (#3589) Co-authored-by: Roman Krasiuk --- crates/rpc/rpc/src/eth/revm_utils.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/rpc/rpc/src/eth/revm_utils.rs b/crates/rpc/rpc/src/eth/revm_utils.rs index cd552b398a54..fbfe93e53ad5 100644 --- a/crates/rpc/rpc/src/eth/revm_utils.rs +++ b/crates/rpc/rpc/src/eth/revm_utils.rs @@ -459,7 +459,9 @@ where DB: DatabaseRef, EthApiError: From<::Error>, { - let mut account_info = db.basic(account)?.unwrap_or_default(); + // we need to fetch the account via the `DatabaseRef` to avoid updating the state of the + // account, which `Database::basic` would modify + let mut account_info = DatabaseRef::basic(db, account)?.unwrap_or_default(); if let Some(nonce) = account_override.nonce { account_info.nonce = nonce.as_u64(); From a932e2f1fe66e527dfcb8f4b4a1075445d2dd114 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 5 Jul 2023 12:58:36 +0200 Subject: [PATCH 059/722] perf: better engine downloads (#3584) --- crates/consensus/beacon/src/engine/mod.rs | 139 +++++++++++++--------- 1 file changed, 83 insertions(+), 56 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index ffef3be2b1e6..26fc189e3674 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -69,6 +69,9 @@ const MAX_INVALID_HEADERS: u32 = 512u32; /// The largest gap for which the tree will be used for sync. See docs for `pipeline_run_threshold` /// for more information. +/// +/// This is the default threshold, i.e. the maximum distance from the head at which the tree is still used for sync. +/// If the distance exceeds this threshold, the pipeline will be used for sync. pub const MIN_BLOCKS_FOR_PIPELINE_RUN: u64 = EPOCH_SLOTS; /// A _shareable_ beacon consensus frontend.
Used to interact with the spawned beacon consensus @@ -1065,62 +1068,9 @@ where self.try_make_sync_target_canonical(downloaded_num_hash); } InsertPayloadOk::Inserted(BlockStatus::Disconnected { missing_parent }) => { - // compare the missing parent with the canonical tip - let canonical_tip_num = self.blockchain.canonical_tip().number; - let sync_target_state = self.forkchoice_state_tracker.sync_target_state(); - - let mut requires_pipeline = self.exceeds_pipeline_run_threshold( - canonical_tip_num, - missing_parent.number, - ); - - // check if the downloaded block is the tracked finalized block - if let Some(ref state) = sync_target_state { - if downloaded_num_hash.hash == state.finalized_block_hash { - // we downloaded the finalized block - requires_pipeline = self.exceeds_pipeline_run_threshold( - canonical_tip_num, - downloaded_num_hash.number, - ); - } else if let Some(buffered_finalized) = - self.blockchain.buffered_header_by_hash(state.finalized_block_hash) - { - // if we have buffered the finalized block, we should check how far - // we're off - requires_pipeline = self.exceeds_pipeline_run_threshold( - canonical_tip_num, - buffered_finalized.number, - ); - } - } - - // if the number of missing blocks is greater than the max, run the - // pipeline - if requires_pipeline { - if let Some(state) = sync_target_state { - // if we have already canonicalized the finalized block, we should - // skip the pipeline run - if Ok(None) == - self.blockchain.header_by_hash_or_number( - state.finalized_block_hash.into(), - ) - { - self.sync.set_pipeline_sync_target(state.finalized_block_hash) - } - } - } else { - // continue downloading the missing parent - // - // this happens if either: - // * the missing parent block num < canonical tip num - // * this case represents a missing block on a fork that is shorter - // than the canonical chain - // * the missing parent block num >= canonical tip num, but the number - // of missing blocks is less than the pipeline threshold - // * this case represents a potentially long range of blocks to - // download and execute - self.sync.download_full_block(missing_parent.hash); - } + // block is not connected to the canonical head, we need to download its + // missing branch first + self.on_disconnected_block(downloaded_num_hash, missing_parent); } _ => (), } @@ -1134,6 +1084,83 @@ where } } + /// This handles downloaded blocks that are shown to be disconnected from the canonical chain. + /// + /// This mainly compares the missing parent of the downloaded block with the current canonical + /// tip, and decides whether or not the pipeline should be run. + /// + /// The canonical tip is compared to the missing parent using `exceeds_pipeline_run_threshold`, + /// which returns true if the missing parent is sufficiently ahead of the canonical tip. If so, + /// the pipeline is run. Otherwise, we need to insert blocks using the blockchain tree, and + /// must download blocks outside of the pipeline. In this case, the distance is used to + /// determine how many blocks we should download at once. 
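+    ///
+    /// As an illustrative sketch (the numbers here are hypothetical, not taken
+    /// from this code): with the default threshold of one epoch (`EPOCH_SLOTS`,
+    /// i.e. 32 blocks), a canonical tip at block 1_000 and a missing parent at
+    /// block 1_100 give a distance of 100 > 32, so the pipeline is run; a
+    /// missing parent at block 1_010 (distance 10) stays under the threshold,
+    /// so the missing blocks are downloaded and inserted via the blockchain
+    /// tree instead.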
+    fn on_disconnected_block(
+        &mut self,
+        downloaded_block: BlockNumHash,
+        missing_parent: BlockNumHash,
+    ) {
+        // compare the missing parent with the canonical tip
+        let canonical_tip_num = self.blockchain.canonical_tip().number;
+        let sync_target_state = self.forkchoice_state_tracker.sync_target_state();
+
+        trace!(target: "consensus::engine", ?downloaded_block, ?missing_parent, tip=?canonical_tip_num, "Handling disconnected block");
+
+        let mut exceeds_pipeline_run_threshold =
+            self.exceeds_pipeline_run_threshold(canonical_tip_num, missing_parent.number);
+
+        // check if the downloaded block is the tracked finalized block
+        if let Some(ref state) = sync_target_state {
+            if downloaded_block.hash == state.finalized_block_hash {
+                // we downloaded the finalized block
+                exceeds_pipeline_run_threshold =
+                    self.exceeds_pipeline_run_threshold(canonical_tip_num, downloaded_block.number);
+            } else if let Some(buffered_finalized) =
+                self.blockchain.buffered_header_by_hash(state.finalized_block_hash)
+            {
+                // if we have buffered the finalized block, we should check how far
+                // we're off
+                exceeds_pipeline_run_threshold = self
+                    .exceeds_pipeline_run_threshold(canonical_tip_num, buffered_finalized.number);
+            }
+        }
+
+        // if the number of missing blocks is greater than the max, run the
+        // pipeline
+        if exceeds_pipeline_run_threshold {
+            if let Some(state) = sync_target_state {
+                // if we have already canonicalized the finalized block, we should
+                // skip the pipeline run
+                match self.blockchain.header_by_hash_or_number(state.finalized_block_hash.into()) {
+                    Err(err) => {
+                        warn!(target: "consensus::engine", ?err, "Failed to get finalized block header");
+                    }
+                    Ok(None) => {
+                        // we don't have the block yet and the distance exceeds the allowed
+                        // threshold
+                        self.sync.set_pipeline_sync_target(state.safe_block_hash);
+                        // we can exit early here because the pipeline will take care of this
+                        return
+                    }
+                    Ok(Some(_)) => {
+                        // we're fully synced to the finalized block
+                        // but we want to continue downloading the missing parent
+                    }
+                }
+            }
+        }
+
+        // continue downloading the missing parent
+        //
+        // this happens if either:
+        //  * the missing parent block num < canonical tip num
+        //    * this case represents a missing block on a fork that is shorter than the canonical
+        //      chain
+        //  * the missing parent block num >= canonical tip num, but the number of missing blocks is
+        //    less than the pipeline threshold
+        //    * this case represents a potentially long range of blocks to download and execute
+        self.sync.download_full_block(missing_parent.hash);
+    }
+
    /// Attempt to form a new canonical chain based on the current sync target.
    ///
    /// This is invoked when we successfully downloaded a new block from the network which resulted

From d4f505c95e8385035e8c9bb9d531c78b52b495b0 Mon Sep 17 00:00:00 2001
From: Bjerg
Date: Wed, 5 Jul 2023 14:16:13 +0200
Subject: [PATCH 060/722] fix: use same stale label for prs (#3593)

---
 .github/workflows/stale.yml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
index 91d99476b889..4a73a8a1bfc7 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -16,7 +16,9 @@ jobs:
          days-before-stale: 14
          days-before-close: 7
          stale-issue-label: "S-stale"
+          stale-pr-label: "S-stale"
          exempt-issue-labels: "M-prevent-stale"
+          exempt-pr-labels: "M-prevent-stale"
          stale-issue-message: "This issue is stale because it has been open for 14 days with no activity."
close-issue-message: "This issue was closed because it has been inactive for 7 days since being marked as stale." exempt-all-milestones: true From ceb4e355a234d0cb5b1ccb550912c5ad9833b6fc Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 5 Jul 2023 14:25:04 +0200 Subject: [PATCH 061/722] test: add test for state override json (#3586) --- crates/rpc/rpc-types/src/eth/state.rs | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/crates/rpc/rpc-types/src/eth/state.rs b/crates/rpc/rpc-types/src/eth/state.rs index 35b6b87648de..38e971083a5e 100644 --- a/crates/rpc/rpc-types/src/eth/state.rs +++ b/crates/rpc/rpc-types/src/eth/state.rs @@ -30,3 +30,22 @@ pub struct AccountOverride { #[serde(default, skip_serializing_if = "Option::is_none")] pub state_diff: Option>, } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_state_override() { + let s = r#"{ + "0x0000000000000000000000000000000000000124": { + "code": "0x6080604052348015600e575f80fd5b50600436106026575f3560e01c80632096525514602a575b5f80fd5b60306044565b604051901515815260200160405180910390f35b5f604e600242605e565b5f0360595750600190565b505f90565b5f82607757634e487b7160e01b5f52601260045260245ffd5b50069056fea2646970667358221220287f77a4262e88659e3fb402138d2ee6a7ff9ba86bae487a95aa28156367d09c64736f6c63430008140033" + } + }"#; + let state_override: StateOverride = serde_json::from_str(s).unwrap(); + let acc = state_override + .get(&"0x0000000000000000000000000000000000000124".parse().unwrap()) + .unwrap(); + assert!(acc.code.is_some()); + } +} From 706fc4181602b5d0b8b18f1eef822fd572e3ea22 Mon Sep 17 00:00:00 2001 From: qedk <1994constant@gmail.com> Date: Wed, 5 Jul 2023 17:03:56 +0400 Subject: [PATCH 062/722] feat: add cargo binstall support (#3578) --- Cargo.toml | 5 +++++ book/installation/binaries.md | 7 +++++++ 2 files changed, 12 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index 11ba1a26fbb1..7aa869fdc4da 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -133,3 +133,8 @@ futures-util = "0.3.25" ## crypto secp256k1 = { version = "0.27.0", default-features = false, features = ["global-context", "rand-std", "recovery"] } + +[package.metadata.binstall] +pkg-url = "{ repo }/releases/download/v{ version }/{ name }-v{ version }-{ target }{ archive-suffix }" +bin-dir = "{ bin }{ binary-ext }" +pkg-fmt = "tgz" diff --git a/book/installation/binaries.md b/book/installation/binaries.md index 0adbf634e2fa..31fa92f19774 100644 --- a/book/installation/binaries.md +++ b/book/installation/binaries.md @@ -26,3 +26,10 @@ As an example, you could install the Linux x86_64 version like so: 1. Test the binary with `./reth --version` (it should print the version). 2. (Optional) Move the `reth` binary to a location in your `PATH`, so the `reth` command can be called from anywhere. For most Linux distros, you can move the binary to `/usr/local/bin`: `sudo cp ./reth /usr/local/bin`. 
+ +### Using `cargo-binstall` +Alternatively, if you have [binstall](https://github.com/cargo-bins/cargo-binstall) installed, you can use it to install and load the binary: +```bash +cargo binstall reth +``` + From 12f792560f7bd4e0e5ab1f6c771e074895498d99 Mon Sep 17 00:00:00 2001 From: Bjerg Date: Wed, 5 Jul 2023 15:04:26 +0200 Subject: [PATCH 063/722] Revert "feat: add cargo binstall support" (#3602) --- Cargo.toml | 5 ----- book/installation/binaries.md | 7 ------- 2 files changed, 12 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 7aa869fdc4da..11ba1a26fbb1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -133,8 +133,3 @@ futures-util = "0.3.25" ## crypto secp256k1 = { version = "0.27.0", default-features = false, features = ["global-context", "rand-std", "recovery"] } - -[package.metadata.binstall] -pkg-url = "{ repo }/releases/download/v{ version }/{ name }-v{ version }-{ target }{ archive-suffix }" -bin-dir = "{ bin }{ binary-ext }" -pkg-fmt = "tgz" diff --git a/book/installation/binaries.md b/book/installation/binaries.md index 31fa92f19774..0adbf634e2fa 100644 --- a/book/installation/binaries.md +++ b/book/installation/binaries.md @@ -26,10 +26,3 @@ As an example, you could install the Linux x86_64 version like so: 1. Test the binary with `./reth --version` (it should print the version). 2. (Optional) Move the `reth` binary to a location in your `PATH`, so the `reth` command can be called from anywhere. For most Linux distros, you can move the binary to `/usr/local/bin`: `sudo cp ./reth /usr/local/bin`. - -### Using `cargo-binstall` -Alternatively, if you have [binstall](https://github.com/cargo-bins/cargo-binstall) installed, you can use it to install and load the binary: -```bash -cargo binstall reth -``` - From 2e436b5609c57f125d38fb2f98bac71216e5501e Mon Sep 17 00:00:00 2001 From: Bjerg Date: Wed, 5 Jul 2023 14:39:19 +0200 Subject: [PATCH 064/722] docs: remove naked link in installation (#3595) --- book/installation/installation.md | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/book/installation/installation.md b/book/installation/installation.md index 916f3fe634e7..d3ad78fc9134 100644 --- a/book/installation/installation.md +++ b/book/installation/installation.md @@ -14,20 +14,22 @@ The hardware requirements for running Reth depend on the node configuration and The most important requirement is by far the disk, whereas CPU and RAM requirements are relatively flexible. -| | Archive Node | Full Node | -|-----------|------------------------------------|-------------------------------------| -| Disk | At least 2TB (NVMe recommended) | TBD | -| Memory | 8GB+ | 8GB+ | -| CPU | Higher clock speed over core count | Higher clock speeds over core count | -| Bandwidth | Stable 24Mbps+ | Stable 24Mbps+ | +| | Archive Node | Full Node | +|-----------|----------------------------------------|-------------------------------------| +| Disk | At least 2TB (TLC NVMe recommended) | TBD | +| Memory | 8GB+ | 8GB+ | +| CPU | Higher clock speed over core count | Higher clock speeds over core count | +| Bandwidth | Stable 24Mbps+ | Stable 24Mbps+ | -It is then crucial to understand the difference between QLC and TLC NVMe drives when considering the disk requirement. +#### QLC and TLC + +It is crucial to understand the difference between QLC and TLC NVMe drives when considering the disk requirement. QLC (Quad-Level Cell) NVMe drives utilize four bits of data per cell, allowing for higher storage density and lower manufacturing costs. 
However, this increased density comes at the expense of performance. QLC drives have slower read and write speeds compared to TLC drives. They also have a lower endurance, meaning they may have a shorter lifespan and be less suitable for heavy workloads or constant data rewriting.

TLC (Triple-Level Cell) NVMe drives, on the other hand, use three bits of data per cell. While they have a slightly lower storage density compared to QLC drives, TLC drives offer faster performance. They typically have higher read and write speeds, making them more suitable for demanding tasks such as data-intensive applications, gaming, and multimedia editing. TLC drives also tend to have a higher endurance, making them more durable and longer-lasting.

-Prior to purchasing an NVMe drive, it is advisable to research and determine whether the disk will be based on QLC or TLC technology. An overview of recommended and not-so-recommended NVMe boards can be found at the following link: https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038
+Prior to purchasing an NVMe drive, it is advisable to research and determine whether the disk will be based on QLC or TLC technology. An overview of recommended and not-so-recommended NVMe boards can be found [here](https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038).

### Disk

NVMe drives are recommended for the best performance, with SSDs being a cheaper alternative.

At the time of writing, syncing an Ethereum mainnet node to block 17.4M on NVMe drives takes about 50 hours, while on a GCP "Persistent SSD" it takes around 5 days.

+> **Note**
+>
+> It is highly recommended to choose a TLC drive when using NVMe, and not a QLC drive. See [the note](#qlc-and-tlc) above. A list of recommended drives can be found [here](https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038).

### CPU

Most of the time during syncing is spent executing transactions, which is a single-threaded operation due to potential state dependencies of a transaction on previous ones.
From 3806a0c25a63639e57eb9bf4023374f8107b5a5c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 5 Jul 2023 15:10:11 +0200 Subject: [PATCH 065/722] chore: change field TransactionTrace order (#3598) --- crates/rpc/rpc-types/src/eth/trace/parity.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/rpc/rpc-types/src/eth/trace/parity.rs b/crates/rpc/rpc-types/src/eth/trace/parity.rs index 74b8b399e409..35ec518a48f8 100644 --- a/crates/rpc/rpc-types/src/eth/trace/parity.rs +++ b/crates/rpc/rpc-types/src/eth/trace/parity.rs @@ -238,12 +238,12 @@ pub enum TraceOutput { #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct TransactionTrace { - pub trace_address: Vec, - pub subtraces: usize, #[serde(flatten)] pub action: Action, #[serde(flatten)] pub result: Option, + pub subtraces: usize, + pub trace_address: Vec, } #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] From 338d6d5d9fc6f0efdc10a52233adc858d3e0b8f5 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 5 Jul 2023 14:21:29 +0100 Subject: [PATCH 066/722] fix(book): change recommended `--authrpc.port` value from 9999 to 8551 (#3599) --- book/run/mainnet.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/book/run/mainnet.md b/book/run/mainnet.md index f140947a3d9f..c9286a7ca084 100644 --- a/book/run/mainnet.md +++ b/book/run/mainnet.md @@ -35,7 +35,7 @@ So one might do: RUST_LOG=info reth node \ --authrpc.jwtsecret /path/to/secret \ --authrpc.addr 127.0.0.1 \ - --authrpc.port 9999 + --authrpc.port 8551 ``` At this point, our Reth node has started discovery, and even discovered some new peers. But it will not start syncing until you spin up the consensus layer! From 5b246fe29d37bdab582819f74a57195626baa7c9 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 5 Jul 2023 14:38:37 +0100 Subject: [PATCH 067/722] feat(book): recommend more CL checkpoints (#3601) --- book/run/mainnet.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/book/run/mainnet.md b/book/run/mainnet.md index c9286a7ca084..e3f47cff0388 100644 --- a/book/run/mainnet.md +++ b/book/run/mainnet.md @@ -53,12 +53,14 @@ RUST_LOG=info lighthouse bn \ --execution-jwt /path/to/secret ``` -If you don't intend on running validators on your node you can add : +If you don't intend on running validators on your node you can add: ``` bash --disable-deposit-contract-sync ``` +The `--checkpoint-sync-url` argument value can be replaced with any checkpoint sync endpoint from a [community maintained list](https://eth-clients.github.io/checkpoint-sync-endpoints/#mainnet). + Your Reth node should start receiving "fork choice updated" messages, and begin syncing the chain. 
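One quick way to confirm that syncing has started (a sketch, assuming the HTTP RPC server was also enabled, e.g. with `--http` as in the Docker setup elsewhere in this repository; the command above enables only the Engine API) is to query the standard `eth_syncing` endpoint:

```bash
curl -s -X POST -H 'Content-Type: application/json' \
  --data '{"jsonrpc":"2.0","method":"eth_syncing","params":[],"id":1}' \
  http://127.0.0.1:8545
```

While the node is catching up this returns a sync status object; once the node is fully synced it returns `false`.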
## Verify the chain is growing From 7d8f0c7f8708170dc5fb187015b6c0d726320145 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 5 Jul 2023 15:39:12 +0200 Subject: [PATCH 068/722] docs: add transaction pool example (#3596) Co-authored-by: BrazilRaw <138177568+BrazilRaw@users.noreply.github.com> --- bin/reth/src/node/mod.rs | 2 +- crates/transaction-pool/src/lib.rs | 66 +++++++++++++++++++++ crates/transaction-pool/src/validate/eth.rs | 14 ++++- 3 files changed, 79 insertions(+), 3 deletions(-) diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index a91687812610..a37acc28da8f 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -220,7 +220,7 @@ impl Command { let blockchain_db = BlockchainProvider::new(factory, blockchain_tree.clone())?; let transaction_pool = reth_transaction_pool::Pool::eth_pool( - EthTransactionValidator::new( + EthTransactionValidator::with_additional_tasks( blockchain_db.clone(), Arc::clone(&self.chain), ctx.task_executor.clone(), diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 917c2aecad36..46a744cfb8f3 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -86,6 +86,57 @@ //! [`Pool`](crate::Pool) type is just an `Arc` wrapper around `PoolInner`. This is the usable type //! that provides the `TransactionPool` interface. //! +//! +//! ## Examples +//! +//! Listen for new transactions and print them: +//! +//! ``` +//! use reth_primitives::MAINNET; +//! use reth_provider::StateProviderFactory; +//! use reth_tasks::TokioTaskExecutor; +//! use reth_transaction_pool::{EthTransactionValidator, Pool, TransactionPool}; +//! async fn t(client: C) where C: StateProviderFactory + Clone + 'static{ +//! let pool = Pool::eth_pool( +//! EthTransactionValidator::new(client, MAINNET.clone(), TokioTaskExecutor::default()), +//! Default::default(), +//! ); +//! let mut transactions = pool.pending_transactions_listener(); +//! tokio::task::spawn( async move { +//! while let Some(tx) = transactions.recv().await { +//! println!("New transaction: {:?}", tx); +//! } +//! }); +//! +//! // do something useful with the pool, like RPC integration +//! +//! # } +//! ``` +//! +//! Spawn maintenance task to keep the pool updated +//! +//! ``` +//! use futures_util::Stream; +//! use reth_primitives::MAINNET; +//! use reth_provider::{BlockReaderIdExt, CanonStateNotification, StateProviderFactory}; +//! use reth_tasks::TokioTaskExecutor; +//! use reth_transaction_pool::{EthTransactionValidator, Pool}; +//! use reth_transaction_pool::maintain::maintain_transaction_pool_future; +//! async fn t(client: C, stream: St) +//! where C: StateProviderFactory + BlockReaderIdExt + Clone + 'static, +//! St: Stream + Send + Unpin + 'static, +//! { +//! let pool = Pool::eth_pool( +//! EthTransactionValidator::new(client.clone(), MAINNET.clone(), TokioTaskExecutor::default()), +//! Default::default(), +//! ); +//! +//! // spawn a task that listens for new blocks and updates the pool's transactions, mined transactions etc.. +//! tokio::task::spawn( maintain_transaction_pool_future(client, pool, stream)); +//! +//! # } +//! ``` +//! //! ## Feature Flags //! //! 
- `serde` (default): Enable serde support @@ -230,6 +281,21 @@ where { /// Returns a new [Pool] that uses the default [EthTransactionValidator] when validating /// [PooledTransaction]s and ords via [GasCostOrdering] + /// + /// # Example + /// + /// ``` + /// use reth_provider::StateProviderFactory; + /// use reth_primitives::MAINNET; + /// use reth_tasks::TokioTaskExecutor; + /// use reth_transaction_pool::{EthTransactionValidator, Pool}; + /// # fn t(client: C) where C: StateProviderFactory + Clone + 'static{ + /// let pool = Pool::eth_pool( + /// EthTransactionValidator::new(client, MAINNET.clone(), TokioTaskExecutor::default()), + /// Default::default(), + /// ); + /// # } + /// ``` pub fn eth_pool( validator: EthTransactionValidator, config: PoolConfig, diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index ee24c6266914..605c7d08a41b 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -31,9 +31,19 @@ pub struct EthTransactionValidator { impl EthTransactionValidator { /// Creates a new instance for the given [ChainSpec] /// - /// This will always spawn a validation tasks that perform the actual validation. A will spawn + /// This will spawn a single validation tasks that performs the actual validation. + pub fn new(client: Client, chain_spec: Arc, tasks: T) -> Self + where + T: TaskSpawner, + { + Self::with_additional_tasks(client, chain_spec, tasks, 0) + } + + /// Creates a new instance for the given [ChainSpec] + /// + /// This will always spawn a validation task that performs the actual validation. It will spawn /// `num_additional_tasks` additional tasks. - pub fn new( + pub fn with_additional_tasks( client: Client, chain_spec: Arc, tasks: T, From fc540c60fdf8022b92a8c19e7c94308b93b7cf1a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 5 Jul 2023 15:47:51 +0200 Subject: [PATCH 069/722] fix: use refunds for root call (#3594) --- crates/revm/revm-inspectors/src/tracing/mod.rs | 12 ++++++++++-- crates/revm/revm-inspectors/src/tracing/utils.rs | 1 - 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/crates/revm/revm-inspectors/src/tracing/mod.rs b/crates/revm/revm-inspectors/src/tracing/mod.rs index 0dfee063a492..644b5262b7ea 100644 --- a/crates/revm/revm-inspectors/src/tracing/mod.rs +++ b/crates/revm/revm-inspectors/src/tracing/mod.rs @@ -24,6 +24,7 @@ mod utils; use crate::tracing::{ arena::PushTraceKind, types::{CallTraceNode, StorageChange}, + utils::gas_used, }; pub use builder::{ geth::{self, GethTraceBuilder}, @@ -193,7 +194,7 @@ impl TracingInspector { /// This expects an existing trace [Self::start_trace_on_call] fn fill_trace_on_call_end( &mut self, - _data: &EVMData<'_, DB>, + data: &EVMData<'_, DB>, status: InstructionResult, gas: &Gas, output: Bytes, @@ -202,7 +203,14 @@ impl TracingInspector { let trace_idx = self.pop_trace_idx(); let trace = &mut self.traces.arena[trace_idx].trace; - trace.gas_used = gas.spend(); + if trace_idx == 0 { + // this is the root call which should get the gas used of the transaction + // refunds are applied after execution, which is when the root call ends + trace.gas_used = gas_used(data.env.cfg.spec_id, gas.spend(), gas.refunded() as u64); + } else { + trace.gas_used = gas.spend(); + } + trace.status = status; trace.success = matches!(status, return_ok!()); trace.output = output.clone(); diff --git a/crates/revm/revm-inspectors/src/tracing/utils.rs b/crates/revm/revm-inspectors/src/tracing/utils.rs index 
b3d41f49e92e..64468f825025 100644 --- a/crates/revm/revm-inspectors/src/tracing/utils.rs +++ b/crates/revm/revm-inspectors/src/tracing/utils.rs @@ -23,7 +23,6 @@ pub(crate) fn convert_memory(data: &[u8]) -> Vec { /// Get the gas used, accounting for refunds #[inline] -#[allow(unused)] pub(crate) fn gas_used(spec: SpecId, spent: u64, refunded: u64) -> u64 { let refund_quotient = if SpecId::enabled(spec, SpecId::LONDON) { 5 } else { 2 }; spent - (refunded).min(spent / refund_quotient) From e7cedee411d3071028af8055f1d6e700fbcece92 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 5 Jul 2023 16:36:49 +0200 Subject: [PATCH 070/722] perf: only record return value if required (#3040) Co-authored-by: Bjerg --- .../revm-inspectors/src/tracing/builder/geth.rs | 11 +++++++---- crates/revm/revm-inspectors/src/tracing/config.rs | 5 +++++ crates/revm/revm-inspectors/src/tracing/mod.rs | 2 +- crates/revm/revm-inspectors/src/tracing/types.rs | 3 --- crates/rpc/rpc/src/debug.rs | 11 ++++++----- crates/rpc/rpc/src/eth/revm_utils.rs | 14 +++++++++++++- 6 files changed, 32 insertions(+), 14 deletions(-) diff --git a/crates/revm/revm-inspectors/src/tracing/builder/geth.rs b/crates/revm/revm-inspectors/src/tracing/builder/geth.rs index 9786eb6a9c04..a3eaa3233fd3 100644 --- a/crates/revm/revm-inspectors/src/tracing/builder/geth.rs +++ b/crates/revm/revm-inspectors/src/tracing/builder/geth.rs @@ -4,7 +4,7 @@ use crate::tracing::{ types::{CallTraceNode, CallTraceStepStackItem}, TracingInspectorConfig, }; -use reth_primitives::{Address, H256}; +use reth_primitives::{Address, Bytes, H256}; use reth_rpc_types::trace::geth::*; use std::collections::{BTreeMap, HashMap, VecDeque}; @@ -58,7 +58,7 @@ impl GethTraceBuilder { } if opts.is_return_data_enabled() { - log.return_data = trace_node.trace.last_call_return_value.clone().map(Into::into); + log.return_data = Some(trace_node.trace.output.clone().into()); } // Add step to geth trace @@ -74,10 +74,13 @@ impl GethTraceBuilder { } /// Generate a geth-style trace e.g. for `debug_traceTransaction` + /// + /// This expects the gas used and return value for the + /// [ExecutionResult](revm::primitives::ExecutionResult) of the executed transaction. pub fn geth_traces( &self, - // TODO(mattsse): This should be the total gas used, or gas used by last CallTrace? receipt_gas_used: u64, + return_value: Bytes, opts: GethDefaultTracingOptions, ) -> DefaultFrame { if self.nodes.is_empty() { @@ -95,7 +98,7 @@ impl GethTraceBuilder { // If the top-level trace succeeded, then it was a success failed: !main_trace.success, gas: receipt_gas_used, - return_value: main_trace.output.clone().into(), + return_value, struct_logs, } } diff --git a/crates/revm/revm-inspectors/src/tracing/config.rs b/crates/revm/revm-inspectors/src/tracing/config.rs index 521786451a2c..f968993062d3 100644 --- a/crates/revm/revm-inspectors/src/tracing/config.rs +++ b/crates/revm/revm-inspectors/src/tracing/config.rs @@ -16,6 +16,8 @@ pub struct TracingInspectorConfig { pub record_state_diff: bool, /// Whether to ignore precompile calls. 
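+    /// On Ethereum mainnet these are the contracts at the reserved addresses
+    /// `0x01` through `0x09` (ecrecover, sha256, ripemd160, identity, modexp,
+    /// ecadd, ecmul, ecpairing, blake2f); excluding calls to them keeps traces
+    /// focused on actual contract execution.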
pub exclude_precompile_calls: bool, + /// Whether to record individual return data + pub record_call_return_data: bool, /// Whether to record logs pub record_logs: bool, } @@ -29,6 +31,7 @@ impl TracingInspectorConfig { record_stack_snapshots: true, record_state_diff: false, exclude_precompile_calls: false, + record_call_return_data: false, record_logs: true, } } @@ -43,6 +46,7 @@ impl TracingInspectorConfig { record_stack_snapshots: false, record_state_diff: false, exclude_precompile_calls: true, + record_call_return_data: false, record_logs: false, } } @@ -57,6 +61,7 @@ impl TracingInspectorConfig { record_stack_snapshots: true, record_state_diff: true, exclude_precompile_calls: false, + record_call_return_data: false, record_logs: false, } } diff --git a/crates/revm/revm-inspectors/src/tracing/mod.rs b/crates/revm/revm-inspectors/src/tracing/mod.rs index 644b5262b7ea..b064e0dd692c 100644 --- a/crates/revm/revm-inspectors/src/tracing/mod.rs +++ b/crates/revm/revm-inspectors/src/tracing/mod.rs @@ -177,7 +177,6 @@ impl TracingInspector { value, status: InstructionResult::Continue, caller, - last_call_return_value: self.last_call_return_data.clone(), maybe_precompile, gas_limit, ..Default::default() @@ -214,6 +213,7 @@ impl TracingInspector { trace.status = status; trace.success = matches!(status, return_ok!()); trace.output = output.clone(); + self.last_call_return_data = Some(output); if let Some(address) = created_address { diff --git a/crates/revm/revm-inspectors/src/tracing/types.rs b/crates/revm/revm-inspectors/src/tracing/types.rs index 8809de608550..2e48479fd7e3 100644 --- a/crates/revm/revm-inspectors/src/tracing/types.rs +++ b/crates/revm/revm-inspectors/src/tracing/types.rs @@ -142,8 +142,6 @@ pub(crate) struct CallTrace { /// The return data of the call if this was not a contract creation, otherwise it is the /// runtime bytecode of the created contract pub(crate) output: Bytes, - /// The return data of the last call, if any - pub(crate) last_call_return_value: Option, /// The gas cost of the call pub(crate) gas_used: u64, /// The gas limit of the call @@ -181,7 +179,6 @@ impl Default for CallTrace { data: Default::default(), maybe_precompile: None, output: Default::default(), - last_call_return_value: None, gas_used: Default::default(), gas_limit: Default::default(), status: InstructionResult::Continue, diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 528da093e45d..3f8fd11a026d 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -2,7 +2,8 @@ use crate::{ eth::{ error::{EthApiError, EthResult}, revm_utils::{ - clone_into_empty_db, inspect, prepare_call_env, replay_transactions_until, EvmOverrides, + clone_into_empty_db, inspect, prepare_call_env, replay_transactions_until, + result_output, EvmOverrides, }, EthTransactions, TransactionSource, }, @@ -346,8 +347,8 @@ where let (res, _) = self.inner.eth_api.inspect_call_at(call, at, overrides, &mut inspector).await?; let gas_used = res.result.gas_used(); - - let frame = inspector.into_geth_builder().geth_traces(gas_used, config); + let return_value = result_output(&res.result).unwrap_or_default().into(); + let frame = inspector.into_geth_builder().geth_traces(gas_used, return_value, config); Ok(frame.into()) } @@ -424,8 +425,8 @@ where let (res, _) = inspect(db, env, &mut inspector)?; let gas_used = res.result.gas_used(); - - let frame = inspector.into_geth_builder().geth_traces(gas_used, config); + let return_value = result_output(&res.result).unwrap_or_default().into(); + let 
frame = inspector.into_geth_builder().geth_traces(gas_used, return_value, config); Ok((frame.into(), res.state)) } diff --git a/crates/rpc/rpc/src/eth/revm_utils.rs b/crates/rpc/rpc/src/eth/revm_utils.rs index fbfe93e53ad5..259c34589beb 100644 --- a/crates/rpc/rpc/src/eth/revm_utils.rs +++ b/crates/rpc/rpc/src/eth/revm_utils.rs @@ -18,7 +18,7 @@ use revm::{ }; use revm_primitives::{ db::{DatabaseCommit, DatabaseRef}, - Bytecode, + Bytecode, ExecutionResult, }; use tracing::trace; @@ -524,6 +524,18 @@ where } } +/// Helper to get the output data from a result +/// +/// TODO: Can be phased out when is released +#[inline] +pub(crate) fn result_output(res: &ExecutionResult) -> Option { + match res { + ExecutionResult::Success { output, .. } => Some(output.clone().into_data()), + ExecutionResult::Revert { output, .. } => Some(output.clone()), + _ => None, + } +} + #[cfg(test)] mod tests { use super::*; From 64ca5214588e7039dd39a8eaeaede999980c4e0c Mon Sep 17 00:00:00 2001 From: Paolo Facchinetti <51409747+paolofacchinetti@users.noreply.github.com> Date: Wed, 5 Jul 2023 16:49:42 +0200 Subject: [PATCH 071/722] feat: add ethereum-metrics-exporter (#3573) Co-authored-by: Bjerg --- book/installation/docker.md | 19 +- etc/docker-compose.yml | 27 +- etc/ethereum-metrics-exporter/config.yaml | 12 + etc/generate-jwt.sh | 11 +- etc/grafana/dashboards/metrics-exporter.json | 3578 ++++++++++++++++++ etc/lighthouse.yml | 44 + etc/prometheus/prometheus.yml | 7 +- 7 files changed, 3667 insertions(+), 31 deletions(-) create mode 100644 etc/ethereum-metrics-exporter/config.yaml create mode 100644 etc/grafana/dashboards/metrics-exporter.json create mode 100644 etc/lighthouse.yml diff --git a/book/installation/docker.md b/book/installation/docker.md index ac97c710eb6b..6a101141b09e 100644 --- a/book/installation/docker.md +++ b/book/installation/docker.md @@ -80,23 +80,36 @@ To run Reth with Docker Compose, run the following command from a shell inside t ```bash ./etc/generate-jwt.sh -docker compose -f etc/docker-compose.yml up -d +docker compose -f etc/docker-compose.yml -f etc/lighthouse.yml up -d ``` +> **Note** +> +> If you want to run Reth with a CL that is not Lighthouse: +> +> - The JWT for the consensus client can be found at `etc/jwttoken/jwt.hex` in this repository, after the `etc/generate-jwt.sh` script is run +> - The Reth Engine API is accessible on `localhost:8551` + To check if Reth is running correctly, run: ```bash docker compose logs -f reth ``` -The default `docker-compose.yml` file will create four containers: +The default `docker-compose.yml` file will create three containers: - Reth - Prometheus - Grafana + +The optional `lighthouse.yml` file will create two containers: + - Lighthouse +- [`ethereum-metrics-exporter`](https://github.com/ethpandaops/ethereum-metrics-exporter) -Grafana will be exposed on `localhost:3000` and accessible via default credentials (username and password is `admin`) +Grafana will be exposed on `localhost:3000` and accessible via default credentials (username and password is `admin`), with two available dashboards: +- reth +- Ethereum Metrics Exporter (works only if Lighthouse is also running) ## Interacting with Reth inside Docker diff --git a/etc/docker-compose.yml b/etc/docker-compose.yml index a19e253f7c83..0261711eb227 100644 --- a/etc/docker-compose.yml +++ b/etc/docker-compose.yml @@ -9,6 +9,7 @@ services: - '9001:9001' # metrics - '30303:30303' # eth/66 peering - '8545:8545' # rpc + - '8551:8551' # engine volumes: - rethdata:/root/.local/share/reth/mainnet/db 
- rethlogs:/root/rethlogs @@ -22,29 +23,7 @@ services: --authrpc.port 8551 --authrpc.jwtsecret /root/jwt/jwt.hex --http --http.addr 0.0.0.0 --http.port 8545 - --http.api "eth,net" - - lighthouse: - restart: unless-stopped - image: sigp/lighthouse - depends_on: - - reth - ports: - - '5052:5052/tcp' - - '5053:5053/tcp' - - '5054:5054/tcp' # metrics - - '9000:9000/tcp' - - '9000:9000/udp' - volumes: - - lighthousedata:/root/.lighthouse - - ./jwttoken:/root/jwt:ro - command: > - lighthouse bn - --http --http-address 0.0.0.0 - --execution-endpoint http://reth:8551 - --metrics --metrics-address 0.0.0.0 - --execution-jwt /root/jwt/jwt.hex - --checkpoint-sync-url https://mainnet.checkpoint.sigp.io + --http.api "eth,net,web3" prometheus: restart: unless-stopped @@ -87,8 +66,6 @@ volumes: driver: local rethlogs: driver: local - lighthousedata: - driver: local prometheusdata: driver: local grafanadata: diff --git a/etc/ethereum-metrics-exporter/config.yaml b/etc/ethereum-metrics-exporter/config.yaml new file mode 100644 index 000000000000..fd57b3c696c5 --- /dev/null +++ b/etc/ethereum-metrics-exporter/config.yaml @@ -0,0 +1,12 @@ +consensus: + enabled: true + url: "http://lighthouse:5052" + name: "consensus-client" +execution: + enabled: true + url: "http://reth:8545" + name: "execution-client" + modules: + - "eth" + - "net" + - "web3" \ No newline at end of file diff --git a/etc/generate-jwt.sh b/etc/generate-jwt.sh index 711b5b55d41d..6f5bc2b790c3 100755 --- a/etc/generate-jwt.sh +++ b/etc/generate-jwt.sh @@ -1,4 +1,11 @@ # Borrowed from EthStaker's prepare for the merge guide # See https://github.com/remyroy/ethstaker/blob/main/prepare-for-the-merge.md#configuring-a-jwt-token-file -mkdir -p jwttoken -openssl rand -hex 32 | tr -d "\n" | tee > jwttoken/jwt.hex \ No newline at end of file + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +mkdir -p ${SCRIPT_DIR}/jwttoken +if [[ ! -f ${SCRIPT_DIR}/jwttoken/jwt.hex ]] +then + openssl rand -hex 32 | tr -d "\n" | tee > ${SCRIPT_DIR}/jwttoken/jwt.hex +else + echo "${SCRIPT_DIR}/jwttoken/jwt.hex already exists!" 
+fi \ No newline at end of file diff --git a/etc/grafana/dashboards/metrics-exporter.json b/etc/grafana/dashboards/metrics-exporter.json new file mode 100644 index 000000000000..a626536eaa75 --- /dev/null +++ b/etc/grafana/dashboards/metrics-exporter.json @@ -0,0 +1,3578 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "description": "Companion dashboard for https://github.com/samcm/ethereum-metrics-exporter", + "editable": true, + "fiscalYearStartMonth": 0, + "gnetId": 16277, + "graphTooltip": 0, + "id": 2, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "ndvLBM54z" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 5, + "title": "Overview", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 9, + "x": 0, + "y": 1 + }, + "id": 10, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "name" + }, + "pluginVersion": "10.0.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "max by (version)(eth_exe_web3_client_version{instance=~\"$instance\"})", + "interval": "", + "legendFormat": "{{ version }}", + "refId": "A" + } + ], + "title": "Execution Client", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 9, + "x": 9, + "y": 1 + }, + "id": 29, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "name" + }, + "pluginVersion": "10.0.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "eth_con_spec_config_name{instance=~\"$instance\"}", + "interval": "", + "legendFormat": "{{ name }}", + "refId": "A" + } + ], + "title": "Consensus Config", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 18, + "y": 1 + }, + "id": 40, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + 
"reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "name" + }, + "pluginVersion": "10.0.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "eth_con_fork_current{instance=~\"$instance\"}", + "interval": "", + "legendFormat": "{{ fork }}", + "refId": "A" + } + ], + "title": "Consensus Fork", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 9, + "x": 0, + "y": 5 + }, + "id": 31, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "name" + }, + "pluginVersion": "10.0.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "max by (version)(eth_con_node_version{instance=~\"$instance\"})", + "interval": "", + "legendFormat": "{{ version }}", + "refId": "A" + } + ], + "title": "Consensus Client", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 3, + "x": 18, + "y": 5 + }, + "id": 2, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "10.0.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "eth_exe_chain_id{instance=~\"$instance\"}", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Execution Chain ID", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 3, + "x": 21, + "y": 5 + }, + "id": 3, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "10.0.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "eth_exe_network_id{instance=~\"$instance\"}", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Execution Network ID", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + 
"mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 9, + "x": 9, + "y": 6 + }, + "id": 30, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "name" + }, + "pluginVersion": "10.0.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "eth_con_spec_preset_base{instance=~\"$instance\"}", + "interval": "", + "legendFormat": "{{ preset }}", + "refId": "A" + } + ], + "title": "Consensus Preset", + "type": "stat" + }, + { + "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 10 + }, + "id": 7, + "panels": [], + "title": "Consensus Layer", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "mappings": [ + { + "options": { + "0": { + "index": 1, + "text": "Synced" + }, + "1": { + "index": 0, + "text": "Syncing" + } + }, + "type": "value" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "green", + "value": 0 + }, + { + "color": "#EAB839", + "value": 1 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 0, + "y": 11 + }, + "id": 19, + "options": { + "colorMode": "background", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "10.0.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "eth_con_sync_is_syncing{instance=~\"$instance\"}", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Sync Status", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 1, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 5, + "y": 11 + }, + "id": 22, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": 
"eth_con_sync_percentage{instance=~\"$instance\"}", + "interval": "", + "legendFormat": "{{ instance }}", + "refId": "A" + } + ], + "title": "Sync Percent", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 1, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 10, + "y": 11 + }, + "id": 12, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "\n eth_con_beacon_slot{instance=~\"$instance\", slot!=\"genesis\"}\n", + "interval": "", + "legendFormat": "{{ block_id }}", + "range": true, + "refId": "A" + } + ], + "title": "Slots", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 1, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 16, + "y": 11 + }, + "id": 48, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "eth_con_beacon_finality_checkpoint_epochs{instance=~\"$instance\", state_id=\"head\", checkpoint!=\"previous_justified\"}", + "interval": "", + "legendFormat": "{{ checkpoint }}", + "refId": "A" + } + ], + "title": "Epochs", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + 
"fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 1, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "Epochs" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 20, + "y": 11 + }, + "id": 17, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (instance)(\n eth_con_beacon_finality_checkpoint_epochs{instance=~\"$instance\", checkpoint=\"justified\", state_id=\"head\"}\n)\n-\nsum by (instance)(\n eth_con_beacon_finality_checkpoint_epochs{instance=~\"$instance\", checkpoint=\"finalized\", state_id=\"head\"}\n)", + "hide": false, + "interval": "", + "legendFormat": "Justified -> Finalized", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (instance)(\n eth_con_beacon_finality_checkpoint_epochs{instance=~\"$instance\", checkpoint=\"justified\", state_id=\"head\"}\n)\n-\nsum by (instance)(\n eth_con_beacon_finality_checkpoint_epochs{instance=~\"$instance\", checkpoint=\"previous_justified\", state_id=\"head\"}\n)", + "hide": false, + "interval": "", + "legendFormat": "Justified -> Previous Justified", + "refId": "B" + } + ], + "title": "Epoch distances", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 1, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "slots" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 0, + "y": 17 + }, + "id": 21, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "eth_con_sync_distance{instance=~\"$instance\"}", + "interval": "", + "legendFormat": "{{ instance }}", + "refId": "A" + } + ], + "title": "Sync Distance", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" 
+ }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 1, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 3, + "y": 17 + }, + "id": 23, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum(rate(eth_con_sync_percentage{instance=~\"$instance\"}[1m])) by (instance)", + "instant": false, + "interval": "", + "legendFormat": "{{ instance }}", + "range": true, + "refId": "A" + } + ], + "title": "Sync rate", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 1, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 6, + "y": 17 + }, + "id": 68, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum by (instance)(increase(eth_con_beacon_reorg_count{instance=~\"$instance\"}[1m]))", + "interval": "", + "legendFormat": "{{ instance }}", + "range": true, + "refId": "A" + } + ], + "title": "Reorgs", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 1, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + 
"stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "Slots" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 10, + "y": 17 + }, + "id": 15, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum by (instance)(\n eth_con_beacon_slot{instance=~\"$instance\", block_id=\"head\"}\n)\n-\nsum by (instance) (\n eth_con_beacon_slot{instance=~\"$instance\", block_id=\"finalized\"}\n) ", + "interval": "1m", + "legendFormat": "Head -> Finalized", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum by (instance)(\n eth_con_beacon_slot{instance=~\"$instance\", block_id=\"head\"}\n)\n-\nsum by (instance) (\n eth_con_beacon_slot{instance=~\"$instance\", block_id=\"justified\"}\n) != 0", + "hide": false, + "interval": "1m", + "legendFormat": "Head -> Justified", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum by (instance)(\n eth_con_beacon_slot{instance=~\"$instance\", block_id=\"justified\"}\n)\n-\nsum by (instance) (\n eth_con_beacon_slot{instance=~\"$instance\", block_id=\"finalized\"}\n) != 0", + "hide": false, + "interval": "1m", + "legendFormat": "Justified -> Finalized", + "range": true, + "refId": "C" + } + ], + "title": "Slot distances", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 1, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 16, + "y": 17 + }, + "id": 67, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "eth_con_beacon_transactions{instance=~\"$instance\", block_id=\"head\"}", + "interval": "", + "legendFormat": "Transactions", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "eth_con_beacon_slashings{instance=~\"$instance\", block_id=\"head\", 
type=\"proposer\"}", + "hide": false, + "interval": "", + "legendFormat": "Proposer Slashings", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "eth_con_beacon_slashings{instance=~\"$instance\", block_id=\"head\", type=\"attester\"}", + "hide": false, + "interval": "", + "legendFormat": "Attester Slashings", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "eth_con_beacon_attestations{instance=~\"$instance\",block_id=\"head\"}", + "hide": false, + "interval": "", + "legendFormat": "Attestations", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "eth_con_beacon_deposits{instance=~\"$instance\",block_id=\"head\"}", + "hide": false, + "interval": "", + "legendFormat": "Deposits", + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "eth_con_beacon_voluntary_exits{instance=~\"$instance\",block_id=\"head\"}", + "hide": false, + "interval": "", + "legendFormat": "Voluntary Exits", + "refId": "F" + } + ], + "title": "Counts Per Block (at head)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 1, + "scaleDistribution": { + "log": 10, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 20, + "y": 17 + }, + "id": 60, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum by (event)(increase(eth_con_event_count{instance=~\"$instance\"}[1m]))", + "hide": false, + "interval": "1m", + "legendFormat": "{{ event }}", + "range": true, + "refId": "A" + } + ], + "title": "Events (per min)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 6, + "x": 0, + "y": 23 + }, + "id": 69, + "options": { + "calculate": true, + "calculation": { + "xBuckets": { + "mode": "size" + }, + "yBuckets": { + "mode": "count", + "value": "30" + } + }, + "cellGap": 1, + "color": { + "exponent": 0.5, + "fill": "dark-orange", + "mode": "scheme", + "reverse": false, + "scale": "exponential", + "scheme": "Oranges", + 
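The `Events (per min)` panel above uses `increase()` over the `eth_con_event_count` counter, which stays meaningful across exporter restarts because counter resets are compensated for. `promtool`, which ships with Prometheus, can evaluate the same instant query from the shell; the server URL is an assumption:

```bash
# Per-minute event counts by event type, mirroring the panel's query.
promtool query instant http://localhost:9090 \
  'sum by (event)(increase(eth_con_event_count[1m]))'
```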
"steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto" + }, + "tooltip": { + "show": true, + "yHistogram": false + }, + "yAxis": { + "axisPlacement": "left", + "reverse": false, + "unit": "s" + } + }, + "pluginVersion": "9.3.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "avg(increase(eth_con_beacon_proposer_delay_bucket{instance=~\"$instance\"}[1m])) by (le)", + "interval": "1m", + "legendFormat": "{{ instance }}", + "range": true, + "refId": "A" + } + ], + "title": "Proposer delay", + "type": "heatmap" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 1, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*Per Block" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 5, + "x": 6, + "y": 23 + }, + "id": 72, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(eth_con_beacon_withdrawals_index_max{instance=~\"$instance\", block_id=\"head\"}) by (instance)", + "interval": "", + "legendFormat": "Index Max", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(eth_con_beacon_withdrawals_index_min{instance=~\"$instance\", block_id=\"head\"}) by (instance)", + "hide": false, + "interval": "", + "legendFormat": "Index Min", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(eth_con_beacon_withdrawals{instance=~\"$instance\", block_id=\"head\"}) by (instance)", + "hide": false, + "interval": "", + "legendFormat": "Per Block", + "range": true, + "refId": "C" + } + ], + "title": "Withdrawals", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": 
false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 1, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "gwei" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 5, + "x": 11, + "y": 23 + }, + "id": 71, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(increase(eth_con_beacon_withdrawals_amount_gwei{instance=~\"$instance\", block_id=\"head\"}[1m])) by (instance)", + "interval": "", + "legendFormat": "{{ instance }}", + "range": true, + "refId": "A" + } + ], + "title": "Withdrawal Amounts", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 1, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 4, + "x": 16, + "y": 23 + }, + "id": 70, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum by (direction)(eth_con_peers{instance=~\"$instance\", state=\"connected\"})", + "hide": false, + "interval": "", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Peers (stacked)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 1, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } 
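The `Proposer delay` heatmap above is drawn from histogram `_bucket` series, so the same data can answer a percentile question directly with `histogram_quantile`. A sketch, where the 5m window and the 0.95 quantile are illustrative choices rather than anything the dashboard prescribes:

```bash
# Approximate p95 proposer delay from the histogram buckets.
curl -sG 'http://localhost:9090/api/v1/query' \
  --data-urlencode 'query=histogram_quantile(0.95, sum by (le)(rate(eth_con_beacon_proposer_delay_bucket[5m])))'
```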
+ ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 4, + "x": 20, + "y": 23 + }, + "id": 28, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "eth_con_fork_epoch{instance=~\"$instance\", fork!=\"SHARDING\"}", + "hide": false, + "interval": "", + "legendFormat": "{{ fork }}", + "refId": "A" + } + ], + "title": "Fork Epochs", + "type": "timeseries" + }, + { + "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 30 + }, + "id": 9, + "panels": [], + "title": "Execution Layer", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "mappings": [ + { + "options": { + "0": { + "index": 1, + "text": "Synced" + }, + "1": { + "index": 0, + "text": "Syncing" + } + }, + "type": "value" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "green", + "value": 0 + }, + { + "color": "#EAB839", + "value": 1 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 0, + "y": 31 + }, + "id": 24, + "options": { + "colorMode": "background", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.3.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "eth_exe_sync_is_syncing{instance=~\"$instance\"}", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Sync Status", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 1, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 5, + "y": 31 + }, + "id": 34, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "eth_exe_sync_percentage{instance=~\"$instance\"}", + "interval": "", + "legendFormat": "{{ instance }}", + "refId": "A" + } + ], + "title": "Sync Percent", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + 
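The `Sync Status` stat above maps `eth_exe_sync_is_syncing` to `Synced` (0) and `Syncing` (1). The exporter presumably derives this from the standard `eth_syncing` JSON-RPC call, which can be issued against the execution node directly; the endpoint is an assumption for a default local setup:

```bash
# Returns `false` when synced, or a progress object while syncing.
curl -s -X POST -H 'Content-Type: application/json' \
  --data '{"jsonrpc":"2.0","method":"eth_syncing","params":[],"id":1}' \
  http://localhost:8545
```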
"fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 1, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 10, + "y": 31 + }, + "id": 39, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "eth_exe_block_most_recent_number{instance=~\"$instance\"}", + "interval": "", + "legendFormat": "{{ instance }}", + "refId": "A" + } + ], + "title": "Block Height", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 1, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 15, + "y": 31 + }, + "id": 45, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "eth_exe_net_peer_count{instance=~\"$instance\"}", + "interval": "", + "legendFormat": "{{ instance }}", + "refId": "A" + } + ], + "title": "Peers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 1, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + 
"mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 20, + "y": 31 + }, + "id": 47, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (instance)(\n eth_exe_block_head_total_difficulty_trillions{instance=~\"$instance\"}\n)", + "interval": "", + "legendFormat": "{{ instance }}", + "refId": "A" + } + ], + "title": "Total Difficulty (trillions)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 1, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "blocks" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 0, + "y": 37 + }, + "id": 35, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "eth_exe_sync_highest_block{instance=~\"$instance\"} - eth_exe_sync_current_block{instance=~\"$instance\"}", + "interval": "", + "legendFormat": "{{ instance }}", + "refId": "A" + } + ], + "title": "Sync Distance", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 1, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 5, + "y": 37 + }, + "id": 36, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": 
"prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (instance)(rate(eth_exe_sync_percentage{instance=~\"$instance\"}[1m]))", + "interval": "1m", + "legendFormat": "{{ instance }}", + "refId": "A" + } + ], + "title": "Sync rate", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 1, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "Gwei" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 10, + "y": 37 + }, + "id": 38, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "eth_exe_gas_price_gwei{instance=~\"$instance\"}", + "interval": "", + "legendFormat": "{{ instance }}", + "refId": "A" + } + ], + "title": "Suggested Gas Price", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 1, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 9, + "x": 15, + "y": 37 + }, + "id": 54, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum by (instance)(\n eth_exe_block_head_gas_used{instance=~\"$instance\"}\n)\n/\nsum by (instance)( \n eth_exe_block_head_gas_limit{instance=~\"$instance\"} \n)", + "interval": "20s", + "legendFormat": "{{ instance }}", + "range": true, + "refId": "A" + } + ], + "title": "Gas Used ", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": 
{ + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 1, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 0, + "y": 43 + }, + "id": 49, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum by (instance)(\n increase(\n eth_exe_block_head_transactions_in_block{instance=~\"$instance\"}[1m]\n )\n)", + "interval": "1m", + "legendFormat": "{{ instance }}", + "range": true, + "refId": "A" + } + ], + "title": "Transactions per block", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 1, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 5, + "y": 43 + }, + "id": 56, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "eth_exe_block_head_block_size_bytes{instance=~\"$instance\"}", + "interval": "1m", + "legendFormat": "{{ instance }}", + "range": true, + "refId": "A" + } + ], + "title": "Block size", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 1, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": 
"none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 10, + "y": 43 + }, + "id": 55, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "eth_exe_txpool_transactions{instance=~\"$instance\"}", + "interval": "", + "legendFormat": "{{ status }}", + "range": true, + "refId": "A" + } + ], + "title": "Transaction Pool", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 1, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 15, + "y": 43 + }, + "id": 53, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "eth_exe_block_head_gas_used{instance=~\"$instance\"}", + "interval": "", + "legendFormat": "{{ instance }}", + "refId": "A" + } + ], + "title": "Gas used", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 1, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 20, + "y": 43 + }, + "id": 52, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": 
"${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "eth_exe_block_head_gas_limit{instance=~\"$instance\"}", + "interval": "", + "legendFormat": "{{ instance }}", + "refId": "A" + } + ], + "title": "Gas limit", + "type": "timeseries" + }, + { + "collapsed": true, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 49 + }, + "id": 42, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-GrYlRd" + }, + "custom": { + "align": "auto", + "cellOptions": { + "type": "auto" + }, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 13, + "w": 24, + "x": 0, + "y": 50 + }, + "id": 44, + "options": { + "footer": { + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": false + }, + "pluginVersion": "9.3.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum by (spec)(\n label_replace(\n {__name__=~\"eth_con_spec.*\", instance=~\"$instance\"},\n \"spec\",\n \"$1\",\n \"__name__\",\n \"eth_con_spec_(.*)\"\n )\n)", + "format": "table", + "instant": true, + "interval": "", + "legendFormat": "{{ spec }}", + "refId": "A" + } + ], + "title": "Consensus Spec", + "type": "table" + } + ], + "title": "Consensus Spec", + "type": "row" + } + ], + "refresh": "5s", + "schemaVersion": 38, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "metrics-exporter:9091", + "value": "metrics-exporter:9091" + }, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "label_values(eth_con_beacon_slot, instance)", + "hide": 0, + "includeAll": false, + "multi": false, + "name": "instance", + "options": [], + "query": { + "query": "label_values(eth_con_beacon_slot, instance)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Ethereum Metrics Exporter (Single)", + "uid": "M2TT9Su7z", + "version": 1, + "weekStart": "" + } \ No newline at end of file diff --git a/etc/lighthouse.yml b/etc/lighthouse.yml new file mode 100644 index 000000000000..339ac173f2d6 --- /dev/null +++ b/etc/lighthouse.yml @@ -0,0 +1,44 @@ +version: '3.9' +name: reth + +services: + + lighthouse: + restart: unless-stopped + image: sigp/lighthouse + depends_on: + - reth + ports: + - '5052:5052/tcp' # rpc + - '5053:5053/tcp' + - '5054:5054/tcp' # metrics + - '9000:9000/tcp' # p2p + - '9000:9000/udp' # p2p + volumes: + - lighthousedata:/root/.lighthouse + - ./jwttoken:/root/jwt:ro + command: > + lighthouse bn + --http --http-address 0.0.0.0 + --execution-endpoint http://reth:8551 + --metrics --metrics-address 0.0.0.0 + --execution-jwt /root/jwt/jwt.hex + --checkpoint-sync-url https://mainnet.checkpoint.sigp.io + + metrics-exporter: + restart: unless-stopped + image: ethpandaops/ethereum-metrics-exporter:debian-latest + depends_on: + - reth + - lighthouse + ports: + - 9091:9091 # metrics + volumes: + - ./ethereum-metrics-exporter/config.yaml:/root/config.yaml + command: + - --config=/root/config.yaml + - --metrics-port=9091 + +volumes: + 
lighthousedata: + driver: local \ No newline at end of file diff --git a/etc/prometheus/prometheus.yml b/etc/prometheus/prometheus.yml index 8c578af3ad7f..e2a690b71d38 100644 --- a/etc/prometheus/prometheus.yml +++ b/etc/prometheus/prometheus.yml @@ -3,4 +3,9 @@ scrape_configs: metrics_path: "/" scrape_interval: 5s static_configs: - - targets: ['reth:9001'] \ No newline at end of file + - targets: ['reth:9001'] + - job_name: ethereum-metrics-exporter + metrics_path: "/metrics" + scrape_interval: 5s + static_configs: + - targets: ['metrics-exporter:9091'] \ No newline at end of file From 9309279a2a08349e1a012c7d266b944194e61606 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9o=20Vincent?= <28714795+leovct@users.noreply.github.com> Date: Wed, 5 Jul 2023 17:03:30 +0200 Subject: [PATCH 072/722] doc: book cli updater (#3576) --- .github/workflows/book.yml | 21 +- Makefile | 6 + book/cli/cli.md | 12 +- book/cli/config.json | 33 ++ book/cli/config.md | 13 +- book/cli/db.md | 408 +++++++++++++++++--- book/cli/debug.md | 261 ++++++++----- book/cli/import.md | 40 +- book/cli/init.md | 27 +- book/cli/node.md | 188 +++++---- book/cli/p2p.md | 117 ++++-- book/cli/stage.md | 768 ++++++++++++++++++++++++++++--------- book/cli/test-vectors.md | 91 ++++- book/cli/update.sh | 128 +++++++ 14 files changed, 1662 insertions(+), 451 deletions(-) create mode 100644 book/cli/config.json create mode 100755 book/cli/update.sh diff --git a/.github/workflows/book.yml b/.github/workflows/book.yml index 684acb8e966c..3e29f13a5a01 100644 --- a/.github/workflows/book.yml +++ b/.github/workflows/book.yml @@ -105,11 +105,30 @@ jobs: retention-days: 1 if-no-files-found: error + up-to-date: + runs-on: ubuntu-latest + name: up-to-date + + steps: + - uses: actions/checkout@v3 + + - name: Try to update the book cli documentation + run: make update-book-cli BUILD_PATH=reth/target + + - name: Check if the book cli documentation is up to date + run: | + if [[ -n $(git status --porcelain) ]]; then + echo "Error: Documentation is not up to date. Please run \`make update-book-cli\`." + exit 1 + else + echo "The documentation is up to date." + fi + deploy: # Only deploy if a push to main if: github.ref_name == 'main' && github.event_name == 'push' runs-on: ubuntu-latest - needs: [test, lint, build] + needs: [test, lint, build, up-to-date] # Grant GITHUB_TOKEN the permissions required to make a Pages deployment permissions: diff --git a/Makefile b/Makefile index d49d427b9e87..68b8fd3ba6fd 100644 --- a/Makefile +++ b/Makefile @@ -196,3 +196,9 @@ db-tools: ## Compile MDBX debugging tools. @$(MAKE) -C $(MDBX_PATH) IOARENA=1 clean > /dev/null @echo "Run \"$(DB_TOOLS_DIR)/mdbx_stat\" for the info about MDBX db file." @echo "Run \"$(DB_TOOLS_DIR)/mdbx_chk\" for the MDBX db file integrity check." + +.PHONY: update-book-cli +update-book-cli: ## Update book cli documentation. + cargo build --bin reth --features "$(FEATURES)" --profile "$(PROFILE)" + @echo "Updating book cli doc..." + @./book/cli/update.sh $(BUILD_PATH) diff --git a/book/cli/cli.md b/book/cli/cli.md index 300b5361a6ea..22bd8e3096e5 100644 --- a/book/cli/cli.md +++ b/book/cli/cli.md @@ -39,7 +39,7 @@ Commands: db Database debugging utilities stage - Manipulate individual stages. 
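# A minimal bring-up sketch for the compose and scrape files above. It assumes
# the repository's base file at etc/docker-compose.yml defines the `reth`
# service, and that ./jwttoken is the directory mounted into lighthouse; the
# JWT secret must exist before the containers start.
mkdir -p jwttoken
openssl rand -hex 32 | tr -d '\n' > jwttoken/jwt.hex
docker compose -f etc/docker-compose.yml -f etc/lighthouse.yml up -d
# The exporter target added to prometheus.yml should then answer on :9091.
curl -s http://localhost:9091/metrics | head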
+ Manipulate individual stages p2p P2P Debugging utilities test-vectors @@ -64,21 +64,21 @@ Logging: --log.directory The path to put log files in - - [default: /Users/georgios/Library/Caches/reth/logs] + + [default: /reth/logs] --log.journald Log events to journald --log.filter The filter to use for logs written to the log file - - [default: debug] + + [default: error] Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/config.json b/book/cli/config.json new file mode 100644 index 000000000000..355e5c37dae6 --- /dev/null +++ b/book/cli/config.json @@ -0,0 +1,33 @@ +{ + "commands": { + "config": [], + "db": { + "stats": [], + "list": [], + "get": [], + "drop": [], + "version": [], + "path": [] + }, + "debug": { + "execution": [], + "merkle": [] + }, + "import": [], + "init": [], + "node": [], + "p2p": { + "header": [], + "body": [] + }, + "stage": { + "run": [], + "drop": [], + "dump": ["execution", "storage-hashing", "account-hashing", "merkle"], + "unwind": ["to-block", "num-blocks"] + }, + "test-vectors": { + "tables": [] + } + } +} diff --git a/book/cli/config.md b/book/cli/config.md index 39cb09462860..9f0d2f6578bc 100644 --- a/book/cli/config.md +++ b/book/cli/config.md @@ -14,27 +14,30 @@ Options: --default Show the default config + -h, --help + Print help (see a summary with '-h') + Logging: --log.persistent The flag to enable persistent logs --log.directory The path to put log files in - - [default: /Users/georgios/Library/Caches/reth/logs] + + [default: /reth/logs] --log.journald Log events to journald --log.filter The filter to use for logs written to the log file - - [default: debug] + + [default: error] Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/db.md b/book/cli/db.md index 370fbdb0aad6..c06640a9b7ac 100644 --- a/book/cli/db.md +++ b/book/cli/db.md @@ -26,59 +26,210 @@ Commands: Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --chain The chain this node is running. - + Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: - mainnet - goerli - sepolia - + [default: mainnet] -h, --help Print help (see a summary with '-h') +Logging: + --log.persistent + The flag to enable persistent logs + + --log.directory + The path to put log files in + + [default: /reth/logs] + + --log.journald + Log events to journald + + --log.filter + The filter to use for logs written to the log file + + [default: error] + Display: -v, --verbosity... Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output +``` + +## `reth db drop` + +Deletes all database entries + +```bash +$ reth db drop --help + +Usage: reth db drop [OPTIONS] + +Options: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --chain + The chain this node is running. 
+
+        Possible values are either a built-in chain or the path to a chain specification file.
+
+        Built-in chains:
+        - mainnet
+        - goerli
+        - sepolia
+
+        [default: mainnet]
+
+    -h, --help
+        Print help (see a summary with '-h')
+
+Logging:
+    --log.persistent
+        The flag to enable persistent logs
+
+    --log.directory
+        The path to put log files in
+
+        [default: /reth/logs]
+
+    --log.journald
+        Log events to journald
+
+    --log.filter
+        The filter to use for logs written to the log file
+
+        [default: error]
+
+Display:
+    -v, --verbosity...
+        Set the minimum log level.
+
+        -v Errors
+        -vv Warnings
+        -vvv Info
+        -vvvv Debug
+        -vvvvv Traces (warning: very verbose!)
+
+    -q, --quiet
+        Silence all log output
+```
+
+## `reth db get`
+
+Gets the content of a table for the given key
+
+```bash
+$ reth db get --help
+
+Usage: reth db get [OPTIONS] <TABLE> <KEY>
+
+Arguments:
+    <TABLE>
+        The table name
+
+        NOTE: The dupsort tables are not supported now.
+
+    <KEY>
+        The key to get content for
+
+Options:
+    --datadir <DATADIR>
+        The path to the data dir for all reth files and subdirectories.
+
+        Defaults to the OS-specific data directory:
+
+        - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/`
+        - Windows: `{FOLDERID_RoamingAppData}/reth/`
+        - macOS: `$HOME/Library/Application Support/reth/`
+
+        [default: default]
+
+    --chain <CHAIN>
+        The chain this node is running.
+
+        Possible values are either a built-in chain or the path to a chain specification file.
+
+        Built-in chains:
+        - mainnet
+        - goerli
+        - sepolia
+
+        [default: mainnet]
+
+    -h, --help
+        Print help (see a summary with '-h')
+
+Logging:
+    --log.persistent
+        The flag to enable persistent logs
+
+    --log.directory
+        The path to put log files in
+
+        [default: /reth/logs]
+
+    --log.journald
+        Log events to journald
+
+    --log.filter
+        The filter to use for logs written to the log file
+
+        [default: error]
+
+Display:
+    -v, --verbosity...
+        Set the minimum log level.
+
+        -v Errors
+        -vv Warnings
+        -vvv Info
+        -vvvv Debug
+        -vvvvv Traces (warning: very verbose!)
+
+    -q, --quiet
+        Silence all log output
+```

 ## `reth db list`

+Lists the contents of a table
+
 ```bash
 $ reth db list --help

+Usage: reth db list [OPTIONS] <TABLE>
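# A few illustrative invocations of the commands documented above; the table
# name `Headers` and the mainnet datadir are assumptions, not defaults.
reth db stats
reth db list --len 3 --json Headers
reth db version --datadir ~/.local/share/reth/mainnet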
@@ -87,55 +238,87 @@ Arguments: The table name Options: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + -s, --skip Skip first N entries - + [default: 0] - -r, --reverse - Reverse the order of the entries. If enabled last table entries are read. + --chain + The chain this node is running. + + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + - mainnet + - goerli + - sepolia + + [default: mainnet] - [default: false] + -r, --reverse + Reverse the order of the entries. If enabled last table entries are read -l, --len How many items to take from the walker - + [default: 5] -j, --json - Dump as JSON instead of using TUI. + Dump as JSON instead of using TUI -h, --help Print help (see a summary with '-h') -``` -## `reth db get` +Logging: + --log.persistent + The flag to enable persistent logs -```bash -$ reth db get --help -Gets the content of a table for the given key + --log.directory + The path to put log files in + + [default: /reth/logs] -Usage: reth db get [OPTIONS]
+ --log.journald + Log events to journald -Arguments: -
- The table name + --log.filter + The filter to use for logs written to the log file + + [default: error] -Options: - --key - The key to get content for +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) - -h, --help - Print help (see a summary with '-h') + -q, --quiet + Silence all log output ``` -## `reth db version` +## `reth db path` + +Returns the full database path ```bash -$ reth db version --help -Lists current and local database versions +$ reth db path --help -Usage: reth db version [OPTIONS] +Usage: reth db path [OPTIONS] Options: --datadir @@ -163,43 +346,168 @@ Options: -h, --help Print help (see a summary with '-h') + +Logging: + --log.persistent + The flag to enable persistent logs + + --log.directory + The path to put log files in + + [default: /reth/logs] + + --log.journald + Log events to journald + + --log.filter + The filter to use for logs written to the log file + + [default: error] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output ``` -## `reth db drop` +## `reth db stats` + +Lists all the tables, their entry count and their size ```bash -$ reth db drop --help -Deletes all database entries +$ reth db stats --help -Usage: reth db drop [OPTIONS] +Usage: reth db stats [OPTIONS] Options: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --chain + The chain this node is running. + + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + - mainnet + - goerli + - sepolia + + [default: mainnet] + -h, --help Print help (see a summary with '-h') + +Logging: + --log.persistent + The flag to enable persistent logs + + --log.directory + The path to put log files in + + [default: /reth/logs] + + --log.journald + Log events to journald + + --log.filter + The filter to use for logs written to the log file + + [default: error] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output ``` ## `reth db version` +Lists current and local database versions + ```bash $ reth db version --help -Lists current and local database versions Usage: reth db version [OPTIONS] Options: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --chain + The chain this node is running. + + Possible values are either a built-in chain or the path to a chain specification file. 
+ + Built-in chains: + - mainnet + - goerli + - sepolia + + [default: mainnet] + -h, --help Print help (see a summary with '-h') -``` -## `reth db path` +Logging: + --log.persistent + The flag to enable persistent logs -```bash -$ reth db path --help -Returns the full database path + --log.directory + The path to put log files in + + [default: /reth/logs] -Usage: reth db path [OPTIONS] + --log.journald + Log events to journald -Options: - -h, --help - Print help (see a summary with '-h') -``` \ No newline at end of file + --log.filter + The filter to use for logs written to the log file + + [default: error] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output +``` diff --git a/book/cli/debug.md b/book/cli/debug.md index fe152136b795..586a72649770 100644 --- a/book/cli/debug.md +++ b/book/cli/debug.md @@ -1,40 +1,45 @@ # `reth debug` +Various debug routines + ```bash $ reth debug --help -Various debug routines -Usage: reth debug +Usage: reth debug [OPTIONS] Commands: execution - Debug the roundtrip execution of blocks as well as the generated data. + Debug the roundtrip execution of blocks as well as the generated data merkle - Debug the clean & incremental state root calculations. + Debug the clean & incremental state root calculations help Print this message or the help of the given subcommand(s) +Options: + -h, --help + Print help (see a summary with '-h') + Logging: --log.persistent The flag to enable persistent logs --log.directory The path to put log files in - - [default: /Users/georgios/Library/Caches/reth/logs] + + [default: /reth/logs] --log.journald Log events to journald --log.filter The filter to use for logs written to the log file - - [default: debug] + + [default: error] Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info @@ -47,127 +52,203 @@ Display: ## `reth debug execution` +Debug the roundtrip execution of blocks as well as the generated data + ```bash $ reth debug execution --help -Debug the roundtrip execution of blocks as well as the generated data. -Usage: reth debug execution [OPTIONS] +Usage: reth debug execution [OPTIONS] --to Options: - --datadir - The path to the data dir for all reth files and subdirectories. - - Defaults to the OS-specific data directory: - - - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - - Windows: `{FOLDERID_RoamingAppData}/reth/` - - macOS: `$HOME/Library/Application Support/reth/` - - --chain - The chain this node is running. + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --chain + The chain this node is running. + + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + - mainnet + - goerli + - sepolia + + [default: mainnet] + + -h, --help + Print help (see a summary with '-h') - Possible values are either a built-in chain or the path to a chain specification file. 
- - Built-in chains: - - mainnet - - goerli - - sepolia +Networking: + -d, --disable-discovery + Disable the discovery service - [default: mainnet] + --disable-dns-discovery + Disable the DNS discovery - --debug.tip - Set the chain tip manually for testing purposes. + --disable-discv4-discovery + Disable Discv4 discovery - --to - The maximum block height. + --discovery.port + The UDP port to use for P2P discovery/networking. default: 30303 - --interval - The block interval for sync and unwind. + --trusted-peers + Target trusted peer enodes --trusted-peers enode://abcd@192.168.0.1:30303 - [default: 1000] + --trusted-only + Connect only to trusted peers -Networking: - -d, --disable-discovery - Disable the discovery service + --bootnodes + Bootnodes to connect to initially. + + Will fall back to a network-specific default if not specified. - --disable-dns-discovery - Disable the DNS discovery + --peers-file + The path to the known peers file. Connected peers are dumped to this file on nodes + shutdown, and read on startup. Cannot be used with `--no-persist-peers`. - --disable-discv4-discovery - Disable Discv4 discovery + --identity + Custom node identity + + [default: reth/v0.1.0-alpha.1/aarch64-apple-darwin] - --discovery.port - The UDP port to use for P2P discovery/networking. default: 30303 + --p2p-secret-key + Secret key to use for this node. + + This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. - --trusted-peers - Target trusted peer enodes --trusted-peers enode://abcd@192.168.0.1:30303 + --no-persist-peers + Do not persist peers. - --trusted-only - Connect only to trusted peers + --nat + NAT resolution method (any|none|upnp|publicip|extip:) + + [default: any] - --bootnodes - Bootnodes to connect to initially. + --port + Network listening port. default: 30303 - Will fall back to a network-specific default if not specified. + --to + The maximum block height - --peers-file - The path to the known peers file. Connected peers are dumped to this file on nodes - shutdown, and read on startup. Cannot be used with `--no-persist-peers`. + --interval + The block interval for sync and unwind. Defaults to `1000` + + [default: 1000] - --identity - Custom node identity +Debug: + --debug.tip + Set the chain tip manually for testing purposes. + + NOTE: This is a temporary flag - --p2p-secret-key - Secret key to use for this node. - - This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. +Logging: + --log.persistent + The flag to enable persistent logs - --no-persist-peers - Do not persist peers. + --log.directory + The path to put log files in + + [default: /reth/logs] - --nat - NAT resolution method + --log.journald + Log events to journald - [default: any] + --log.filter + The filter to use for logs written to the log file + + [default: error] - --port - Network listening port. default: 30303 +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + -q, --quiet + Silence all log output ``` ## `reth debug merkle` +Debug the clean & incremental state root calculations + ```bash $ reth debug merkle --help -Debug the clean & incremental state root calculations. -Usage: reth debug merkle [OPTIONS] +Usage: reth debug merkle [OPTIONS] --to Options: - --datadir - The path to the data dir for all reth files and subdirectories. 
+ --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --chain + The chain this node is running. + + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + - mainnet + - goerli + - sepolia + + [default: mainnet] + + --to + The height to finish at + + --skip-node-depth + The depth after which we should start comparing branch nodes + + -h, --help + Print help (see a summary with '-h') - Defaults to the OS-specific data directory: - - - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - - Windows: `{FOLDERID_RoamingAppData}/reth/` - - macOS: `$HOME/Library/Application Support/reth/` - - --chain - The chain this node is running. +Logging: + --log.persistent + The flag to enable persistent logs - Possible values are either a built-in chain or the path to a chain specification file. + --log.directory + The path to put log files in + + [default: /reth/logs] - Built-in chains: - - mainnet - - goerli - - sepolia + --log.journald + Log events to journald - [default: mainnet] + --log.filter + The filter to use for logs written to the log file + + [default: error] - --to - The height to finish at +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) - --skip-node-depth - The depth after which we should start comparing branch nodes + -q, --quiet + Silence all log output ``` diff --git a/book/cli/import.md b/book/cli/import.md index 55e9012a8117..ec79c91a635c 100644 --- a/book/cli/import.md +++ b/book/cli/import.md @@ -1,9 +1,18 @@ # `reth import` +This syncs RLP encoded blocks from a file + ```bash $ reth import --help -Usage: reth import [OPTIONS] +Usage: reth import [OPTIONS] + +Arguments: + + The path to a block file for import. + + The online stages (headers and bodies) are replaced by a file import, after which the + remaining stages are executed. Options: --config @@ -11,30 +20,29 @@ Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] --chain The chain this node is running. - + Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: - mainnet - goerli - sepolia - + [default: mainnet] - --path - The path to a block file for import. - - The online stages (headers and bodies) are replaced by a file import, after which the - remaining stages are executed. + -h, --help + Print help (see a summary with '-h') Logging: --log.persistent @@ -42,21 +50,21 @@ Logging: --log.directory The path to put log files in - - [default: /Users/georgios/Library/Caches/reth/logs] + + [default: /reth/logs] --log.journald Log events to journald --log.filter The filter to use for logs written to the log file - - [default: debug] + + [default: error] Display: -v, --verbosity... Set the minimum log level. 
- + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/init.md b/book/cli/init.md index e4684ca565b4..5517043fb6ca 100644 --- a/book/cli/init.md +++ b/book/cli/init.md @@ -1,5 +1,7 @@ # `reth init` +Initialize the database from a genesis file + ```bash $ reth init --help @@ -8,46 +10,51 @@ Usage: reth init [OPTIONS] Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] --chain The chain this node is running. - + Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: - mainnet - goerli - sepolia - + [default: mainnet] + -h, --help + Print help (see a summary with '-h') + Logging: --log.persistent The flag to enable persistent logs --log.directory The path to put log files in - - [default: /Users/georgios/Library/Caches/reth/logs] + + [default: /reth/logs] --log.journald Log events to journald --log.filter The filter to use for logs written to the log file - - [default: debug] + + [default: error] Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/node.md b/book/cli/node.md index a6b53dcf05f8..64b62d480a73 100644 --- a/book/cli/node.md +++ b/book/cli/node.md @@ -1,24 +1,22 @@ # `reth node` -The main node operator command. +Start the node ```bash $ reth node --help -Start the node - Usage: reth node [OPTIONS] Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --config @@ -26,26 +24,23 @@ Options: --chain The chain this node is running. - + Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: - mainnet - goerli - sepolia - + [default: mainnet] - --auto-mine - Automatically mine blocks for new transactions - -h, --help Print help (see a summary with '-h') Metrics: --metrics Enable Prometheus metrics. - + The metrics will be served at the given interface and port. Networking: @@ -69,7 +64,7 @@ Networking: --bootnodes Bootnodes to connect to initially. - + Will fall back to a network-specific default if not specified. --peers-file @@ -78,24 +73,26 @@ Networking: --identity Custom node identity + + [default: reth/v0.1.0-alpha.1/aarch64-apple-darwin] --p2p-secret-key Secret key to use for this node. - + This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. --no-persist-peers Do not persist peers. --nat - NAT resolution method - + NAT resolution method (any|none|upnp|publicip|extip:) + [default: any] --port Network listening port. 
default: 30303 -Rpc: +RPC: --http Enable the HTTP-RPC server @@ -106,7 +103,9 @@ Rpc: Http server port to listen on --http.api - Rpc Modules to be configured for http server + Rpc Modules to be configured for the HTTP server + + [possible values: admin, debug, eth, net, trace, txpool, web3, rpc] --http.corsdomain Http Corsdomain to allow request from @@ -124,7 +123,9 @@ Rpc: Origins from which to accept WebSocket requests --ws.api - Rpc Modules to be configured for Ws server + Rpc Modules to be configured for the WS server + + [possible values: admin, debug, eth, net, trace, txpool, web3, rpc] --ipcdisable Disable the IPC-RPC server @@ -141,53 +142,102 @@ Rpc: --authrpc.jwtsecret Path to a JWT secret to use for authenticated RPC endpoints - --rpc-max-request-size - Set the maximum RPC request payload size for both HTTP and WS in megabytes. - - --rpc-max-response-size - Set the maximum RPC response payload size for both HTTP and WS in megabytes. - - --rpc-max-subscriptions-per-connection - Set the the maximum concurrent subscriptions per connection. - - --rpc-max-connections - Maximum number of RPC server connections. - - --rpc-max-tracing-requests - Maximum number of concurrent tracing requests. - - --gas-price-oracle - Gas price oracle configuration. - - --block-cache-len - Maximum number of block cache entries. - - --receipt-cache-len - Maximum number of receipt cache entries. - - --env-cache-len - Maximum number of env cache entries. + --rpc-max-request-size + Set the maximum RPC request payload size for both HTTP and WS in megabytes + + [default: 15] + + --rpc-max-response-size + Set the maximum RPC response payload size for both HTTP and WS in megabytes + + [default: 100] + + --rpc-max-subscriptions-per-connection + Set the the maximum concurrent subscriptions per connection + + [default: 1024] + + --rpc-max-connections + Maximum number of RPC server connections + + [default: 100] + + --rpc-max-tracing-requests + Maximum number of concurrent tracing requests + + [default: 25] + + --rpc.gascap + Maximum gas limit for `eth_call` and call tracing RPC methods + + [default: 30000000] + +GAS PRICE ORACLE: + --gpo.blocks + Number of recent blocks to check for gas price + + [default: 20] + + --gpo.ignoreprice + Gas Price below which gpo will ignore transactions + + [default: 2] + + --gpo.maxprice + Maximum transaction priority fee(or gasprice before London Fork) to be recommended by gpo + + [default: 500000000000] + + --gpo.percentile + The percentile of gas prices to use for the estimate + + [default: 60] + + --block-cache-size + Max size for cached block data in megabytes + + [default: 500] + + --receipt-cache-size + Max size for cached receipt data in megabytes + + [default: 500] + + --env-cache-size + Max size for cached evm env data in megabytes + + [default: 1] Builder: - --builder.extradata - Block extra data set by the payload builder. - - --builder.gaslimit - Target gas ceiling for built blocks. - - --builder.interval - The interval at which the job should build a new payload after the last (in seconds). - - --builder.deadline - The deadline for when the payload builder job should resolve. - - --builder.max-tasks - Maximum number of tasks to spawn for building a payload. 
+ --builder.extradata + Block extra data set by the payload builder + + [default: reth/v0.1.0-alpha.1/macos] + + --builder.gaslimit + Target gas ceiling for built blocks + + [default: 30000000] + + --builder.interval + The interval at which the job should build a new payload after the last (in seconds) + + [default: 1] + + --builder.deadline + The deadline for when the payload builder job should resolve + + [default: 12] + + --builder.max-tasks + Maximum number of tasks to spawn for building a payload + + [default: 3] Debug: --debug.continuous Prompt the downloader to download blocks one at a time. - + NOTE: This is for testing purposes only. --debug.terminate @@ -195,7 +245,7 @@ Debug: --debug.tip Set the chain tip manually for testing purposes. - + NOTE: This is a temporary flag --debug.max-block @@ -213,27 +263,31 @@ Debug: --debug.hook-all Hook on every transaction in a block +Rpc: + --auto-mine + Automatically mine blocks for new transactions + Logging: --log.persistent The flag to enable persistent logs --log.directory The path to put log files in - - [default: /Users/georgios/Library/Caches/reth/logs] + + [default: /reth/logs] --log.journald Log events to journald --log.filter The filter to use for logs written to the log file - - [default: debug] + + [default: error] Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/p2p.md b/book/cli/p2p.md index 1796c858a1a0..ddaa94133d0c 100644 --- a/book/cli/p2p.md +++ b/book/cli/p2p.md @@ -1,8 +1,9 @@ # `reth p2p` +P2P Debugging utilities + ```bash $ reth p2p --help -P2P Debugging utilities Usage: reth p2p [OPTIONS] @@ -20,30 +21,30 @@ Options: --chain The chain this node is running. - + Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: - mainnet - goerli - sepolia - + [default: mainnet] --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --p2p-secret-key Secret key to use for this node. - + This also will deterministically set the peer ID. -d, --disable-discovery @@ -66,7 +67,7 @@ Options: --retries The number of retries per request - + [default: 5] --nat @@ -81,21 +82,69 @@ Logging: --log.directory The path to put log files in - - [default: /Users/georgios/Library/Caches/reth/logs] + + [default: /reth/logs] --log.journald Log events to journald --log.filter The filter to use for logs written to the log file - - [default: debug] + + [default: error] Display: -v, --verbosity... Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output +``` + +## `reth p2p body` + +Download block body + +```bash +$ reth p2p body --help + +Usage: reth p2p body [OPTIONS] + +Arguments: + + The block number or hash + +Options: + -h, --help + Print help (see a summary with '-h') + +Logging: + --log.persistent + The flag to enable persistent logs + + --log.directory + The path to put log files in + + [default: /reth/logs] + + --log.journald + Log events to journald + --log.filter + The filter to use for logs written to the log file + + [default: error] + +Display: + -v, --verbosity... + Set the minimum log level. 
+ -v Errors -vv Warnings -vvv Info @@ -108,28 +157,48 @@ Display: ## `reth p2p header` +Download block header + ```bash $ reth p2p header --help -Download block header -Usage: reth p2p header +Usage: reth p2p header [OPTIONS] Arguments: The header number or hash -``` +Options: + -h, --help + Print help (see a summary with '-h') -## `reth p2p body` +Logging: + --log.persistent + The flag to enable persistent logs + + --log.directory + The path to put log files in + + [default: /reth/logs] + --log.journald + Log events to journald -```bash -$ reth p2p body --help -Download block body + --log.filter + The filter to use for logs written to the log file + + [default: error] -Usage: reth p2p body +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) -Arguments: - - The block number or hash + -q, --quiet + Silence all log output ``` diff --git a/book/cli/stage.md b/book/cli/stage.md index a89eb7b60791..d23bc2bfa4fa 100644 --- a/book/cli/stage.md +++ b/book/cli/stage.md @@ -1,26 +1,177 @@ # `reth stage` +Manipulate individual stages + ```bash $ reth stage --help -Usage: reth db +Usage: reth stage [OPTIONS] Commands: run - Run a single stage. - - Note that this won't use the Pipeline and as a result runs stages - assuming that all the data can be held in memory. It is not recommended - to run a stage for really large block ranges if your computer does not have - a lot of memory to store all the data. + Run a single stage drop - Drop a stage's tables from the database. + Drop a stage's tables from the database dump - Dumps a stage from a range into a new database. + Dumps a stage from a range into a new database unwind - Unwinds a certain block range, deleting it from the database. + Unwinds a certain block range, deleting it from the database help - Print this message or the help of the given subcommand(s) + Print this message or the help of the given subcommand(s) + +Options: + -h, --help + Print help (see a summary with '-h') + +Logging: + --log.persistent + The flag to enable persistent logs + + --log.directory + The path to put log files in + + [default: /reth/logs] + + --log.journald + Log events to journald + + --log.filter + The filter to use for logs written to the log file + + [default: error] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output +``` + +## `reth stage drop` + +Drop a stage's tables from the database + +```bash +$ reth stage drop --help + +Usage: reth stage drop [OPTIONS] + +Arguments: + + [possible values: headers, bodies, senders, execution, account-hashing, storage-hashing, hashing, merkle, tx-lookup, history, account-history, storage-history, total-difficulty] + +Options: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --chain + The chain this node is running. + + Possible values are either a built-in chain or the path to a chain specification file. 
+ + Built-in chains: + - mainnet + - goerli + - sepolia + + [default: mainnet] + + -h, --help + Print help (see a summary with '-h') + +Logging: + --log.persistent + The flag to enable persistent logs + + --log.directory + The path to put log files in + + [default: /reth/logs] + + --log.journald + Log events to journald + + --log.filter + The filter to use for logs written to the log file + + [default: error] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output +``` + +## `reth stage dump` + +Dumps a stage from a range into a new database + +```bash +$ reth stage dump --help + +Usage: reth stage dump [OPTIONS] + +Commands: + execution + Execution stage + storage-hashing + StorageHashing stage + account-hashing + AccountHashing stage + merkle + Merkle stage + help + Print this message or the help of the given subcommand(s) + +Options: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --chain + The chain this node is running. + + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + - mainnet + - goerli + - sepolia + + [default: mainnet] + + -h, --help + Print help (see a summary with '-h') Logging: --log.persistent @@ -28,21 +179,245 @@ Logging: --log.directory The path to put log files in + + [default: /reth/logs] - [default: /Users/georgios/Library/Caches/reth/logs] + --log.journald + Log events to journald + + --log.filter + The filter to use for logs written to the log file + + [default: error] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output +``` + +### `reth stage dump execution` + +Execution stage + +```bash +$ reth stage dump execution --help + +Usage: reth stage dump execution [OPTIONS] --output-db --from --to + +Options: + --output-db + The path to the new database folder. + + -f, --from + From which block + + -t, --to + To which block + + -d, --dry-run + If passed, it will dry-run a stage execution from the newly created database right after dumping + + -h, --help + Print help (see a summary with '-h') + +Logging: + --log.persistent + The flag to enable persistent logs + + --log.directory + The path to put log files in + + [default: /reth/logs] --log.journald Log events to journald --log.filter The filter to use for logs written to the log file + + [default: error] - [default: debug] +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output +``` + +### `reth stage dump storage-hashing` + +StorageHashing stage + +```bash +$ reth stage dump storage-hashing --help + +Usage: reth stage dump storage-hashing [OPTIONS] --output-db --from --to + +Options: + --output-db + The path to the new database folder. 
+ + -f, --from + From which block + + -t, --to + To which block + + -d, --dry-run + If passed, it will dry-run a stage execution from the newly created database right after dumping + + -h, --help + Print help (see a summary with '-h') + +Logging: + --log.persistent + The flag to enable persistent logs + + --log.directory + The path to put log files in + + [default: /reth/logs] + + --log.journald + Log events to journald + + --log.filter + The filter to use for logs written to the log file + + [default: error] Display: -v, --verbosity... Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output +``` + +### `reth stage dump account-hashing` + +AccountHashing stage + +```bash +$ reth stage dump account-hashing --help +Usage: reth stage dump account-hashing [OPTIONS] --output-db --from --to + +Options: + --output-db + The path to the new database folder. + + -f, --from + From which block + + -t, --to + To which block + + -d, --dry-run + If passed, it will dry-run a stage execution from the newly created database right after dumping + + -h, --help + Print help (see a summary with '-h') + +Logging: + --log.persistent + The flag to enable persistent logs + + --log.directory + The path to put log files in + + [default: /reth/logs] + + --log.journald + Log events to journald + + --log.filter + The filter to use for logs written to the log file + + [default: error] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output +``` + +### `reth stage dump merkle` + +Merkle stage + +```bash +$ reth stage dump merkle --help + +Usage: reth stage dump merkle [OPTIONS] --output-db --from --to + +Options: + --output-db + The path to the new database folder. + + -f, --from + From which block + + -t, --to + To which block + + -d, --dry-run + If passed, it will dry-run a stage execution from the newly created database right after dumping + + -h, --help + Print help (see a summary with '-h') + +Logging: + --log.persistent + The flag to enable persistent logs + + --log.directory + The path to put log files in + + [default: /reth/logs] + + --log.journald + Log events to journald + + --log.filter + The filter to use for logs written to the log file + + [default: error] + +Display: + -v, --verbosity... + Set the minimum log level. + -v Errors -vv Warnings -vvv Info @@ -55,14 +430,20 @@ Display: ## `reth stage run` +Run a single stage. + ```bash -Usage: reth stage run [OPTIONS] --from --to --stage - Run a single stage. +$ reth stage run --help + +Note that this won't use the Pipeline and as a result runs stages assuming that all the data can be held in memory. It is not recommended to run a stage for really large block ranges if your computer does not have a lot of memory to store all the data. - Note that this won't use the Pipeline and as a result runs stages - assuming that all the data can be held in memory. It is not recommended - to run a stage for really large block ranges if your computer does not have - a lot of memory to store all the data. 
+Usage: reth stage run [OPTIONS] --from --to + +Arguments: + + The name of the stage to run + + [possible values: headers, bodies, senders, execution, account-hashing, storage-hashing, hashing, merkle, tx-lookup, history, account-history, storage-history, total-difficulty] Options: --config @@ -70,56 +451,46 @@ Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --chain The chain this node is running. - + Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: - mainnet - goerli - sepolia - + [default: mainnet] --metrics Enable Prometheus metrics. - + The metrics will be served at the given interface and port. - --stage - The name of the stage to run - - [possible values: headers, bodies, senders, execution, account-hashing, storage-hashing, hashing, merkle, tx-lookup, history, account-history, storage-history, total-difficulty] - --from The height to start at -t, --to The end of the stage - --batch-size + --batch-size Batch size for stage execution and unwind -s, --skip-unwind Normally, running the stage requires unwinding for stages that already have been run, in order to not rewrite to the same database slots. - + You can optionally skip the unwinding phase if you're syncing a block range that has not been synced before. - --commit - Commits the changes in the database. WARNING: potentially destructive. - - Useful when you want to run diagnostics on the database. - -h, --help Print help (see a summary with '-h') @@ -144,240 +515,283 @@ Networking: --bootnodes Bootnodes to connect to initially. - + Will fall back to a network-specific default if not specified. --peers-file The path to the known peers file. Connected peers are dumped to this file on nodes shutdown, and read on startup. Cannot be used with `--no-persist-peers`. - --identity + --identity Custom node identity + + [default: reth/v0.1.0-alpha.1/aarch64-apple-darwin] - --p2p-secret-key + --p2p-secret-key Secret key to use for this node. - - This will also deterministically set the peer ID. If not specified, it will be set in the - data dir for the chain being used. + + This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. --no-persist-peers Do not persist peers. --nat - NAT resolution method - + NAT resolution method (any|none|upnp|publicip|extip:) + [default: any] --port Network listening port. default: 30303 + + -c, --commit + Commits the changes in the database. WARNING: potentially destructive. + + Useful when you want to run diagnostics on the database. + +Logging: + --log.persistent + The flag to enable persistent logs + + --log.directory + The path to put log files in + + [default: /reth/logs] + + --log.journald + Log events to journald + + --log.filter + The filter to use for logs written to the log file + + [default: error] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output ``` -## `reth stage drop` +## `reth stage unwind` + +Unwinds a certain block range, deleting it from the database ```bash -Usage: reth stage drop [OPTIONS] - Drop a stage's tables from the database. 
+$ reth stage unwind --help -Arguments: - - The name of the stage to drop +Usage: reth stage unwind [OPTIONS] - [possible values: headers, bodies, senders, execution, account-hashing, storage-hashing, hashing, merkle, tx-lookup, history, account-history, storage-history, total-difficulty] +Commands: + to-block + Unwinds the database until the given block number (range is inclusive) + num-blocks + Unwinds the given number of blocks from the database + help + Print this message or the help of the given subcommand(s) Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --chain The chain this node is running. - + Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: - mainnet - goerli - sepolia - + [default: mainnet] + + -h, --help + Print help (see a summary with '-h') + +Logging: + --log.persistent + The flag to enable persistent logs + + --log.directory + The path to put log files in + + [default: /reth/logs] + + --log.journald + Log events to journald + + --log.filter + The filter to use for logs written to the log file + + [default: error] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output ``` -## `reth stage dump` +### `reth stage unwind to-block` + +Unwinds the database until the given block number (range is inclusive) ```bash -Usage: reth stage dump [OPTIONS] - Dumps a stage from a range into a new database. +$ reth stage unwind to-block --help -Commands: - execution - Execution stage. - storage-hashing - StorageHashing stage. - account-hashing - AccountHashing stage. - merkle - Merkle stage. - help - Print this message or the help of the given subcommand(s) +Usage: reth stage unwind to-block [OPTIONS] + +Arguments: + + Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --chain The chain this node is running. - + Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: - mainnet - goerli - sepolia - + [default: mainnet] -``` - -### `reth stage dump execution` -```bash -Usage: reth stage dump execution [OPTIONS] - Execution stage. - -Options: - --output_db - The path to the new database folder. - - -f, --from - From which block. - - -t, --to - To which block. - - -d, --dry-run - If passed, it will dry-run a stage execution from the newly created database right after - dumping. - - [default: false] -``` - -### `reth stage dump storage-hashing` + -h, --help + Print help (see a summary with '-h') -```bash -Usage: reth stage dump storage-hashing [OPTIONS] - StorageHashing stage. +Logging: + --log.persistent + The flag to enable persistent logs -Options: - --output_db - The path to the new database folder. + --log.directory + The path to put log files in + + [default: /reth/logs] - -f, --from - From which block. + --log.journald + Log events to journald - -t, --to - To which block. 
+ --log.filter + The filter to use for logs written to the log file + + [default: error] - -d, --dry-run - If passed, it will dry-run a stage execution from the newly created database right after - dumping. +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) - [default: false] + -q, --quiet + Silence all log output ``` -### `reth stage dump account-hashing` - -```bash -Usage: reth stage dump account-hashing [OPTIONS] - AccountHashing stage. - -Options: - --output_db - The path to the new database folder. - - -f, --from - From which block. +### `reth stage unwind num-blocks` - -t, --to - To which block. - - -d, --dry-run - If passed, it will dry-run a stage execution from the newly created database right after - dumping. - - [default: false] -``` - -### `reth stage dump merkle` +Unwinds the given number of blocks from the database ```bash -Usage: reth stage dump merkle [OPTIONS] - Merkle stage. - -Options: - --output_db - The path to the new database folder. - - -f, --from - From which block. +$ reth stage unwind num-blocks --help - -t, --to - To which block. - -d, --dry-run - If passed, it will dry-run a stage execution from the newly created database right after - dumping. +Usage: reth stage unwind num-blocks [OPTIONS] - [default: false] -``` - -## `reth stage unwind` - -```bash -Usage: reth stage unwind [OPTIONS] - Unwinds a certain block range, deleting it from the database. - -Commands: - to-block - Unwinds the database until the given block number (range is inclusive). - num-blocks - Unwinds the given number of blocks from the database. - help - Print this message or the help of the given subcommand(s) +Arguments: + + Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --chain The chain this node is running. - + Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: - mainnet - goerli - sepolia - + [default: mainnet] + + -h, --help + Print help (see a summary with '-h') + +Logging: + --log.persistent + The flag to enable persistent logs + + --log.directory + The path to put log files in + + [default: /reth/logs] + + --log.journald + Log events to journald + + --log.filter + The filter to use for logs written to the log file + + [default: error] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output ``` diff --git a/book/cli/test-vectors.md b/book/cli/test-vectors.md index 4e7f9c8e75eb..68ada830f32f 100644 --- a/book/cli/test-vectors.md +++ b/book/cli/test-vectors.md @@ -1,16 +1,97 @@ # `reth test-vectors` +Generate Test Vectors + ```bash $ reth test-vectors --help -Generate Test Vectors -Usage: reth test-vectors +Usage: reth test-vectors [OPTIONS] Commands: tables - Generates test vectors for specified tables. If no table is specified, generate for all. + Generates test vectors for specified tables. 
If no table is specified, generate for all + help + Print this message or the help of the given subcommand(s) + +Options: + -h, --help + Print help (see a summary with '-h') + +Logging: + --log.persistent + The flag to enable persistent logs + + --log.directory + The path to put log files in + + [default: /reth/logs] + + --log.journald + Log events to journald + + --log.filter + The filter to use for logs written to the log file + + [default: error] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output +``` + +## `reth test-vectors tables` + +Generates test vectors for specified tables. If no table is specified, generate for all + +```bash +$ reth test-vectors tables --help + +Usage: reth test-vectors tables [OPTIONS] [NAMES]... Arguments: - - List of table names. Case-sensitive. + [NAMES]... + List of table names. Case-sensitive + +Options: + -h, --help + Print help (see a summary with '-h') + +Logging: + --log.persistent + The flag to enable persistent logs + + --log.directory + The path to put log files in + + [default: /reth/logs] + + --log.journald + Log events to journald + + --log.filter + The filter to use for logs written to the log file + + [default: error] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output ``` diff --git a/book/cli/update.sh b/book/cli/update.sh new file mode 100755 index 000000000000..ca7722e3f337 --- /dev/null +++ b/book/cli/update.sh @@ -0,0 +1,128 @@ +#!/bin/bash + +# Define the build path. +build_path=$1 +if [ -z "$build_path" ]; then + echo "Build path variable is not defined. Exiting..." + exit 1 +fi +reth_path=./$build_path/debug/reth +echo "Using reth path: $reth_path (build path: $build_path)" + +# Define the path to the JSON configuration file. +json_file="./book/cli/config.json" +echo "Using config file: $json_file" + +# Read commands from JSON configuration file. +read_cmds_from_json() { + local json_file="$1" + jq -r '.commands | keys[]' "$json_file" +} + +# Read subcommands for a given command from JSON configuration file. +read_subcmds_from_json() { + local json_file="$1" + local cmd="$2" + jq -r ".commands[\"$cmd\"] | if type == \"object\" then keys[] else .[] end" "$json_file" +} + +# Read subsubcommands for a given command and subcommand from JSON configuration file. +read_subsubcmds_from_json() { + local json_file="$1" + local cmd="$2" + local subcmd="$3" + jq -r ".commands[\"$cmd\"][\"$subcmd\"][]" "$json_file" +} + +# Update the main documentation. +update_main_doc() { + local file_path="./book/cli/cli.md" + local cmd_help_output=$($reth_path --help) + sed -i -e '/## Commands/,$d' "$file_path" + cat >> "$file_path" << EOF +## Commands + +\`\`\`bash +$ reth --help +$cmd_help_output +\`\`\` +EOF +} + +# Update any `reth` command documentation. 
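+# The config.json consumed by the read_*_from_json helpers above is assumed
+# to look roughly like this sketch (hypothetical values; the real file lives
+# at book/cli/config.json):
+#
+#   {
+#     "commands": {
+#       "db": ["stats", "list"],
+#       "stage": { "dump": ["execution", "merkle"] }
+#     }
+#   }
+#
+# A command maps either to an array of subcommands, or to an object whose keys
+# are subcommands and whose values list their subsubcommands.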
+update_cli_cmd() { + local cmd="$1" + local subcmds=("${@:2}") + echo "reth $cmd" + + local cmd_help_output=$($reth_path "$cmd" --help) + local description=$(echo "$cmd_help_output" | head -n 1) + cat > "./book/cli/$cmd.md" << EOF +# \`reth $cmd\` + +$(if [[ -n "$description" ]]; then echo "$description"; fi) + +\`\`\`bash +$ reth $cmd --help +$(echo "$cmd_help_output" | sed '1d') +\`\`\` +EOF + + for subcmd in "${subcmds[@]}"; do + echo " ├── $subcmd" + + local subcmd_help_output=$($reth_path "$cmd" "$subcmd" --help) + local subcmd_description=$(echo "$subcmd_help_output" | head -n 1) + cat >> "book/cli/$cmd.md" << EOF + +## \`reth $cmd $subcmd\` + +$(if [[ -n "$subcmd_description" ]]; then echo "$subcmd_description"; fi) + +\`\`\`bash +$ reth $cmd $subcmd --help +$(echo "$subcmd_help_output" | sed '1d') +\`\`\` +EOF + + # Read subsubcommands and update documentation + subsubcmds=($(read_subsubcmds_from_json "$json_file" "$cmd" "$subcmd")) + for subsubcmd in "${subsubcmds[@]}"; do + echo " ├── $subsubcmd" + + local subsubcmd_help_output=$($reth_path "$cmd" "$subcmd" "$subsubcmd" --help) + local subsubcmd_description=$(echo "$subsubcmd_help_output" | head -n 1) + cat >> "book/cli/$cmd.md" << EOF + +### \`reth $cmd $subcmd $subsubcmd\` + +$(if [[ -n "$subsubcmd_description" ]]; then echo "$subsubcmd_description"; fi) + +\`\`\`bash +$ reth $cmd $subcmd $subsubcmd --help +$(echo "$subsubcmd_help_output" | sed '1d') +\`\`\` +EOF + done + done +} + +# Update the book CLI documentation. +main() { + update_main_doc + + # Update commands doc. + cmds=($(read_cmds_from_json "$json_file")) + for cmd in "${cmds[@]}"; do + subcmds=($(read_subcmds_from_json "$json_file" "$cmd")) + update_cli_cmd "$cmd" "${subcmds[@]}" + done + + # Update default paths on both Linux and macOS to avoid triggering the CI. + sed -i -e 's/default: \/.*\/reth\//default: \/reth\//g' ./book/cli/*.md + rm ./book/cli/*.md-e + + echo "Book updated successfully." +} + +main From d120effa5d6e2043b8df99a4930722e0ab4136f0 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Wed, 5 Jul 2023 18:52:59 +0300 Subject: [PATCH 073/722] perf(trie): post state cursors (#3588) --- crates/storage/provider/src/post_state/mod.rs | 34 +- crates/trie/src/hashed_cursor/post_state.rs | 611 +++++++++++------- 2 files changed, 401 insertions(+), 244 deletions(-) diff --git a/crates/storage/provider/src/post_state/mod.rs b/crates/storage/provider/src/post_state/mod.rs index ba0ba30f3d22..6fb6b02989f4 100644 --- a/crates/storage/provider/src/post_state/mod.rs +++ b/crates/storage/provider/src/post_state/mod.rs @@ -193,24 +193,34 @@ impl PostState { /// /// The hashed post state. pub fn hash_state_slow(&self) -> HashedPostState { - let mut accounts = BTreeMap::default(); + let mut hashed_post_state = HashedPostState::default(); + + // Insert accounts with hashed keys from account changes. for (address, account) in self.accounts() { - accounts.insert(keccak256(address), *account); + let hashed_address = keccak256(address); + if let Some(account) = account { + hashed_post_state.insert_account(hashed_address, *account); + } else { + hashed_post_state.insert_cleared_account(hashed_address); + } } - let mut storages = BTreeMap::default(); + // Insert accounts and storages with hashed keys from storage changes. 
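+        // A zero value means the slot was deleted, so it is tracked separately
+        // from live values (e.g. `slot -> 0` lands in the zero-valued set,
+        // `slot -> 42` in the non-zero-valued list).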
         for (address, storage) in self.storage() {
-            let mut hashed_storage = BTreeMap::default();
+            let mut hashed_storage = HashedStorage::new(storage.wiped());
             for (slot, value) in &storage.storage {
-                hashed_storage.insert(keccak256(H256(slot.to_be_bytes())), *value);
+                let hashed_slot = keccak256(H256(slot.to_be_bytes()));
+                if *value == U256::ZERO {
+                    hashed_storage.insert_zero_valued_slot(hashed_slot);
+                } else {
+                    hashed_storage.insert_non_zero_valued_storage(hashed_slot, *value);
+                }
             }
-            storages.insert(
-                keccak256(address),
-                HashedStorage { wiped: storage.wiped(), storage: hashed_storage },
-            );
+
+            hashed_post_state.insert_hashed_storage(keccak256(address), hashed_storage);
         }
 
-        HashedPostState { accounts, storages }
+        hashed_post_state
     }
 
     /// Calculate the state root for this [PostState].
@@ -248,7 +258,7 @@ impl PostState {
         &self,
         tx: &'a TX,
     ) -> Result<H256, StateRootError> {
-        let hashed_post_state = self.hash_state_slow();
+        let hashed_post_state = self.hash_state_slow().sorted();
         let (account_prefix_set, storage_prefix_set) = hashed_post_state.construct_prefix_sets();
         let hashed_cursor_factory = HashedPostStateCursorFactory::new(tx, &hashed_post_state);
         StateRoot::new(tx)
@@ -1832,7 +1842,7 @@ mod tests {
             .map(|key| {
                 let account = Account { nonce: 1, balance: U256::from(key), bytecode_hash: None };
                 let storage =
-                    (0..10).map(|key| (H256::from_low_u64_be(key), U256::from(key))).collect();
+                    (1..11).map(|key| (H256::from_low_u64_be(key), U256::from(key))).collect();
                 (Address::from_low_u64_be(key), (account, storage))
             })
             .collect();
diff --git a/crates/trie/src/hashed_cursor/post_state.rs b/crates/trie/src/hashed_cursor/post_state.rs
index 474fe0335247..662a71163aec 100644
--- a/crates/trie/src/hashed_cursor/post_state.rs
+++ b/crates/trie/src/hashed_cursor/post_state.rs
@@ -6,27 +6,113 @@ use reth_db::{
     transaction::{DbTx, DbTxGAT},
 };
 use reth_primitives::{trie::Nibbles, Account, StorageEntry, H256, U256};
-use std::collections::{BTreeMap, HashMap};
+use std::collections::{HashMap, HashSet};
 
 /// The post state account storage with hashed slots.
-#[derive(Debug, Default, Clone, Eq, PartialEq)]
+#[derive(Debug, Clone, Eq, PartialEq)]
 pub struct HashedStorage {
+    /// Hashed storage slots with non-zero values.
+    non_zero_valued_storage: Vec<(H256, U256)>,
+    /// Slots that have been zero-valued.
+    zero_valued_slots: HashSet<H256>,
     /// Whether the storage was wiped or not.
-    pub wiped: bool,
-    /// Hashed storage slots.
-    pub storage: BTreeMap<H256, U256>,
+    wiped: bool,
+    /// Whether the storage entries were sorted or not.
+    sorted: bool,
+}
+
+impl HashedStorage {
+    /// Create a new instance of [HashedStorage].
+    pub fn new(wiped: bool) -> Self {
+        Self {
+            non_zero_valued_storage: Vec::new(),
+            zero_valued_slots: HashSet::new(),
+            wiped,
+            sorted: true, // empty is sorted
+        }
+    }
+
+    /// Sorts the non-zero-valued storage entries.
+    pub fn sort_storage(&mut self) {
+        if !self.sorted {
+            self.non_zero_valued_storage.sort_unstable_by_key(|(slot, _)| *slot);
+            self.sorted = true;
+        }
+    }
+
+    /// Insert a non-zero-valued storage entry.
+    pub fn insert_non_zero_valued_storage(&mut self, slot: H256, value: U256) {
+        debug_assert!(value != U256::ZERO, "value cannot be zero");
+        self.non_zero_valued_storage.push((slot, value));
+        self.sorted = false;
+    }
+
+    /// Insert a zero-valued storage slot.
+    pub fn insert_zero_valued_slot(&mut self, slot: H256) {
+        self.zero_valued_slots.insert(slot);
+    }
+}
 
 /// The post state with hashed addresses as keys.
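+/// Entries are buffered in insertion order; call [HashedPostState::sort] (or
+/// [HashedPostState::sorted]) before handing the post state to the cursors.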
-#[derive(Debug, Default, Clone, Eq, PartialEq)]
+#[derive(Debug, Clone, Eq, PartialEq)]
 pub struct HashedPostState {
     /// Map of hashed addresses to account info.
-    pub accounts: BTreeMap<H256, Option<Account>>,
+    accounts: Vec<(H256, Account)>,
+    /// Set of cleared accounts.
+    cleared_accounts: HashSet<H256>,
     /// Map of hashed addresses to hashed storage.
-    pub storages: BTreeMap<H256, HashedStorage>,
+    storages: HashMap<H256, HashedStorage>,
+    /// Whether the account and storage entries were sorted or not.
+    sorted: bool,
+}
+
+impl Default for HashedPostState {
+    fn default() -> Self {
+        Self {
+            accounts: Vec::new(),
+            cleared_accounts: HashSet::new(),
+            storages: HashMap::new(),
+            sorted: true, // empty is sorted
+        }
+    }
 }
 
 impl HashedPostState {
+    /// Sort and return self.
+    pub fn sorted(mut self) -> Self {
+        self.sort();
+        self
+    }
+
+    /// Sort account and storage entries.
+    pub fn sort(&mut self) {
+        if !self.sorted {
+            for (_, storage) in self.storages.iter_mut() {
+                storage.sort_storage();
+            }
+
+            self.accounts.sort_unstable_by_key(|(address, _)| *address);
+            self.sorted = true;
+        }
+    }
+
+    /// Insert non-empty account info.
+    pub fn insert_account(&mut self, hashed_address: H256, account: Account) {
+        self.accounts.push((hashed_address, account));
+        self.sorted = false;
+    }
+
+    /// Insert cleared hashed account key.
+    pub fn insert_cleared_account(&mut self, hashed_address: H256) {
+        self.cleared_accounts.insert(hashed_address);
+    }
+
+    /// Insert hashed storage entry.
+    pub fn insert_hashed_storage(&mut self, hashed_address: H256, hashed_storage: HashedStorage) {
+        self.sorted &= hashed_storage.sorted;
+        self.storages.insert(hashed_address, hashed_storage);
+    }
+
     /// Construct [PrefixSet] from hashed post state.
     /// The prefix sets contain the hashed account and storage keys that have been changed in the
     /// post state.
@@ -35,17 +121,24 @@ impl HashedPostState {
         let mut account_prefix_set = PrefixSetMut::default();
         let mut storage_prefix_set: HashMap<H256, PrefixSetMut> = HashMap::default();
 
-        for hashed_address in self.accounts.keys() {
+        // Populate account prefix set.
+        for (hashed_address, _) in &self.accounts {
+            account_prefix_set.insert(Nibbles::unpack(hashed_address));
+        }
+        for hashed_address in &self.cleared_accounts {
             account_prefix_set.insert(Nibbles::unpack(hashed_address));
         }
 
+        // Populate storage prefix sets.
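+        // Both non-zero-valued and zero-valued slots are marked: a deleted
+        // slot still changes its trie path and must be revisited by the walker.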
         for (hashed_address, hashed_storage) in self.storages.iter() {
             account_prefix_set.insert(Nibbles::unpack(hashed_address));
-            for hashed_slot in hashed_storage.storage.keys() {
-                storage_prefix_set
-                    .entry(*hashed_address)
-                    .or_default()
-                    .insert(Nibbles::unpack(hashed_slot));
+
+            let storage_prefix_set_entry = storage_prefix_set.entry(*hashed_address).or_default();
+            for (hashed_slot, _) in &hashed_storage.non_zero_valued_storage {
+                storage_prefix_set_entry.insert(Nibbles::unpack(hashed_slot));
+            }
+            for hashed_slot in &hashed_storage.zero_valued_slots {
+                storage_prefix_set_entry.insert(Nibbles::unpack(hashed_slot));
             }
         }
 
@@ -74,22 +167,17 @@ impl<'a, 'b, 'tx, TX: DbTx<'tx>> HashedCursorFactory<'a>
 where
     'a: 'b,
 {
-    type AccountCursor = HashedPostStateAccountCursor<'b, <TX as DbTxGAT<'a>>::Cursor<tables::HashedAccount>> where Self: 'a ;
+    type AccountCursor = HashedPostStateAccountCursor<'b, <TX as DbTxGAT<'a>>::Cursor<tables::HashedAccount>> where Self: 'a;
 
     type StorageCursor = HashedPostStateStorageCursor<'b, <TX as DbTxGAT<'a>>::DupCursor<tables::HashedStorage>> where Self: 'a;
 
     fn hashed_account_cursor(&'a self) -> Result<Self::AccountCursor, reth_db::DatabaseError> {
         let cursor = self.tx.cursor_read::<tables::HashedAccount>()?;
-        Ok(HashedPostStateAccountCursor { post_state: self.post_state, cursor, last_account: None })
+        Ok(HashedPostStateAccountCursor::new(cursor, self.post_state))
     }
 
     fn hashed_storage_cursor(&'a self) -> Result<Self::StorageCursor, reth_db::DatabaseError> {
         let cursor = self.tx.cursor_dup_read::<tables::HashedStorage>()?;
-        Ok(HashedPostStateStorageCursor {
-            post_state: self.post_state,
-            cursor,
-            account: None,
-            last_slot: None,
-        })
+        Ok(HashedPostStateStorageCursor::new(cursor, self.post_state))
     }
 }
 
@@ -101,22 +189,26 @@ pub struct HashedPostStateAccountCursor<'b, C> {
     cursor: C,
     /// The reference to the in-memory [HashedPostState].
     post_state: &'b HashedPostState,
+    /// The post state account index where the cursor is currently at.
+    post_state_account_index: usize,
     /// The last hashed account key that was returned by the cursor.
     /// De facto, this is a current cursor position.
    last_account: Option<H256>,
 }
 
-impl<'b, 'tx, C> HashedPostStateAccountCursor<'b, C>
-where
-    C: DbCursorRO<'tx, tables::HashedAccount>,
-{
+impl<'b, C> HashedPostStateAccountCursor<'b, C> {
+    /// Create a new instance of [HashedPostStateAccountCursor].
+    pub fn new(cursor: C, post_state: &'b HashedPostState) -> Self {
+        Self { cursor, post_state, last_account: None, post_state_account_index: 0 }
+    }
+
     /// Returns `true` if the account has been destroyed.
     /// This check is used for evicting account keys from the state trie.
     ///
    /// This function only checks the post state, not the database, because the latter does not
     /// store destroyed accounts.
     fn is_account_cleared(&self, account: &H256) -> bool {
-        matches!(self.post_state.accounts.get(account), Some(None))
+        self.post_state.cleared_accounts.contains(account)
     }
 
     /// Return the account with the lowest hashed account key.
     ///
     /// Given the next post state and database entries, return the smallest of the two.
     /// If the account keys are the same, the post state entry is given precedence.
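+    ///
+    /// For example (illustrative): given post state `(key, A')` and database
+    /// `(key, A)` for the same key, the post state entry `(key, A')` is
+    /// returned; otherwise the entry with the smaller key wins.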
     fn next_account(
-        &self,
-        post_state_item: Option<(H256, Account)>,
+        post_state_item: Option<&(H256, Account)>,
         db_item: Option<(H256, Account)>,
-    ) -> Result<Option<(H256, Account)>, reth_db::DatabaseError> {
-        let result = match (post_state_item, db_item) {
+    ) -> Option<(H256, Account)> {
+        match (post_state_item, db_item) {
             // If both are not empty, return the smallest of the two
             // Post state is given precedence if keys are equal
             (Some((post_state_address, post_state_account)), Some((db_address, db_account))) => {
-                if post_state_address <= db_address {
-                    Some((post_state_address, post_state_account))
+                if post_state_address <= &db_address {
+                    Some((*post_state_address, *post_state_account))
                 } else {
                     Some((db_address, db_account))
                 }
             }
             // If the database is empty, return the post state entry
             (Some((post_state_address, post_state_account)), None) => {
-                Some((post_state_address, post_state_account))
+                Some((*post_state_address, *post_state_account))
             }
             // If the post state is empty, return the database entry
             (None, Some((db_address, db_account))) => Some((db_address, db_account)),
             // If both are empty, return None
             (None, None) => None,
-        };
-        Ok(result)
+        }
     }
 }
 
@@ -164,35 +254,45 @@ where
     /// The returned account key is memoized and the cursor remains positioned at that key until
     /// [HashedAccountCursor::seek] or [HashedAccountCursor::next] are called.
     fn seek(&mut self, key: H256) -> Result<Option<(H256, Account)>, reth_db::DatabaseError> {
+        debug_assert!(self.post_state.sorted, "`HashedPostState` must be pre-sorted");
+
         self.last_account = None;
 
-        // Attempt to find the account in poststate.
-        let post_state_item = self
-            .post_state
-            .accounts
-            .iter()
-            .find_map(|(k, v)| v.filter(|_| k >= &key).map(|v| (*k, v)));
-        if let Some((address, account)) = post_state_item {
-            // It's an exact match, return the account from post state without looking up in the
-            // database.
-            if address == key {
-                self.last_account = Some(address);
-                return Ok(Some((address, account)))
+        // Take the next account from the post state with the key greater than or equal to the
+        // sought key.
+        let mut post_state_entry = self.post_state.accounts.get(self.post_state_account_index);
+        while let Some((k, _)) = post_state_entry {
+            if k >= &key {
+                // Found the next entry that is equal or greater than the key.
+                break
+            }
+
+            self.post_state_account_index += 1;
+            post_state_entry = self.post_state.accounts.get(self.post_state_account_index);
+        }
+
+        // It's an exact match, return the account from post state without looking up in the
+        // database.
+        if let Some((address, account)) = post_state_entry {
+            if address == &key {
+                self.last_account = Some(*address);
+                return Ok(Some((*address, *account)))
             }
         }
 
         // It's not an exact match, reposition to the first greater or equal account that wasn't
        // cleared.
-        let mut db_item = self.cursor.seek(key)?;
-        while db_item
+        let mut db_entry = self.cursor.seek(key)?;
+        while db_entry
             .as_ref()
             .map(|(address, _)| self.is_account_cleared(address))
             .unwrap_or_default()
         {
-            db_item = self.cursor.next()?;
+            db_entry = self.cursor.next()?;
         }
 
-        let result = self.next_account(post_state_item, db_item)?;
+        // Compare two entries and return the lowest.
+        let result = Self::next_account(post_state_entry, db_entry);
        self.last_account = result.as_ref().map(|(address, _)| *address);
         Ok(result)
     }
@@ -205,28 +305,38 @@
     /// NOTE: This function will not return any entry unless [HashedAccountCursor::seek] has been
     /// called.
     fn next(&mut self) -> Result<Option<(H256, Account)>, reth_db::DatabaseError> {
+        debug_assert!(self.post_state.sorted, "`HashedPostState` must be pre-sorted");
+
         let last_account = match self.last_account.as_ref() {
             Some(account) => account,
             None => return Ok(None), // no previous entry was found
         };
 
         // If post state was given precedence, move the cursor forward.
-        let mut db_item = self.cursor.current()?;
-        while db_item
+        let mut db_entry = self.cursor.current()?;
+        while db_entry
             .as_ref()
             .map(|(address, _)| address <= last_account || self.is_account_cleared(address))
             .unwrap_or_default()
         {
-            db_item = self.cursor.next()?;
+            db_entry = self.cursor.next()?;
+        }
+
+        // Take the next account from the post state with the key greater than or equal to the
+        // sought key.
+        let mut post_state_entry = self.post_state.accounts.get(self.post_state_account_index);
+        while let Some((k, _)) = post_state_entry {
+            if k > last_account {
+                // Found the next entry in the post state.
+                break
+            }
+
+            self.post_state_account_index += 1;
+            post_state_entry = self.post_state.accounts.get(self.post_state_account_index);
         }
 
-        let post_state_item = self
-            .post_state
-            .accounts
-            .iter()
-            .find(|(k, v)| k > &last_account && v.is_some())
-            .map(|(address, info)| (*address, info.unwrap()));
-        let result = self.next_account(post_state_item, db_item)?;
+        // Compare two entries and return the lowest.
+        let result = Self::next_account(post_state_entry, db_entry);
         self.last_account = result.as_ref().map(|(address, _)| *address);
         Ok(result)
     }
@@ -240,6 +350,8 @@ pub struct HashedPostStateStorageCursor<'b, C> {
     cursor: C,
     /// The reference to the post state.
     post_state: &'b HashedPostState,
+    /// The post state index where the cursor is currently at.
+    post_state_storage_index: usize,
     /// The current hashed account key.
     account: Option<H256>,
     /// The last slot that has been returned by the cursor.
     /// De facto, this is a current cursor position.
     last_slot: Option<H256>,
 }
 
 impl<'b, C> HashedPostStateStorageCursor<'b, C> {
+    /// Create a new instance of [HashedPostStateStorageCursor].
+    pub fn new(cursor: C, post_state: &'b HashedPostState) -> Self {
+        Self { cursor, post_state, account: None, last_slot: None, post_state_storage_index: 0 }
+    }
+
     /// Returns `true` if the storage for the given account was wiped.
     /// The database is not checked since it already has no wiped storage entries.
     fn is_db_storage_wiped(&self, account: &H256) -> bool {
         match self.post_state.storages.get(account) {
             Some(storage) => storage.wiped,
             None => false,
         }
     }
 
     /// Check if the slot was zeroed out in the post state.
     /// The database is not checked since it already has no zero-valued slots.
-    fn is_touched_slot_value_zero(&self, account: &H256, slot: &H256) -> bool {
+    fn is_slot_zero_valued(&self, account: &H256, slot: &H256) -> bool {
         self.post_state
             .storages
             .get(account)
-            .and_then(|storage| storage.storage.get(slot))
-            .map(|value| *value == U256::ZERO)
+            .map(|storage| storage.zero_valued_slots.contains(slot))
             .unwrap_or_default()
     }
 
     /// Return the storage slot with the lowest hashed storage key (hashed slot).
     ///
     /// Given the next post state and database entries, return the smallest of the two.
     /// If the storage keys are the same, the post state entry is given precedence.
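+    ///
+    /// For example (illustrative): if both sides hold the same hashed slot,
+    /// the post state value shadows the database value.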
     fn next_slot(
         &self,
-        post_state_item: Option<(&H256, &U256)>,
+        post_state_item: Option<&(H256, U256)>,
         db_item: Option<StorageEntry>,
-    ) -> Result<Option<StorageEntry>, reth_db::DatabaseError> {
-        let result = match (post_state_item, db_item) {
+    ) -> Option<StorageEntry> {
+        match (post_state_item, db_item) {
             // If both are not empty, return the smallest of the two
             // Post state is given precedence if keys are equal
             (Some((post_state_slot, post_state_value)), Some(db_entry)) => {
                 if post_state_slot <= &db_entry.key {
                     Some(StorageEntry { key: *post_state_slot, value: *post_state_value })
                 } else {
                     Some(db_entry)
                 }
             }
             // If the database is empty, return the post state entry
             (Some((post_state_slot, post_state_value)), None) => {
                 Some(StorageEntry { key: *post_state_slot, value: *post_state_value })
             }
             // If the post state is empty, return the database entry
             (None, Some(db_entry)) => Some(db_entry),
             // If both are empty, return None
             (None, None) => None,
-        };
-        Ok(result)
+        }
     }
 }
 
@@ -314,7 +429,7 @@ where
                 // If the storage has been wiped at any point
                 storage.wiped &&
                 // and the current storage does not contain any non-zero values
-                storage.storage.iter().all(|(_, value)| *value == U256::ZERO)
+                storage.non_zero_valued_storage.is_empty()
             }
             None => self.cursor.seek_exact(key)?.is_none(),
         };
 
     fn seek(
         &mut self,
         account: H256,
         subkey: H256,
     ) -> Result<Option<StorageEntry>, reth_db::DatabaseError> {
-        self.last_slot = None;
-        self.account = Some(account);
+        if self.account.map_or(true, |acc| acc != account) {
+            self.account = Some(account);
+            self.last_slot = None;
+            self.post_state_storage_index = 0;
+        }
 
-        // Attempt to find the account's storage in poststate.
-        let post_state_item = self
-            .post_state
-            .storages
-            .get(&account)
-            .map(|storage| {
-                storage
-                    .storage
-                    .iter()
-                    .skip_while(|(slot, value)| slot < &&subkey || value == &&U256::ZERO)
-            })
-            .and_then(|mut iter| iter.next());
-        if let Some((slot, value)) = post_state_item {
-            // It's an exact match, return the storage slot from post state without looking up in
-            // the database.
+        // Attempt to find the account's storage in post state.
+        let mut post_state_entry = None;
+        if let Some(storage) = self.post_state.storages.get(&account) {
+            debug_assert!(storage.sorted, "`HashedStorage` must be pre-sorted");
+
+            post_state_entry = storage.non_zero_valued_storage.get(self.post_state_storage_index);
+
+            while let Some((slot, _)) = post_state_entry {
+                if slot >= &subkey {
+                    // Found the next entry that is equal or greater than the key.
+                    break
+                }
+
+                self.post_state_storage_index += 1;
+                post_state_entry =
+                    storage.non_zero_valued_storage.get(self.post_state_storage_index);
+            }
+        }
+
+        // It's an exact match, return the storage slot from post state without looking up in
+        // the database.
+        if let Some((slot, value)) = post_state_entry {
             if slot == &subkey {
                 self.last_slot = Some(*slot);
                 return Ok(Some(StorageEntry { key: *slot, value: *value }))
             }
         }
 
         // It's not an exact match, reposition to the first greater or equal account.
-        let db_item = if self.is_db_storage_wiped(&account) {
+        let db_entry = if self.is_db_storage_wiped(&account) {
             None
         } else {
-            let mut db_item = self.cursor.seek_by_key_subkey(account, subkey)?;
+            let mut db_entry = self.cursor.seek_by_key_subkey(account, subkey)?;
 
-            while db_item
+            while db_entry
                 .as_ref()
-                .map(|entry| self.is_touched_slot_value_zero(&account, &entry.key))
+                .map(|entry| self.is_slot_zero_valued(&account, &entry.key))
                 .unwrap_or_default()
             {
-                db_item = self.cursor.next_dup_val()?;
+                db_entry = self.cursor.next_dup_val()?;
             }
 
-            db_item
+            db_entry
         };
 
-        let result = self.next_slot(post_state_item, db_item)?;
+        // Compare two entries and return the lowest.
+ let result = self.next_slot(post_state_entry, db_entry); self.last_slot = result.as_ref().map(|entry| entry.key); Ok(result) } @@ -387,40 +513,49 @@ where None => return Ok(None), // no previous entry was found }; - let db_item = if self.is_db_storage_wiped(&account) { + let db_entry = if self.is_db_storage_wiped(&account) { None } else { // If post state was given precedence, move the cursor forward. - let mut db_item = self.cursor.seek_by_key_subkey(account, *last_slot)?; + let mut db_entry = self.cursor.seek_by_key_subkey(account, *last_slot)?; // If the entry was already returned, move to the next. - if db_item.as_ref().map(|entry| &entry.key == last_slot).unwrap_or_default() { - db_item = self.cursor.next_dup_val()?; + if db_entry.as_ref().map(|entry| &entry.key == last_slot).unwrap_or_default() { + db_entry = self.cursor.next_dup_val()?; } - while db_item + while db_entry .as_ref() - .map(|entry| self.is_touched_slot_value_zero(&account, &entry.key)) + .map(|entry| self.is_slot_zero_valued(&account, &entry.key)) .unwrap_or_default() { - db_item = self.cursor.next_dup_val()?; + db_entry = self.cursor.next_dup_val()?; } - db_item + db_entry }; - let post_state_item = self - .post_state - .storages - .get(&account) - .map(|storage| { - storage - .storage - .iter() - .skip_while(|(slot, value)| slot <= &last_slot || value == &&U256::ZERO) - }) - .and_then(|mut iter| iter.next()); - let result = self.next_slot(post_state_item, db_item)?; + // Attempt to find the account's storage in post state. + let mut post_state_entry = None; + if let Some(storage) = self.post_state.storages.get(&account) { + debug_assert!(storage.sorted, "`HashStorage` must be pre-sorted"); + + post_state_entry = storage.non_zero_valued_storage.get(self.post_state_storage_index); + + while let Some((k, _)) = post_state_entry { + if k > last_slot { + // Found the next entry. + break + } + + self.post_state_storage_index += 1; + post_state_entry = + storage.non_zero_valued_storage.get(self.post_state_storage_index); + } + } + + // Compare two entries and return the lowest. 
+ let result = self.next_slot(post_state_entry, db_entry); self.last_slot = result.as_ref().map(|entry| entry.key); Ok(result) } @@ -431,6 +566,7 @@ mod tests { use super::*; use proptest::prelude::*; use reth_db::{database::Database, test_utils::create_test_rw_db, transaction::DbTxMut}; + use std::collections::BTreeMap; fn assert_account_cursor_order<'a, 'b>( factory: &'a impl HashedCursorFactory<'b>, @@ -478,17 +614,17 @@ mod tests { fn post_state_only_accounts() { let accounts = Vec::from_iter((1..11).map(|key| (H256::from_low_u64_be(key), Account::default()))); - let post_state = HashedPostState { - accounts: BTreeMap::from_iter( - accounts.iter().map(|(key, account)| (*key, Some(*account))), - ), - storages: Default::default(), - }; + + let mut hashed_post_state = HashedPostState::default(); + for (hashed_address, account) in &accounts { + hashed_post_state.insert_account(*hashed_address, *account); + } + hashed_post_state.sort(); let db = create_test_rw_db(); let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, &post_state); + let factory = HashedPostStateCursorFactory::new(&tx, &hashed_post_state); assert_account_cursor_order(&factory, accounts.into_iter()); } @@ -525,18 +661,14 @@ mod tests { }) .unwrap(); - let post_state = HashedPostState { - accounts: BTreeMap::from_iter( - accounts - .iter() - .filter(|x| x.0.to_low_u64_be() % 2 != 0) - .map(|(key, account)| (*key, Some(*account))), - ), - storages: Default::default(), - }; + let mut hashed_post_state = HashedPostState::default(); + for (hashed_address, account) in accounts.iter().filter(|x| x.0.to_low_u64_be() % 2 != 0) { + hashed_post_state.insert_account(*hashed_address, *account); + } + hashed_post_state.sort(); let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, &post_state); + let factory = HashedPostStateCursorFactory::new(&tx, &hashed_post_state); assert_account_cursor_order(&factory, accounts.into_iter()); } @@ -556,17 +688,18 @@ mod tests { }) .unwrap(); - let post_state = HashedPostState { - accounts: BTreeMap::from_iter( - accounts.iter().filter(|x| x.0.to_low_u64_be() % 2 != 0).map(|(key, account)| { - (*key, if removed_keys.contains(key) { None } else { Some(*account) }) - }), - ), - storages: Default::default(), - }; + let mut hashed_post_state = HashedPostState::default(); + for (hashed_address, account) in accounts.iter().filter(|x| x.0.to_low_u64_be() % 2 != 0) { + if removed_keys.contains(hashed_address) { + hashed_post_state.insert_cleared_account(*hashed_address); + } else { + hashed_post_state.insert_account(*hashed_address, *account); + } + } + hashed_post_state.sort(); let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, &post_state); + let factory = HashedPostStateCursorFactory::new(&tx, &hashed_post_state); let expected = accounts.into_iter().filter(|x| !removed_keys.contains(&x.0)); assert_account_cursor_order(&factory, expected); } @@ -587,15 +720,14 @@ mod tests { }) .unwrap(); - let post_state = HashedPostState { - accounts: BTreeMap::from_iter( - accounts.iter().map(|(key, account)| (*key, Some(*account))), - ), - storages: Default::default(), - }; + let mut hashed_post_state = HashedPostState::default(); + for (hashed_address, account) in &accounts { + hashed_post_state.insert_account(*hashed_address, *account); + } + hashed_post_state.sort(); let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, &post_state); + let factory = HashedPostStateCursorFactory::new(&tx, 
&hashed_post_state); assert_account_cursor_order(&factory, accounts.into_iter()); } @@ -610,12 +742,15 @@ mod tests { }) .unwrap(); - let post_state = HashedPostState { - accounts: BTreeMap::from_iter( - post_state_accounts.iter().map(|(key, account)| (*key, *account)), - ), - storages: Default::default(), - }; + let mut hashed_post_state = HashedPostState::default(); + for (hashed_address, account) in &post_state_accounts { + if let Some(account) = account { + hashed_post_state.insert_account(*hashed_address, *account); + } else { + hashed_post_state.insert_cleared_account(*hashed_address); + } + } + hashed_post_state.sort(); let mut expected = db_accounts; // overwrite or remove accounts from the expected result @@ -628,7 +763,7 @@ mod tests { } let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, &post_state); + let factory = HashedPostStateCursorFactory::new(&tx, &hashed_post_state); assert_account_cursor_order(&factory, expected.into_iter()); } ); @@ -673,51 +808,44 @@ mod tests { // wiped storage, must be empty { - let post_state = HashedPostState { - accounts: BTreeMap::default(), - storages: BTreeMap::from_iter([( - address, - HashedStorage { wiped: true, ..Default::default() }, - )]), - }; + let wiped = true; + let hashed_storage = HashedStorage::new(wiped); + + let mut hashed_post_state = HashedPostState::default(); + hashed_post_state.insert_hashed_storage(address, hashed_storage); + let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, &post_state); + let factory = HashedPostStateCursorFactory::new(&tx, &hashed_post_state); let mut cursor = factory.hashed_storage_cursor().unwrap(); assert!(cursor.is_storage_empty(address).unwrap()); } // wiped storage, but post state has zero-value entries { - let post_state = HashedPostState { - accounts: BTreeMap::default(), - storages: BTreeMap::from_iter([( - address, - HashedStorage { - wiped: true, - storage: BTreeMap::from_iter([(H256::random(), U256::ZERO)]), - }, - )]), - }; + let wiped = true; + let mut hashed_storage = HashedStorage::new(wiped); + hashed_storage.insert_zero_valued_slot(H256::random()); + + let mut hashed_post_state = HashedPostState::default(); + hashed_post_state.insert_hashed_storage(address, hashed_storage); + let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, &post_state); + let factory = HashedPostStateCursorFactory::new(&tx, &hashed_post_state); let mut cursor = factory.hashed_storage_cursor().unwrap(); assert!(cursor.is_storage_empty(address).unwrap()); } // wiped storage, but post state has non-zero entries { - let post_state = HashedPostState { - accounts: BTreeMap::default(), - storages: BTreeMap::from_iter([( - address, - HashedStorage { - wiped: true, - storage: BTreeMap::from_iter([(H256::random(), U256::from(1))]), - }, - )]), - }; + let wiped = true; + let mut hashed_storage = HashedStorage::new(wiped); + hashed_storage.insert_non_zero_valued_storage(H256::random(), U256::from(1)); + + let mut hashed_post_state = HashedPostState::default(); + hashed_post_state.insert_hashed_storage(address, hashed_storage); + let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, &post_state); + let factory = HashedPostStateCursorFactory::new(&tx, &hashed_post_state); let mut cursor = factory.hashed_storage_cursor().unwrap(); assert!(!cursor.is_storage_empty(address).unwrap()); } @@ -727,9 +855,9 @@ mod tests { fn storage_cursor_correct_order() { let address = H256::random(); let db_storage = - 
BTreeMap::from_iter((0..10).map(|key| (H256::from_low_u64_be(key), U256::from(key)))); + BTreeMap::from_iter((1..11).map(|key| (H256::from_low_u64_be(key), U256::from(key)))); let post_state_storage = - BTreeMap::from_iter((10..20).map(|key| (H256::from_low_u64_be(key), U256::from(key)))); + BTreeMap::from_iter((11..21).map(|key| (H256::from_low_u64_be(key), U256::from(key)))); let db = create_test_rw_db(); db.update(|tx| { @@ -744,16 +872,18 @@ mod tests { }) .unwrap(); - let post_state = HashedPostState { - accounts: Default::default(), - storages: BTreeMap::from([( - address, - HashedStorage { wiped: false, storage: post_state_storage.clone() }, - )]), - }; + let wiped = false; + let mut hashed_storage = HashedStorage::new(wiped); + for (slot, value) in post_state_storage.iter() { + hashed_storage.insert_non_zero_valued_storage(*slot, *value); + } + + let mut hashed_post_state = HashedPostState::default(); + hashed_post_state.insert_hashed_storage(address, hashed_storage); + hashed_post_state.sort(); let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, &post_state); + let factory = HashedPostStateCursorFactory::new(&tx, &hashed_post_state); let expected = [(address, db_storage.into_iter().chain(post_state_storage.into_iter()).collect())] .into_iter(); @@ -764,8 +894,7 @@ mod tests { fn zero_value_storage_entries_are_discarded() { let address = H256::random(); let db_storage = - BTreeMap::from_iter((0..10).map(|key| (H256::from_low_u64_be(key), U256::from(key)))); - // every even number is changed to zero value + BTreeMap::from_iter((0..10).map(|key| (H256::from_low_u64_be(key), U256::from(key)))); // every even number is changed to zero value let post_state_storage = BTreeMap::from_iter((0..10).map(|key| { (H256::from_low_u64_be(key), if key % 2 == 0 { U256::ZERO } else { U256::from(key) }) })); @@ -780,15 +909,22 @@ mod tests { }) .unwrap(); - let post_state = HashedPostState { - accounts: Default::default(), - storages: BTreeMap::from([( - address, - HashedStorage { wiped: false, storage: post_state_storage.clone() }, - )]), - }; + let wiped = false; + let mut hashed_storage = HashedStorage::new(wiped); + for (slot, value) in post_state_storage.iter() { + if *value == U256::ZERO { + hashed_storage.insert_zero_valued_slot(*slot); + } else { + hashed_storage.insert_non_zero_valued_storage(*slot, *value); + } + } + + let mut hashed_post_state = HashedPostState::default(); + hashed_post_state.insert_hashed_storage(address, hashed_storage); + hashed_post_state.sort(); + let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, &post_state); + let factory = HashedPostStateCursorFactory::new(&tx, &hashed_post_state); let expected = [( address, post_state_storage.into_iter().filter(|(_, value)| *value > U256::ZERO).collect(), @@ -801,9 +937,9 @@ mod tests { fn wiped_storage_is_discarded() { let address = H256::random(); let db_storage = - BTreeMap::from_iter((0..10).map(|key| (H256::from_low_u64_be(key), U256::from(key)))); + BTreeMap::from_iter((1..11).map(|key| (H256::from_low_u64_be(key), U256::from(key)))); let post_state_storage = - BTreeMap::from_iter((10..20).map(|key| (H256::from_low_u64_be(key), U256::from(key)))); + BTreeMap::from_iter((11..21).map(|key| (H256::from_low_u64_be(key), U256::from(key)))); let db = create_test_rw_db(); db.update(|tx| { @@ -815,16 +951,18 @@ mod tests { }) .unwrap(); - let post_state = HashedPostState { - accounts: Default::default(), - storages: BTreeMap::from([( - address, - HashedStorage { wiped: 
true, storage: post_state_storage.clone() }, - )]), - }; + let wiped = true; + let mut hashed_storage = HashedStorage::new(wiped); + for (slot, value) in post_state_storage.iter() { + hashed_storage.insert_non_zero_valued_storage(*slot, *value); + } + + let mut hashed_post_state = HashedPostState::default(); + hashed_post_state.insert_hashed_storage(address, hashed_storage); + hashed_post_state.sort(); let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, &post_state); + let factory = HashedPostStateCursorFactory::new(&tx, &hashed_post_state); let expected = [(address, post_state_storage)].into_iter(); assert_storage_cursor_order(&factory, expected); } @@ -848,16 +986,18 @@ mod tests { }) .unwrap(); - let post_state = HashedPostState { - accounts: Default::default(), - storages: BTreeMap::from([( - address, - HashedStorage { wiped: false, storage: storage.clone() }, - )]), - }; + let wiped = false; + let mut hashed_storage = HashedStorage::new(wiped); + for (slot, value) in storage.iter() { + hashed_storage.insert_non_zero_valued_storage(*slot, *value); + } + + let mut hashed_post_state = HashedPostState::default(); + hashed_post_state.insert_hashed_storage(address, hashed_storage); + hashed_post_state.sort(); let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, &post_state); + let factory = HashedPostStateCursorFactory::new(&tx, &hashed_post_state); let expected = [(address, storage)].into_iter(); assert_storage_cursor_order(&factory, expected); } @@ -881,14 +1021,21 @@ mod tests { }) .unwrap(); - let post_state = HashedPostState { - accounts: Default::default(), - storages: BTreeMap::from_iter(post_state_storages.iter().map( - |(address, (wiped, storage))| { - (*address, HashedStorage { wiped: *wiped, storage: storage.clone() }) - }, - )), - }; + let mut hashed_post_state = HashedPostState::default(); + + for (address, (wiped, storage)) in &post_state_storages { + let mut hashed_storage = HashedStorage::new(*wiped); + for (slot, value) in storage { + if *value == U256::ZERO { + hashed_storage.insert_zero_valued_slot(*slot); + } else { + hashed_storage.insert_non_zero_valued_storage(*slot, *value); + } + } + hashed_post_state.insert_hashed_storage(*address, hashed_storage); + } + + hashed_post_state.sort(); let mut expected = db_storages; // overwrite or remove accounts from the expected result @@ -901,7 +1048,7 @@ mod tests { } let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, &post_state); + let factory = HashedPostStateCursorFactory::new(&tx, &hashed_post_state); assert_storage_cursor_order(&factory, expected.into_iter()); }); } From 4e2712bef273b9a2cd6278b5a09ad00436b456e6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 5 Jul 2023 18:34:56 +0200 Subject: [PATCH 074/722] refactor: extract exceeds pipeline threshold (#3605) --- crates/consensus/beacon/src/engine/mod.rs | 121 ++++++++++++++-------- 1 file changed, 76 insertions(+), 45 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 26fc189e3674..80c00714129e 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -402,10 +402,76 @@ where /// Returns true if the distance from the local tip to the block is greater than the configured /// threshold + #[inline] fn exceeds_pipeline_run_threshold(&self, local_tip: u64, block: u64) -> bool { block > local_tip && block - local_tip > self.pipeline_run_threshold } + /// Returns the 
finalized hash to sync to if the distance from the local tip to the block is
+    /// greater than the configured threshold and we're not synced to the finalized block yet
+    /// (if we've seen that block already).
+    ///
+    /// If this is invoked after a new block has been downloaded, the downloaded block could be the
+    /// (missing) finalized block.
+    fn can_pipeline_sync_to_finalized(
+        &self,
+        canonical_tip_num: u64,
+        target_block_number: u64,
+        downloaded_block: Option<BlockNumHash>,
+    ) -> Option<H256> {
+        let sync_target_state = self.forkchoice_state_tracker.sync_target_state();
+
+        // check if the distance exceeds the threshold for pipeline sync
+        let mut exceeds_pipeline_run_threshold =
+            self.exceeds_pipeline_run_threshold(canonical_tip_num, target_block_number);
+
+        // check if the downloaded block is the tracked finalized block
+        if let Some(ref buffered_finalized) = sync_target_state
+            .as_ref()
+            .and_then(|state| self.blockchain.buffered_header_by_hash(state.finalized_block_hash))
+        {
+            // if we have buffered the finalized block, we should check how far
+            // we're off
+            exceeds_pipeline_run_threshold =
+                self.exceeds_pipeline_run_threshold(canonical_tip_num, buffered_finalized.number);
+        }
+
+        // If this is invoked after we downloaded a block we can check if this block is the
+        // finalized block
+        if let (Some(downloaded_block), Some(ref state)) = (downloaded_block, sync_target_state) {
+            if downloaded_block.hash == state.finalized_block_hash {
+                // we downloaded the finalized block
+                exceeds_pipeline_run_threshold =
+                    self.exceeds_pipeline_run_threshold(canonical_tip_num, downloaded_block.number);
+            }
+        }
+
+        // if the number of missing blocks is greater than the max, run the
+        // pipeline
+        if exceeds_pipeline_run_threshold {
+            if let Some(state) = sync_target_state {
+                // if we have already canonicalized the finalized block, we should
+                // skip the pipeline run
+                match self.blockchain.header_by_hash_or_number(state.finalized_block_hash.into()) {
+                    Err(err) => {
+                        warn!(target: "consensus::engine", ?err, "Failed to get finalized block header");
+                    }
+                    Ok(None) => {
+                        // we don't have the block yet and the distance exceeds the allowed
+                        // threshold
+                        return Some(state.finalized_block_hash)
+                    }
+                    Ok(Some(_)) => {
+                        // we're fully synced to the finalized block
+                        // but we want to continue downloading the missing parent
+                    }
+                }
+            }
+        }
+
+        None
+    }
+
     /// If validation fails, the response MUST contain the latest valid hash:
     ///
     /// - The block hash of the ancestor of the invalid payload satisfying the following two
@@ -1101,52 +1167,17 @@ where
     ) {
         // compare the missing parent with the canonical tip
         let canonical_tip_num = self.blockchain.canonical_tip().number;
-        let sync_target_state = self.forkchoice_state_tracker.sync_target_state();
-
-        trace!(target: "consensus::engine", ?downloaded_block, ?missing_parent, tip=?canonical_tip_num, "Handling disconnected block");
-        let mut exceeds_pipeline_run_threshold =
-            self.exceeds_pipeline_run_threshold(canonical_tip_num, missing_parent.number);
-
-        // check if the downloaded block is the tracked safe block
-        if let Some(ref state) = sync_target_state {
-            if downloaded_block.hash == state.finalized_block_hash {
-                // we downloaded the finalized block
-                exceeds_pipeline_run_threshold =
-                    self.exceeds_pipeline_run_threshold(canonical_tip_num, downloaded_block.number);
-            } else if let Some(buffered_finalized) =
-                self.blockchain.buffered_header_by_hash(state.finalized_block_hash)
-            {
-                // if we have buffered the finalized block, we should check how
far - // we're off - exceeds_pipeline_run_threshold = self - .exceeds_pipeline_run_threshold(canonical_tip_num, buffered_finalized.number); - } - } - - // if the number of missing blocks is greater than the max, run the - // pipeline - if exceeds_pipeline_run_threshold { - if let Some(state) = sync_target_state { - // if we have already canonicalized the finalized block, we should - // skip the pipeline run - match self.blockchain.header_by_hash_or_number(state.finalized_block_hash.into()) { - Err(err) => { - warn!(target: "consensus::engine", ?err, "Failed to get finalized block header"); - } - Ok(None) => { - // we don't have the block yet and the distance exceeds the allowed - // threshold - self.sync.set_pipeline_sync_target(state.safe_block_hash); - // we can exit early here because the pipeline will take care of this - return - } - Ok(Some(_)) => { - // we're fully synced to the finalized block - // but we want to continue downloading the missing parent - } - } - } + if let Some(target) = self.can_pipeline_sync_to_finalized( + canonical_tip_num, + missing_parent.number, + Some(downloaded_block), + ) { + // we don't have the block yet and the distance exceeds the allowed + // threshold + self.sync.set_pipeline_sync_target(target); + // we can exit early here because the pipeline will take care of syncing + return } // continue downloading the missing parent From 64dec6e36bf375a963aba5ca064de7642e452c43 Mon Sep 17 00:00:00 2001 From: Bjerg Date: Wed, 5 Jul 2023 22:41:21 +0200 Subject: [PATCH 075/722] ci: remove broken book workflow (#3613) --- .github/workflows/book.yml | 21 +-------------------- 1 file changed, 1 insertion(+), 20 deletions(-) diff --git a/.github/workflows/book.yml b/.github/workflows/book.yml index 3e29f13a5a01..684acb8e966c 100644 --- a/.github/workflows/book.yml +++ b/.github/workflows/book.yml @@ -105,30 +105,11 @@ jobs: retention-days: 1 if-no-files-found: error - up-to-date: - runs-on: ubuntu-latest - name: up-to-date - - steps: - - uses: actions/checkout@v3 - - - name: Try to update the book cli documentation - run: make update-book-cli BUILD_PATH=reth/target - - - name: Check if the book cli documentation is up to date - run: | - if [[ -n $(git status --porcelain) ]]; then - echo "Error: Documentation is not up to date. Please run \`make update-book-cli\`." - exit 1 - else - echo "The documentation is up to date." - fi - deploy: # Only deploy if a push to main if: github.ref_name == 'main' && github.event_name == 'push' runs-on: ubuntu-latest - needs: [test, lint, build, up-to-date] + needs: [test, lint, build] # Grant GITHUB_TOKEN the permissions required to make a Pages deployment permissions: From 81bd01ec60e095741d7844b1b7cc2b17054ec009 Mon Sep 17 00:00:00 2001 From: Bjerg Date: Wed, 5 Jul 2023 23:02:34 +0200 Subject: [PATCH 076/722] docs: note that only wsl2 works (#3609) --- book/installation/source.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/book/installation/source.md b/book/installation/source.md index a076e0003c88..a4448482d421 100644 --- a/book/installation/source.md +++ b/book/installation/source.md @@ -1,6 +1,10 @@ # Build from Source -You can build Reth on Linux, macOS, and Windows WSL. +You can build Reth on Linux, macOS, and Windows WSL2. + +> **Note** +> +> Reth does **not** work on Windows WSL1. ## Dependencies @@ -127,4 +131,4 @@ binary](../installation/binaries.md). If compilation fails with `error: linking with cc failed: exit code: 1`, try running `cargo clean`. 
-_(Thanks to Sigma Prime for this section from [their Lighthouse book](https://lighthouse-book.sigmaprime.io/installation.html)!)_ \ No newline at end of file +_(Thanks to Sigma Prime for this section from [their Lighthouse book](https://lighthouse-book.sigmaprime.io/installation.html)!)_ From 428a6dc2f63ac7f2798c0cb56cf099108d7cbd00 Mon Sep 17 00:00:00 2001 From: Bjerg Date: Wed, 5 Jul 2023 23:02:46 +0200 Subject: [PATCH 077/722] fix: correct types in `libmdbx-rs` for windows (#3608) --- crates/storage/libmdbx-rs/src/cursor.rs | 6 +++--- crates/storage/libmdbx-rs/src/database.rs | 4 ++-- crates/storage/libmdbx-rs/src/flags.rs | 5 ++--- crates/storage/libmdbx-rs/src/transaction.rs | 5 ++++- 4 files changed, 11 insertions(+), 9 deletions(-) diff --git a/crates/storage/libmdbx-rs/src/cursor.rs b/crates/storage/libmdbx-rs/src/cursor.rs index 5aa4946d5681..4bfb2f25681c 100644 --- a/crates/storage/libmdbx-rs/src/cursor.rs +++ b/crates/storage/libmdbx-rs/src/cursor.rs @@ -11,7 +11,7 @@ use ffi::{ MDBX_NEXT_MULTIPLE, MDBX_NEXT_NODUP, MDBX_PREV, MDBX_PREV_DUP, MDBX_PREV_MULTIPLE, MDBX_PREV_NODUP, MDBX_SET, MDBX_SET_KEY, MDBX_SET_LOWERBOUND, MDBX_SET_RANGE, }; -use libc::{c_uint, c_void}; +use libc::c_void; use parking_lot::Mutex; use std::{borrow::Cow, fmt, marker::PhantomData, mem, ptr, rc::Rc, result}; @@ -709,7 +709,7 @@ where cursor: &'cur mut Cursor<'txn, K>, /// The first operation to perform when the consumer calls Iter.next(). - op: c_uint, + op: MDBX_cursor_op, _marker: PhantomData, }, @@ -722,7 +722,7 @@ where Value: TableObject<'txn>, { /// Creates a new iterator backed by the given cursor. - fn new(cursor: &'cur mut Cursor<'txn, K>, op: c_uint) -> Self { + fn new(cursor: &'cur mut Cursor<'txn, K>, op: MDBX_cursor_op) -> Self { IterDup::Ok { cursor, op, _marker: PhantomData } } } diff --git a/crates/storage/libmdbx-rs/src/database.rs b/crates/storage/libmdbx-rs/src/database.rs index fb198a4c48ae..a64e0382e46f 100644 --- a/crates/storage/libmdbx-rs/src/database.rs +++ b/crates/storage/libmdbx-rs/src/database.rs @@ -4,7 +4,7 @@ use crate::{ transaction::{txn_execute, TransactionKind}, Transaction, }; -use libc::c_uint; +use ffi::MDBX_db_flags_t; use std::{ffi::CString, marker::PhantomData, ptr}; /// A handle to an individual database in an environment. @@ -24,7 +24,7 @@ impl<'txn> Database<'txn> { pub(crate) fn new<'env, K: TransactionKind, E: EnvironmentKind>( txn: &'txn Transaction<'env, K, E>, name: Option<&str>, - flags: c_uint, + flags: MDBX_db_flags_t, ) -> Result { let c_name = name.map(|n| CString::new(n).unwrap()); let name_ptr = if let Some(c_name) = &c_name { c_name.as_ptr() } else { ptr::null() }; diff --git a/crates/storage/libmdbx-rs/src/flags.rs b/crates/storage/libmdbx-rs/src/flags.rs index f0cefd859f59..ac11b03e8bc8 100644 --- a/crates/storage/libmdbx-rs/src/flags.rs +++ b/crates/storage/libmdbx-rs/src/flags.rs @@ -1,6 +1,5 @@ use bitflags::bitflags; use ffi::*; -use libc::c_uint; /// MDBX sync mode #[derive(Clone, Copy, Debug)] @@ -188,7 +187,7 @@ impl EnvironmentFlags { bitflags! { #[doc="Database options."] #[derive(Default)] - pub struct DatabaseFlags: c_uint { + pub struct DatabaseFlags: MDBX_env_flags_t { const REVERSE_KEY = MDBX_REVERSEKEY; const DUP_SORT = MDBX_DUPSORT; const INTEGER_KEY = MDBX_INTEGERKEY; @@ -203,7 +202,7 @@ bitflags! { bitflags! 
{ #[doc="Write options."] #[derive(Default)] - pub struct WriteFlags: c_uint { + pub struct WriteFlags: MDBX_env_flags_t { const UPSERT = MDBX_UPSERT; const NO_OVERWRITE = MDBX_NOOVERWRITE; const NO_DUP_DATA = MDBX_NODUPDATA; diff --git a/crates/storage/libmdbx-rs/src/transaction.rs b/crates/storage/libmdbx-rs/src/transaction.rs index 210f8f15fcfa..df391e94e0c3 100644 --- a/crates/storage/libmdbx-rs/src/transaction.rs +++ b/crates/storage/libmdbx-rs/src/transaction.rs @@ -193,7 +193,10 @@ where ffi::mdbx_dbi_flags_ex(txn, db.dbi(), &mut flags, ptr::null_mut()) }))?; } - Ok(DatabaseFlags::from_bits_truncate(flags)) + + // The types are not the same on Windows. Great! + #[cfg_attr(not(windows), allow(clippy::useless_conversion))] + Ok(DatabaseFlags::from_bits_truncate(flags.try_into().unwrap())) } /// Retrieves database statistics. From 09fe22f47085e650cbe149af9a678e129df57afd Mon Sep 17 00:00:00 2001 From: Chris Evanko <106608356+cjeva10@users.noreply.github.com> Date: Thu, 6 Jul 2023 07:20:19 -0400 Subject: [PATCH 078/722] feat: add error field to parity transaction traces (#3611) Co-authored-by: Matthias Seitz --- .../revm/revm-inspectors/src/tracing/types.rs | 6 +++--- crates/rpc/rpc-types/src/eth/trace/parity.rs | 21 +++---------------- crates/rpc/rpc/src/trace.rs | 1 + 3 files changed, 7 insertions(+), 21 deletions(-) diff --git a/crates/revm/revm-inspectors/src/tracing/types.rs b/crates/revm/revm-inspectors/src/tracing/types.rs index 2e48479fd7e3..34c990abdd55 100644 --- a/crates/revm/revm-inspectors/src/tracing/types.rs +++ b/crates/revm/revm-inspectors/src/tracing/types.rs @@ -6,8 +6,7 @@ use reth_rpc_types::trace::{ geth::{CallFrame, CallLogFrame, GethDefaultTracingOptions, StructLog}, parity::{ Action, ActionType, CallAction, CallOutput, CallType, ChangedType, CreateAction, - CreateOutput, Delta, SelfdestructAction, StateDiff, TraceOutput, TraceResult, - TransactionTrace, + CreateOutput, Delta, SelfdestructAction, StateDiff, TraceOutput, TransactionTrace, }, }; use revm::interpreter::{ @@ -321,9 +320,10 @@ impl CallTraceNode { /// Converts this node into a parity `TransactionTrace` pub(crate) fn parity_transaction_trace(&self, trace_address: Vec) -> TransactionTrace { let action = self.parity_action(); - let output = TraceResult::parity_success(self.parity_trace_output()); + let output = self.parity_trace_output(); TransactionTrace { action, + error: None, result: Some(output), trace_address, subtraces: self.children.len(), diff --git a/crates/rpc/rpc-types/src/eth/trace/parity.rs b/crates/rpc/rpc-types/src/eth/trace/parity.rs index 35ec518a48f8..e3bb31b4fdcd 100644 --- a/crates/rpc/rpc-types/src/eth/trace/parity.rs +++ b/crates/rpc/rpc-types/src/eth/trace/parity.rs @@ -10,23 +10,6 @@ use std::{ ops::{Deref, DerefMut}, }; -/// Result type for parity style transaction trace -pub type TraceResult = crate::trace::common::TraceResult; - -// === impl TraceResult === - -impl TraceResult { - /// Wraps the result type in a [TraceResult::Success] variant - pub fn parity_success(result: TraceOutput) -> Self { - TraceResult::Success { result } - } - - /// Wraps the result type in a [TraceResult::Error] variant - pub fn parity_error(error: String) -> Self { - TraceResult::Error { error } - } -} - /// Different Trace diagnostic targets. 
#[derive(Clone, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -241,7 +224,9 @@ pub struct TransactionTrace { #[serde(flatten)] pub action: Action, #[serde(flatten)] - pub result: Option, + pub error: Option, + #[serde(flatten)] + pub result: Option, pub subtraces: usize, pub trace_address: Vec, } diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 3158f6d0b2d6..9a19b9282b59 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -635,6 +635,7 @@ fn reward_trace(header: &SealedHeader, reward: RewardAction) -> LocalizedTransac trace_address: vec![], subtraces: 0, action: Action::Reward(reward), + error: None, result: None, }, } From 596d32686cad08ec872d677702521483de83573c Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 6 Jul 2023 07:33:14 -0400 Subject: [PATCH 079/722] feat: download block ranges (#3416) Co-authored-by: Matthias Seitz --- crates/consensus/beacon/src/engine/mod.rs | 28 +- crates/consensus/beacon/src/engine/sync.rs | 355 ++++++++++- crates/interfaces/src/p2p/full_block.rs | 599 ++++++++++++++++-- .../interfaces/src/test_utils/full_block.rs | 123 +++- crates/primitives/src/block.rs | 57 ++ crates/primitives/src/header.rs | 11 +- crates/primitives/src/lib.rs | 4 +- 7 files changed, 1073 insertions(+), 104 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 80c00714129e..4cc52939e683 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -472,6 +472,17 @@ where None } + /// Returns how far the local tip is from the given block. If the local tip is at the same + /// height or its block number is greater than the given block, this returns None. + #[inline] + fn distance_from_local_tip(&self, local_tip: u64, block: u64) -> Option { + if block > local_tip { + Some(block - local_tip) + } else { + None + } + } + /// If validation fails, the response MUST contain the latest valid hash: /// /// - The block hash of the ancestor of the invalid payload satisfying the following two @@ -602,9 +613,10 @@ where // Terminate the sync early if it's reached the maximum user // configured block. if is_valid_response { - // node's fully synced, clear pending requests - self.sync.clear_full_block_requests(); + // node's fully synced, clear active download requests + self.sync.clear_block_download_requests(); + // check if we reached the maximum configured block let tip_number = self.blockchain.canonical_tip().number; if self.sync.has_reached_max_block(tip_number) { return true @@ -1189,7 +1201,15 @@ where // * the missing parent block num >= canonical tip num, but the number of missing blocks is // less than the pipeline threshold // * this case represents a potentially long range of blocks to download and execute - self.sync.download_full_block(missing_parent.hash); + if let Some(distance) = + self.distance_from_local_tip(canonical_tip_num, missing_parent.number) + { + self.sync.download_block_range(missing_parent.hash, distance) + } else { + // This happens when the missing parent is on an outdated + // sidechain + self.sync.download_full_block(missing_parent.hash); + } } /// Attempt to form a new canonical chain based on the current sync target. 
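The hunk above gives `on_disconnected_block` a three-way choice. A self-contained sketch of that dispatch, with a hypothetical `SyncAction` enum standing in for the engine's side effects; the real code additionally consults the tracked finalized block via `can_pipeline_sync_to_finalized` before committing to a pipeline run:

```rust
/// Hypothetical summary of the engine's choices; not a type from the patch.
enum SyncAction {
    /// The gap to the missing parent exceeds the threshold: run the pipeline.
    RunPipeline,
    /// The gap is small: download the whole range of missing full blocks.
    DownloadRange(u64),
    /// The missing parent is at or below the local tip (e.g. an outdated
    /// sidechain): download just that one block.
    DownloadSingleBlock,
}

fn choose_sync_action(local_tip: u64, missing_parent: u64, threshold: u64) -> SyncAction {
    if missing_parent > local_tip && missing_parent - local_tip > threshold {
        // mirrors `exceeds_pipeline_run_threshold`
        SyncAction::RunPipeline
    } else if missing_parent > local_tip {
        // mirrors `distance_from_local_tip` returning `Some(distance)`
        SyncAction::DownloadRange(missing_parent - local_tip)
    } else {
        // `distance_from_local_tip` returned `None`
        SyncAction::DownloadSingleBlock
    }
}
```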
@@ -1217,7 +1237,7 @@ where self.sync_state_updater.update_sync_state(SyncState::Idle); // clear any active block requests - self.sync.clear_full_block_requests(); + self.sync.clear_block_download_requests(); } Err(err) => { // if we failed to make the FCU's head canonical, because we don't have that diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index 3e9aa91b68f2..73bb85dbb93d 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -5,14 +5,15 @@ use futures::FutureExt; use reth_db::database::Database; use reth_interfaces::p2p::{ bodies::client::BodiesClient, - full_block::{FetchFullBlockFuture, FullBlockClient}, + full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, headers::client::HeadersClient, }; use reth_primitives::{BlockNumber, SealedBlock, H256}; use reth_stages::{ControlFlow, Pipeline, PipelineError, PipelineWithResult}; use reth_tasks::TaskSpawner; use std::{ - collections::VecDeque, + cmp::{Ordering, Reverse}, + collections::BinaryHeap, task::{ready, Context, Poll}, }; use tokio::sync::oneshot; @@ -39,10 +40,13 @@ where pipeline_state: PipelineState, /// Pending target block for the pipeline to sync pending_pipeline_target: Option, - /// In requests in progress. + /// In-flight full block requests in progress. inflight_full_block_requests: Vec>, - /// Buffered events until the manager is polled and the pipeline is idle. - queued_events: VecDeque, + /// In-flight full block _range_ requests in progress. + inflight_block_range_requests: Vec>, + /// Buffered blocks from downloads - this is a min-heap of blocks, using the block number for + /// ordering. This means the blocks will be popped from the heap with ascending block numbers. + range_buffered_blocks: BinaryHeap>, /// If enabled, the pipeline will be triggered continuously, as soon as it becomes idle run_pipeline_continuously: bool, /// Max block after which the consensus engine would terminate the sync. Used for debugging @@ -71,7 +75,8 @@ where pipeline_state: PipelineState::Idle(Some(pipeline)), pending_pipeline_target: None, inflight_full_block_requests: Vec::new(), - queued_events: VecDeque::new(), + inflight_block_range_requests: Vec::new(), + range_buffered_blocks: BinaryHeap::new(), run_pipeline_continuously, max_block, metrics: EngineSyncMetrics::default(), @@ -81,6 +86,7 @@ where /// Sets the metrics for the active downloads fn update_block_download_metrics(&self) { self.metrics.active_block_downloads.set(self.inflight_full_block_requests.len() as f64); + // TODO: full block range metrics } /// Sets the max block value for testing @@ -89,9 +95,10 @@ where self.max_block = Some(block); } - /// Cancels all full block requests that are in progress. - pub(crate) fn clear_full_block_requests(&mut self) { + /// Cancels all download requests that are in progress. + pub(crate) fn clear_block_download_requests(&mut self) { self.inflight_full_block_requests.clear(); + self.inflight_block_range_requests.clear(); self.update_block_download_metrics(); } @@ -127,6 +134,29 @@ where self.inflight_full_block_requests.iter().any(|req| *req.hash() == hash) } + /// Starts requesting a range of blocks from the network, in reverse from the given hash. + /// + /// If the `count` is 1, this will use the `download_full_block` method instead, because it + /// downloads headers and bodies for the block concurrently. 
+ pub(crate) fn download_block_range(&mut self, hash: H256, count: u64) { + if count == 1 { + self.download_full_block(hash); + } else { + trace!( + target: "consensus::engine", + ?hash, + ?count, + "start downloading full block range." + ); + + let request = self.full_block_client.get_full_block_range(hash, count); + self.inflight_block_range_requests.push(request); + } + + // // TODO: need more metrics for block ranges + // self.update_block_download_metrics(); + } + /// Starts requesting a full block from the network. /// /// Returns `true` if the request was started, `false` if there's already a request for the @@ -222,7 +252,7 @@ where // we also clear any pending full block requests because we expect them to be // outdated (included in the range the pipeline is syncing anyway) - self.clear_full_block_requests(); + self.clear_block_download_requests(); Some(EngineSyncEvent::PipelineStarted(target)) } @@ -237,37 +267,63 @@ where return Poll::Ready(event) } - loop { - // drain buffered events first if pipeline is not running - if self.is_pipeline_idle() { - if let Some(event) = self.queued_events.pop_front() { - return Poll::Ready(event) - } + // make sure we poll the pipeline if it's active, and return any ready pipeline events + if !self.is_pipeline_idle() { + // advance the pipeline + if let Poll::Ready(event) = self.poll_pipeline(cx) { + return Poll::Ready(event) + } + } + + // advance all full block requests + for idx in (0..self.inflight_full_block_requests.len()).rev() { + let mut request = self.inflight_full_block_requests.swap_remove(idx); + if let Poll::Ready(block) = request.poll_unpin(cx) { + trace!(target: "consensus::engine", block=?block.num_hash(), "Received single full block, buffering"); + self.range_buffered_blocks.push(Reverse(OrderedSealedBlock(block))); } else { - // advance the pipeline - if let Poll::Ready(event) = self.poll_pipeline(cx) { - return Poll::Ready(event) - } + // still pending + self.inflight_full_block_requests.push(request); } + } - // advance all requests - for idx in (0..self.inflight_full_block_requests.len()).rev() { - let mut request = self.inflight_full_block_requests.swap_remove(idx); - if let Poll::Ready(block) = request.poll_unpin(cx) { - self.queued_events.push_back(EngineSyncEvent::FetchedFullBlock(block)); - } else { - // still pending - self.inflight_full_block_requests.push(request); - } + // advance all full block range requests + for idx in (0..self.inflight_block_range_requests.len()).rev() { + let mut request = self.inflight_block_range_requests.swap_remove(idx); + if let Poll::Ready(blocks) = request.poll_unpin(cx) { + trace!(target: "consensus::engine", len=?blocks.len(), first=?blocks.first().map(|b| b.num_hash()), last=?blocks.last().map(|b| b.num_hash()), "Received full block range, buffering"); + self.range_buffered_blocks + .extend(blocks.into_iter().map(OrderedSealedBlock).map(Reverse)); + } else { + // still pending + self.inflight_block_range_requests.push(request); } + } - self.update_block_download_metrics(); + self.update_block_download_metrics(); - if !self.pipeline_state.is_idle() || self.queued_events.is_empty() { - // can not make any progress - return Poll::Pending - } + // drain an element of the block buffer if there are any + if let Some(block) = self.range_buffered_blocks.pop() { + return Poll::Ready(EngineSyncEvent::FetchedFullBlock(block.0 .0)) } + + Poll::Pending + } +} + +/// A wrapper type around [SealedBlock] that implements the [Ord] trait by block number. 
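The wrapper type introduced next exists so that `range_buffered_blocks` can behave as a min-heap: `BinaryHeap` is a max-heap, and wrapping each entry in `Reverse` flips the ordering so that blocks pop lowest block number first. A tiny, standalone illustration of the trick:

```rust
use std::{cmp::Reverse, collections::BinaryHeap};

fn main() {
    let mut heap = BinaryHeap::new();
    for block_number in [9u64, 3, 7, 1] {
        heap.push(Reverse(block_number));
    }
    // prints 1, 3, 7, 9: ascending block numbers, exactly the order in which
    // the engine wants to insert downloaded blocks
    while let Some(Reverse(block_number)) = heap.pop() {
        println!("{block_number}");
    }
}
```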
+#[derive(Debug, Clone, PartialEq, Eq)] +struct OrderedSealedBlock(SealedBlock); + +impl PartialOrd for OrderedSealedBlock { + fn partial_cmp(&self, other: &Self) -> Option { + self.0.number.partial_cmp(&other.0.number) + } +} + +impl Ord for OrderedSealedBlock { + fn cmp(&self, other: &Self) -> Ordering { + self.0.number.cmp(&other.0.number) } } @@ -318,3 +374,236 @@ impl PipelineState { matches!(self, PipelineState::Idle(_)) } } + +#[cfg(test)] +mod tests { + use super::*; + use assert_matches::assert_matches; + use futures::poll; + use reth_db::{ + mdbx::{Env, WriteMap}, + test_utils::create_test_rw_db, + }; + use reth_interfaces::{p2p::either::EitherDownloader, test_utils::TestFullBlockClient}; + use reth_primitives::{ + stage::StageCheckpoint, BlockBody, ChainSpec, ChainSpecBuilder, SealedHeader, MAINNET, + }; + use reth_provider::{test_utils::TestExecutorFactory, PostState}; + use reth_stages::{test_utils::TestStages, ExecOutput, StageError}; + use reth_tasks::TokioTaskExecutor; + use std::{collections::VecDeque, future::poll_fn, sync::Arc}; + use tokio::sync::watch; + + struct TestPipelineBuilder { + pipeline_exec_outputs: VecDeque>, + executor_results: Vec, + max_block: Option, + } + + impl TestPipelineBuilder { + /// Create a new [TestPipelineBuilder]. + fn new() -> Self { + Self { + pipeline_exec_outputs: VecDeque::new(), + executor_results: Vec::new(), + max_block: None, + } + } + + /// Set the pipeline execution outputs to use for the test consensus engine. + fn with_pipeline_exec_outputs( + mut self, + pipeline_exec_outputs: VecDeque>, + ) -> Self { + self.pipeline_exec_outputs = pipeline_exec_outputs; + self + } + + /// Set the executor results to use for the test consensus engine. + #[allow(dead_code)] + fn with_executor_results(mut self, executor_results: Vec) -> Self { + self.executor_results = executor_results; + self + } + + /// Sets the max block for the pipeline to run. + #[allow(dead_code)] + fn with_max_block(mut self, max_block: BlockNumber) -> Self { + self.max_block = Some(max_block); + self + } + + /// Builds the pipeline. + fn build(self, chain_spec: Arc) -> Pipeline>> { + reth_tracing::init_test_tracing(); + let db = create_test_rw_db(); + + let executor_factory = TestExecutorFactory::new(chain_spec.clone()); + executor_factory.extend(self.executor_results); + + // Setup pipeline + let (tip_tx, _tip_rx) = watch::channel(H256::default()); + let mut pipeline = Pipeline::builder() + .add_stages(TestStages::new(self.pipeline_exec_outputs, Default::default())) + .with_tip_sender(tip_tx); + + if let Some(max_block) = self.max_block { + pipeline = pipeline.with_max_block(max_block); + } + + pipeline.build(db, chain_spec) + } + } + + struct TestSyncControllerBuilder { + max_block: Option, + client: Option, + } + + impl TestSyncControllerBuilder { + /// Create a new [TestSyncControllerBuilder]. + fn new() -> Self { + Self { max_block: None, client: None } + } + + /// Sets the max block for the pipeline to run. + #[allow(dead_code)] + fn with_max_block(mut self, max_block: BlockNumber) -> Self { + self.max_block = Some(max_block); + self + } + + /// Sets the client to use for network operations. + fn with_client(mut self, client: Client) -> Self { + self.client = Some(client); + self + } + + /// Builds the sync controller. 
+ fn build( + self, + pipeline: Pipeline, + ) -> EngineSyncController> + where + DB: Database + 'static, + Client: HeadersClient + BodiesClient + Clone + Unpin + 'static, + { + let client = self + .client + .map(EitherDownloader::Left) + .unwrap_or_else(|| EitherDownloader::Right(TestFullBlockClient::default())); + + EngineSyncController::new( + pipeline, + client, + Box::::default(), + // run_pipeline_continuously: false here until we want to test this + false, + self.max_block, + ) + } + } + + #[tokio::test] + async fn pipeline_started_after_setting_target() { + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(MAINNET.genesis.clone()) + .paris_activated() + .build(), + ); + + let client = TestFullBlockClient::default(); + let mut header = SealedHeader::default(); + let body = BlockBody::default(); + client.insert(header.clone(), body.clone()); + for _ in 0..10 { + header.parent_hash = header.hash_slow(); + header.number += 1; + header = header.header.seal_slow(); + client.insert(header.clone(), body.clone()); + } + + // force the pipeline to be "done" after 5 blocks + let pipeline = TestPipelineBuilder::new() + .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { + checkpoint: StageCheckpoint::new(5), + done: true, + })])) + .build(chain_spec); + + let mut sync_controller = + TestSyncControllerBuilder::new().with_client(client.clone()).build(pipeline); + + let tip = client.highest_block().expect("there should be blocks here"); + sync_controller.set_pipeline_sync_target(tip.hash); + + let sync_future = poll_fn(|cx| sync_controller.poll(cx)); + let next_event = poll!(sync_future); + + // can assert that the first event here is PipelineStarted because we set the sync target, + // and we should get Ready because the pipeline should be spawned immediately + assert_matches!(next_event, Poll::Ready(EngineSyncEvent::PipelineStarted(Some(target))) => { + assert_eq!(target, tip.hash); + }); + + // the next event should be the pipeline finishing in a good state + let sync_future = poll_fn(|cx| sync_controller.poll(cx)); + let next_ready = sync_future.await; + assert_matches!(next_ready, EngineSyncEvent::PipelineFinished { result, reached_max_block } => { + assert_matches!(result, Ok(control_flow) => assert_eq!(control_flow, ControlFlow::Continue { block_number: 5 })); + // no max block configured + assert!(!reached_max_block); + }); + } + + #[tokio::test] + async fn controller_sends_range_request() { + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(MAINNET.genesis.clone()) + .paris_activated() + .build(), + ); + + let client = TestFullBlockClient::default(); + let mut header = SealedHeader::default(); + let body = BlockBody::default(); + for _ in 0..10 { + header.parent_hash = header.hash_slow(); + header.number += 1; + header = header.header.seal_slow(); + client.insert(header.clone(), body.clone()); + } + + // set up a pipeline + let pipeline = TestPipelineBuilder::new().build(chain_spec); + + let mut sync_controller = + TestSyncControllerBuilder::new().with_client(client.clone()).build(pipeline); + + let tip = client.highest_block().expect("there should be blocks here"); + + // call the download range method + sync_controller.download_block_range(tip.hash, tip.number); + + // ensure we have one in flight range request + assert_eq!(sync_controller.inflight_block_range_requests.len(), 1); + + // ensure the range request is made correctly + let first_req = 
sync_controller.inflight_block_range_requests.first().unwrap(); + assert_eq!(first_req.start_hash(), tip.hash); + assert_eq!(first_req.count(), tip.number); + + // ensure they are in ascending order + for num in 1..=10 { + let sync_future = poll_fn(|cx| sync_controller.poll(cx)); + let next_ready = sync_future.await; + assert_matches!(next_ready, EngineSyncEvent::FetchedFullBlock(block) => { + assert_eq!(block.number, num); + }); + } + } +} diff --git a/crates/interfaces/src/p2p/full_block.rs b/crates/interfaces/src/p2p/full_block.rs index 8da7f456f736..6cc4e4b61bbb 100644 --- a/crates/interfaces/src/p2p/full_block.rs +++ b/crates/interfaces/src/p2p/full_block.rs @@ -6,8 +6,13 @@ use crate::{ headers::client::{HeadersClient, SingleHeaderRequest}, }, }; -use reth_primitives::{BlockBody, Header, SealedBlock, SealedHeader, WithPeerId, H256}; +use futures::Stream; +use reth_primitives::{ + BlockBody, Header, HeadersDirection, SealedBlock, SealedHeader, WithPeerId, H256, +}; use std::{ + cmp::Reverse, + collections::{HashMap, VecDeque}, fmt::Debug, future::Future, pin::Pin, @@ -15,6 +20,8 @@ use std::{ }; use tracing::debug; +use super::headers::client::HeadersRequest; + /// A Client that can fetch full blocks from the network. #[derive(Debug, Clone)] pub struct FullBlockClient { @@ -51,9 +58,51 @@ where body: None, } } + + /// Returns a future that fetches [SealedBlock]s for the given hash and count. + /// + /// Note: this future is cancel safe + /// + /// Caution: This does no validation of body (transactions) responses but guarantees that + /// the starting [SealedHeader] matches the requested hash, and that the number of headers and + /// bodies received matches the requested limit. + /// + /// The returned future yields bodies in falling order, i.e. with descending block numbers. + pub fn get_full_block_range( + &self, + hash: H256, + count: u64, + ) -> FetchFullBlockRangeFuture { + let client = self.client.clone(); + + // Optimization: if we only want one block, we don't need to wait for the headers request + // to complete, and can send the block bodies request right away. + let bodies_request = + if count == 1 { None } else { Some(client.get_block_bodies(vec![hash])) }; + + FetchFullBlockRangeFuture { + start_hash: hash, + count, + request: FullBlockRangeRequest { + headers: Some(client.get_headers(HeadersRequest { + start: hash.into(), + limit: count, + direction: HeadersDirection::Falling, + })), + bodies: bodies_request, + }, + client, + headers: None, + pending_headers: VecDeque::new(), + bodies: HashMap::new(), + } + } } /// A future that downloads a full block from the network. +/// +/// This will attempt to fetch both the header and body for the given block hash at the same time. +/// When both requests succeed, the future will yield the full block. #[must_use = "futures do nothing unless polled"] pub struct FetchFullBlockFuture where @@ -223,6 +272,8 @@ where } } +/// The result of a request for a single header or body. This is yielded by the `FullBlockRequest` +/// future. 
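For orientation, a simplified sketch of the pattern behind such a result type: the request holds two optional sub-futures, and polling yields whichever completes first. The names and generics below are placeholders, not the concrete signatures used in this file:

```rust
use std::{
    future::Future,
    pin::Pin,
    task::{Context, Poll},
};

/// Simplified stand-in for the header/body result enums in this file.
enum EitherResponse<H, B> {
    Header(H),
    Body(B),
}

/// Simplified stand-in for the request types in this file.
struct EitherRequest<HF, BF> {
    header: Option<HF>,
    body: Option<BF>,
}

impl<HF: Future + Unpin, BF: Future + Unpin> EitherRequest<HF, BF> {
    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<EitherResponse<HF::Output, BF::Output>> {
        if let Some(fut) = self.header.as_mut() {
            if let Poll::Ready(res) = Pin::new(fut).poll(cx) {
                // drop the completed sub-future so it is never polled again
                self.header = None;
                return Poll::Ready(EitherResponse::Header(res))
            }
        }
        if let Some(fut) = self.body.as_mut() {
            if let Poll::Ready(res) = Pin::new(fut).poll(cx) {
                self.body = None;
                return Poll::Ready(EitherResponse::Body(res))
            }
        }
        Poll::Pending
    }
}
```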
enum ResponseResult { Header(PeerRequestResult>), Body(PeerRequestResult>), @@ -247,18 +298,18 @@ fn ensure_valid_body_response( header: &SealedHeader, block: &BlockBody, ) -> Result<(), ConsensusError> { - let ommers_hash = reth_primitives::proofs::calculate_ommers_root(&block.ommers); - if header.ommers_hash != ommers_hash { + let body_roots = block.calculate_roots(); + + if header.ommers_hash != body_roots.ommers_hash { return Err(ConsensusError::BodyOmmersHashDiff { - got: ommers_hash, + got: body_roots.ommers_hash, expected: header.ommers_hash, }) } - let transaction_root = reth_primitives::proofs::calculate_transaction_root(&block.transactions); - if header.transactions_root != transaction_root { + if header.transactions_root != body_roots.tx_root { return Err(ConsensusError::BodyTransactionRootDiff { - got: transaction_root, + got: body_roots.tx_root, expected: header.transactions_root, }) } @@ -282,82 +333,393 @@ fn ensure_valid_body_response( Ok(()) } -#[cfg(test)] -mod tests { - use super::*; - use crate::p2p::{ - download::DownloadClient, headers::client::HeadersRequest, priority::Priority, - }; - use parking_lot::Mutex; - use reth_primitives::{BlockHashOrNumber, PeerId, WithPeerId}; - use std::{collections::HashMap, sync::Arc}; - - #[derive(Clone, Default, Debug)] - struct TestSingleFullBlockClient { - headers: Arc>>, - bodies: Arc>>, - } - - impl TestSingleFullBlockClient { - fn insert(&self, header: SealedHeader, body: BlockBody) { - let hash = header.hash(); - let header = header.unseal(); - self.headers.lock().insert(hash, header); - self.bodies.lock().insert(hash, body); +/// A future that downloads a range of full blocks from the network. +/// +/// This first fetches the headers for the given range using the inner `Client`. Once the request +/// is complete, it will fetch the bodies for the headers it received. +/// +/// Once the bodies request completes, the [SealedBlock]s will be assembled and the future will +/// yield the full block range. +/// +/// The full block range will be returned with falling block numbers, i.e. in descending order. +/// +/// NOTE: this assumes that bodies responses are returned by the client in the same order as the +/// hash array used to request them. +#[must_use = "futures do nothing unless polled"] +pub struct FetchFullBlockRangeFuture +where + Client: BodiesClient + HeadersClient, +{ + /// The client used to fetch headers and bodies. + client: Client, + /// The block hash to start fetching from (inclusive). + start_hash: H256, + /// How many blocks to fetch: `len([start_hash, ..]) == count` + count: u64, + /// Requests for headers and bodies that are in progress. + request: FullBlockRangeRequest, + /// Fetched headers. + headers: Option>, + /// The next headers to request bodies for. This is drained as responses are received. + pending_headers: VecDeque, + /// The bodies that have been received so far. + bodies: HashMap, +} + +impl FetchFullBlockRangeFuture +where + Client: BodiesClient + HeadersClient, +{ + /// Returns the block hashes for the given range, if they are available. + pub fn range_block_hashes(&self) -> Option> { + self.headers.as_ref().map(|h| h.iter().map(|h| h.hash()).collect::>()) + } + + /// Returns whether or not the bodies map is fully populated with requested headers and bodies. + fn is_bodies_complete(&self) -> bool { + self.bodies.len() == self.count as usize + } + + /// Inserts a block body, matching it with the `next_header`. 
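The `insert_body` helper below pairs each received body with the oldest header that is still awaiting one, relying on bodies arriving in the same order as the requested hashes. A runnable miniature of that FIFO pairing, with string stand-ins for headers and bodies:

```rust
use std::collections::{HashMap, VecDeque};

fn main() {
    // headers that still need bodies, kept in request order
    let mut pending: VecDeque<&str> = VecDeque::from(["header_a", "header_b"]);
    let mut bodies: HashMap<&str, &str> = HashMap::new();

    // responses are assumed to arrive in the same order as the request
    for body in ["body_a", "body_b"] {
        if let Some(header) = pending.pop_front() {
            bodies.insert(header, body);
        }
    }

    assert_eq!(bodies["header_a"], "body_a");
    assert_eq!(bodies["header_b"], "body_b");
}
```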
+ fn insert_body(&mut self, body_response: BodyResponse) { + if let Some(header) = self.pending_headers.pop_front() { + self.bodies.insert(header, body_response); } } - impl DownloadClient for TestSingleFullBlockClient { - fn report_bad_message(&self, _peer_id: PeerId) {} + /// Inserts multiple block bodies. + fn insert_bodies(&mut self, bodies: Vec) { + for body in bodies { + self.insert_body(body); + } + } + + /// Returns the remaining hashes for the bodies request, based on the headers that still exist + /// in the `root_map`. + fn remaining_bodies_hashes(&self) -> Vec { + self.pending_headers.iter().map(|h| h.hash()).collect::>() + } + + /// Returns the [SealedBlock]s if the request is complete and valid. + /// + /// The request is complete if the number of blocks requested is equal to the number of blocks + /// received. The request is valid if the returned bodies match the roots in the headers. + /// + /// These are returned in falling order starting with the requested `hash`, i.e. with + /// descending block numbers. + fn take_blocks(&mut self) -> Option> { + if !self.is_bodies_complete() { + // not done with bodies yet + return None + } + + let headers = self.headers.take()?; + let mut needs_retry = false; + let mut response = Vec::new(); + + for header in &headers { + if let Some(body_resp) = self.bodies.remove(header) { + // validate body w.r.t. the hashes in the header, only inserting into the response + let body = match body_resp { + BodyResponse::Validated(body) => body, + BodyResponse::PendingValidation(resp) => { + // ensure the block is valid, else retry + if let Err(err) = ensure_valid_body_response(header, resp.data()) { + debug!(target: "downloaders", ?err, hash=?header.hash, "Received wrong body in range response"); + self.client.report_bad_message(resp.peer_id()); + + // get body that doesn't match, put back into vecdeque, and just retry + self.pending_headers.push_back(header.clone()); + needs_retry = true; + } + + resp.into_data() + } + }; + + response.push(SealedBlock::new(header.clone(), body)); + } + } + + if needs_retry { + // put response hashes back into bodies map since we aren't returning them as a + // response + for block in response { + let (header, body) = block.split_header_body(); + self.bodies.insert(header, BodyResponse::Validated(body)); + } - fn num_connected_peers(&self) -> usize { - 1 + // put headers back since they were `take`n before + self.headers = Some(headers); + + // create response for failing bodies + let hashes = self.remaining_bodies_hashes(); + self.request.bodies = Some(self.client.get_block_bodies(hashes)); + return None } + + Some(response) + } + + /// Returns whether or not a bodies request has been started, returning false if there is no + /// pending request. + fn has_bodies_request_started(&self) -> bool { + self.request.bodies.is_some() + } + + /// Returns the start hash for the request + pub fn start_hash(&self) -> H256 { + self.start_hash + } + + /// Returns the block count for the request + pub fn count(&self) -> u64 { + self.count } +} + +impl Future for FetchFullBlockRangeFuture +where + Client: BodiesClient + HeadersClient + Unpin + 'static, +{ + type Output = Vec; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.get_mut(); + + loop { + match ready!(this.request.poll(cx)) { + // This branch handles headers responses from peers - it first ensures that the + // starting hash and number of headers matches what we requested. 
+ // + // If these don't match, we penalize the peer and retry the request. + // If they do match, we sort the headers by block number and start the request for + // the corresponding block bodies. + // + // The next result that should be yielded by `poll` is the bodies response. + RangeResponseResult::Header(res) => { + match res { + Ok(headers) => { + let (peer, mut headers) = headers + .map(|h| { + h.iter().map(|h| h.clone().seal_slow()).collect::>() + }) + .split(); + + // fill in the response if it's the correct length + if headers.len() == this.count as usize { + // sort headers from highest to lowest block number + headers.sort_unstable_by_key(|h| Reverse(h.number)); + + // check the starting hash + if headers[0].hash() != this.start_hash { + // received bad response + this.client.report_bad_message(peer); + } else { + // get the bodies request so it can be polled later + let hashes = + headers.iter().map(|h| h.hash()).collect::>(); + + // populate the pending headers + this.pending_headers = headers.clone().into(); + + // set the actual request if it hasn't been started yet + if !this.has_bodies_request_started() { + this.request.bodies = + Some(this.client.get_block_bodies(hashes)); + } - impl HeadersClient for TestSingleFullBlockClient { - type Output = futures::future::Ready>>; + // set the headers response + this.headers = Some(headers); + } + } + } + Err(err) => { + debug!(target: "downloaders", %err, ?this.start_hash, "Header range download failed"); + } + } - fn get_headers_with_priority( - &self, - request: HeadersRequest, - _priority: Priority, - ) -> Self::Output { - let headers = self.headers.lock(); - let resp = match request.start { - BlockHashOrNumber::Hash(hash) => headers.get(&hash).cloned(), - BlockHashOrNumber::Number(num) => { - headers.values().find(|h| h.number == num).cloned() + if this.headers.is_none() { + // did not receive a correct response yet, retry + this.request.headers = Some(this.client.get_headers(HeadersRequest { + start: this.start_hash.into(), + limit: this.count, + direction: HeadersDirection::Falling, + })); + } + } + // This branch handles block body responses from peers - it first inserts the + // bodies into the `bodies` map, and then checks if the request is complete. + // + // If the request is not complete, and we need to request more bodies, we send + // a bodies request for the headers we don't yet have bodies for. + RangeResponseResult::Body(res) => { + match res { + Ok(bodies_resp) => { + let (peer, new_bodies) = bodies_resp.split(); + + // first insert the received bodies + this.insert_bodies( + new_bodies + .iter() + .map(|resp| WithPeerId::new(peer, resp.clone())) + .map(BodyResponse::PendingValidation) + .collect::>(), + ); + + if !this.is_bodies_complete() { + // get remaining hashes so we can send the next request + let req_hashes = this.remaining_bodies_hashes(); + + // set a new request + this.request.bodies = Some(this.client.get_block_bodies(req_hashes)) + } + } + Err(err) => { + debug!(target: "downloaders", %err, ?this.start_hash, "Body range download failed"); + } + } + if this.bodies.is_empty() { + // received bad response, re-request headers + // TODO: convert this into two futures, one which is a headers range + // future, and one which is a bodies range future. + // + // The headers range future should yield the bodies range future. + // The bodies range future should not have an Option>, it should + // have a populated Vec from the successful headers range future. 
+ // + // This is optimal because we can not send a bodies request without + // first completing the headers request. This way we can get rid of the + // following `if let Some`. A bodies request should never be sent before + // the headers request completes, so this should always be `Some` anyways. + let hashes = this.remaining_bodies_hashes(); + if !hashes.is_empty() { + this.request.bodies = Some(this.client.get_block_bodies(hashes)); + } + } } } - .map(|h| vec![h]) - .unwrap_or_default(); - futures::future::ready(Ok(WithPeerId::new(PeerId::random(), resp))) + + if let Some(res) = this.take_blocks() { + return Poll::Ready(res) + } } } +} - impl BodiesClient for TestSingleFullBlockClient { - type Output = futures::future::Ready>>; +/// A type that buffers the result of a range request so we can return it as a `Stream`. +struct FullBlockRangeStream +where + Client: BodiesClient + HeadersClient, +{ + /// The inner [FetchFullBlockRangeFuture] that is polled. + inner: FetchFullBlockRangeFuture, + /// The blocks that have been received so far. + /// + /// If this is `None` then the request is still in progress. If the vec is empty, then all of + /// the response values have been consumed. + blocks: Option>, +} - fn get_block_bodies_with_priority( - &self, - hashes: Vec, - _priority: Priority, - ) -> Self::Output { - let bodies = self.bodies.lock(); - let mut all_bodies = Vec::new(); - for hash in hashes { - if let Some(body) = bodies.get(&hash) { - all_bodies.push(body.clone()); - } +impl From> for FullBlockRangeStream +where + Client: BodiesClient + HeadersClient, +{ + fn from(inner: FetchFullBlockRangeFuture) -> Self { + Self { inner, blocks: None } + } +} + +impl Stream for FullBlockRangeStream +where + Client: BodiesClient + HeadersClient + Unpin + 'static, +{ + type Item = SealedBlock; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.get_mut(); + + // If all blocks have been consumed, then return `None`. + if let Some(blocks) = &mut this.blocks { + if blocks.is_empty() { + // Stream is finished + return Poll::Ready(None) + } + + // return the next block if it's ready - the vec should be in ascending order since it + // is reversed right after it is received from the future, so we can just pop() the + // elements to return them from the stream in descending order + return Poll::Ready(blocks.pop()) + } + + // poll the inner future if the blocks are not yet ready + let mut blocks = ready!(Pin::new(&mut this.inner).poll(cx)); + + // the blocks are returned in descending order, reverse the list so we can just pop() the + // vec to yield the next block in the stream + blocks.reverse(); + + // pop the first block from the vec as the first stream element and store the rest + let first_result = blocks.pop(); + + // if the inner future is ready, then we can return the blocks + this.blocks = Some(blocks); + + // return the first block + Poll::Ready(first_result) + } +} + +/// A request for a range of full blocks. Polling this will poll the inner headers and bodies +/// futures until they return responses. It will return either the header or body result, depending +/// on which future successfully returned. 
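The request type below is in effect a two-way select over optional in-flight futures: whichever sub-request completes first is returned, and its slot is cleared so it is not polled again; checking headers first also gives header responses priority when both are ready. A rough standalone analogue of that polling shape (simplified with `Unpin` bounds; the real code pins the `Option` fields via `as_pin_mut`):

```rust
use std::{
    future::Future,
    pin::Pin,
    task::{Context, Poll},
};

/// Which of the two sub-requests finished first.
enum FirstDone<A, B> {
    Headers(A),
    Bodies(B),
}

struct TwoRequests<H, B> {
    headers: Option<H>,
    bodies: Option<B>,
}

impl<H, B> TwoRequests<H, B>
where
    H: Future + Unpin,
    B: Future + Unpin,
{
    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<FirstDone<H::Output, B::Output>> {
        if let Some(fut) = self.headers.as_mut() {
            if let Poll::Ready(out) = Pin::new(fut).poll(cx) {
                // Clear the slot so a completed future is never polled twice.
                self.headers = None;
                return Poll::Ready(FirstDone::Headers(out))
            }
        }
        if let Some(fut) = self.bodies.as_mut() {
            if let Poll::Ready(out) = Pin::new(fut).poll(cx) {
                self.bodies = None;
                return Poll::Ready(FirstDone::Bodies(out))
            }
        }
        Poll::Pending
    }
}
```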
+struct FullBlockRangeRequest +where + Client: BodiesClient + HeadersClient, +{ + headers: Option<::Output>, + bodies: Option<::Output>, +} + +impl FullBlockRangeRequest +where + Client: BodiesClient + HeadersClient, +{ + fn poll(&mut self, cx: &mut Context<'_>) -> Poll { + if let Some(fut) = Pin::new(&mut self.headers).as_pin_mut() { + if let Poll::Ready(res) = fut.poll(cx) { + self.headers = None; + return Poll::Ready(RangeResponseResult::Header(res)) + } + } + + if let Some(fut) = Pin::new(&mut self.bodies).as_pin_mut() { + if let Poll::Ready(res) = fut.poll(cx) { + self.bodies = None; + return Poll::Ready(RangeResponseResult::Body(res)) } - futures::future::ready(Ok(WithPeerId::new(PeerId::random(), all_bodies))) } + + Poll::Pending } +} + +// The result of a request for headers or block bodies. This is yielded by the +// `FullBlockRangeRequest` future. +enum RangeResponseResult { + Header(PeerRequestResult>), + Body(PeerRequestResult>), +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::TestFullBlockClient; + use futures::StreamExt; #[tokio::test] async fn download_single_full_block() { - let client = TestSingleFullBlockClient::default(); + let client = TestFullBlockClient::default(); let header = SealedHeader::default(); let body = BlockBody::default(); client.insert(header.clone(), body.clone()); @@ -366,4 +728,115 @@ mod tests { let received = client.get_full_block(header.hash()).await; assert_eq!(received, SealedBlock::new(header, body)); } + + #[tokio::test] + async fn download_single_full_block_range() { + let client = TestFullBlockClient::default(); + let header = SealedHeader::default(); + let body = BlockBody::default(); + client.insert(header.clone(), body.clone()); + let client = FullBlockClient::new(client); + + let received = client.get_full_block_range(header.hash(), 1).await; + let received = received.first().expect("response should include a block"); + assert_eq!(*received, SealedBlock::new(header, body)); + } + + #[tokio::test] + async fn download_full_block_range() { + let client = TestFullBlockClient::default(); + let mut header = SealedHeader::default(); + let body = BlockBody::default(); + client.insert(header.clone(), body.clone()); + for _ in 0..10 { + header.parent_hash = header.hash_slow(); + header.number += 1; + header = header.header.seal_slow(); + client.insert(header.clone(), body.clone()); + } + let client = FullBlockClient::new(client); + + let received = client.get_full_block_range(header.hash(), 1).await; + let received = received.first().expect("response should include a block"); + assert_eq!(*received, SealedBlock::new(header.clone(), body)); + + let received = client.get_full_block_range(header.hash(), 10).await; + assert_eq!(received.len(), 10); + for (i, block) in received.iter().enumerate() { + let expected_number = header.number - i as u64; + assert_eq!(block.header.number, expected_number); + } + } + + #[tokio::test] + async fn download_full_block_range_stream() { + let client = TestFullBlockClient::default(); + let mut header = SealedHeader::default(); + let body = BlockBody::default(); + client.insert(header.clone(), body.clone()); + for _ in 0..10 { + header.parent_hash = header.hash_slow(); + header.number += 1; + header = header.header.seal_slow(); + client.insert(header.clone(), body.clone()); + } + let client = FullBlockClient::new(client); + + let future = client.get_full_block_range(header.hash(), 1); + let mut stream = FullBlockRangeStream::from(future); + + // ensure only block in the stream is the one we 
requested + let received = stream.next().await.expect("response should not be None"); + assert_eq!(received, SealedBlock::new(header.clone(), body.clone())); + + // stream should be done now + assert_eq!(stream.next().await, None); + + // there are 11 total blocks + let future = client.get_full_block_range(header.hash(), 11); + let mut stream = FullBlockRangeStream::from(future); + + // check first header + let received = stream.next().await.expect("response should not be None"); + let mut curr_number = received.number; + assert_eq!(received, SealedBlock::new(header.clone(), body.clone())); + + // check the rest of the headers + for _ in 0..10 { + let received = stream.next().await.expect("response should not be None"); + assert_eq!(received.number, curr_number - 1); + curr_number = received.number; + } + + // ensure stream is done + let received = stream.next().await; + assert!(received.is_none()); + } + + #[tokio::test] + async fn download_full_block_range_over_soft_limit() { + // default soft limit is 20, so we will request 50 blocks + let client = TestFullBlockClient::default(); + let mut header = SealedHeader::default(); + let body = BlockBody::default(); + client.insert(header.clone(), body.clone()); + for _ in 0..50 { + header.parent_hash = header.hash_slow(); + header.number += 1; + header = header.header.seal_slow(); + client.insert(header.clone(), body.clone()); + } + let client = FullBlockClient::new(client); + + let received = client.get_full_block_range(header.hash(), 1).await; + let received = received.first().expect("response should include a block"); + assert_eq!(*received, SealedBlock::new(header.clone(), body)); + + let received = client.get_full_block_range(header.hash(), 50).await; + assert_eq!(received.len(), 50); + for (i, block) in received.iter().enumerate() { + let expected_number = header.number - i as u64; + assert_eq!(block.header.number, expected_number); + } + } } diff --git a/crates/interfaces/src/test_utils/full_block.rs b/crates/interfaces/src/test_utils/full_block.rs index b192c6b96d6e..9d1545a0b65f 100644 --- a/crates/interfaces/src/test_utils/full_block.rs +++ b/crates/interfaces/src/test_utils/full_block.rs @@ -5,7 +5,12 @@ use crate::p2p::{ headers::client::{HeadersClient, HeadersRequest}, priority::Priority, }; -use reth_primitives::{BlockBody, Header, PeerId, WithPeerId, H256}; +use parking_lot::Mutex; +use reth_primitives::{ + BlockBody, BlockHashOrNumber, BlockNumHash, Header, HeadersDirection, PeerId, SealedBlock, + SealedHeader, WithPeerId, H256, +}; +use std::{collections::HashMap, sync::Arc}; /// A headers+bodies client implementation that does nothing. #[derive(Debug, Default, Clone)] @@ -43,3 +48,119 @@ impl HeadersClient for NoopFullBlockClient { futures::future::ready(Ok(WithPeerId::new(PeerId::random(), vec![]))) } } + +/// A headers+bodies client that stores the headers and bodies in memory, with an artificial soft +/// bodies response limit that is set to 20 by default. +/// +/// This full block client can be [Clone]d and shared between multiple tasks. +#[derive(Clone, Debug)] +pub struct TestFullBlockClient { + headers: Arc>>, + bodies: Arc>>, + // soft response limit, max number of bodies to respond with + soft_limit: usize, +} + +impl Default for TestFullBlockClient { + fn default() -> Self { + Self { + headers: Arc::new(Mutex::new(HashMap::new())), + bodies: Arc::new(Mutex::new(HashMap::new())), + soft_limit: 20, + } + } +} + +impl TestFullBlockClient { + /// Insert a header and body into the client maps. 
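Before the individual methods below, a hypothetical setup sketch showing how insertion, the soft limit, and `highest_block` fit together; the chain-building loop mirrors the tests earlier in this patch, and the import paths are assumptions based on where these types live in this change:

```rust
// Assumed paths based on this patch: the client lives in reth_interfaces'
// test_utils module and the primitives come from reth_primitives.
use reth_interfaces::test_utils::TestFullBlockClient;
use reth_primitives::{BlockBody, SealedHeader};

fn main() {
    let mut client = TestFullBlockClient::default();
    client.set_soft_limit(5); // bodies responses are now capped at 5 per request

    // Build a short chain: a default header plus three children.
    let mut header = SealedHeader::default();
    let body = BlockBody::default();
    client.insert(header.clone(), body.clone());
    for _ in 0..3 {
        header.parent_hash = header.hash_slow();
        header.number += 1;
        header = header.header.seal_slow();
        client.insert(header.clone(), body.clone());
    }

    assert_eq!(client.highest_block().map(|b| b.header.number), Some(3));
}
```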
+ pub fn insert(&self, header: SealedHeader, body: BlockBody) { + let hash = header.hash(); + let header = header.unseal(); + self.headers.lock().insert(hash, header); + self.bodies.lock().insert(hash, body); + } + + /// Set the soft response limit. + pub fn set_soft_limit(&mut self, limit: usize) { + self.soft_limit = limit; + } + + /// Get the block with the highest block number. + pub fn highest_block(&self) -> Option { + let headers = self.headers.lock(); + let (hash, header) = headers.iter().max_by_key(|(hash, header)| header.number)?; + let bodies = self.bodies.lock(); + let body = bodies.get(hash)?; + Some(SealedBlock::new(header.clone().seal(*hash), body.clone())) + } +} + +impl DownloadClient for TestFullBlockClient { + fn report_bad_message(&self, _peer_id: PeerId) {} + + fn num_connected_peers(&self) -> usize { + 1 + } +} + +impl HeadersClient for TestFullBlockClient { + type Output = futures::future::Ready>>; + + fn get_headers_with_priority( + &self, + request: HeadersRequest, + _priority: Priority, + ) -> Self::Output { + let headers = self.headers.lock(); + let mut block: BlockHashOrNumber = match request.start { + BlockHashOrNumber::Hash(hash) => headers.get(&hash).cloned(), + BlockHashOrNumber::Number(num) => headers.values().find(|h| h.number == num).cloned(), + } + .map(|h| h.number.into()) + .unwrap(); + + let mut resp = Vec::new(); + + for _ in 0..request.limit { + // fetch from storage + if let Some((_, header)) = headers.iter().find(|(hash, header)| { + BlockNumHash::new(header.number, **hash).matches_block_or_num(&block) + }) { + match request.direction { + HeadersDirection::Falling => block = header.parent_hash.into(), + HeadersDirection::Rising => { + let next = header.number + 1; + block = next.into() + } + } + resp.push(header.clone()); + } else { + break + } + } + futures::future::ready(Ok(WithPeerId::new(PeerId::random(), resp))) + } +} + +impl BodiesClient for TestFullBlockClient { + type Output = futures::future::Ready>>; + + fn get_block_bodies_with_priority( + &self, + hashes: Vec, + _priority: Priority, + ) -> Self::Output { + let bodies = self.bodies.lock(); + let mut all_bodies = Vec::new(); + for hash in hashes { + if let Some(body) = bodies.get(&hash) { + all_bodies.push(body.clone()); + } + + if all_bodies.len() == self.soft_limit { + break + } + } + futures::future::ready(Ok(WithPeerId::new(PeerId::random(), all_bodies))) + } +} diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 22bf8e71a1be..6d652f92e476 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -139,6 +139,18 @@ impl SealedBlock { (self.header, self.body, self.ommers) } + /// Splits the [BlockBody] and [SealedHeader] into separate components + pub fn split_header_body(self) -> (SealedHeader, BlockBody) { + ( + self.header, + BlockBody { + transactions: self.body, + ommers: self.ommers, + withdrawals: self.withdrawals, + }, + ) + } + /// Expensive operation that recovers transaction signer. See [SealedBlockWithSenders]. pub fn senders(&self) -> Option> { self.body.iter().map(|tx| tx.recover_signer()).collect::>>() @@ -715,6 +727,14 @@ impl BlockNumHash { pub fn into_components(self) -> (BlockNumber, BlockHash) { (self.number, self.hash) } + + /// Returns whether or not the block matches the given [BlockHashOrNumber]. 
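The predicate below is what lets the test client's header walk treat hashes and numbers uniformly: the cursor starts at the requested identifier and advances via `parent_hash` (falling) or `number + 1` (rising). A simplified sketch of the falling walk over plain types (not the real `BlockNumHash`):

```rust
use std::collections::HashMap;

// Minimal stand-in: headers identified by (number, parent), keyed by "hash".
// For simplicity the hash here is just the block number.
struct Hdr {
    number: u64,
    parent: u64,
}

/// Collects up to `limit` headers starting at `start`, following parent links
/// downward, mirroring the `HeadersDirection::Falling` arm of the test client.
fn walk_falling(headers: &HashMap<u64, Hdr>, start: u64, limit: usize) -> Vec<u64> {
    let mut out = Vec::new();
    let mut cursor = start;
    for _ in 0..limit {
        match headers.get(&cursor) {
            Some(h) => {
                out.push(h.number);
                cursor = h.parent;
            }
            None => break,
        }
    }
    out
}

fn main() {
    let headers: HashMap<u64, Hdr> =
        (1..=3).map(|n| (n, Hdr { number: n, parent: n - 1 })).collect();
    assert_eq!(walk_falling(&headers, 3, 10), vec![3, 2, 1]);
}
```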
+ pub fn matches_block_or_num(&self, block: &BlockHashOrNumber) -> bool { + match block { + BlockHashOrNumber::Hash(hash) => self.hash == *hash, + BlockHashOrNumber::Number(number) => self.number == *number, + } + } } impl From<(BlockNumber, BlockHash)> for BlockNumHash { @@ -774,6 +794,43 @@ impl BlockBody { withdrawals: self.withdrawals.clone(), } } + + /// Calculate the transaction root for the block body. + pub fn calculate_tx_root(&self) -> H256 { + crate::proofs::calculate_transaction_root(&self.transactions) + } + + /// Calculate the ommers root for the block body. + pub fn calculate_ommers_root(&self) -> H256 { + crate::proofs::calculate_ommers_root(&self.ommers) + } + + /// Calculate the withdrawals root for the block body, if withdrawals exist. If there are no + /// withdrawals, this will return `None`. + pub fn calculate_withdrawals_root(&self) -> Option { + self.withdrawals.as_ref().map(|w| crate::proofs::calculate_withdrawals_root(w)) + } + + /// Calculate all roots (transaction, ommers, withdrawals) for the block body. + pub fn calculate_roots(&self) -> BlockBodyRoots { + BlockBodyRoots { + tx_root: self.calculate_tx_root(), + ommers_hash: self.calculate_ommers_root(), + withdrawals_root: self.calculate_withdrawals_root(), + } + } +} + +/// A struct that represents roots associated with a block body. This can be used to correlate +/// block body responses with headers. +#[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize, Hash)] +pub struct BlockBodyRoots { + /// The transaction root for the block body. + pub tx_root: H256, + /// The ommers hash for the block body. + pub ommers_hash: H256, + /// The withdrawals root for the block body, if withdrawals exist. + pub withdrawals_root: Option, } #[cfg(test)] diff --git a/crates/primitives/src/header.rs b/crates/primitives/src/header.rs index 5a87c6411b61..54b81649b84e 100644 --- a/crates/primitives/src/header.rs +++ b/crates/primitives/src/header.rs @@ -2,7 +2,7 @@ use crate::{ basefee::calculate_next_block_base_fee, keccak256, proofs::{EMPTY_LIST_HASH, EMPTY_ROOT}, - BlockHash, BlockNumHash, BlockNumber, Bloom, Bytes, H160, H256, H64, U256, + BlockBodyRoots, BlockHash, BlockNumHash, BlockNumber, Bloom, Bytes, H160, H256, H64, U256, }; use bytes::{Buf, BufMut, BytesMut}; @@ -151,6 +151,15 @@ impl Header { self.transactions_root == EMPTY_ROOT } + /// Converts all roots in the header to a [BlockBodyRoots] struct. + pub fn body_roots(&self) -> BlockBodyRoots { + BlockBodyRoots { + tx_root: self.transactions_root, + ommers_hash: self.ommers_hash, + withdrawals_root: self.withdrawals_root, + } + } + /// Calculate base fee for next block according to the EIP-1559 spec. 
/// /// Returns a `None` if no base fee is set, no EIP-1559 support diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index eb3bcf1942ee..a76bcba902bf 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -54,8 +54,8 @@ pub mod proofs; pub use account::{Account, Bytecode}; pub use bits::H512; pub use block::{ - Block, BlockBody, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, BlockWithSenders, - ForkBlock, SealedBlock, SealedBlockWithSenders, + Block, BlockBody, BlockBodyRoots, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, + BlockWithSenders, ForkBlock, SealedBlock, SealedBlockWithSenders, }; pub use bloom::Bloom; pub use chain::{ From f3c7a8253c5d954079ae49463bedad21654b48f2 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 6 Jul 2023 12:36:09 +0100 Subject: [PATCH 080/722] refactor(stages): use `MetricsListener` for Execution stage gas metric (#3511) --- bin/reth/src/node/mod.rs | 19 +++++++----- crates/stages/src/metrics/listener.rs | 11 +++++++ crates/stages/src/metrics/sync_metrics.rs | 9 ++++++ crates/stages/src/stages/execution.rs | 37 +++++++++++------------ 4 files changed, 48 insertions(+), 28 deletions(-) diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index a37acc28da8f..b7bf59466be0 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -696,7 +696,7 @@ impl Command { if continuous { HeaderSyncMode::Continuous } else { HeaderSyncMode::Tip(tip_rx) }; let pipeline = builder .with_tip_sender(tip_tx) - .with_metrics_tx(metrics_tx) + .with_metrics_tx(metrics_tx.clone()) .add_stages( DefaultStages::new( header_mode, @@ -712,13 +712,16 @@ impl Command { .set(SenderRecoveryStage { commit_threshold: stage_config.sender_recovery.commit_threshold, }) - .set(ExecutionStage::new( - factory, - ExecutionStageThresholds { - max_blocks: stage_config.execution.max_blocks, - max_changes: stage_config.execution.max_changes, - }, - )) + .set( + ExecutionStage::new( + factory, + ExecutionStageThresholds { + max_blocks: stage_config.execution.max_blocks, + max_changes: stage_config.execution.max_changes, + }, + ) + .with_metrics_tx(metrics_tx), + ) .set(AccountHashingStage::new( stage_config.account_hashing.clean_threshold, stage_config.account_hashing.commit_threshold, diff --git a/crates/stages/src/metrics/listener.rs b/crates/stages/src/metrics/listener.rs index d05560c76f5e..8ec3ec31da7e 100644 --- a/crates/stages/src/metrics/listener.rs +++ b/crates/stages/src/metrics/listener.rs @@ -1,5 +1,6 @@ use crate::metrics::SyncMetrics; use reth_primitives::{ + constants::MGAS_TO_GAS, stage::{StageCheckpoint, StageId}, BlockNumber, }; @@ -31,6 +32,11 @@ pub enum MetricEvent { /// If specified, `entities_total` metric is updated. max_block_number: Option, }, + /// Execution stage processed some amount of gas. + ExecutionStageGas { + /// Gas processed. + gas: u64, + }, } /// Metrics routine that listens to new metric events on the `events_rx` receiver. 
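The listener half of this metrics refactor is a plain mpsc consumer that folds gas events into an Mgas gauge. A self-contained sketch of the event flow and the gas-to-Mgas conversion, using a local constant and counter in place of reth's `MGAS_TO_GAS` and the real gauge:

```rust
use tokio::sync::mpsc;

// Stand-in for reth_primitives::constants::MGAS_TO_GAS (1 Mgas = 1_000_000 gas).
const MGAS_TO_GAS: u64 = 1_000_000;

#[derive(Debug)]
enum MetricEvent {
    ExecutionStageGas { gas: u64 },
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::unbounded_channel();

    // Producer side: the execution stage only ever sends raw gas numbers.
    tx.send(MetricEvent::ExecutionStageGas { gas: 30_000_000 }).unwrap();
    drop(tx); // close the channel so the listener loop below terminates

    // Listener side: convert to Mgas before recording, as the real
    // `MetricsListener` does when incrementing `mgas_processed_total`.
    let mut mgas_processed = 0f64;
    while let Some(event) = rx.recv().await {
        match event {
            MetricEvent::ExecutionStageGas { gas } => {
                mgas_processed += gas as f64 / MGAS_TO_GAS as f64;
            }
        }
    }
    assert_eq!(mgas_processed, 30.0);
}
```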
@@ -71,6 +77,11 @@ impl MetricsListener { stage_metrics.entities_total.set(total as f64); } } + MetricEvent::ExecutionStageGas { gas } => self + .sync_metrics + .execution_stage + .mgas_processed_total + .increment(gas as f64 / MGAS_TO_GAS as f64), } } } diff --git a/crates/stages/src/metrics/sync_metrics.rs b/crates/stages/src/metrics/sync_metrics.rs index ba440cb2a3b6..93b1c86eeac6 100644 --- a/crates/stages/src/metrics/sync_metrics.rs +++ b/crates/stages/src/metrics/sync_metrics.rs @@ -8,6 +8,7 @@ use std::collections::HashMap; #[derive(Debug, Default)] pub(crate) struct SyncMetrics { pub(crate) stages: HashMap, + pub(crate) execution_stage: ExecutionStageMetrics, } impl SyncMetrics { @@ -29,3 +30,11 @@ pub(crate) struct StageMetrics { /// The number of total entities of the last commit for a stage, if applicable. pub(crate) entities_total: Gauge, } + +/// Execution stage metrics. +#[derive(Metrics)] +#[metrics(scope = "sync.execution")] +pub(crate) struct ExecutionStageMetrics { + /// The total amount of gas processed (in millions) + pub(crate) mgas_processed_total: Gauge, +} diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index e54e7a446d21..38284b1d125d 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -1,4 +1,7 @@ -use crate::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; +use crate::{ + ExecInput, ExecOutput, MetricEvent, MetricEventsSender, Stage, StageError, UnwindInput, + UnwindOutput, +}; use reth_db::{ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, database::Database, @@ -7,12 +10,7 @@ use reth_db::{ transaction::{DbTx, DbTxMut}, }; use reth_interfaces::db::DatabaseError; -use reth_metrics::{ - metrics::{self, Gauge}, - Metrics, -}; use reth_primitives::{ - constants::MGAS_TO_GAS, stage::{ CheckpointBlockRange, EntitiesCheckpoint, ExecutionCheckpoint, StageCheckpoint, StageId, }, @@ -25,14 +23,6 @@ use reth_provider::{ use std::{ops::RangeInclusive, time::Instant}; use tracing::*; -/// Execution stage metrics. -#[derive(Metrics)] -#[metrics(scope = "sync.execution")] -pub struct ExecutionStageMetrics { - /// The total amount of gas processed (in millions) - mgas_processed_total: Gauge, -} - /// The execution stage executes all transactions and /// update history indexes. /// @@ -64,7 +54,7 @@ pub struct ExecutionStageMetrics { // false positive, we cannot derive it if !DB: Debug. #[allow(missing_debug_implementations)] pub struct ExecutionStage { - metrics: ExecutionStageMetrics, + metrics_tx: Option, /// The stage's internal executor executor_factory: EF, /// The commit thresholds of the execution stage. @@ -74,7 +64,7 @@ pub struct ExecutionStage { impl ExecutionStage { /// Create new execution stage with specified config. pub fn new(executor_factory: EF, thresholds: ExecutionStageThresholds) -> Self { - Self { metrics: ExecutionStageMetrics::default(), executor_factory, thresholds } + Self { metrics_tx: None, executor_factory, thresholds } } /// Create an execution stage with the provided executor factory. @@ -84,9 +74,15 @@ impl ExecutionStage { Self::new(executor_factory, ExecutionStageThresholds::default()) } + /// Set the metric events sender. + pub fn with_metrics_tx(mut self, metrics_tx: MetricEventsSender) -> Self { + self.metrics_tx = Some(metrics_tx); + self + } + /// Execute the stage. 
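The stage side is the mirror image of the listener: instead of owning a `Gauge`, the stage holds an optional sender and metrics become opt-in through a builder method, so a dropped or absent listener can never fail execution. A stripped-down sketch of that pattern (a hypothetical `Stage` type, not the real `ExecutionStage`):

```rust
use tokio::sync::mpsc::{unbounded_channel, UnboundedSender};

enum MetricEvent {
    ExecutionStageGas { gas: u64 },
}

struct Stage {
    // `None` means metrics are disabled; sends are best-effort either way.
    metrics_tx: Option<UnboundedSender<MetricEvent>>,
}

impl Stage {
    fn new() -> Self {
        Self { metrics_tx: None }
    }

    /// Opt into metrics reporting, mirroring `with_metrics_tx` above.
    fn with_metrics_tx(mut self, tx: UnboundedSender<MetricEvent>) -> Self {
        self.metrics_tx = Some(tx);
        self
    }

    fn execute_block(&mut self, gas_used: u64) {
        // ... execute the block, then report gas ...
        if let Some(tx) = &self.metrics_tx {
            // Ignore send errors: a closed listener must not fail the stage.
            let _ = tx.send(MetricEvent::ExecutionStageGas { gas: gas_used });
        }
    }
}

fn main() {
    let (tx, mut rx) = unbounded_channel();
    let mut stage = Stage::new().with_metrics_tx(tx);
    stage.execute_block(21_000);
    assert!(matches!(rx.try_recv(), Ok(MetricEvent::ExecutionStageGas { gas: 21_000 })));
}
```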
pub fn execute_inner( - &self, + &mut self, provider: &DatabaseProviderRW<'_, &DB>, input: ExecInput, ) -> Result { @@ -129,9 +125,10 @@ impl ExecutionStage { })?; // Gas metrics - self.metrics - .mgas_processed_total - .increment(block.header.gas_used as f64 / MGAS_TO_GAS as f64); + if let Some(metrics_tx) = &mut self.metrics_tx { + let _ = + metrics_tx.send(MetricEvent::ExecutionStageGas { gas: block.header.gas_used }); + } // Merge state changes state.extend(block_state); From 6347aacd15bcb3b3980b3404a9885dd974d1a371 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 6 Jul 2023 13:55:19 +0200 Subject: [PATCH 081/722] chore: add commit to client version (#3621) --- bin/reth/src/version.rs | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/bin/reth/src/version.rs b/bin/reth/src/version.rs index 17395d16acf0..a016d209c97f 100644 --- a/bin/reth/src/version.rs +++ b/bin/reth/src/version.rs @@ -42,7 +42,7 @@ pub(crate) const LONG_VERSION: &str = concat!( env!("VERGEN_CARGO_FEATURES") ); -/// The version information for reth formatted for P2P. +/// The version information for reth formatted for P2P (devp2p). /// /// - The latest version from Cargo.toml /// - The target triple @@ -50,10 +50,17 @@ pub(crate) const LONG_VERSION: &str = concat!( /// # Example /// /// ```text -/// reth/v{major}.{minor}.{patch}/{target} +/// reth/v{major}.{minor}.{patch}-{sha1}/{target} /// ``` -pub(crate) const P2P_CLIENT_VERSION: &str = - concat!("reth/v", env!("CARGO_PKG_VERSION"), "/", env!("VERGEN_CARGO_TARGET_TRIPLE")); +/// e.g.: `reth/v0.1.0-alpha.1-428a6dc2f/aarch64-apple-darwin` +pub(crate) const P2P_CLIENT_VERSION: &str = concat!( + "reth/v", + env!("CARGO_PKG_VERSION"), + "-", + env!("VERGEN_GIT_SHA"), + "/", + env!("VERGEN_CARGO_TARGET_TRIPLE") +); /// The default extradata used for payload building. /// From 93d66ab32fe4959130bfcb5774a75bb3256b385f Mon Sep 17 00:00:00 2001 From: Bjerg Date: Thu, 6 Jul 2023 14:07:15 +0200 Subject: [PATCH 082/722] docs: add homebrew and arch aur (#3620) --- book/installation/binaries.md | 32 +++++++++++--------------------- 1 file changed, 11 insertions(+), 21 deletions(-) diff --git a/book/installation/binaries.md b/book/installation/binaries.md index 0adbf634e2fa..c34539630f02 100644 --- a/book/installation/binaries.md +++ b/book/installation/binaries.md @@ -1,28 +1,18 @@ # Binaries -Precompiled binaries are available from the [GitHub releases page](https://github.com/paradigmxyz/reth/releases). +[**Archives of precompiled binaries of reth are available for Windows, macOS and Linux.**](https://github.com/paradigmxyz/reth/releases) They are static executables. Users of platforms not explicitly listed below should download one of these archives. -Binaries are supplied for 5 platforms: +If you use **macOS Homebrew** or **Linuxbrew**, you can install Reth from Paradigm's homebrew tap: -- Linux x86_64: AMD and Intel 64-bit processors (most desktops, laptops, and servers) -- Linux ARM64: 64-bit arm processors -- macOS x86_64: macOS with Intel chips -- macOS ARM64: macOS with Apple Silicon -- Windows x86_64: AMD and Intel 64-bit processors +```text +brew install paradigmxyz/brew/reth +``` -Each binary is contained in a tarball. +If you use **Arch Linux** you can install stable Reth from the AUR using an [AUR helper](https://wiki.archlinux.org/title/AUR_helpers) ([paru][paru] as an example here): -As an example, you could install the Linux x86_64 version like so: +```text +paru -S reth # Stable +paru -S reth-git # Unstable (git) +``` -1. 
Go to the [Releases](https://github.com/paradigmxyz/reth/releases) page and select the latest release. -1. Download the `reth-${VERSION}-x86_64-unknown-linux-gnu.tar.gz` tarball. - - For example, to obtain the binary file for v0.0.1-alpha, you can run the following commands in a Linux terminal: - ```bash - cd ~ - curl -LO https://github.com/paradigmxyz/reth/releases/download/v0.1.0-alpha.1/reth-v0.1.0-alpha.1-x86_64-unknown-linux-gnu.tar.gz - tar -xvf reth-v0.1.0-alpha.1-x86_64-unknown-linux-gnu.tar.gz - ``` -1. Test the binary with `./reth --version` (it should print the version). -2. (Optional) Move the `reth` binary to a location in your `PATH`, so the `reth` command can be called from anywhere. - For most Linux distros, you can move the binary to `/usr/local/bin`: `sudo cp ./reth /usr/local/bin`. +[paru]: https://github.com/Morganamilo/paru \ No newline at end of file From d9dc9aac92aadee009fb1b17fb96768caa3b553b Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 6 Jul 2023 13:08:29 +0100 Subject: [PATCH 083/722] chore(book): document missing CLI arguments (#3610) --- book/cli/db.md | 14 +++++++++ book/cli/debug.md | 34 +++++++++++++++++++-- book/cli/import.md | 27 +++++++++++----- book/cli/init.md | 14 +++++++++ book/cli/node.md | 76 ++++++++++++++++++++++++++++++++++++---------- book/cli/p2p.md | 14 +++++++++ book/cli/stage.md | 63 +++++++++++++++++++++++++++++++++++--- 7 files changed, 212 insertions(+), 30 deletions(-) diff --git a/book/cli/db.md b/book/cli/db.md index c06640a9b7ac..95627871c42d 100644 --- a/book/cli/db.md +++ b/book/cli/db.md @@ -50,6 +50,20 @@ Options: -h, --help Print help (see a summary with '-h') +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + Logging: --log.persistent The flag to enable persistent logs diff --git a/book/cli/debug.md b/book/cli/debug.md index 586a72649770..5139c860e748 100644 --- a/book/cli/debug.md +++ b/book/cli/debug.md @@ -135,6 +135,20 @@ Networking: --port Network listening port. default: 30303 +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + --to The maximum block height @@ -213,15 +227,29 @@ Options: [default: mainnet] + -h, --help + Print help (see a summary with '-h') + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. 
assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + --to The height to finish at --skip-node-depth The depth after which we should start comparing branch nodes - -h, --help - Print help (see a summary with '-h') - Logging: --log.persistent The flag to enable persistent logs diff --git a/book/cli/import.md b/book/cli/import.md index ec79c91a635c..7bc08d6437a0 100644 --- a/book/cli/import.md +++ b/book/cli/import.md @@ -7,13 +7,6 @@ $ reth import --help Usage: reth import [OPTIONS] -Arguments: - - The path to a block file for import. - - The online stages (headers and bodies) are replaced by a file import, after which the - remaining stages are executed. - Options: --config The path to the configuration file to use. @@ -44,6 +37,26 @@ Options: -h, --help Print help (see a summary with '-h') +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + + The path to a block file for import. + + The online stages (headers and bodies) are replaced by a file import, after which the + remaining stages are executed. + Logging: --log.persistent The flag to enable persistent logs diff --git a/book/cli/init.md b/book/cli/init.md index 5517043fb6ca..a144b0085298 100644 --- a/book/cli/init.md +++ b/book/cli/init.md @@ -34,6 +34,20 @@ Options: -h, --help Print help (see a summary with '-h') +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. 
assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + Logging: --log.persistent The flag to enable persistent logs diff --git a/book/cli/node.md b/book/cli/node.md index 64b62d480a73..4d447428a4c6 100644 --- a/book/cli/node.md +++ b/book/cli/node.md @@ -167,12 +167,7 @@ RPC: [default: 25] - --rpc.gascap - Maximum gas limit for `eth_call` and call tracing RPC methods - - [default: 30000000] - -GAS PRICE ORACLE: +Gas Price Oracle: --gpo.blocks Number of recent blocks to check for gas price @@ -193,20 +188,56 @@ GAS PRICE ORACLE: [default: 60] - --block-cache-size - Max size for cached block data in megabytes + --block-cache-len + Maximum number of block cache entries - [default: 500] + [default: 5000] - --receipt-cache-size - Max size for cached receipt data in megabytes + --receipt-cache-len + Maximum number of receipt cache entries - [default: 500] + [default: 2000] - --env-cache-size - Max size for cached evm env data in megabytes + --env-cache-len + Maximum number of env cache entries - [default: 1] + [default: 1000] + +TxPool: + --txpool.pending_max_count + Max number of transaction in the pending sub-pool + + [default: 10000] + + --txpool.pending_max_size + Max size of the pending sub-pool in megabytes + + [default: 20] + + --txpool.basefee_max_count + Max number of transaction in the basefee sub-pool + + [default: 10000] + + --txpool.basefee_max_size + Max size of the basefee sub-pool in megabytes + + [default: 20] + + --txpool.queued_max_count + Max number of transaction in the queued sub-pool + + [default: 10000] + + --txpool.queued_max_size + Max size of the queued sub-pool in megabytes + + [default: 20] + + --txpool.max_account_slots + Max number of executable transaction slots guaranteed per account + + [default: 16] Builder: --builder.extradata @@ -263,7 +294,20 @@ Debug: --debug.hook-all Hook on every transaction in a block -Rpc: +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + --auto-mine Automatically mine blocks for new transactions diff --git a/book/cli/p2p.md b/book/cli/p2p.md index ddaa94133d0c..3c9536ef51cf 100644 --- a/book/cli/p2p.md +++ b/book/cli/p2p.md @@ -76,6 +76,20 @@ Options: -h, --help Print help (see a summary with '-h') +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. 
assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + Logging: --log.persistent The flag to enable persistent logs diff --git a/book/cli/stage.md b/book/cli/stage.md index d23bc2bfa4fa..ea2981490e82 100644 --- a/book/cli/stage.md +++ b/book/cli/stage.md @@ -63,10 +63,6 @@ $ reth stage drop --help Usage: reth stage drop [OPTIONS] -Arguments: - - [possible values: headers, bodies, senders, execution, account-hashing, storage-hashing, hashing, merkle, tx-lookup, history, account-history, storage-history, total-difficulty] - Options: --datadir The path to the data dir for all reth files and subdirectories. @@ -94,6 +90,23 @@ Options: -h, --help Print help (see a summary with '-h') +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + + [possible values: headers, bodies, senders, execution, account-hashing, storage-hashing, hashing, merkle, tx-lookup, history, account-history, storage-history, total-difficulty] + Logging: --log.persistent The flag to enable persistent logs @@ -173,6 +186,20 @@ Options: -h, --help Print help (see a summary with '-h') +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + Logging: --log.persistent The flag to enable persistent logs @@ -543,6 +570,20 @@ Networking: --port Network listening port. default: 30303 +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + -c, --commit Commits the changes in the database. WARNING: potentially destructive. @@ -623,6 +664,20 @@ Options: -h, --help Print help (see a summary with '-h') +Database: + --db.log-level + Database logging level. 
Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + Logging: --log.persistent The flag to enable persistent logs From c4f02425eb59ef36bb99115b6e3b14b026bbdb1e Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 6 Jul 2023 15:21:25 +0300 Subject: [PATCH 084/722] chore(engine): warn on inconsistent pipeline (#3623) --- crates/consensus/beacon/src/engine/mod.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 4cc52939e683..d397fcf37874 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -385,6 +385,13 @@ where // If the checkpoint of any stage is less than the checkpoint of the first stage, // retrieve and return the block hash of the latest header and use it as the target. if stage_checkpoint < first_stage_checkpoint { + warn!( + target: "consensus::engine", + first_stage_checkpoint, + inconsistent_stage_id = %stage_id, + inconsistent_stage_checkpoint = stage_checkpoint, + "Pipeline sync progress is inconsistent" + ); return self.blockchain.block_hash(first_stage_checkpoint) } } From 73bfb2c9a4b46215c293a1156a9a18936ef4f3e9 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 6 Jul 2023 15:09:57 +0200 Subject: [PATCH 085/722] chore: remove network-api test-utils feature (#3622) --- crates/net/network-api/Cargo.toml | 1 - crates/net/network-api/src/lib.rs | 5 ++--- .../network-api/src/{test_utils.rs => noop.rs} | 6 ++++++ crates/rpc/rpc-builder/Cargo.toml | 2 +- crates/rpc/rpc-builder/tests/it/utils.rs | 4 ++-- crates/rpc/rpc/Cargo.toml | 2 +- crates/rpc/rpc/src/eth/api/server.rs | 4 ++-- crates/rpc/rpc/src/eth/api/transactions.rs | 4 ++-- examples/Cargo.toml | 4 ++-- examples/db-access.rs | 4 ++-- examples/rpc-db.rs | 17 +++++++---------- 11 files changed, 27 insertions(+), 26 deletions(-) rename crates/net/network-api/src/{test_utils.rs => noop.rs} (91%) diff --git a/crates/net/network-api/Cargo.toml b/crates/net/network-api/Cargo.toml index 31c93a0769f0..0769c5daec54 100644 --- a/crates/net/network-api/Cargo.toml +++ b/crates/net/network-api/Cargo.toml @@ -25,4 +25,3 @@ tokio = { workspace = true, features = ["sync"] } [features] default = ["serde"] serde = ["dep:serde"] -test-utils = [] diff --git a/crates/net/network-api/src/lib.rs b/crates/net/network-api/src/lib.rs index fb1a92761638..f880fe745e66 100644 --- a/crates/net/network-api/src/lib.rs +++ b/crates/net/network-api/src/lib.rs @@ -32,9 +32,8 @@ pub mod error; /// Reputation score pub mod reputation; -#[cfg(feature = "test-utils")] -/// Implementation of network traits for testing purposes. -pub mod test_utils; +/// Implementation of network traits for that does nothing. +pub mod noop; /// Provides general purpose information about the network. 
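The `test_utils` → `noop` rename above reflects how the type is actually used: it is a wiring stand-in for components that are generic over the network but never touch it, not a test double with behavior. A hedged sketch of that use (assuming `PeersInfo` exposes `num_connected_peers` and the noop implementation reports zero peers, as the trait impls in this crate suggest):

```rust
use reth_network_api::{noop::NoopNetwork, PeersInfo};

// A component that only needs peer-count information can stay generic and be
// wired with the noop network when networking is irrelevant (e.g. offline RPC).
fn connected_peers<N: PeersInfo>(network: &N) -> usize {
    network.num_connected_peers()
}

fn main() {
    let network = NoopNetwork::default();
    assert_eq!(connected_peers(&network), 0);
}
```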
#[async_trait] diff --git a/crates/net/network-api/src/test_utils.rs b/crates/net/network-api/src/noop.rs similarity index 91% rename from crates/net/network-api/src/test_utils.rs rename to crates/net/network-api/src/noop.rs index f914138b925f..9a5309993f77 100644 --- a/crates/net/network-api/src/test_utils.rs +++ b/crates/net/network-api/src/noop.rs @@ -1,3 +1,8 @@ +//! A network implementation that does nothing. +//! +//! This is useful for wiring components together that don't require network but still need to be +//! generic over it. + use crate::{ NetworkError, NetworkInfo, PeerKind, Peers, PeersInfo, Reputation, ReputationChangeKind, }; @@ -11,6 +16,7 @@ use std::net::{IpAddr, SocketAddr}; /// /// Intended for testing purposes where network is not used. #[derive(Debug, Clone, Default)] +#[non_exhaustive] pub struct NoopNetwork; #[async_trait] diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index d0dfab2ca3dc..b4159bfb682b 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -39,7 +39,7 @@ reth-tracing = { path = "../../tracing" } reth-rpc-api = { path = "../rpc-api", features = ["client"] } reth-transaction-pool = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } -reth-network-api = { workspace = true, features = ["test-utils"] } +reth-network-api = { workspace = true } reth-interfaces = { workspace = true, features = ["test-utils"] } reth-beacon-consensus = { path = "../../consensus/beacon" } reth-payload-builder = { workspace = true, features = ["test-utils"] } diff --git a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index 1281a8ae07df..e7c4a4eb88ae 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -1,5 +1,5 @@ use reth_beacon_consensus::BeaconConsensusEngineHandle; -use reth_network_api::test_utils::NoopNetwork; +use reth_network_api::noop::NoopNetwork; use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::MAINNET; use reth_provider::test_utils::{NoopProvider, TestCanonStateSubscriptions}; @@ -101,7 +101,7 @@ pub fn test_rpc_builder() -> RpcModuleBuilder< RpcModuleBuilder::default() .with_provider(NoopProvider::default()) .with_pool(testing_pool()) - .with_network(NoopNetwork) + .with_network(NoopNetwork::default()) .with_executor(TokioTaskExecutor::default()) .with_events(TestCanonStateSubscriptions::default()) } diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index c3003e496aec..11111cafbc69 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -18,7 +18,7 @@ reth-rlp = { workspace = true } reth-rpc-types = { workspace = true } reth-provider = { workspace = true, features = ["test-utils"] } reth-transaction-pool = { workspace = true, features = ["test-utils"] } -reth-network-api = { workspace = true, features = ["test-utils"] } +reth-network-api = { workspace = true } reth-rpc-engine-api = { path = "../rpc-engine-api" } reth-revm = { path = "../../revm" } reth-tasks = { workspace = true } diff --git a/crates/rpc/rpc/src/eth/api/server.rs b/crates/rpc/rpc/src/eth/api/server.rs index acca88323cdc..b1643e89671a 100644 --- a/crates/rpc/rpc/src/eth/api/server.rs +++ b/crates/rpc/rpc/src/eth/api/server.rs @@ -396,7 +396,7 @@ mod tests { }; use jsonrpsee::types::error::INVALID_PARAMS_CODE; use reth_interfaces::test_utils::{generators, generators::Rng}; - use 
reth_network_api::test_utils::NoopNetwork; + use reth_network_api::noop::NoopNetwork; use reth_primitives::{ basefee::calculate_next_block_base_fee, Block, BlockNumberOrTag, Header, TransactionSigned, H256, U256, @@ -424,7 +424,7 @@ mod tests { EthApi::new( provider.clone(), testing_pool(), - NoopNetwork, + NoopNetwork::default(), cache.clone(), GasPriceOracle::new(provider, Default::default(), cache), ) diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index fe4eca4be8f8..1cbd5df2ba28 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -892,7 +892,7 @@ mod tests { eth::{cache::EthStateCache, gas_oracle::GasPriceOracle}, EthApi, }; - use reth_network_api::test_utils::NoopNetwork; + use reth_network_api::noop::NoopNetwork; use reth_primitives::{hex_literal::hex, Bytes}; use reth_provider::test_utils::NoopProvider; use reth_transaction_pool::{test_utils::testing_pool, TransactionPool}; @@ -900,7 +900,7 @@ mod tests { #[tokio::test] async fn send_raw_transaction() { let noop_provider = NoopProvider::default(); - let noop_network_provider = NoopNetwork; + let noop_network_provider = NoopNetwork::default(); let pool = testing_pool(); diff --git a/examples/Cargo.toml b/examples/Cargo.toml index ed1a18eb6351..c77a2d6879db 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -17,8 +17,8 @@ reth-rpc-types = { workspace = true } reth-revm = { workspace = true } reth-blockchain-tree = { workspace = true } reth-beacon-consensus = { workspace = true } -reth-network-api = { workspace = true, features = ["test-utils"] } -reth-transaction-pool = { workspace = true, features = ["test-utils"] } +reth-network-api = { workspace = true } +reth-transaction-pool = { workspace = true } reth-tasks = { workspace = true } diff --git a/examples/db-access.rs b/examples/db-access.rs index fee4239a1661..5b8aec77f4ff 100644 --- a/examples/db-access.rs +++ b/examples/db-access.rs @@ -18,7 +18,7 @@ fn main() -> eyre::Result<()> { // Opens a RO handle to the database file. // TODO: Should be able to do `ProviderFactory::new_with_db_path_ro(...)` instead of // doing in 2 steps. - let db = open_db_read_only(&Path::new(&std::env::var("RETH_DB_PATH")?), None)?; + let db = open_db_read_only(Path::new(&std::env::var("RETH_DB_PATH")?), None)?; // Instantiate a provider factory for Ethereum mainnet using the provided DB. // TODO: Should the DB version include the spec so that you do not need to specify it here? @@ -197,7 +197,7 @@ fn receipts_provider_example eyre::Result<()> { // 1. Setup the DB - let db = Arc::new(open_db_read_only(&Path::new(&std::env::var("RETH_DB_PATH")?), None)?); + let db = Arc::new(open_db_read_only(Path::new(&std::env::var("RETH_DB_PATH")?), None)?); let spec = Arc::new(ChainSpecBuilder::mainnet().build()); let factory = ProviderFactory::new(db.clone(), spec.clone()); @@ -47,20 +46,18 @@ async fn main() -> eyre::Result<()> { let tree = ShareableBlockchainTree::new(BlockchainTree::new( externals, - canon_state_notification_sender.clone(), + canon_state_notification_sender, tree_config, )?); BlockchainProvider::new(factory, tree)? }; - let noop_pool = testing_pool(); let rpc_builder = RpcModuleBuilder::default() .with_provider(provider) - // Rest is just defaults - // TODO: How do we make this easier to configure? 
- .with_pool(noop_pool) - .with_network(NoopNetwork) + // Rest is just noops that do nothing + .with_pool(NoopTransactionPool::default()) + .with_network(NoopNetwork::default()) .with_executor(TokioTaskExecutor::default()) .with_events(TestCanonStateSubscriptions::default()); From 350f1a1e49a422bf0a727e97407f3490dd39e8fe Mon Sep 17 00:00:00 2001 From: Chris Evanko <106608356+cjeva10@users.noreply.github.com> Date: Thu, 6 Jul 2023 10:29:21 -0400 Subject: [PATCH 086/722] fix: populate transaction trace error field (#3627) --- crates/revm/revm-inspectors/src/tracing/types.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/revm/revm-inspectors/src/tracing/types.rs b/crates/revm/revm-inspectors/src/tracing/types.rs index 34c990abdd55..37305eb687dd 100644 --- a/crates/revm/revm-inspectors/src/tracing/types.rs +++ b/crates/revm/revm-inspectors/src/tracing/types.rs @@ -321,9 +321,10 @@ impl CallTraceNode { pub(crate) fn parity_transaction_trace(&self, trace_address: Vec) -> TransactionTrace { let action = self.parity_action(); let output = self.parity_trace_output(); + let error = self.trace.as_error(); TransactionTrace { action, - error: None, + error, result: Some(output), trace_address, subtraces: self.children.len(), From 6d5b6c66bed13a6bbcf5a624d3883b627650e3dc Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 6 Jul 2023 16:57:17 +0200 Subject: [PATCH 087/722] fix: rm outdated flatten (#3630) --- crates/rpc/rpc-types/src/eth/trace/parity.rs | 35 ++++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/crates/rpc/rpc-types/src/eth/trace/parity.rs b/crates/rpc/rpc-types/src/eth/trace/parity.rs index e3bb31b4fdcd..7d1276cb905f 100644 --- a/crates/rpc/rpc-types/src/eth/trace/parity.rs +++ b/crates/rpc/rpc-types/src/eth/trace/parity.rs @@ -223,9 +223,8 @@ pub enum TraceOutput { pub struct TransactionTrace { #[serde(flatten)] pub action: Action, - #[serde(flatten)] + #[serde(flatten, skip_serializing_if = "Option::is_none")] pub error: Option, - #[serde(flatten)] pub result: Option, pub subtraces: usize, pub trace_address: Vec, @@ -303,3 +302,35 @@ pub struct StorageDelta { pub key: U256, pub val: U256, } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_transaction_trace() { + let s = r#"{ + "action": { + "from": "0x66e29f0b6b1b07071f2fde4345d512386cb66f5f", + "callType": "call", + "gas": "0x10bfc", + "input": 
"0xf6cd1e8d0000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ec6952892271c8ee13f12e118484e03149281c9600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000010480862479000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000002000000000000000000000000160f5f00288e9e1cc8655b327e081566e580a71d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000011c37937e080000fffffffffffffffffffffffffffffffffffffffffffffffffee3c86c81f8000000000000000000000000000000000000000000000000000000000000", + "to": "0x160f5f00288e9e1cc8655b327e081566e580a71d", + "value": "0x244b" + }, + "blockHash": "0xbca9ee244882bd00a19737a66f24002a4562a949c4d5ebd03c32e04111cff536", + "blockNumber": 17600209, + "error": "Reverted", + "result": { + "gasUsed": "0x9daf", + "output": "0x000000000000000000000000000000000000000000000000011c37937e080000" + }, + "subtraces": 3, + "traceAddress": [], + "transactionHash": "0x0e48a8d4419efaa2d3a9b8f625a1c559a4179fd19ddd10c02842965f3a7e7b63", + "transactionPosition": 0, + "type": "call" + }"#; + let _val = serde_json::from_str::(s).unwrap(); + } +} From 77cd4abbdc2dbdb6e9ddcb482fdc1809ca32f805 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 6 Jul 2023 18:00:11 +0200 Subject: [PATCH 088/722] fix: put attribute on correct line (#3636) --- crates/rpc/rpc-types/src/eth/trace/parity.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/rpc/rpc-types/src/eth/trace/parity.rs b/crates/rpc/rpc-types/src/eth/trace/parity.rs index 7d1276cb905f..8158abbfcecb 100644 --- a/crates/rpc/rpc-types/src/eth/trace/parity.rs +++ b/crates/rpc/rpc-types/src/eth/trace/parity.rs @@ -223,8 +223,8 @@ pub enum TraceOutput { pub struct TransactionTrace { #[serde(flatten)] pub action: Action, - #[serde(flatten, skip_serializing_if = "Option::is_none")] pub error: Option, + #[serde(flatten, skip_serializing_if = "Option::is_none")] pub result: Option, pub subtraces: usize, pub trace_address: Vec, @@ -331,6 +331,7 @@ mod tests { "transactionPosition": 0, "type": "call" }"#; - let _val = serde_json::from_str::(s).unwrap(); + let val = serde_json::from_str::(s).unwrap(); + serde_json::to_value(val).unwrap(); } } From aef9480da04aa9f673e21acf43652d1d311f70e3 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 6 Jul 2023 17:41:14 +0200 Subject: [PATCH 089/722] chore: move BeaconConsensusEngineHandle to separate file (#3633) --- crates/consensus/beacon/src/engine/handle.rs | 87 +++++++++++++++ crates/consensus/beacon/src/engine/mod.rs | 106 ++++--------------- 2 files changed, 105 insertions(+), 88 deletions(-) create mode 100644 crates/consensus/beacon/src/engine/handle.rs diff --git a/crates/consensus/beacon/src/engine/handle.rs b/crates/consensus/beacon/src/engine/handle.rs new file mode 100644 index 000000000000..d2b8661b6080 --- /dev/null +++ b/crates/consensus/beacon/src/engine/handle.rs @@ -0,0 +1,87 @@ +//! 
`BeaconConsensusEngine` external API
+
+use crate::{
+    engine::message::OnForkChoiceUpdated, BeaconConsensusEngineEvent, BeaconEngineMessage,
+    BeaconForkChoiceUpdateError, BeaconOnNewPayloadError,
+};
+use futures::TryFutureExt;
+use reth_rpc_types::engine::{
+    ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadAttributes, PayloadStatus,
+};
+use tokio::sync::{mpsc, mpsc::UnboundedSender, oneshot};
+use tokio_stream::wrappers::UnboundedReceiverStream;
+
+/// A _shareable_ beacon consensus frontend type. Used to interact with the spawned beacon consensus
+/// engine task.
+///
+/// See also [`BeaconConsensusEngine`](crate::engine::BeaconConsensusEngine).
+#[derive(Clone, Debug)]
+pub struct BeaconConsensusEngineHandle {
+    pub(crate) to_engine: UnboundedSender<BeaconEngineMessage>,
+}
+
+// === impl BeaconConsensusEngineHandle ===
+
+impl BeaconConsensusEngineHandle {
+    /// Creates a new beacon consensus engine handle.
+    pub fn new(to_engine: UnboundedSender<BeaconEngineMessage>) -> Self {
+        Self { to_engine }
+    }
+
+    /// Sends a new payload message to the beacon consensus engine and waits for a response.
+    ///
+    /// See also
+    pub async fn new_payload(
+        &self,
+        payload: ExecutionPayload,
+    ) -> Result<PayloadStatus, BeaconOnNewPayloadError> {
+        let (tx, rx) = oneshot::channel();
+        let _ = self.to_engine.send(BeaconEngineMessage::NewPayload { payload, tx });
+        rx.await.map_err(|_| BeaconOnNewPayloadError::EngineUnavailable)?
+    }
+
+    /// Sends a forkchoice update message to the beacon consensus engine and waits for a response.
+    ///
+    /// See also
+    pub async fn fork_choice_updated(
+        &self,
+        state: ForkchoiceState,
+        payload_attrs: Option<PayloadAttributes>,
+    ) -> Result<ForkchoiceUpdated, BeaconForkChoiceUpdateError> {
+        Ok(self
+            .send_fork_choice_updated(state, payload_attrs)
+            .map_err(|_| BeaconForkChoiceUpdateError::EngineUnavailable)
+            .await??
+            .await?)
+    }
+
+    /// Sends a forkchoice update message to the beacon consensus engine and returns the receiver to
+    /// wait for a response.
+    fn send_fork_choice_updated(
+        &self,
+        state: ForkchoiceState,
+        payload_attrs: Option<PayloadAttributes>,
+    ) -> oneshot::Receiver<Result<OnForkChoiceUpdated, reth_interfaces::Error>> {
+        let (tx, rx) = oneshot::channel();
+        let _ = self.to_engine.send(BeaconEngineMessage::ForkchoiceUpdated {
+            state,
+            payload_attrs,
+            tx,
+        });
+        rx
+    }
+
+    /// Sends a transition configuration exchange message to the beacon consensus engine.
+    ///
+    /// See also
+    pub async fn transition_configuration_exchanged(&self) {
+        let _ = self.to_engine.send(BeaconEngineMessage::TransitionConfigurationExchanged);
+    }
+
+    /// Creates a new [`BeaconConsensusEngineEvent`] listener stream.
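+    ///
+    /// A minimal consumer sketch (illustrative only; assumes `handle` is a
+    /// `BeaconConsensusEngineHandle` for a running engine and that
+    /// `tokio_stream::StreamExt` is in scope):
+    ///
+    /// ```ignore
+    /// let mut events = handle.event_listener();
+    /// while let Some(event) = events.next().await {
+    ///     println!("engine event: {event:?}");
+    /// }
+    /// ```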
+    pub fn event_listener(&self) -> UnboundedReceiverStream<BeaconConsensusEngineEvent> {
+        let (tx, rx) = mpsc::unbounded_channel();
+        let _ = self.to_engine.send(BeaconEngineMessage::EventListener(tx));
+        UnboundedReceiverStream::new(rx)
+    }
+}
diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs
index d397fcf37874..175f6c5dca74 100644
--- a/crates/consensus/beacon/src/engine/mod.rs
+++ b/crates/consensus/beacon/src/engine/mod.rs
@@ -1,13 +1,17 @@
 use crate::{
-    engine::{message::OnForkChoiceUpdated, metrics::EngineMetrics},
+    engine::{
+        forkchoice::{ForkchoiceStateHash, ForkchoiceStateTracker},
+        message::OnForkChoiceUpdated,
+        metrics::EngineMetrics,
+    },
     sync::{EngineSyncController, EngineSyncEvent},
 };
-use futures::{Future, StreamExt, TryFutureExt};
+use futures::{Future, StreamExt};
 use reth_db::database::Database;
 use reth_interfaces::{
     blockchain_tree::{
         error::{InsertBlockError, InsertBlockErrorKind},
-        BlockStatus, BlockchainTreeEngine,
+        BlockStatus, BlockchainTreeEngine, InsertPayloadOk,
     },
     consensus::ForkchoiceState,
     executor::{BlockExecutionError, BlockValidationError},
@@ -17,15 +21,14 @@ use reth_interfaces::{
 };
 use reth_payload_builder::{PayloadBuilderAttributes, PayloadBuilderHandle};
 use reth_primitives::{
-    listener::EventListeners, stage::StageId, BlockNumHash, BlockNumber, Head, Header, SealedBlock,
-    SealedHeader, H256, U256,
+    constants::EPOCH_SLOTS, listener::EventListeners, stage::StageId, BlockNumHash, BlockNumber,
+    Head, Header, SealedBlock, SealedHeader, H256, U256,
 };
 use reth_provider::{
     BlockReader, BlockSource, CanonChainTracker, ProviderError, StageCheckpointReader,
 };
 use reth_rpc_types::engine::{
-    ExecutionPayload, ForkchoiceUpdated, PayloadAttributes, PayloadStatus, PayloadStatusEnum,
-    PayloadValidationError,
+    ExecutionPayload, PayloadAttributes, PayloadStatus, PayloadStatusEnum, PayloadValidationError,
 };
 use reth_stages::{ControlFlow, Pipeline, PipelineError};
 use reth_tasks::TaskSpawner;
@@ -53,16 +56,15 @@ pub use error::{
 
 mod invalid_headers;
 use invalid_headers::InvalidHeaderCache;
 
-mod metrics;
 mod event;
+pub use event::BeaconConsensusEngineEvent;
 
 mod forkchoice;
+mod metrics;
 pub(crate) mod sync;
 
-use crate::engine::forkchoice::{ForkchoiceStateHash, ForkchoiceStateTracker};
-pub use event::BeaconConsensusEngineEvent;
-use reth_interfaces::blockchain_tree::InsertPayloadOk;
-use reth_primitives::constants::EPOCH_SLOTS;
+mod handle;
+pub use handle::BeaconConsensusEngineHandle;
 
 /// The maximum number of invalid headers that can be tracked by the engine.
 const MAX_INVALID_HEADERS: u32 = 512u32;
@@ -74,81 +76,6 @@
 /// If the distance exceeds this threshold, the pipeline will be used for sync.
 pub const MIN_BLOCKS_FOR_PIPELINE_RUN: u64 = EPOCH_SLOTS;
 
-/// A _shareable_ beacon consensus frontend. Used to interact with the spawned beacon consensus
-/// engine.
-///
-/// See also [`BeaconConsensusEngine`].
-#[derive(Clone, Debug)]
-pub struct BeaconConsensusEngineHandle {
-    to_engine: UnboundedSender<BeaconEngineMessage>,
-}
-
-// === impl BeaconConsensusEngineHandle ===
-
-impl BeaconConsensusEngineHandle {
-    /// Creates a new beacon consensus engine handle.
-    pub fn new(to_engine: UnboundedSender<BeaconEngineMessage>) -> Self {
-        Self { to_engine }
-    }
-
-    /// Sends a new payload message to the beacon consensus engine and waits for a response.
-    ///
-    /// See also
-    pub async fn new_payload(
-        &self,
-        payload: ExecutionPayload,
-    ) -> Result<PayloadStatus, BeaconOnNewPayloadError> {
-        let (tx, rx) = oneshot::channel();
-        let _ = self.to_engine.send(BeaconEngineMessage::NewPayload { payload, tx });
-        rx.await.map_err(|_| BeaconOnNewPayloadError::EngineUnavailable)?
-    }
-
-    /// Sends a forkchoice update message to the beacon consensus engine and waits for a response.
-    ///
-    /// See also
-    pub async fn fork_choice_updated(
-        &self,
-        state: ForkchoiceState,
-        payload_attrs: Option<PayloadAttributes>,
-    ) -> Result<ForkchoiceUpdated, BeaconForkChoiceUpdateError> {
-        Ok(self
-            .send_fork_choice_updated(state, payload_attrs)
-            .map_err(|_| BeaconForkChoiceUpdateError::EngineUnavailable)
-            .await??
-            .await?)
-    }
-
-    /// Sends a forkchoice update message to the beacon consensus engine and returns the receiver to
-    /// wait for a response.
-    fn send_fork_choice_updated(
-        &self,
-        state: ForkchoiceState,
-        payload_attrs: Option<PayloadAttributes>,
-    ) -> oneshot::Receiver<Result<OnForkChoiceUpdated, reth_interfaces::Error>> {
-        let (tx, rx) = oneshot::channel();
-        let _ = self.to_engine.send(BeaconEngineMessage::ForkchoiceUpdated {
-            state,
-            payload_attrs,
-            tx,
-        });
-        rx
-    }
-
-    /// Sends a transition configuration exchagne message to the beacon consensus engine.
-    ///
-    /// See also
-    pub async fn transition_configuration_exchanged(&self) {
-        let _ = self.to_engine.send(BeaconEngineMessage::TransitionConfigurationExchanged);
-    }
-
-    /// Creates a new [`BeaconConsensusEngineEvent`] listener stream.
-    pub fn event_listener(&self) -> UnboundedReceiverStream<BeaconConsensusEngineEvent> {
-        let (tx, rx) = mpsc::unbounded_channel();
-        let _ = self.to_engine.send(BeaconEngineMessage::EventListener(tx));
-        UnboundedReceiverStream::new(rx)
-    }
-}
-
 /// The beacon consensus engine is the driver that switches between historical and live sync.
 ///
 /// The beacon consensus engine is itself driven by messages from the Consensus Layer, which are
@@ -1493,7 +1420,7 @@ where
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::engine::error::BeaconForkChoiceUpdateError;
+    use crate::{BeaconForkChoiceUpdateError, BeaconOnNewPayloadError};
     use assert_matches::assert_matches;
     use reth_blockchain_tree::{
         config::BlockchainTreeConfig, externals::TreeExternals, post_state::PostState,
@@ -1510,6 +1437,9 @@ mod tests {
         providers::BlockchainProvider, test_utils::TestExecutorFactory, BlockWriter,
         ProviderFactory,
     };
+    use reth_rpc_types::engine::{
+        ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus,
+    };
     use reth_stages::{test_utils::TestStages, ExecOutput, PipelineError, StageError};
     use reth_tasks::TokioTaskExecutor;
    use std::{collections::VecDeque, sync::Arc, time::Duration};

From fb898d05b15e6080325ac26c66476985cc4b08a8 Mon Sep 17 00:00:00 2001
From: Brian
Date: Thu, 6 Jul 2023 12:14:02 -0400
Subject: [PATCH 090/722] fix(docs): fix malformed link (#3635)

---
 .github/workflows/release.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index b39f04fa145d..8098e4063aa8 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -207,7 +207,7 @@ jobs:
 
           ## Binaries
 
-          [See pre-built binaries documentation.](https://https://paradigmxyz.github.io/reth/installation/binaries.html)
+          [See pre-built binaries documentation.](https://paradigmxyz.github.io/reth/installation/binaries.html)
 
           The binaries are signed with the PGP key: `A3AE 097C 8909 3A12 4049 DF1F 5391 A3C4 1005 30B4`
 
From a66a1286a44a1faee11396adb2eaa57361505a44 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Thu, 6 Jul 2023 18:41:29 +0200
Subject: [PATCH 091/722] fix: rm bad flatten
(#3637)

---
 crates/rpc/rpc-types/src/eth/trace/parity.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crates/rpc/rpc-types/src/eth/trace/parity.rs b/crates/rpc/rpc-types/src/eth/trace/parity.rs
index 8158abbfcecb..3c4b0bc57c7b 100644
--- a/crates/rpc/rpc-types/src/eth/trace/parity.rs
+++ b/crates/rpc/rpc-types/src/eth/trace/parity.rs
@@ -223,8 +223,8 @@ pub enum TraceOutput {
 pub struct TransactionTrace {
     #[serde(flatten)]
     pub action: Action,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub error: Option<String>,
-    #[serde(flatten, skip_serializing_if = "Option::is_none")]
     pub result: Option<TraceOutput>,
     pub subtraces: usize,
     pub trace_address: Vec<usize>,

From bfa84bb1b739ec16605646c09ef51406989a499a Mon Sep 17 00:00:00 2001
From: Chris Evanko <106608356+cjeva10@users.noreply.github.com>
Date: Thu, 6 Jul 2023 13:16:01 -0400
Subject: [PATCH 092/722] Match statement for handling error messages -
 reverted and default case (#3640)

Co-authored-by: Matthias Seitz
---
 crates/revm/revm-inspectors/src/tracing/types.rs | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/crates/revm/revm-inspectors/src/tracing/types.rs b/crates/revm/revm-inspectors/src/tracing/types.rs
index 37305eb687dd..b66bd67261bc 100644
--- a/crates/revm/revm-inspectors/src/tracing/types.rs
+++ b/crates/revm/revm-inspectors/src/tracing/types.rs
@@ -161,7 +161,10 @@ impl CallTrace {
 
     /// Returns the error message if it is an erroneous result.
     pub(crate) fn as_error(&self) -> Option<String> {
-        self.is_error().then(|| format!("{:?}", self.status))
+        self.is_error().then(|| match self.status {
+            InstructionResult::Revert => "Reverted".to_string(),
+            status => format!("{:?}", status),
+        })
     }
 }

From 5d904eba1eff6d3187022aad1a7b0c1796843834 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Thu, 6 Jul 2023 20:21:25 +0200
Subject: [PATCH 093/722] test: use concrete type for arbitrary strategy
 (#3632)

---
 crates/storage/db/src/tables/utils.rs | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

diff --git a/crates/storage/db/src/tables/utils.rs b/crates/storage/db/src/tables/utils.rs
index 6a6d22412cf9..f05ed6a28740 100644
--- a/crates/storage/db/src/tables/utils.rs
+++ b/crates/storage/db/src/tables/utils.rs
@@ -1,16 +1,14 @@
-//! Small database table utilities and helper functions
+//! Small database table utilities and helper functions.
 use crate::{
     table::{Decode, Decompress, Table},
     DatabaseError,
 };
-
 use std::borrow::Cow;
 
 #[macro_export]
-/// Implements the `Arbitrary` trait for types with fixed array
-/// types.
+/// Implements the `Arbitrary` trait for types with fixed array types.
 macro_rules! 
impl_fixed_arbitrary {
-    ($name:tt, $size:tt) => {
+    ($name:ident, $size:tt) => {
         #[cfg(any(test, feature = "arbitrary"))]
         use arbitrary::{Arbitrary, Unstructured};
 
@@ -24,17 +22,18 @@ macro_rules! impl_fixed_arbitrary {
             }
         }
 
-        #[cfg(any(test, feature = "arbitrary"))]
-        use proptest::strategy::Strategy;
         #[cfg(any(test, feature = "arbitrary"))]
         impl proptest::prelude::Arbitrary for $name {
             type Parameters = ();
-            type Strategy = proptest::prelude::BoxedStrategy<$name>;
+            type Strategy = proptest::strategy::Map<
+                proptest::collection::VecStrategy<<u8 as proptest::arbitrary::Arbitrary>::Strategy>,
+                fn(Vec<u8>) -> Self,
+            >;
 
             fn arbitrary_with(args: Self::Parameters) -> Self::Strategy {
+                use proptest::strategy::Strategy;
                 proptest::collection::vec(proptest::arbitrary::any_with::<u8>(args), $size)
                     .prop_map(move |vec| Decode::decode(vec).unwrap())
-                    .boxed()
             }
         }
     };

From fdc8a0532089dd1cfd1855012059c1a0aec3615b Mon Sep 17 00:00:00 2001
From: Roman Krasiuk
Date: Thu, 6 Jul 2023 21:55:37 +0300
Subject: [PATCH 094/722] fix(txpool): emit replaced events (#3642)

---
 crates/transaction-pool/src/pool/listener.rs |  7 ++++++-
 crates/transaction-pool/src/pool/mod.rs      | 15 +++++++++++----
 crates/transaction-pool/src/pool/txpool.rs   |  6 ++++--
 3 files changed, 21 insertions(+), 7 deletions(-)

diff --git a/crates/transaction-pool/src/pool/listener.rs b/crates/transaction-pool/src/pool/listener.rs
index f3ff53545397..994c1fac5c8f 100644
--- a/crates/transaction-pool/src/pool/listener.rs
+++ b/crates/transaction-pool/src/pool/listener.rs
@@ -116,10 +116,15 @@ impl PoolEventBroadcast {
 
         if let Some(replaced) = replaced {
             // notify listeners that this transaction was replaced
-            self.broadcast_event(replaced, TransactionEvent::Replaced(*tx));
+            self.replaced(replaced, tx);
         }
     }
 
+    /// Notify listeners about a transaction that was replaced.
+    pub(crate) fn replaced(&mut self, tx: &TxHash, replaced_by: &TxHash) {
+        self.broadcast_event(tx, TransactionEvent::Replaced(*replaced_by));
+    }
+
     /// Notify listeners about a transaction that was added to the queued pool.
     pub(crate) fn queued(&mut self, tx: &TxHash) {
         self.broadcast_event(tx, TransactionEvent::Queued);
diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs
index 0d6b0983fa09..23590d7416d9 100644
--- a/crates/transaction-pool/src/pool/mod.rs
+++ b/crates/transaction-pool/src/pool/mod.rs
@@ -415,14 +415,17 @@ where
 
         match tx {
             AddedTransaction::Pending(tx) => {
-                let AddedPendingTransaction { transaction, promoted, discarded, .. } = tx;
+                let AddedPendingTransaction { transaction, promoted, discarded, replaced } = tx;
 
-                listener.pending(transaction.hash(), None);
+                listener.pending(transaction.hash(), replaced.as_ref().map(|tx| tx.hash()));
                 promoted.iter().for_each(|tx| listener.pending(tx, None));
                 discarded.iter().for_each(|tx| listener.discarded(tx));
             }
-            AddedTransaction::Parked { transaction, .. } => {
+            AddedTransaction::Parked { transaction, replaced, .. } => {
                 listener.queued(transaction.hash());
+                if let Some(replaced) = replaced {
+                    listener.replaced(replaced.hash(), transaction.hash());
+                }
             }
         }
     }
@@ -532,6 +535,8 @@ impl fmt::Debug for PoolInner {
     /// Inserted transaction.
     transaction: Arc<ValidPoolTransaction<T>>,
+    /// Replaced transaction.
+    replaced: Option<Arc<ValidPoolTransaction<T>>>,
     /// transactions promoted to the ready queue
     promoted: Vec<TxHash>,
     /// transaction that failed and became discarded
@@ -548,6 +553,8 @@ pub enum AddedTransaction {
     Parked {
         /// Inserted transaction.
         transaction: Arc<ValidPoolTransaction<T>>,
+        /// Replaced transaction.
+        replaced: Option<Arc<ValidPoolTransaction<T>>>,
         /// The subpool it was moved to.
subpool: SubPool, }, @@ -577,7 +584,7 @@ impl AddedTransaction { AddedTransaction::Pending(tx) => { NewTransactionEvent { subpool: SubPool::Pending, transaction: tx.transaction } } - AddedTransaction::Parked { transaction, subpool } => { + AddedTransaction::Parked { transaction, subpool, .. } => { NewTransactionEvent { transaction, subpool } } } diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 4368a240f26c..71b5590a688c 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -314,20 +314,22 @@ impl TxPool { match self.all_transactions.insert_tx(tx, on_chain_balance, on_chain_nonce) { Ok(InsertOk { transaction, move_to, replaced_tx, updates, .. }) => { - self.add_new_transaction(transaction.clone(), replaced_tx, move_to); + self.add_new_transaction(transaction.clone(), replaced_tx.clone(), move_to); // Update inserted transactions metric self.metrics.inserted_transactions.increment(1); let UpdateOutcome { promoted, discarded } = self.process_updates(updates); // This transaction was moved to the pending pool. + let replaced = replaced_tx.map(|(tx, _)| tx); let res = if move_to.is_pending() { AddedTransaction::Pending(AddedPendingTransaction { transaction, promoted, discarded, + replaced, }) } else { - AddedTransaction::Parked { transaction, subpool: move_to } + AddedTransaction::Parked { transaction, subpool: move_to, replaced } }; Ok(res) From b8a547670485f3571f49a348f221688f84495e91 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 6 Jul 2023 21:11:16 +0100 Subject: [PATCH 095/722] chore(prometheus): add `localhost:9001` scrape target (#3606) --- book/installation/docker.md | 4 ++-- etc/prometheus/prometheus.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/book/installation/docker.md b/book/installation/docker.md index 6a101141b09e..95ddb86b450e 100644 --- a/book/installation/docker.md +++ b/book/installation/docker.md @@ -63,11 +63,11 @@ To run Reth with Docker, run: docker run \ -v rethdata:/root/.local/share/reth/db \ -d \ - -p 9000:9000 \ + -p 9001:9001 \ --name reth \ reth:local \ node \ - --metrics 0.0.0.0:9000 + --metrics 0.0.0.0:9001 ``` The above command will create a container named `reth` and a named volume called `rethdata` for data persistence. 
diff --git a/etc/prometheus/prometheus.yml b/etc/prometheus/prometheus.yml index e2a690b71d38..483dcaa050e5 100644 --- a/etc/prometheus/prometheus.yml +++ b/etc/prometheus/prometheus.yml @@ -3,7 +3,7 @@ scrape_configs: metrics_path: "/" scrape_interval: 5s static_configs: - - targets: ['reth:9001'] + - targets: ['reth:9001', 'localhost:9001'] - job_name: ethereum-metrics-exporter metrics_path: "/metrics" scrape_interval: 5s From db77c279d03fd48235b44b4d400b6ec4c33b2640 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 7 Jul 2023 19:50:00 +1000 Subject: [PATCH 096/722] add build profile to version info (#3652) --- Cargo.lock | 7 +++++++ bin/reth/Cargo.toml | 1 + bin/reth/src/version.rs | 13 +++++++++++-- 3 files changed, 19 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 50a480581a06..33cc8b156e4e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1036,6 +1036,12 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" +[[package]] +name = "const-str" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aca749d3d3f5b87a0d6100509879f9cf486ab510803a4a4e1001da1ff61c2bd6" + [[package]] name = "convert_case" version = "0.4.0" @@ -4957,6 +4963,7 @@ dependencies = [ "clap 4.1.8", "comfy-table", "confy", + "const-str", "crossterm", "dirs-next", "eyre", diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 7009807f75b0..1a2fc9591254 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -85,6 +85,7 @@ hex = "0.4" thiserror = { workspace = true } pretty_assertions = "1.3.0" humantime = "2.1.0" +const-str = "0.5.6" [features] jemalloc = ["dep:jemallocator", "dep:jemalloc-ctl"] diff --git a/bin/reth/src/version.rs b/bin/reth/src/version.rs index a016d209c97f..783397f2cdb7 100644 --- a/bin/reth/src/version.rs +++ b/bin/reth/src/version.rs @@ -28,7 +28,7 @@ pub(crate) const SHORT_VERSION: &str = /// Build Timestamp: 2023-05-19T01:47:19.815651705Z /// Build Features: jemalloc /// ``` -pub(crate) const LONG_VERSION: &str = concat!( +pub(crate) const LONG_VERSION: &str = const_str::concat!( "Version: ", env!("CARGO_PKG_VERSION"), "\n", @@ -39,7 +39,10 @@ pub(crate) const LONG_VERSION: &str = concat!( env!("VERGEN_BUILD_TIMESTAMP"), "\n", "Build Features: ", - env!("VERGEN_CARGO_FEATURES") + env!("VERGEN_CARGO_FEATURES"), + "\n", + "Build Profile: ", + build_profile_name() ); /// The version information for reth formatted for P2P (devp2p). 
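The `Build Profile` row added above is derived at compile time from Cargo's `OUT_DIR`, as the next hunk shows. A rough standalone sketch of the same lookup — illustrative only, using a hypothetical helper name and assuming the usual `<target-dir>/<profile>/build/<pkg>-<hash>/out` layout:

```rust
// Hypothetical sketch of the OUT_DIR trick used below; not part of this patch.
// Cargo sets OUT_DIR to "<target-dir>/<profile>/build/<pkg>-<hash>/out",
// so the profile name sits four path components from the end.
fn profile_from_out_dir(out_dir: &str) -> Option<&str> {
    let parts: Vec<&str> = out_dir.split('/').collect();
    parts.len().checked_sub(4).and_then(|i| parts.get(i).copied())
}

fn main() {
    let out = "/home/user/reth/target/maxperf/build/reth-1a2b3c/out";
    assert_eq!(profile_from_out_dir(out), Some("maxperf"));
}
```

The patch performs the equivalent split with `const_str` inside a `const fn`, so the profile string is baked into the binary at build time rather than computed at runtime.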
@@ -76,6 +79,12 @@ pub fn default_extradata() -> String { format!("reth/v{}/{}", env!("CARGO_PKG_VERSION"), std::env::consts::OS) } +const fn build_profile_name() -> &'static str { + // Nice hack from https://stackoverflow.com/questions/73595435/how-to-get-profile-from-cargo-toml-in-build-rs-or-at-runtime + let out_dir_path = const_str::split!(env!("OUT_DIR"), std::path::MAIN_SEPARATOR_STR); + out_dir_path[out_dir_path.len() - 4] +} + #[cfg(test)] mod tests { use super::*; From 5cc55dbc7be06fc81d48d7b690ecd5672b149d9c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 7 Jul 2023 11:55:38 +0200 Subject: [PATCH 097/722] chore: add missing helper functions and docs (#3646) --- crates/transaction-pool/src/pool/state.rs | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/crates/transaction-pool/src/pool/state.rs b/crates/transaction-pool/src/pool/state.rs index 87be686ccda8..95fa94b24466 100644 --- a/crates/transaction-pool/src/pool/state.rs +++ b/crates/transaction-pool/src/pool/state.rs @@ -48,24 +48,43 @@ impl TxState { } } -/// Identifier for the used Sub-pool +/// Identifier for the transaction Sub-pool #[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)] #[repr(u8)] pub enum SubPool { + /// The queued sub-pool contains transactions that are not ready to be included in the next + /// block because they have missing or queued ancestors. Queued = 0, + /// The pending sub-pool contains transactions that are ready to be included in the next block. Pending, + /// The base-fee sub-pool contains transactions that are not ready to be included in the next + /// block because they don't meet the base fee requirement. BaseFee, } -// === impl PoolDestination === +// === impl SubPool === impl SubPool { /// Whether this transaction is to be moved to the pending sub-pool. + #[inline] pub fn is_pending(&self) -> bool { matches!(self, SubPool::Pending) } + /// Whether this transaction is in the queued pool. + #[inline] + pub fn is_queued(&self) -> bool { + matches!(self, SubPool::Queued) + } + + /// Whether this transaction is in the base fee pool. + #[inline] + pub fn is_base_fee(&self) -> bool { + matches!(self, SubPool::BaseFee) + } + /// Returns whether this is a promotion depending on the current sub-pool location. 
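+    /// Note that this comparison piggybacks on the derived `Ord`/`PartialOrd`
+    /// of [`SubPool`], i.e. on variant declaration order, so `Pending` must be
+    /// the greatest variant for `self > &other` to mean "promoted".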
+    #[inline]
     pub fn is_promoted(&self, other: SubPool) -> bool {
         self > &other
     }

From 0d76dd762ae980ce235529d737524043b86c4b01 Mon Sep 17 00:00:00 2001
From: Roman Krasiuk
Date: Fri, 7 Jul 2023 14:02:20 +0300
Subject: [PATCH 098/722] chore(txpool): enriched pool transaction event
 (#3653)

---
 crates/transaction-pool/src/lib.rs           |   4 +-
 crates/transaction-pool/src/noop.rs          |   2 +-
 crates/transaction-pool/src/pool/events.rs   |  60 ++++++----
 crates/transaction-pool/src/pool/listener.rs | 110 ++++++++++++-------
 crates/transaction-pool/src/pool/mod.rs      |  12 +-
 crates/transaction-pool/src/traits.rs        |   2 +-
 6 files changed, 120 insertions(+), 70 deletions(-)

diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs
index 46a744cfb8f3..8689dcc377f3 100644
--- a/crates/transaction-pool/src/lib.rs
+++ b/crates/transaction-pool/src/lib.rs
@@ -156,7 +156,7 @@ pub use crate::{
     },
     error::PoolResult,
     ordering::{GasCostOrdering, TransactionOrdering},
-    pool::{AllTransactionsEvents, PoolTransactionEvent, TransactionEvent, TransactionEvents},
+    pool::{AllTransactionsEvents, FullTransactionEvent, TransactionEvent, TransactionEvents},
     traits::{
         AllPoolTransactions, BestTransactions, BlockInfo, CanonicalStateUpdate, ChangedAccount,
         NewTransactionEvent, PoolSize, PoolTransaction, PooledTransaction, PropagateKind,
@@ -354,7 +354,7 @@ where
         self.pool.add_transaction_event_listener(tx_hash)
     }
 
-    fn all_transactions_event_listener(&self) -> AllTransactionsEvents {
+    fn all_transactions_event_listener(&self) -> AllTransactionsEvents<Self::Transaction> {
         self.pool.add_all_transactions_event_listener()
     }

diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs
index 50be4a2bcf6d..2fec5391c1eb 100644
--- a/crates/transaction-pool/src/noop.rs
+++ b/crates/transaction-pool/src/noop.rs
@@ -73,7 +73,7 @@ impl TransactionPool for NoopTransactionPool {
         None
     }
 
-    fn all_transactions_event_listener(&self) -> AllTransactionsEvents {
+    fn all_transactions_event_listener(&self) -> AllTransactionsEvents<Self::Transaction> {
         AllTransactionsEvents { events: mpsc::channel(1).1 }
     }

diff --git a/crates/transaction-pool/src/pool/events.rs b/crates/transaction-pool/src/pool/events.rs
index 62da13392e84..03856aea27eb 100644
--- a/crates/transaction-pool/src/pool/events.rs
+++ b/crates/transaction-pool/src/pool/events.rs
@@ -1,33 +1,49 @@
-use crate::traits::PropagateKind;
+use crate::{traits::PropagateKind, PoolTransaction, ValidPoolTransaction};
 use reth_primitives::{TxHash, H256};
 use std::sync::Arc;
 
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
 
-/// Wrapper around a transaction hash and the event that happened to it.
+/// An event that happened to a transaction and contains its full body where possible.
 #[derive(Debug)]
-pub struct PoolTransactionEvent(TxHash, TransactionEvent);
-
-impl PoolTransactionEvent {
-    /// Create a new transaction event.
-    pub fn new(hash: TxHash, event: TransactionEvent) -> Self {
-        Self(hash, event)
-    }
-
-    /// The hash of the transaction this event is about.
-    pub fn hash(&self) -> TxHash {
-        self.0
-    }
-
-    /// The event that happened to the transaction.
-    pub fn event(&self) -> &TransactionEvent {
-        &self.1
-    }
+pub enum FullTransactionEvent<T: PoolTransaction> {
+    /// Transaction has been added to the pending pool.
+    Pending(TxHash),
+    /// Transaction has been added to the queued pool.
+    Queued(TxHash),
+    /// Transaction has been included in the block belonging to this hash.
+    Mined {
+        /// The hash of the mined transaction.
+        tx_hash: TxHash,
+        /// The hash of the mined block that contains the transaction.
+        block_hash: H256,
+    },
+    /// Transaction has been replaced by the transaction belonging to the hash.
+    ///
+    /// E.g. same (sender + nonce) pair
+    Replaced {
+        /// The transaction that was replaced.
+        transaction: Arc<ValidPoolTransaction<T>>,
+        /// The transaction that replaced the event subject.
+        replaced_by: TxHash,
+    },
+    /// Transaction was dropped due to configured limits.
+    Discarded(TxHash),
+    /// Transaction became invalid indefinitely.
+    Invalid(TxHash),
+    /// Transaction was propagated to peers.
+    Propagated(Arc<Vec<PropagateKind>>),
+}
+
+impl<T: PoolTransaction> Clone for FullTransactionEvent<T> {
+    fn clone(&self) -> Self {
+        // Clone each variant explicitly: hashes are `Copy` and the `Arc`s are
+        // shared, so no variant requires `T: Clone`. (A wildcard
+        // `other => other.clone()` arm would recurse into this same impl.)
+        match self {
+            Self::Pending(hash) => Self::Pending(*hash),
+            Self::Queued(hash) => Self::Queued(*hash),
+            Self::Mined { tx_hash, block_hash } => {
+                Self::Mined { tx_hash: *tx_hash, block_hash: *block_hash }
+            }
+            Self::Replaced { transaction, replaced_by } => {
+                Self::Replaced { transaction: Arc::clone(transaction), replaced_by: *replaced_by }
+            }
+            Self::Discarded(hash) => Self::Discarded(*hash),
+            Self::Invalid(hash) => Self::Invalid(*hash),
+            Self::Propagated(propagated) => Self::Propagated(Arc::clone(propagated)),
+        }
+    }
+}
diff --git a/crates/transaction-pool/src/pool/listener.rs b/crates/transaction-pool/src/pool/listener.rs
index 994c1fac5c8f..c64fa71f5edd 100644
--- a/crates/transaction-pool/src/pool/listener.rs
+++ b/crates/transaction-pool/src/pool/listener.rs
@@ -1,8 +1,9 @@
 //! Listeners for the transaction-pool
 use crate::{
-    pool::events::{PoolTransactionEvent, TransactionEvent},
+    pool::events::{FullTransactionEvent, TransactionEvent},
     traits::PropagateKind,
+    PoolTransaction, ValidPoolTransaction,
 };
 use futures_util::Stream;
 use reth_primitives::{TxHash, H256};
@@ -24,7 +25,7 @@ const TX_POOL_EVENT_CHANNEL_SIZE: usize = 1024;
 #[must_use = "streams do nothing unless polled"]
 pub struct TransactionEvents {
     hash: TxHash,
-    events: UnboundedReceiver<PoolTransactionEvent>,
+    events: UnboundedReceiver<TransactionEvent>,
 }
 
 impl TransactionEvents {
@@ -35,7 +36,7 @@ impl TransactionEvents {
 }
 
 impl Stream for TransactionEvents {
-    type Item = PoolTransactionEvent;
+    type Item = TransactionEvent;
 
     fn poll_next(
         self: std::pin::Pin<&mut Self>,
@@ -45,15 +46,15 @@ impl Stream for TransactionEvents {
     }
 }
 
-/// A Stream that receives [PoolTransactionEvent] for _all_ transaction.
+/// A Stream that receives [FullTransactionEvent] for _all_ transaction.
 #[derive(Debug)]
 #[must_use = "streams do nothing unless polled"]
-pub struct AllTransactionsEvents {
-    pub(crate) events: Receiver<PoolTransactionEvent>,
+pub struct AllTransactionsEvents<T: PoolTransaction> {
+    pub(crate) events: Receiver<FullTransactionEvent<T>>,
 }
 
-impl Stream for AllTransactionsEvents {
-    type Item = PoolTransactionEvent;
+impl<T: PoolTransaction> Stream for AllTransactionsEvents<T> {
+    type Item = FullTransactionEvent<T>;
 
     fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
         self.get_mut().events.poll_recv(cx)
@@ -64,20 +65,34 @@ impl Stream for AllTransactionsEvents {
 /// 
 /// This is essentially a multi-producer, multi-consumer channel where each event is broadcast to
 /// all active receivers.
-#[derive(Default, Debug)]
-pub(crate) struct PoolEventBroadcast {
+#[derive(Debug)]
+pub(crate) struct PoolEventBroadcast<T: PoolTransaction> {
     /// All listeners for all transaction events.
-    all_events_broadcaster: AllPoolEventsBroadcaster,
+    all_events_broadcaster: AllPoolEventsBroadcaster<T>,
     /// All listeners for events for a certain transaction hash.
     broadcasters_by_hash: HashMap<TxHash, PoolEventBroadcaster>,
 }
 
+impl<T: PoolTransaction> Default for PoolEventBroadcast<T> {
+    fn default() -> Self {
+        Self {
+            all_events_broadcaster: AllPoolEventsBroadcaster::default(),
+            broadcasters_by_hash: HashMap::default(),
+        }
+    }
+}
+
-impl PoolEventBroadcast {
+impl<T: PoolTransaction> PoolEventBroadcast<T> {
     /// Calls the broadcast callback with the `PoolEventBroadcaster` that belongs to the hash.
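+    ///
+    /// Fans the event out twice: in compact form to the listeners registered
+    /// for this specific hash (removing the entry once the event is final or
+    /// all its listeners are gone), and in enriched form to the catch-all
+    /// listeners.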
-    fn broadcast_event(&mut self, hash: &TxHash, event: TransactionEvent) {
+    fn broadcast_event(
+        &mut self,
+        hash: &TxHash,
+        event: TransactionEvent,
+        pool_event: FullTransactionEvent<T>,
+    ) {
         // Broadcast to all listeners for the transaction hash.
         if let Entry::Occupied(mut sink) = self.broadcasters_by_hash.entry(*hash) {
-            sink.get_mut().broadcast(*hash, event.clone());
+            sink.get_mut().broadcast(event.clone());
 
             if sink.get().is_empty() || event.is_final() {
                 sink.remove();
@@ -85,7 +100,7 @@ impl PoolEventBroadcast {
         }
 
         // Broadcast to all listeners for all transactions.
-        self.all_events_broadcaster.broadcast(*hash, event);
+        self.all_events_broadcaster.broadcast(pool_event);
     }
 
     /// Create a new subscription for the given transaction hash.
@@ -104,65 +119,83 @@ impl PoolEventBroadcast {
     }
 
     /// Create a new subscription for all transactions.
-    pub(crate) fn subscribe_all(&mut self) -> AllTransactionsEvents {
+    pub(crate) fn subscribe_all(&mut self) -> AllTransactionsEvents<T> {
         let (tx, rx) = tokio::sync::mpsc::channel(TX_POOL_EVENT_CHANNEL_SIZE);
         self.all_events_broadcaster.senders.push(tx);
         AllTransactionsEvents { events: rx }
     }
 
     /// Notify listeners about a transaction that was added to the pending queue.
-    pub(crate) fn pending(&mut self, tx: &TxHash, replaced: Option<&TxHash>) {
-        self.broadcast_event(tx, TransactionEvent::Pending);
+    pub(crate) fn pending(&mut self, tx: &TxHash, replaced: Option<Arc<ValidPoolTransaction<T>>>) {
+        self.broadcast_event(tx, TransactionEvent::Pending, FullTransactionEvent::Pending(*tx));
 
         if let Some(replaced) = replaced {
             // notify listeners that this transaction was replaced
-            self.replaced(replaced, tx);
+            self.replaced(replaced, *tx);
         }
     }
 
     /// Notify listeners about a transaction that was replaced.
-    pub(crate) fn replaced(&mut self, tx: &TxHash, replaced_by: &TxHash) {
-        self.broadcast_event(tx, TransactionEvent::Replaced(*replaced_by));
+    pub(crate) fn replaced(&mut self, tx: Arc<ValidPoolTransaction<T>>, replaced_by: TxHash) {
+        let transaction = Arc::clone(&tx);
+        self.broadcast_event(
+            tx.hash(),
+            TransactionEvent::Replaced(replaced_by),
+            FullTransactionEvent::Replaced { transaction, replaced_by },
+        );
     }
 
     /// Notify listeners about a transaction that was added to the queued pool.
     pub(crate) fn queued(&mut self, tx: &TxHash) {
-        self.broadcast_event(tx, TransactionEvent::Queued);
+        self.broadcast_event(tx, TransactionEvent::Queued, FullTransactionEvent::Queued(*tx));
     }
 
     /// Notify listeners about a transaction that was propagated.
     pub(crate) fn propagated(&mut self, tx: &TxHash, peers: Vec<PropagateKind>) {
-        self.broadcast_event(tx, TransactionEvent::Propagated(Arc::new(peers)));
+        let peers = Arc::new(peers);
+        self.broadcast_event(
+            tx,
+            TransactionEvent::Propagated(Arc::clone(&peers)),
+            FullTransactionEvent::Propagated(peers),
+        );
     }
 
     /// Notify listeners about a transaction that was discarded.
     pub(crate) fn discarded(&mut self, tx: &TxHash) {
-        self.broadcast_event(tx, TransactionEvent::Discarded);
+        self.broadcast_event(tx, TransactionEvent::Discarded, FullTransactionEvent::Discarded(*tx));
    }
 
     /// Notify listeners that the transaction was mined
     pub(crate) fn mined(&mut self, tx: &TxHash, block_hash: H256) {
-        self.broadcast_event(tx, TransactionEvent::Mined(block_hash));
+        self.broadcast_event(
+            tx,
+            TransactionEvent::Mined(block_hash),
+            FullTransactionEvent::Mined { tx_hash: *tx, block_hash },
+        );
     }
 }
 
 /// All Sender half(s) of the event channels for all transactions.
 ///
 /// This mimics [tokio::sync::broadcast] but uses separate channels.
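+///
+/// Compared to a single `tokio::sync::broadcast` channel, which imposes one
+/// capacity on every receiver and surfaces lag as receiver-side errors,
+/// per-listener bounded `mpsc` senders let a slow listener simply miss an
+/// event (`TrySendError::Full`) and a dropped listener be evicted
+/// (`TrySendError::Closed`).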
-#[derive(Default, Debug)]
-struct AllPoolEventsBroadcaster {
+#[derive(Debug)]
+struct AllPoolEventsBroadcaster<T: PoolTransaction> {
     /// Corresponding sender half(s) for event listener channel
-    senders: Vec<Sender<PoolTransactionEvent>>,
+    senders: Vec<Sender<FullTransactionEvent<T>>>,
+}
+
+impl<T: PoolTransaction> Default for AllPoolEventsBroadcaster<T> {
+    fn default() -> Self {
+        Self { senders: Vec::new() }
+    }
 }
 
-impl AllPoolEventsBroadcaster {
+impl<T: PoolTransaction> AllPoolEventsBroadcaster<T> {
     // Broadcast an event to all listeners. Dropped listeners are silently evicted.
-    fn broadcast(&mut self, tx_hash: TxHash, event: TransactionEvent) {
-        self.senders.retain(|sender| {
-            match sender.try_send(PoolTransactionEvent::new(tx_hash, event.clone())) {
-                Ok(_) | Err(TrySendError::Full(_)) => true,
-                Err(TrySendError::Closed(_)) => false,
-            }
+    fn broadcast(&mut self, event: FullTransactionEvent<T>) {
+        self.senders.retain(|sender| match sender.try_send(event.clone()) {
+            Ok(_) | Err(TrySendError::Full(_)) => true,
+            Err(TrySendError::Closed(_)) => false,
        })
     }
 }
@@ -173,7 +206,7 @@ impl AllPoolEventsBroadcaster {
 #[derive(Default, Debug)]
 struct PoolEventBroadcaster {
     /// Corresponding sender half(s) for event listener channel
-    senders: Vec<UnboundedSender<PoolTransactionEvent>>,
+    senders: Vec<UnboundedSender<TransactionEvent>>,
 }
 
 impl PoolEventBroadcaster {
@@ -183,8 +216,7 @@ impl PoolEventBroadcaster {
     }
 
     // Broadcast an event to all listeners. Dropped listeners are silently evicted.
-    fn broadcast(&mut self, tx_hash: TxHash, event: TransactionEvent) {
-        self.senders
-            .retain(|sender| sender.send(PoolTransactionEvent::new(tx_hash, event.clone())).is_ok())
+    fn broadcast(&mut self, event: TransactionEvent) {
+        self.senders.retain(|sender| sender.send(event.clone()).is_ok())
     }
 }
diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs
index 23590d7416d9..6b16c26470ea 100644
--- a/crates/transaction-pool/src/pool/mod.rs
+++ b/crates/transaction-pool/src/pool/mod.rs
@@ -93,7 +93,7 @@ use tokio::sync::mpsc;
 use tracing::debug;
 
 mod events;
-pub use events::{PoolTransactionEvent, TransactionEvent};
+pub use events::{FullTransactionEvent, TransactionEvent};
 
 mod listener;
 pub use listener::{AllTransactionsEvents, TransactionEvents};
@@ -117,7 +117,7 @@ pub struct PoolInner {
     /// Pool settings.
     config: PoolConfig,
     /// Manages listeners for transaction state change events.
-    event_listener: RwLock<PoolEventBroadcast>,
+    event_listener: RwLock<PoolEventBroadcast<T::Transaction>>,
     /// Listeners for new ready transactions.
     pending_transaction_listener: Mutex<Vec<Sender<TxHash>>>,
     /// Listeners for new transactions added to the pool.
@@ -223,7 +223,9 @@ where
     }
 
     /// Adds a listener for all transaction events.
-    pub(crate) fn add_all_transactions_event_listener(&self) -> AllTransactionsEvents {
+    pub(crate) fn add_all_transactions_event_listener(
+        &self,
+    ) -> AllTransactionsEvents<T::Transaction> {
         self.event_listener.write().subscribe_all()
     }
 
@@ -419,14 +419,14 @@ where
             AddedTransaction::Pending(tx) => {
                 let AddedPendingTransaction { transaction, promoted, discarded, replaced } = tx;
 
-                listener.pending(transaction.hash(), replaced.as_ref().map(|tx| tx.hash()));
+                listener.pending(transaction.hash(), replaced.clone());
                 promoted.iter().for_each(|tx| listener.pending(tx, None));
                 discarded.iter().for_each(|tx| listener.discarded(tx));
             }
             AddedTransaction::Parked { transaction, replaced, ..
} => {
                 listener.queued(transaction.hash());
                 if let Some(replaced) = replaced {
-                    listener.replaced(replaced.hash(), transaction.hash());
+                    listener.replaced(replaced.clone(), *transaction.hash());
                 }
             }
         }
diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs
index 6e5ebe197d80..afbfb47a3496 100644
--- a/crates/transaction-pool/src/traits.rs
+++ b/crates/transaction-pool/src/traits.rs
@@ -96,7 +96,7 @@ pub trait TransactionPool: Send + Sync + Clone {
     fn transaction_event_listener(&self, tx_hash: TxHash) -> Option<TransactionEvents>;
 
     /// Returns a new transaction change event stream for _all_ transactions in the pool.
-    fn all_transactions_event_listener(&self) -> AllTransactionsEvents;
+    fn all_transactions_event_listener(&self) -> AllTransactionsEvents<Self::Transaction>;
 
     /// Returns a new Stream that yields transactions hashes for new ready transactions.
     ///

From 15bd88c30b7ac8d006e7f0498614e163c138c478 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Fri, 7 Jul 2023 13:49:04 +0200
Subject: [PATCH 099/722] fix: subpool variant order (#3656)

---
 crates/transaction-pool/src/pool/state.rs | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/crates/transaction-pool/src/pool/state.rs b/crates/transaction-pool/src/pool/state.rs
index 95fa94b24466..581611df99d3 100644
--- a/crates/transaction-pool/src/pool/state.rs
+++ b/crates/transaction-pool/src/pool/state.rs
@@ -55,11 +55,11 @@ pub enum SubPool {
     /// The queued sub-pool contains transactions that are not ready to be included in the next
     /// block because they have missing or queued ancestors.
     Queued = 0,
-    /// The pending sub-pool contains transactions that are ready to be included in the next block.
-    Pending,
     /// The base-fee sub-pool contains transactions that are not ready to be included in the next
     /// block because they don't meet the base fee requirement.
     BaseFee,
+    /// The pending sub-pool contains transactions that are ready to be included in the next block.
+    Pending,
 }
 
 // === impl SubPool ===
@@ -106,6 +106,15 @@ impl From<TxState> for SubPool {
 mod tests {
     use super::*;
 
+    #[test]
+    fn test_promoted() {
+        assert!(SubPool::BaseFee.is_promoted(SubPool::Queued));
+        assert!(SubPool::Pending.is_promoted(SubPool::BaseFee));
+        assert!(SubPool::Pending.is_promoted(SubPool::Queued));
+        assert!(!SubPool::BaseFee.is_promoted(SubPool::Pending));
+        assert!(!SubPool::Queued.is_promoted(SubPool::BaseFee));
+    }
+
     #[test]
     fn test_tx_state() {
         let mut state = TxState::default();

From 9e9e2b22ebbe0c1a7a0466e6a8b4e5a0f85c3705 Mon Sep 17 00:00:00 2001
From: Georgios Konstantopoulos
Date: Fri, 7 Jul 2023 15:10:31 +0300
Subject: [PATCH 100/722] Revert "add build profile to version info (#3652)"

ref: https://github.com/paradigmxyz/reth/actions/runs/5486088295/jobs/9995719536

This reverts commit db77c279d03fd48235b44b4d400b6ec4c33b2640.
--- Cargo.lock | 7 ------- bin/reth/Cargo.toml | 1 - bin/reth/src/version.rs | 13 ++----------- 3 files changed, 2 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 33cc8b156e4e..50a480581a06 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1036,12 +1036,6 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" -[[package]] -name = "const-str" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aca749d3d3f5b87a0d6100509879f9cf486ab510803a4a4e1001da1ff61c2bd6" - [[package]] name = "convert_case" version = "0.4.0" @@ -4963,7 +4957,6 @@ dependencies = [ "clap 4.1.8", "comfy-table", "confy", - "const-str", "crossterm", "dirs-next", "eyre", diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 1a2fc9591254..7009807f75b0 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -85,7 +85,6 @@ hex = "0.4" thiserror = { workspace = true } pretty_assertions = "1.3.0" humantime = "2.1.0" -const-str = "0.5.6" [features] jemalloc = ["dep:jemallocator", "dep:jemalloc-ctl"] diff --git a/bin/reth/src/version.rs b/bin/reth/src/version.rs index 783397f2cdb7..a016d209c97f 100644 --- a/bin/reth/src/version.rs +++ b/bin/reth/src/version.rs @@ -28,7 +28,7 @@ pub(crate) const SHORT_VERSION: &str = /// Build Timestamp: 2023-05-19T01:47:19.815651705Z /// Build Features: jemalloc /// ``` -pub(crate) const LONG_VERSION: &str = const_str::concat!( +pub(crate) const LONG_VERSION: &str = concat!( "Version: ", env!("CARGO_PKG_VERSION"), "\n", @@ -39,10 +39,7 @@ pub(crate) const LONG_VERSION: &str = const_str::concat!( env!("VERGEN_BUILD_TIMESTAMP"), "\n", "Build Features: ", - env!("VERGEN_CARGO_FEATURES"), - "\n", - "Build Profile: ", - build_profile_name() + env!("VERGEN_CARGO_FEATURES") ); /// The version information for reth formatted for P2P (devp2p). 
@@ -79,12 +76,6 @@ pub fn default_extradata() -> String { format!("reth/v{}/{}", env!("CARGO_PKG_VERSION"), std::env::consts::OS) } -const fn build_profile_name() -> &'static str { - // Nice hack from https://stackoverflow.com/questions/73595435/how-to-get-profile-from-cargo-toml-in-build-rs-or-at-runtime - let out_dir_path = const_str::split!(env!("OUT_DIR"), std::path::MAIN_SEPARATOR_STR); - out_dir_path[out_dir_path.len() - 4] -} - #[cfg(test)] mod tests { use super::*; From 21db2936e617d1018c0bfa5690d8f035dee2843f Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Fri, 7 Jul 2023 15:26:37 +0300 Subject: [PATCH 101/722] ci: pin ubuntu to 20.04 to avoid glibc errors ref: https://github.com/paradigmxyz/reth/actions/runs/5486255403/jobs/9996118421 ref: https://github.com/foundry-rs/foundry/issues/3827 Compiling serde v1.0.164 error: /lib/x86_64-linux-gnu/libc.so.6: version `GLIBC_2.25' not found (required by /target/maxperf/deps/libserde_derive-6e9709c79bb5c7fa.so) --> /cargo/registry/src/index.crates.io-6f17d22bba15001f/serde-1.0.164/src/lib.rs:340:1 | 340 | extern crate serde_derive; | ^^^^^^^^^^^^^^^^^^^^^^^^^^ --- .github/workflows/bench.yml | 6 +++--- .github/workflows/book.yml | 8 ++++---- .github/workflows/ci.yml | 8 ++++---- .github/workflows/deny.yml | 2 +- .github/workflows/docker.yml | 2 +- .github/workflows/fuzz.yml | 4 ++-- .github/workflows/hive.yml | 4 ++-- .github/workflows/integration.yml | 6 +++--- .github/workflows/release.yml | 10 +++++----- .github/workflows/sanity.yml | 4 ++-- .github/workflows/stale.yml | 2 +- .github/workflows/unit.yml | 8 ++++---- 12 files changed, 32 insertions(+), 32 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index c7b35a38e87d..5fec9c0790da 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -13,7 +13,7 @@ concurrency: name: bench jobs: iai: - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 # Only run benchmarks in merge groups if: github.event_name != 'pull_request' steps: @@ -60,7 +60,7 @@ jobs: # Checks that benchmarks not run in CI compile bench-check: name: check - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v3 - name: Install toolchain @@ -74,7 +74,7 @@ jobs: if: always() name: bench success needs: bench-check - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 steps: - name: Decide whether the needed jobs succeeded or failed uses: re-actors/alls-green@release/v1 diff --git a/.github/workflows/book.yml b/.github/workflows/book.yml index 684acb8e966c..d099b25491d0 100644 --- a/.github/workflows/book.yml +++ b/.github/workflows/book.yml @@ -8,7 +8,7 @@ on: jobs: test: - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 name: test steps: @@ -30,7 +30,7 @@ jobs: run: mdbook test lint: - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 name: lint steps: @@ -48,7 +48,7 @@ jobs: run: mdbook-linkcheck --standalone build: - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v3 with: @@ -108,7 +108,7 @@ jobs: deploy: # Only deploy if a push to main if: github.ref_name == 'main' && github.event_name == 'push' - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 needs: [test, lint, build] # Grant GITHUB_TOKEN the permissions required to make a Pages deployment diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9707365066eb..0164ead1a15f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -14,7 +14,7 @@ name: ci jobs: lint: name: code lint - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 
steps: - name: Checkout sources uses: actions/checkout@v3 @@ -40,7 +40,7 @@ jobs: doc-lint: name: doc lint - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v3 - name: Install toolchain @@ -51,7 +51,7 @@ jobs: grafana-lint: name: grafana lint - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v3 - name: Check dashboard JSON with jq @@ -62,7 +62,7 @@ jobs: lint-success: if: always() name: lint success - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 needs: [lint, doc-lint, grafana-lint] steps: - name: Decide whether the needed jobs succeeded or failed diff --git a/.github/workflows/deny.yml b/.github/workflows/deny.yml index f4ee00981068..8332723b8129 100644 --- a/.github/workflows/deny.yml +++ b/.github/workflows/deny.yml @@ -18,7 +18,7 @@ concurrency: deny-${{ github.head_ref || github.run_id }} jobs: deny: name: deny - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v3 - uses: EmbarkStudios/cargo-deny-action@v1 diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index bb3809dc19e1..80f02f0cf8c9 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -16,7 +16,7 @@ env: jobs: build: name: build and push - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 permissions: packages: write contents: read diff --git a/.github/workflows/fuzz.yml b/.github/workflows/fuzz.yml index e2005ee504cf..61459252cbd9 100644 --- a/.github/workflows/fuzz.yml +++ b/.github/workflows/fuzz.yml @@ -16,7 +16,7 @@ jobs: # Skip the Fuzzing Jobs until we make them run fast and reliably. Currently they will # always recompile the codebase for each test and that takes way too long. if: false - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 strategy: matrix: target: @@ -62,7 +62,7 @@ jobs: fuzz-success: if: always() name: fuzz success - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 needs: all steps: # Note: This check is a dummy because we currently have fuzz tests disabled. 
diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml index 694e065451c0..20def9eed9c7 100644 --- a/.github/workflows/hive.yml +++ b/.github/workflows/hive.yml @@ -14,7 +14,7 @@ concurrency: name: hive jobs: prepare: - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 steps: - name: Checkout sources uses: actions/checkout@v3 @@ -104,7 +104,7 @@ jobs: fail-fast: false needs: prepare name: run - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 steps: - name: Download artifacts diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index d4375665de0b..07c58b8675df 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -16,7 +16,7 @@ name: integration jobs: test: name: test (partition ${{ matrix.partition }}/${{ strategy.job-total }}) - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 strategy: matrix: partition: [1, 2] @@ -65,7 +65,7 @@ jobs: name: sync / 100k blocks # Only run sync tests in merge groups if: github.event_name == 'merge_group' - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 env: RUST_LOG: info,sync=error steps: @@ -93,7 +93,7 @@ jobs: integration-success: if: always() name: integration success - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 needs: [test] steps: - name: Decide whether the needed jobs succeeded or failed diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 8098e4063aa8..5a5bedaaf7c7 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -16,7 +16,7 @@ env: jobs: extract-version: name: extract version - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 steps: - name: Extract version run: echo "VERSION=$(echo ${GITHUB_REF#refs/tags/})" >> $GITHUB_OUTPUT @@ -34,10 +34,10 @@ jobs: x86_64-pc-windows-gnu] include: - arch: aarch64-unknown-linux-gnu - platform: ubuntu-latest + platform: ubuntu-20.04 profile: maxperf - arch: x86_64-unknown-linux-gnu - platform: ubuntu-latest + platform: ubuntu-20.04 profile: maxperf - arch: x86_64-apple-darwin platform: macos-latest @@ -46,7 +46,7 @@ jobs: platform: macos-latest profile: maxperf - arch: x86_64-pc-windows-gnu - platform: ubuntu-latest + platform: ubuntu-20.04 profile: maxperf runs-on: ${{ matrix.platform }} @@ -129,7 +129,7 @@ jobs: draft-release: name: draft release needs: [build, extract-version] - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 env: VERSION: ${{ needs.extract-version.outputs.VERSION }} permissions: diff --git a/.github/workflows/sanity.yml b/.github/workflows/sanity.yml index 8b1e6f67161d..71ec1b495c22 100644 --- a/.github/workflows/sanity.yml +++ b/.github/workflows/sanity.yml @@ -15,7 +15,7 @@ env: name: sanity jobs: dep-version-constraints: - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 name: dep version constraints test (partition ${{ matrix.partition }}/${{ strategy.job-total }}) strategy: matrix: @@ -64,7 +64,7 @@ jobs: filename: .github/SANITY_DEPS_ISSUE_TEMPLATE.md unused-deps: - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 name: unused dependencies steps: - name: Checkout sources diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 4a73a8a1bfc7..9caae6965958 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -6,7 +6,7 @@ on: jobs: close-issues: - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 permissions: issues: write pull-requests: write diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml index 7873ee19707f..041eadbe800a 100644 --- a/.github/workflows/unit.yml +++ b/.github/workflows/unit.yml @@ 
-15,7 +15,7 @@ name: unit jobs: test: name: test (partition ${{ matrix.partition }}/${{ strategy.job-total }}) - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 strategy: matrix: partition: [1, 2, 3, 4] @@ -51,7 +51,7 @@ jobs: eth-blockchain: name: ethereum / state tests (stable) - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 env: RUST_LOG: info,sync=error steps: @@ -83,7 +83,7 @@ jobs: doc-test: name: rustdoc - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v3 - name: Install toolchain @@ -95,7 +95,7 @@ jobs: unit-success: if: always() name: unit success - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 needs: [test, eth-blockchain, doc-test] steps: - name: Decide whether the needed jobs succeeded or failed From f3d7988f684d087a895d288a0041e95a6596b8a6 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Fri, 7 Jul 2023 14:52:02 +0300 Subject: [PATCH 102/722] release: 0.1.0-alpha.2 (#3657) --- Cargo.lock | 90 +++++++++++++++++++++++++++--------------------------- Cargo.toml | 2 +- 2 files changed, 46 insertions(+), 46 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 50a480581a06..354bd6b818cf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -918,7 +918,7 @@ checksum = "67ba02a97a2bd10f4b59b25c7973101c79642302776489e030cd13cdab09ed15" [[package]] name = "codecs-derive" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "convert_case 0.6.0", "parity-scale-codec", @@ -1742,7 +1742,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "reth-db", "reth-interfaces", @@ -4951,7 +4951,7 @@ dependencies = [ [[package]] name = "reth" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "backon", "clap 4.1.8", @@ -5015,7 +5015,7 @@ dependencies = [ [[package]] name = "reth-auto-seal-consensus" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "futures-util", "reth-beacon-consensus", @@ -5032,7 +5032,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "futures-core", "futures-util", @@ -5051,7 +5051,7 @@ dependencies = [ [[package]] name = "reth-beacon-consensus" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "assert_matches", "futures", @@ -5076,7 +5076,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "aquamarine", "assert_matches", @@ -5095,7 +5095,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "arbitrary", "bytes", @@ -5110,7 +5110,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "confy", "reth-discv4", @@ -5127,7 +5127,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "assert_matches", "mockall", @@ -5138,7 +5138,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "arbitrary", "assert_matches", @@ -5179,7 +5179,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "discv5", "enr", @@ -5202,7 +5202,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "async-trait", 
"data-encoding", @@ -5226,7 +5226,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "assert_matches", "futures", @@ -5251,7 +5251,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "aes 0.8.2", "block-padding", @@ -5282,7 +5282,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "arbitrary", "async-trait", @@ -5315,7 +5315,7 @@ dependencies = [ [[package]] name = "reth-interfaces" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "arbitrary", "async-trait", @@ -5343,7 +5343,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "async-trait", "bytes", @@ -5363,7 +5363,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "bitflags 2.3.2", "byteorder", @@ -5383,7 +5383,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "bindgen 0.65.1", "cc", @@ -5392,7 +5392,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "futures", "metrics", @@ -5402,7 +5402,7 @@ dependencies = [ [[package]] name = "reth-metrics-derive" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "metrics", "once_cell", @@ -5416,7 +5416,7 @@ dependencies = [ [[package]] name = "reth-net-common" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "pin-project", "reth-primitives", @@ -5425,7 +5425,7 @@ dependencies = [ [[package]] name = "reth-net-nat" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "igd", "pin-project-lite", @@ -5439,7 +5439,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "aquamarine", "async-trait", @@ -5489,7 +5489,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "async-trait", "reth-eth-wire", @@ -5502,7 +5502,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "futures-util", "reth-interfaces", @@ -5521,7 +5521,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "arbitrary", "assert_matches", @@ -5568,7 +5568,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "auto_impl", "derive_more", @@ -5589,7 +5589,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "once_cell", "reth-consensus-common", @@ -5605,7 +5605,7 @@ dependencies = [ [[package]] name = "reth-revm-inspectors" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "boa_engine", "boa_gc", @@ -5621,7 +5621,7 @@ dependencies = [ [[package]] name = "reth-revm-primitives" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "reth-primitives", "revm", @@ -5629,7 +5629,7 @@ dependencies = [ [[package]] name = "reth-rlp" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "arrayvec", "auto_impl", @@ -5647,7 +5647,7 @@ dependencies = [ 
[[package]] name = "reth-rlp-derive" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "proc-macro2 1.0.63", "quote 1.0.28", @@ -5656,7 +5656,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "assert_matches", "async-trait", @@ -5702,7 +5702,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "jsonrpsee", "reth-primitives", @@ -5712,7 +5712,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "async-trait", "futures", @@ -5726,7 +5726,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "hyper", "jsonrpsee", @@ -5756,7 +5756,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "assert_matches", "async-trait", @@ -5776,7 +5776,7 @@ dependencies = [ [[package]] name = "reth-rpc-types" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "assert_matches", "jsonrpsee-types", @@ -5792,7 +5792,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "aquamarine", "assert_matches", @@ -5828,7 +5828,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "dyn-clone", "futures-util", @@ -5841,7 +5841,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "tracing", "tracing-appender", @@ -5851,7 +5851,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "aquamarine", "async-trait", @@ -5877,7 +5877,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" dependencies = [ "criterion", "derive_more", diff --git a/Cargo.toml b/Cargo.toml index 11ba1a26fbb1..560282518493 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -54,7 +54,7 @@ default-members = ["bin/reth"] resolver = "2" [workspace.package] -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" edition = "2021" rust-version = "1.70" # Remember to update .clippy.toml and README.md license = "MIT OR Apache-2.0" From 526f624e1cbfd659184873bffe12ec602678b57f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 7 Jul 2023 19:50:15 +0200 Subject: [PATCH 103/722] test: tmp ignore another flaky geth test (#3663) --- crates/net/network/tests/it/connect.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/net/network/tests/it/connect.rs b/crates/net/network/tests/it/connect.rs index f8ffd584db84..5669a5acdc90 100644 --- a/crates/net/network/tests/it/connect.rs +++ b/crates/net/network/tests/it/connect.rs @@ -310,7 +310,7 @@ async fn test_connect_to_trusted_peer() { #[tokio::test(flavor = "multi_thread")] #[serial_test::serial] -#[cfg_attr(not(feature = "geth-tests"), ignore)] +#[ignore] // TODO: Re-enable once we figure out why this test is flakey async fn test_incoming_node_id_blacklist() { reth_tracing::init_test_tracing(); tokio::time::timeout(GETH_TIMEOUT, async move { From 93865ef916c1337ad2b0a0263f27ea91a46d6f98 Mon Sep 17 00:00:00 2001 From: Chris Evanko <106608356+cjeva10@users.noreply.github.com> Date: Fri, 7 Jul 2023 14:51:34 -0400 Subject: [PATCH 104/722] pop 
duplicate entries when returning downloaded blocks in engine (#3644)

Co-authored-by: Matthias Seitz
---
 crates/consensus/beacon/src/engine/sync.rs | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs
index 73bb85dbb93d..b422583f2dbb 100644
--- a/crates/consensus/beacon/src/engine/sync.rs
+++ b/crates/consensus/beacon/src/engine/sync.rs
@@ -13,7 +13,7 @@ use reth_stages::{ControlFlow, Pipeline, PipelineError, PipelineWithResult};
 use reth_tasks::TaskSpawner;
 use std::{
     cmp::{Ordering, Reverse},
-    collections::BinaryHeap,
+    collections::{binary_heap::PeekMut, BinaryHeap},
     task::{ready, Context, Poll},
 };
 use tokio::sync::oneshot;
@@ -304,6 +304,14 @@ where

         // drain an element of the block buffer if there are any
         if let Some(block) = self.range_buffered_blocks.pop() {
+            // peek ahead and pop duplicates
+            while let Some(peek) = self.range_buffered_blocks.peek_mut() {
+                if peek.0 .0.hash() == block.0 .0.hash() {
+                    PeekMut::pop(peek);
+                } else {
+                    break
+                }
+            }
             return Poll::Ready(EngineSyncEvent::FetchedFullBlock(block.0 .0))
         }

From 74b21c108c5232e48d2bd6d75c4d045b51aace91 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Fri, 7 Jul 2023 21:04:19 +0200
Subject: [PATCH 105/722] feat: support all alias (#3660)

---
 crates/rpc/rpc-builder/src/lib.rs | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs
index 9a15440239b9..bcd41aa7608e 100644
--- a/crates/rpc/rpc-builder/src/lib.rs
+++ b/crates/rpc/rpc-builder/src/lib.rs
@@ -563,9 +563,12 @@ impl FromStr for RpcModuleSelection {
     type Err = ParseError;

     fn from_str(s: &str) -> Result {
-        let modules = s.split(',');
-
-        RpcModuleSelection::try_from_selection(modules)
+        let mut modules = s.split(',').peekable();
+        let first = modules.peek().copied().ok_or(ParseError::VariantNotFound)?;
+        match first {
+            "all" | "All" => Ok(RpcModuleSelection::All),
+            _ => RpcModuleSelection::try_from_selection(modules),
+        }
     }
 }

@@ -1667,6 +1670,12 @@ impl fmt::Debug for RpcServerHandle {
 mod tests {
     use super::*;

+    #[test]
+    fn parse_rpc_module_selection() {
+        let selection = "all".parse::().unwrap();
+        assert_eq!(selection, RpcModuleSelection::All);
+    }
+
     #[test]
     fn identical_selection() {
         assert!(RpcModuleSelection::are_identical(

From a7f32dbb0b9a297f5df335418e6b8f588426bd7e Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Fri, 7 Jul 2023 21:08:47 +0200
Subject: [PATCH 106/722] chore: add a few noop functions to builder (#3659)

---
 crates/rpc/rpc-builder/src/lib.rs | 45 +++++++++++++++++++++++++++++--
 1 file changed, 43 insertions(+), 2 deletions(-)

diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs
index bcd41aa7608e..eb9f0d1fd390 100644
--- a/crates/rpc/rpc-builder/src/lib.rs
+++ b/crates/rpc/rpc-builder/src/lib.rs
@@ -125,7 +125,7 @@ use reth_rpc::{
     NetApi, RPCApi, TraceApi, TracingCallGuard, TxPoolApi, Web3Api,
 };
 use reth_rpc_api::{servers::*, EngineApiServer};
-use reth_tasks::TaskSpawner;
+use reth_tasks::{TaskSpawner, TokioTaskExecutor};
 use reth_transaction_pool::TransactionPool;
 use serde::{Deserialize, Serialize, Serializer};
 use std::{
@@ -158,6 +158,8 @@ pub mod constants;
 pub use crate::eth::{EthConfig, EthHandlers};
 pub use jsonrpsee::server::ServerBuilder;
 pub use reth_ipc::server::{Builder as IpcServerBuilder, Endpoint};
+use reth_network_api::noop::NoopNetwork;
+use 
reth_transaction_pool::noop::NoopTransactionPool;

 /// Convenience function for starting a server in one step.
 pub async fn launch(
@@ -192,7 +194,7 @@ where

 /// A builder type to configure the RPC module: See [RpcModule]
 ///
-/// This is the main entrypoint for up RPC servers.
+/// This is the main entrypoint and the easiest way to configure an RPC server.
 #[derive(Debug, Clone)]
 pub struct RpcModuleBuilder {
     /// The Provider type to when creating all rpc handlers
@@ -241,6 +243,24 @@ impl RpcModuleBuilder {
             provider, network, pool, executor, events }
     }

+    /// Configure a [NoopTransactionPool] instance.
+    ///
+    /// Caution: This will configure a pool API that does absolutely nothing.
+    /// This is only intended to allow easier setup of namespaces that depend on the [EthApi] which
+    /// requires a [TransactionPool] implementation.
+    pub fn with_noop_pool(
+        self,
+    ) -> RpcModuleBuilder {
+        let Self { provider, executor, events, network, .. } = self;
+        RpcModuleBuilder {
+            provider,
+            executor,
+            events,
+            network,
+            pool: NoopTransactionPool::default(),
+        }
+    }
+
     /// Configure the network instance.
     pub fn with_network(self, network: N) -> RpcModuleBuilder
     where
@@ -250,6 +270,16 @@ impl RpcModuleBuilder {
             provider, network, pool, executor, events }
     }

+    /// Configure a [NoopNetwork] instance.
+    ///
+    /// Caution: This will configure a network API that does absolutely nothing.
+    /// This is only intended to allow easier setup of namespaces that depend on the [EthApi] which
+    /// requires a [NetworkInfo] implementation.
+    pub fn with_noop_network(self) -> RpcModuleBuilder {
+        let Self { provider, pool, executor, events, .. } = self;
+        RpcModuleBuilder { provider, pool, executor, events, network: NoopNetwork::default() }
+    }
+
     /// Configure the task executor to use for additional tasks.
     pub fn with_executor(
         self,
@@ -262,6 +292,17 @@ impl RpcModuleBuilder {
             provider, network, pool, executor, events }
     }

+    /// Configure [TokioTaskExecutor] as the task executor to use for additional tasks.
+    ///
+    /// This will spawn additional tasks directly via `tokio::task::spawn`; see
+    /// [TokioTaskExecutor].
+    pub fn with_tokio_executor(
+        self,
+    ) -> RpcModuleBuilder {
+        let Self { pool, network, provider, events, .. 
} = self; + RpcModuleBuilder { provider, network, pool, events, executor: TokioTaskExecutor::default() } + } + /// Configure the event subscriber instance pub fn with_events(self, events: E) -> RpcModuleBuilder where From 42a824cf95e3cfdf2a76efe90bc65daf9012f267 Mon Sep 17 00:00:00 2001 From: "lukebrich.eth" Date: Fri, 7 Jul 2023 18:32:40 -0400 Subject: [PATCH 107/722] Add propagate field to TransactionValidationOutcome (#3664) Co-authored-by: Matthias Seitz --- crates/transaction-pool/src/noop.rs | 1 + crates/transaction-pool/src/pool/mod.rs | 9 +++++++-- crates/transaction-pool/src/validate/eth.rs | 1 + crates/transaction-pool/src/validate/mod.rs | 2 ++ 4 files changed, 11 insertions(+), 2 deletions(-) diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index 2fec5391c1eb..0ccec210c131 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -170,6 +170,7 @@ impl TransactionValidator for NoopTransactionValidator { balance: Default::default(), state_nonce: 0, transaction, + propagate: true, } } } diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 6b16c26470ea..5ae0147d6430 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -274,7 +274,12 @@ where tx: TransactionValidationOutcome, ) -> PoolResult { match tx { - TransactionValidationOutcome::Valid { balance, state_nonce, transaction } => { + TransactionValidationOutcome::Valid { + balance, + state_nonce, + transaction, + propagate, + } => { let sender_id = self.get_sender_id(transaction.sender()); let transaction_id = TransactionId::new(sender_id, transaction.nonce()); let encoded_length = transaction.encoded_length(); @@ -282,7 +287,7 @@ where let tx = ValidPoolTransaction { transaction, transaction_id, - propagate: false, + propagate, timestamp: Instant::now(), origin, encoded_length, diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 605c7d08a41b..b5ca93709dda 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -311,6 +311,7 @@ where balance: account.balance, state_nonce: account.nonce, transaction, + propagate: true, } } } diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 1f7b7c5377e0..a3892f52ffe6 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -30,6 +30,8 @@ pub enum TransactionValidationOutcome { state_nonce: u64, /// Validated transaction. transaction: T, + /// Whether to propagate the transaction to the network. + propagate: bool, }, /// The transaction is considered invalid indefinitely: It violates constraints that prevent /// this transaction from ever becoming valid. 
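
The `propagate` field added above lets a validator decide, per transaction, whether the pool may gossip it to peers. For illustration, a minimal sketch of a custom validator that gates gossip by origin; it assumes the `#[async_trait]`-based `TransactionValidator` trait shape mirrored from the noop.rs hunk above and crate-root re-exports for the imported names, and `GossipGatedValidator` is an invented name, not code from these patches:

    use reth_transaction_pool::{
        PooledTransaction, TransactionOrigin, TransactionValidationOutcome, TransactionValidator,
    };

    /// Hypothetical validator: accepts every transaction, but only marks
    /// externally received ones for network propagation.
    #[derive(Debug, Clone, Default)]
    struct GossipGatedValidator;

    #[async_trait::async_trait]
    impl TransactionValidator for GossipGatedValidator {
        type Transaction = PooledTransaction;

        async fn validate_transaction(
            &self,
            origin: TransactionOrigin,
            transaction: Self::Transaction,
        ) -> TransactionValidationOutcome<Self::Transaction> {
            TransactionValidationOutcome::Valid {
                // placeholder account values, as in NoopTransactionValidator above
                balance: Default::default(),
                state_nonce: 0,
                transaction,
                // the new field from #3664: keep locally submitted txs out of gossip
                propagate: matches!(origin, TransactionOrigin::External),
            }
        }
    }
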
From 1330fc11df1a89aa58b5c0a79d8d8720f6b0e374 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Sat, 8 Jul 2023 21:16:37 +1000 Subject: [PATCH 108/722] add build profile to version info take II (#3669) Co-authored-by: Matthias Seitz --- Cargo.lock | 7 +++++++ bin/reth/Cargo.toml | 1 + bin/reth/src/version.rs | 17 +++++++++++++++-- 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 354bd6b818cf..f968525ed50f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1036,6 +1036,12 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" +[[package]] +name = "const-str" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aca749d3d3f5b87a0d6100509879f9cf486ab510803a4a4e1001da1ff61c2bd6" + [[package]] name = "convert_case" version = "0.4.0" @@ -4957,6 +4963,7 @@ dependencies = [ "clap 4.1.8", "comfy-table", "confy", + "const-str", "crossterm", "dirs-next", "eyre", diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 7009807f75b0..1a2fc9591254 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -85,6 +85,7 @@ hex = "0.4" thiserror = { workspace = true } pretty_assertions = "1.3.0" humantime = "2.1.0" +const-str = "0.5.6" [features] jemalloc = ["dep:jemallocator", "dep:jemalloc-ctl"] diff --git a/bin/reth/src/version.rs b/bin/reth/src/version.rs index a016d209c97f..56933848a586 100644 --- a/bin/reth/src/version.rs +++ b/bin/reth/src/version.rs @@ -28,7 +28,7 @@ pub(crate) const SHORT_VERSION: &str = /// Build Timestamp: 2023-05-19T01:47:19.815651705Z /// Build Features: jemalloc /// ``` -pub(crate) const LONG_VERSION: &str = concat!( +pub(crate) const LONG_VERSION: &str = const_str::concat!( "Version: ", env!("CARGO_PKG_VERSION"), "\n", @@ -39,7 +39,10 @@ pub(crate) const LONG_VERSION: &str = concat!( env!("VERGEN_BUILD_TIMESTAMP"), "\n", "Build Features: ", - env!("VERGEN_CARGO_FEATURES") + env!("VERGEN_CARGO_FEATURES"), + "\n", + "Build Profile: ", + build_profile_name() ); /// The version information for reth formatted for P2P (devp2p). @@ -76,6 +79,16 @@ pub fn default_extradata() -> String { format!("reth/v{}/{}", env!("CARGO_PKG_VERSION"), std::env::consts::OS) } +const fn build_profile_name() -> &'static str { + // Derived from https://stackoverflow.com/questions/73595435/how-to-get-profile-from-cargo-toml-in-build-rs-or-at-runtime + // We split on the path separator of the *host* machine, which may be different from + // `std::path::MAIN_SEPARATOR_STR`. + const OUT_DIR: &str = env!("OUT_DIR"); + const SEP: char = if const_str::contains!(OUT_DIR, "/") { '/' } else { '\\' }; + let parts = const_str::split!(OUT_DIR, SEP); + parts[parts.len() - 4] +} + #[cfg(test)] mod tests { use super::*; From f116040e63fa3033139d27a906bd509998413748 Mon Sep 17 00:00:00 2001 From: Josh Stevens Date: Sun, 9 Jul 2023 15:14:36 +0100 Subject: [PATCH 109/722] fix: expose call method so a consumer can use it (#3680) Co-authored-by: Matthias Seitz --- crates/rpc/rpc/src/eth/api/call.rs | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/crates/rpc/rpc/src/eth/api/call.rs b/crates/rpc/rpc/src/eth/api/call.rs index cca5d4b26e6e..29cbdcfc4460 100644 --- a/crates/rpc/rpc/src/eth/api/call.rs +++ b/crates/rpc/rpc/src/eth/api/call.rs @@ -38,18 +38,14 @@ where Network: NetworkInfo + Send + Sync + 'static, { /// Estimate gas needed for execution of the `request` at the [BlockId]. 
- pub(crate) async fn estimate_gas_at( - &self, - request: CallRequest, - at: BlockId, - ) -> EthResult { + pub async fn estimate_gas_at(&self, request: CallRequest, at: BlockId) -> EthResult { let (cfg, block_env, at) = self.evm_env_at(at).await?; let state = self.state_at(at)?; self.estimate_gas_with(cfg, block_env, request, state) } /// Executes the call request (`eth_call`) and returns the output - pub(crate) async fn call( + pub async fn call( &self, request: CallRequest, block_number: Option, From b68116c9be7178a040ce23e0e3ab6f244156b3ca Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Sun, 9 Jul 2023 18:16:53 +0100 Subject: [PATCH 110/722] fix(tree): update metrics only on canonical/side chain changes (#3671) --- crates/blockchain-tree/src/blockchain_tree.rs | 8 ++++++-- crates/blockchain-tree/src/shareable.rs | 16 ++++++++-------- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 0fad7079917a..92c821064c1a 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1084,8 +1084,12 @@ impl BlockchainTree } } - /// Update blockchain tree and sync metrics - pub(crate) fn update_metrics(&mut self) { + /// Update blockchain tree chains (canonical and sidechains) and sync metrics. + /// + /// NOTE: this method should not be called during the pipeline sync, because otherwise the sync + /// checkpoint metric will get overwritten. Buffered blocks metrics are updated in + /// [BlockBuffer] during the pipeline sync. + pub(crate) fn update_chains_metrics(&mut self) { let height = self.canonical_chain().tip().number; self.metrics.sidechains.set(self.chains.len() as f64); diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index 663c2e03bae9..a63b18dd2657 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -43,9 +43,9 @@ impl BlockchainTreeEngine { fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { let mut tree = self.tree.write(); - let res = tree.buffer_block(block); - tree.update_metrics(); - res + // Blockchain tree metrics shouldn't be updated here, see + // `BlockchainTree::update_chains_metrics` documentation. 
+ tree.buffer_block(block) } fn insert_block( @@ -55,7 +55,7 @@ impl BlockchainTreeEngine trace!(target: "blockchain_tree", hash=?block.hash, number=block.number, parent_hash=?block.parent_hash, "Inserting block"); let mut tree = self.tree.write(); let res = tree.insert_block(block); - tree.update_metrics(); + tree.update_chains_metrics(); res } @@ -63,14 +63,14 @@ impl BlockchainTreeEngine trace!(target: "blockchain_tree", ?finalized_block, "Finalizing block"); let mut tree = self.tree.write(); tree.finalize_block(finalized_block); - tree.update_metrics(); + tree.update_chains_metrics(); } fn restore_canonical_hashes(&self, last_finalized_block: BlockNumber) -> Result<(), Error> { trace!(target: "blockchain_tree", ?last_finalized_block, "Restoring canonical hashes for last finalized block"); let mut tree = self.tree.write(); let res = tree.restore_canonical_hashes(last_finalized_block); - tree.update_metrics(); + tree.update_chains_metrics(); res } @@ -78,7 +78,7 @@ impl BlockchainTreeEngine trace!(target: "blockchain_tree", ?block_hash, "Making block canonical"); let mut tree = self.tree.write(); let res = tree.make_canonical(block_hash); - tree.update_metrics(); + tree.update_chains_metrics(); res } @@ -86,7 +86,7 @@ impl BlockchainTreeEngine trace!(target: "blockchain_tree", ?unwind_to, "Unwinding to block number"); let mut tree = self.tree.write(); let res = tree.unwind(unwind_to); - tree.update_metrics(); + tree.update_chains_metrics(); res } } From 15b21a031ade0ccfb992feec79f2b507db39b822 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Sun, 9 Jul 2023 18:16:55 +0100 Subject: [PATCH 111/722] feat(stages): trace log for metric event receive (#3670) --- crates/stages/src/metrics/listener.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/stages/src/metrics/listener.rs b/crates/stages/src/metrics/listener.rs index 8ec3ec31da7e..8e8102d3f34c 100644 --- a/crates/stages/src/metrics/listener.rs +++ b/crates/stages/src/metrics/listener.rs @@ -10,6 +10,7 @@ use std::{ task::{ready, Context, Poll}, }; use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; +use tracing::trace; /// Alias type for metric producers to use. 
pub type MetricEventsSender = UnboundedSender; @@ -54,6 +55,7 @@ impl MetricsListener { } fn handle_event(&mut self, event: MetricEvent) { + trace!(target: "sync::metrics", ?event, "Metric event received"); match event { MetricEvent::SyncHeight { height } => { for stage_id in StageId::ALL { From f20117666ee1047e5e4b7b3381409942811f0565 Mon Sep 17 00:00:00 2001 From: Bjerg Date: Sun, 9 Jul 2023 19:40:12 +0200 Subject: [PATCH 112/722] chore: use units on dashboard (#3684) --- etc/grafana/dashboards/overview.json | 855 +++++++++++++++------------ 1 file changed, 492 insertions(+), 363 deletions(-) diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index d1855a239170..4d27f32f7e8d 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -27,7 +27,7 @@ "type": "grafana", "id": "grafana", "name": "Grafana", - "version": "10.0.1" + "version": "9.5.3" }, { "type": "panel", @@ -159,7 +159,7 @@ "showThresholdLabels": false, "showThresholdMarkers": true }, - "pluginVersion": "10.0.1", + "pluginVersion": "9.5.3", "targets": [ { "datasource": { @@ -226,7 +226,7 @@ "showUnfilled": true, "valueMode": "color" }, - "pluginVersion": "10.0.1", + "pluginVersion": "9.5.3", "targets": [ { "datasource": { @@ -435,7 +435,7 @@ "h": 1, "w": 24, "x": 0, - "y": 34 + "y": 17 }, "id": 38, "panels": [], @@ -492,7 +492,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null } ] }, @@ -504,7 +505,7 @@ "h": 8, "w": 12, "x": 0, - "y": 35 + "y": 18 }, "id": 40, "options": { @@ -564,7 +565,7 @@ "h": 8, "w": 12, "x": 12, - "y": 35 + "y": 18 }, "id": 42, "maxDataPoints": 25, @@ -608,7 +609,7 @@ "unit": "percentunit" } }, - "pluginVersion": "10.0.1", + "pluginVersion": "9.5.3", "targets": [ { "datasource": { @@ -655,7 +656,7 @@ "h": 8, "w": 12, "x": 0, - "y": 43 + "y": 26 }, "id": 48, "options": { @@ -747,7 +748,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -763,7 +765,7 @@ "h": 8, "w": 12, "x": 12, - "y": 43 + "y": 26 }, "id": 52, "options": { @@ -821,7 +823,7 @@ "h": 8, "w": 12, "x": 0, - "y": 51 + "y": 34 }, "id": 50, "options": { @@ -884,7 +886,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -988,7 +991,7 @@ "h": 8, "w": 12, "x": 12, - "y": 51 + "y": 34 }, "id": 58, "options": { @@ -1003,7 +1006,7 @@ }, "showHeader": true }, - "pluginVersion": "10.0.1", + "pluginVersion": "9.5.3", "targets": [ { "datasource": { @@ -1029,7 +1032,7 @@ "h": 1, "w": 24, "x": 0, - "y": 84 + "y": 42 }, "id": 46, "panels": [], @@ -1084,7 +1087,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null } ] }, @@ -1096,7 +1100,7 @@ "h": 8, "w": 24, "x": 0, - "y": 85 + "y": 43 }, "id": 56, "options": { @@ -1169,7 +1173,7 @@ "h": 1, "w": 24, "x": 0, - "y": 102 + "y": 51 }, "id": 6, "panels": [], @@ -1224,7 +1228,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -1239,7 +1244,7 @@ "h": 8, "w": 8, "x": 0, - "y": 103 + "y": 52 }, "id": 18, "options": { @@ -1316,7 +1321,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -1331,7 +1337,7 @@ "h": 8, "w": 8, "x": 8, - "y": 103 + "y": 52 }, "id": 16, "options": { @@ -1433,14 +1439,16 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", 
"value": 80 } ] - } + }, + "unit": "cps" }, "overrides": [] }, @@ -1448,7 +1456,7 @@ "h": 8, "w": 8, "x": 16, - "y": 103 + "y": 52 }, "id": 8, "options": { @@ -1473,7 +1481,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_p2pstream_disconnected_errors{instance=~\"$instance\"}", + "expr": "rate(reth_p2pstream_disconnected_errors{instance=~\"$instance\"}[$__rate_interval])", "legendFormat": "P2P stream disconnected", "range": true, "refId": "A" @@ -1484,7 +1492,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_network_pending_session_failures{instance=~\"$instance\"}", + "expr": "rate(reth_network_pending_session_failures{instance=~\"$instance\"}[$__rate_interval])", "hide": false, "legendFormat": "Failed pending sessions", "range": true, @@ -1496,7 +1504,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_network_invalid_messages_received{instance=~\"$instance\"}", + "expr": "rate(reth_network_invalid_messages_received{instance=~\"$instance\"}[$__rate_interval])", "hide": false, "legendFormat": "Invalid messages", "range": true, @@ -1527,7 +1535,7 @@ "h": 8, "w": 8, "x": 0, - "y": 111 + "y": 60 }, "id": 54, "options": { @@ -1691,7 +1699,7 @@ "h": 1, "w": 24, "x": 0, - "y": 136 + "y": 68 }, "id": 24, "panels": [], @@ -1745,7 +1753,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -1785,7 +1794,7 @@ "h": 8, "w": 12, "x": 0, - "y": 137 + "y": 69 }, "id": 26, "options": { @@ -1899,14 +1908,16 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", "value": 80 } ] - } + }, + "unit": "cps" }, "overrides": [] }, @@ -1914,7 +1925,7 @@ "h": 8, "w": 12, "x": 12, - "y": 137 + "y": 69 }, "id": 33, "options": { @@ -1936,7 +1947,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_downloaders_headers_timeout_errors{instance=~\"$instance\"}", + "expr": "rate(reth_downloaders_headers_timeout_errors{instance=~\"$instance\"}[$__rate_interval])", "legendFormat": "Request timed out", "range": true, "refId": "A" @@ -1947,7 +1958,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_downloaders_headers_unexpected_errors{instance=~\"$instance\"}", + "expr": "rate(reth_downloaders_headers_unexpected_errors{instance=~\"$instance\"}[$__rate_interval])", "hide": false, "legendFormat": "Unexpected error", "range": true, @@ -1959,7 +1970,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_downloaders_headers_validation_errors{instance=~\"$instance\"}", + "expr": "rate(reth_downloaders_headers_validation_errors{instance=~\"$instance\"}[$__rate_interval])", "hide": false, "legendFormat": "Invalid response", "range": true, @@ -2015,7 +2026,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -2030,7 +2042,7 @@ "h": 8, "w": 12, "x": 0, - "y": 145 + "y": 77 }, "id": 36, "options": { @@ -2079,7 +2091,7 @@ "h": 1, "w": 24, "x": 0, - "y": 170 + "y": 85 }, "id": 32, "panels": [], @@ -2134,14 +2146,16 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", "value": 80 } ] - } + }, + "unit": "locale" }, "overrides": [ { @@ -2153,6 +2167,10 @@ { "id": "custom.axisPlacement", "value": "right" + }, + { + "id": "unit", + "value": "ops" } ] }, @@ -2165,6 +2183,10 @@ { "id": "custom.axisPlacement", "value": "right" + }, + { + "id": "unit", + "value": "ops" } ] 
} @@ -2174,7 +2196,7 @@ "h": 8, "w": 12, "x": 0, - "y": 171 + "y": 86 }, "id": 30, "options": { @@ -2324,10 +2346,12 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null } ] - } + }, + "unit": "cps" }, "overrides": [] }, @@ -2335,7 +2359,7 @@ "h": 8, "w": 12, "x": 12, - "y": 171 + "y": 86 }, "id": 28, "options": { @@ -2357,7 +2381,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_downloaders_bodies_timeout_errors{instance=~\"$instance\"}", + "expr": "rate(reth_downloaders_bodies_timeout_errors{instance=~\"$instance\"}[$__rate_interval])", "legendFormat": "Request timed out", "range": true, "refId": "A" @@ -2368,7 +2392,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_downloaders_bodies_unexpected_errors{instance=~\"$instance\"}", + "expr": "rate(reth_downloaders_bodies_unexpected_errors{instance=~\"$instance\"}[$__rate_interval])", "hide": false, "legendFormat": "Unexpected error", "range": true, @@ -2380,7 +2404,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_downloaders_bodies_validation_errors{instance=~\"$instance\"}", + "expr": "rate(reth_downloaders_bodies_validation_errors{instance=~\"$instance\"}[$__rate_interval])", "hide": false, "legendFormat": "Invalid response", "range": true, @@ -2436,7 +2460,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -2451,7 +2476,7 @@ "h": 8, "w": 12, "x": 0, - "y": 179 + "y": 94 }, "id": 35, "options": { @@ -2540,14 +2565,16 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", "value": 80 } ] - } + }, + "unit": "bytes" }, "overrides": [ { @@ -2559,6 +2586,10 @@ { "id": "custom.axisPlacement", "value": "right" + }, + { + "id": "unit", + "value": "blocks" } ] } @@ -2568,7 +2599,7 @@ "h": 8, "w": 12, "x": 12, - "y": 179 + "y": 94 }, "id": 73, "options": { @@ -2592,7 +2623,7 @@ "editorMode": "builder", "expr": "reth_downloaders_bodies_buffered_blocks_size_bytes{instance=~\"$instance\"}", "hide": false, - "legendFormat": "Buffered blocks size (bytes)", + "legendFormat": "Buffered blocks size ", "range": true, "refId": "A" }, @@ -2618,7 +2649,7 @@ "h": 1, "w": 24, "x": 0, - "y": 204 + "y": 102 }, "id": 89, "panels": [], @@ -2673,14 +2704,16 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", "value": 80 } ] - } + }, + "unit": "bytes" }, "overrides": [] }, @@ -2688,7 +2721,7 @@ "h": 8, "w": 12, "x": 0, - "y": 205 + "y": 103 }, "id": 91, "options": { @@ -2711,7 +2744,7 @@ }, "editorMode": "builder", "expr": "reth_transaction_pool_basefee_pool_size_bytes{instance=~\"$instance\"}", - "legendFormat": "Base fee pool size (bytes)", + "legendFormat": "Base fee pool size", "range": true, "refId": "A" }, @@ -2723,7 +2756,7 @@ "editorMode": "builder", "expr": "reth_transaction_pool_pending_pool_size_bytes{instance=~\"$instance\"}", "hide": false, - "legendFormat": "Pending pool size (bytes)", + "legendFormat": "Pending pool size", "range": true, "refId": "B" }, @@ -2735,7 +2768,7 @@ "editorMode": "builder", "expr": "reth_transaction_pool_queued_pool_size_bytes{instance=~\"$instance\"}", "hide": false, - "legendFormat": "Queued pool size (bytes)", + "legendFormat": "Queued pool size", "range": true, "refId": "C" } @@ -2789,7 +2822,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -2804,7 +2838,7 @@ "h": 
8, "w": 12, "x": 12, - "y": 205 + "y": 103 }, "id": 92, "options": { @@ -2871,7 +2905,7 @@ "mode": "palette-classic" }, "custom": { - "axisCenteredZero": false, + "axisCenteredZero": true, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", @@ -2905,22 +2939,53 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", "value": 80 } ] - } + }, + "unit": "ops" }, - "overrides": [] + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "B" + }, + "properties": [ + { + "id": "custom.transform", + "value": "negative-Y" + } + ] + }, + { + "matcher": { + "id": "byFrameRefID", + "options": "C" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "cps" + } + ] + } + ] }, "gridPos": { "h": 8, "w": 12, "x": 0, - "y": 213 + "y": 111 }, "id": 93, "options": { @@ -2931,7 +2996,7 @@ "showLegend": true }, "tooltip": { - "mode": "single", + "mode": "multi", "sort": "none" } }, @@ -2942,7 +3007,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_transaction_pool_inserted_transactions{instance=~\"$instance\"}", + "expr": "increase(reth_transaction_pool_inserted_transactions{instance=~\"$instance\"}[$__rate_interval])", "legendFormat": "Inserted transactions", "range": true, "refId": "A" @@ -2953,7 +3018,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_transaction_pool_removed_transactions{instance=~\"$instance\"}", + "expr": "increase(reth_transaction_pool_removed_transactions{instance=~\"$instance\"}[$__rate_interval])", "hide": false, "legendFormat": "Removed transactions", "range": true, @@ -2965,7 +3030,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_transaction_pool_invalid_transactions{instance=~\"$instance\"}", + "expr": "increase(reth_transaction_pool_invalid_transactions{instance=~\"$instance\"}[$__rate_interval])", "hide": false, "legendFormat": "Invalid transactions", "range": true, @@ -3021,7 +3086,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3036,7 +3102,7 @@ "h": 8, "w": 12, "x": 12, - "y": 213 + "y": 111 }, "id": 94, "options": { @@ -3080,7 +3146,7 @@ "mode": "palette-classic" }, "custom": { - "axisCenteredZero": false, + "axisCenteredZero": true, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", @@ -3114,22 +3180,49 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", "value": 80 } ] - } + }, + "unit": "cps" }, - "overrides": [] + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "B" + }, + "properties": [ + { + "id": "custom.transform", + "value": "negative-Y" + } + ] + }, + { + "matcher": { + "id": "byFrameRefID", + "options": "C" + }, + "properties": [ + { + "id": "unit", + "value": "events" + } + ] + } + ] }, "gridPos": { "h": 8, "w": 12, "x": 0, - "y": 221 + "y": 119 }, "id": 95, "options": { @@ -3150,13 +3243,37 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "editorMode": "code", - "expr": "reth_network_pool_transactions_messages_sent{instance=~\"$instance\"} - reth_network_pool_transactions_messages_received{instance=~\"$instance\"}", + "editorMode": "builder", + "expr": "increase(reth_network_pool_transactions_messages_sent{instance=~\"$instance\"}[$__rate_interval])", "hide": false, "instant": false, - "legendFormat": "Total events in the channel", + "legendFormat": "Tx", "range": true, 
"refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "increase(reth_network_pool_transactions_messages_received{instance=~\"$instance\"}[$__rate_interval])", + "hide": false, + "legendFormat": "Rx", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_network_pool_transactions_messages_sent{instance=~\"$instance\"} - reth_network_pool_transactions_messages_received{instance=~\"$instance\"}", + "hide": false, + "legendFormat": "Messages in channel", + "range": true, + "refId": "C" } ], "title": "Network transaction channel", @@ -3168,7 +3285,7 @@ "h": 1, "w": 24, "x": 0, - "y": 254 + "y": 127 }, "id": 79, "panels": [], @@ -3223,7 +3340,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3238,7 +3356,7 @@ "h": 8, "w": 12, "x": 0, - "y": 255 + "y": 128 }, "id": 74, "options": { @@ -3316,7 +3434,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3331,7 +3450,7 @@ "h": 8, "w": 12, "x": 12, - "y": 255 + "y": 128 }, "id": 80, "options": { @@ -3409,7 +3528,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3424,7 +3544,7 @@ "h": 8, "w": 12, "x": 0, - "y": 263 + "y": 136 }, "id": 81, "options": { @@ -3462,7 +3582,7 @@ "h": 1, "w": 24, "x": 0, - "y": 288 + "y": 144 }, "id": 87, "panels": [], @@ -3517,7 +3637,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3532,7 +3653,7 @@ "h": 8, "w": 12, "x": 0, - "y": 289 + "y": 145 }, "id": 83, "options": { @@ -3609,7 +3730,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3624,7 +3746,7 @@ "h": 8, "w": 12, "x": 12, - "y": 289 + "y": 145 }, "id": 84, "options": { @@ -3713,7 +3835,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3728,7 +3851,7 @@ "h": 8, "w": 12, "x": 0, - "y": 297 + "y": 153 }, "id": 85, "options": { @@ -3760,296 +3883,298 @@ "type": "timeseries" }, { - "collapsed": true, + "collapsed": false, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 322 + "y": 161 }, "id": 68, - "panels": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "panels": [], + "repeat": "instance", + "repeatDirection": "h", + "title": "Payload Builder ($instance)", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Number of active jobs", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" }, - "description": "Number of active jobs", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineWidth": 3, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { 
- "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - } + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 11, - "x": 0, - "y": 33 - }, - "id": 60, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "lineInterpolation": "linear", + "lineWidth": 3, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" }, - "tooltip": { - "mode": "single", - "sort": "none" + "thresholdsStyle": { + "mode": "off" } }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "editorMode": "builder", - "expr": "reth_payloads_active_jobs{instance=~\"$instance\"}", - "legendFormat": "Active Jobs", - "range": true, - "refId": "A" - } - ], - "title": "Active Jobs", - "type": "timeseries" + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 11, + "x": 0, + "y": 162 + }, + "id": 60, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "Total number of initiated jobs", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineWidth": 3, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - } + "editorMode": "builder", + "expr": "reth_payloads_active_jobs{instance=~\"$instance\"}", + "legendFormat": "Active Jobs", + "range": true, + "refId": "A" + } + ], + "title": "Active Jobs", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Total number of initiated jobs", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 13, - "x": 11, - "y": 33 - }, - "id": 62, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "lineInterpolation": "linear", + "lineWidth": 3, + 
"pointSize": 5, + "scaleDistribution": { + "type": "linear" }, - "tooltip": { - "mode": "single", - "sort": "none" + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" } }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "editorMode": "builder", - "expr": "reth_payloads_initiated_jobs{instance=~\"$instance\"}", - "legendFormat": "Initiated Jobs", - "range": true, - "refId": "A" - } - ], - "title": "Initiated Jobs", - "type": "timeseries" + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 13, + "x": 11, + "y": 162 + }, + "id": 62, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "Total number of failed jobs", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineWidth": 3, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - } + "editorMode": "builder", + "expr": "reth_payloads_initiated_jobs{instance=~\"$instance\"}", + "legendFormat": "Initiated Jobs", + "range": true, + "refId": "A" + } + ], + "title": "Initiated Jobs", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Total number of failed jobs", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 11, - "x": 0, - "y": 41 - }, - "id": 64, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "lineInterpolation": "linear", + "lineWidth": 3, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" }, - "tooltip": { - "mode": "single", - "sort": "none" + "thresholdsStyle": { + "mode": "off" } }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "editorMode": "builder", - "expr": "reth_payloads_failed_jobs{instance=~\"$instance\"}", - "legendFormat": "Failed Jobs", - "range": true, - "refId": "A" - } - ], - "title": "Failed Jobs", 
- "type": "timeseries" + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 11, + "x": 0, + "y": 170 + }, + "id": 64, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_payloads_failed_jobs{instance=~\"$instance\"}", + "legendFormat": "Failed Jobs", + "range": true, + "refId": "A" } ], - "repeat": "instance", - "repeatDirection": "h", - "title": "Payload Builder ($instance)", - "type": "row" + "title": "Failed Jobs", + "type": "timeseries" }, { "collapsed": false, @@ -4057,7 +4182,7 @@ "h": 1, "w": 24, "x": 0, - "y": 324 + "y": 177 }, "id": 97, "panels": [], @@ -4109,7 +4234,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -4125,7 +4251,7 @@ "h": 8, "w": 12, "x": 0, - "y": 325 + "y": 178 }, "id": 98, "options": { @@ -4268,7 +4394,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -4284,7 +4411,7 @@ "h": 8, "w": 12, "x": 12, - "y": 325 + "y": 178 }, "id": 101, "options": { @@ -4362,7 +4489,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -4378,7 +4506,7 @@ "h": 8, "w": 12, "x": 0, - "y": 333 + "y": 186 }, "id": 99, "options": { @@ -4456,7 +4584,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -4472,7 +4601,7 @@ "h": 8, "w": 12, "x": 12, - "y": 333 + "y": 186 }, "id": 100, "options": { @@ -4544,6 +4673,6 @@ "timezone": "", "title": "reth", "uid": "2k8BXz24x", - "version": 8, + "version": 2, "weekStart": "" } \ No newline at end of file From 6cbb639f6b4f32ee9908007b64b0a61ea50879d9 Mon Sep 17 00:00:00 2001 From: Josh Stevens Date: Sun, 9 Jul 2023 20:57:16 +0100 Subject: [PATCH 113/722] fix: expose the revm_utils to consumer as needed structs from it (#3686) --- crates/rpc/rpc/src/eth/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/rpc/rpc/src/eth/mod.rs b/crates/rpc/rpc/src/eth/mod.rs index 19032dd8e9b9..bff4361e4868 100644 --- a/crates/rpc/rpc/src/eth/mod.rs +++ b/crates/rpc/rpc/src/eth/mod.rs @@ -8,7 +8,7 @@ pub mod gas_oracle; mod id_provider; mod logs_utils; mod pubsub; -pub(crate) mod revm_utils; +pub mod revm_utils; mod signer; pub(crate) mod utils; From d35531e79ea56b4f2fa97b3c4f4f6eff68ddcc6c Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 10 Jul 2023 19:41:28 +1000 Subject: [PATCH 114/722] fix: remove txn header from getPayloadBodies (#3688) --- crates/rpc/rpc-types/src/eth/engine/payload.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/rpc/rpc-types/src/eth/engine/payload.rs b/crates/rpc/rpc-types/src/eth/engine/payload.rs index d60c81b5ae2d..7b593d802600 100644 --- a/crates/rpc/rpc-types/src/eth/engine/payload.rs +++ b/crates/rpc/rpc-types/src/eth/engine/payload.rs @@ -4,7 +4,7 @@ use reth_primitives::{ Address, Block, Bloom, Bytes, Header, SealedBlock, TransactionSigned, UintTryTo, Withdrawal, H256, H64, U256, U64, }; -use reth_rlp::{Decodable, Encodable}; +use reth_rlp::Decodable; use serde::{ser::SerializeMap, Deserialize, Serialize, Serializer}; /// The execution payload body response that allows for `null` values. 
@@ -229,7 +229,7 @@ impl From for ExecutionPayloadBody { fn from(value: Block) -> Self { let transactions = value.body.into_iter().map(|tx| { let mut out = Vec::new(); - tx.encode(&mut out); + tx.encode_enveloped(&mut out); out.into() }); ExecutionPayloadBody { From 93a6ff98aaaf28ed656893aff01682c7da388b62 Mon Sep 17 00:00:00 2001 From: "lukebrich.eth" Date: Mon, 10 Jul 2023 06:12:45 -0400 Subject: [PATCH 115/722] feat: enforce txpool propagation setting (#3677) Co-authored-by: Matthias Seitz --- crates/transaction-pool/src/pool/mod.rs | 4 ++-- crates/transaction-pool/src/pool/txpool.rs | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 5ae0147d6430..e7f167c82a6e 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -232,13 +232,13 @@ where /// Returns hashes of _all_ transactions in the pool. pub(crate) fn pooled_transactions_hashes(&self) -> Vec { let pool = self.pool.read(); - pool.all().hashes_iter().collect() + pool.all().transactions_iter().filter(|tx| tx.propagate).map(|tx| *tx.hash()).collect() } /// Returns _all_ transactions in the pool. pub(crate) fn pooled_transactions(&self) -> Vec>> { let pool = self.pool.read(); - pool.all().transactions_iter().collect() + pool.all().transactions_iter().filter(|tx| tx.propagate).collect() } /// Updates the entire pool after a new block was executed. diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 71b5590a688c..9793da32a30b 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -637,6 +637,7 @@ impl AllTransactions { } /// Returns an iterator over all _unique_ hashes in the pool + #[allow(unused)] pub(crate) fn hashes_iter(&self) -> impl Iterator + '_ { self.by_hash.keys().copied() } From 7d33db874853d61db740830c11f1ba1c1ec5812d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 10 Jul 2023 12:16:02 +0200 Subject: [PATCH 116/722] feat: add subpool transaction streams (#3668) --- crates/transaction-pool/src/lib.rs | 5 +- crates/transaction-pool/src/traits.rs | 71 ++++++++++++++++++++++++++- 2 files changed, 74 insertions(+), 2 deletions(-) diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 8689dcc377f3..f5c92b8086d2 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -156,7 +156,10 @@ pub use crate::{ }, error::PoolResult, ordering::{GasCostOrdering, TransactionOrdering}, - pool::{AllTransactionsEvents, FullTransactionEvent, TransactionEvent, TransactionEvents}, + pool::{ + state::SubPool, AllTransactionsEvents, FullTransactionEvent, TransactionEvent, + TransactionEvents, + }, traits::{ AllPoolTransactions, BestTransactions, BlockInfo, CanonicalStateUpdate, ChangedAccount, NewTransactionEvent, PoolSize, PoolTransaction, PooledTransaction, PropagateKind, diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index afbfb47a3496..b5f5d3186341 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -4,12 +4,19 @@ use crate::{ validate::ValidPoolTransaction, AllTransactionsEvents, }; +use futures_util::{ready, Stream}; use reth_primitives::{ Address, FromRecoveredTransaction, IntoRecoveredTransaction, PeerId, Transaction, TransactionKind, TransactionSignedEcRecovered, TxHash, EIP1559_TX_TYPE_ID, H256, U256, }; use 
reth_rlp::Encodable;
-use std::{collections::HashMap, fmt, sync::Arc};
+use std::{
+    collections::HashMap,
+    fmt,
+    pin::Pin,
+    sync::Arc,
+    task::{Context, Poll},
+};
 use tokio::sync::mpsc::Receiver;

 #[cfg(feature = "serde")]
@@ -106,6 +113,34 @@ pub trait TransactionPool: Send + Sync + Clone {
     /// Returns a new stream that yields new valid transactions added to the pool.
     fn new_transactions_listener(&self) -> Receiver>;

+    /// Returns a new Stream that yields new transactions added to the pending sub-pool.
+    ///
+    /// This is a convenience wrapper around [Self::new_transactions_listener] that filters for
+    /// [SubPool::Pending](crate::SubPool).
+    fn new_pending_pool_transactions_listener(
+        &self,
+    ) -> NewSubpoolTransactionStream {
+        NewSubpoolTransactionStream::new(self.new_transactions_listener(), SubPool::Pending)
+    }
+
+    /// Returns a new Stream that yields new transactions added to the basefee sub-pool.
+    ///
+    /// This is a convenience wrapper around [Self::new_transactions_listener] that filters for
+    /// [SubPool::BaseFee](crate::SubPool).
+    fn new_basefee_pool_transactions_listener(
+        &self,
+    ) -> NewSubpoolTransactionStream {
+        NewSubpoolTransactionStream::new(self.new_transactions_listener(), SubPool::BaseFee)
+    }
+
+    /// Returns a new Stream that yields new transactions added to the queued sub-pool.
+    ///
+    /// This is a convenience wrapper around [Self::new_transactions_listener] that filters for
+    /// [SubPool::Queued](crate::SubPool).
+    fn new_queued_transactions_listener(&self) -> NewSubpoolTransactionStream {
+        NewSubpoolTransactionStream::new(self.new_transactions_listener(), SubPool::Queued)
+    }
+
     /// Returns the _hashes_ of all transactions in the pool.
     ///
     /// Note: This returns a `Vec` but should guarantee that all hashes are unique.
@@ -620,3 +655,37 @@ pub struct BlockInfo {
     /// currently tracking.
pub pending_basefee: u128,
 }
+
+/// A Stream that yields full transactions from the subpool
+#[must_use = "streams do nothing unless polled"]
+#[derive(Debug)]
+pub struct NewSubpoolTransactionStream {
+    st: Receiver>,
+    subpool: SubPool,
+}
+
+// === impl NewSubpoolTransactionStream ===
+
+impl NewSubpoolTransactionStream {
+    /// Create a new stream that yields full transactions from the subpool
+    pub fn new(st: Receiver>, subpool: SubPool) -> Self {
+        Self { st, subpool }
+    }
+}
+
+impl Stream for NewSubpoolTransactionStream {
+    type Item = NewTransactionEvent;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> {
+        loop {
+            match ready!(self.st.poll_recv(cx)) {
+                Some(event) => {
+                    if event.subpool == self.subpool {
+                        return Poll::Ready(Some(event))
+                    }
+                }
+                None => return Poll::Ready(None),
+            }
+        }
+    }
+}

From 31af4d55bc5ad5fe976ddf4aa50ce441699e5f9c Mon Sep 17 00:00:00 2001
From: Georgios Konstantopoulos
Date: Mon, 10 Jul 2023 14:21:54 +0300
Subject: [PATCH 117/722] release: 0.1.0-alpha.3 (#3691)

---
 Cargo.lock | 90 +++++++++++++++++++++++++++---------------------------
 Cargo.toml | 2 +-
 2 files changed, 46 insertions(+), 46 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index f968525ed50f..53fd07832bed 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -918,7 +918,7 @@ checksum = "67ba02a97a2bd10f4b59b25c7973101c79642302776489e030cd13cdab09ed15"

 [[package]]
 name = "codecs-derive"
-version = "0.1.0-alpha.2"
+version = "0.1.0-alpha.3"
 dependencies = [
  "convert_case 0.6.0",
  "parity-scale-codec",
@@ -1748,7 +1748,7 @@ dependencies = [

 [[package]]
 name = "ef-tests"
-version = "0.1.0-alpha.2"
+version = "0.1.0-alpha.3"
 dependencies = [
  "reth-db",
  "reth-interfaces",
@@ -4957,7 +4957,7 @@ dependencies = [

 [[package]]
 name = "reth"
-version = "0.1.0-alpha.2"
+version = "0.1.0-alpha.3"
 dependencies = [
  "backon",
  "clap 4.1.8",
@@ -5022,7 +5022,7 @@ dependencies = [

 [[package]]
 name = "reth-auto-seal-consensus"
-version = "0.1.0-alpha.2"
+version = "0.1.0-alpha.3"
 dependencies = [
  "futures-util",
  "reth-beacon-consensus",
@@ -5039,7 +5039,7 @@ dependencies = [

 [[package]]
 name = "reth-basic-payload-builder"
-version = "0.1.0-alpha.2"
+version = "0.1.0-alpha.3"
 dependencies = [
  "futures-core",
  "futures-util",
@@ -5058,7 +5058,7 @@ dependencies = [

 [[package]]
 name = "reth-beacon-consensus"
-version = "0.1.0-alpha.2"
+version = "0.1.0-alpha.3"
 dependencies = [
  "assert_matches",
  "futures",
@@ -5083,7 +5083,7 @@ dependencies = [

 [[package]]
 name = "reth-blockchain-tree"
-version = "0.1.0-alpha.2"
+version = "0.1.0-alpha.3"
 dependencies = [
  "aquamarine",
  "assert_matches",
@@ -5102,7 +5102,7 @@ dependencies = [

 [[package]]
 name = "reth-codecs"
-version = "0.1.0-alpha.2"
+version = "0.1.0-alpha.3"
 dependencies = [
  "arbitrary",
  "bytes",
@@ -5117,7 +5117,7 @@ dependencies = [

 [[package]]
 name = "reth-config"
-version = "0.1.0-alpha.2"
+version = "0.1.0-alpha.3"
 dependencies = [
  "confy",
  "reth-discv4",
@@ -5134,7 +5134,7 @@ dependencies = [

 [[package]]
 name = "reth-consensus-common"
-version = "0.1.0-alpha.2"
+version = "0.1.0-alpha.3"
 dependencies = [
  "assert_matches",
  "mockall",
@@ -5145,7 +5145,7 @@ dependencies = [

 [[package]]
 name = "reth-db"
-version = "0.1.0-alpha.2"
+version = "0.1.0-alpha.3"
 dependencies = [
  "arbitrary",
  "assert_matches",
@@ -5186,7 +5186,7 @@ dependencies = [

 [[package]]
 name = "reth-discv4"
-version = "0.1.0-alpha.2"
+version = "0.1.0-alpha.3"
 dependencies = [
  "discv5",
  "enr",
@@ -5209,7 +5209,7 @@ dependencies = [

 [[package]]
 name = "reth-dns-discovery"
-version = 
"0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "async-trait", "data-encoding", @@ -5233,7 +5233,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "assert_matches", "futures", @@ -5258,7 +5258,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "aes 0.8.2", "block-padding", @@ -5289,7 +5289,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "arbitrary", "async-trait", @@ -5322,7 +5322,7 @@ dependencies = [ [[package]] name = "reth-interfaces" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "arbitrary", "async-trait", @@ -5350,7 +5350,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "async-trait", "bytes", @@ -5370,7 +5370,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "bitflags 2.3.2", "byteorder", @@ -5390,7 +5390,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "bindgen 0.65.1", "cc", @@ -5399,7 +5399,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "futures", "metrics", @@ -5409,7 +5409,7 @@ dependencies = [ [[package]] name = "reth-metrics-derive" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "metrics", "once_cell", @@ -5423,7 +5423,7 @@ dependencies = [ [[package]] name = "reth-net-common" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "pin-project", "reth-primitives", @@ -5432,7 +5432,7 @@ dependencies = [ [[package]] name = "reth-net-nat" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "igd", "pin-project-lite", @@ -5446,7 +5446,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "aquamarine", "async-trait", @@ -5496,7 +5496,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "async-trait", "reth-eth-wire", @@ -5509,7 +5509,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "futures-util", "reth-interfaces", @@ -5528,7 +5528,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "arbitrary", "assert_matches", @@ -5575,7 +5575,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "auto_impl", "derive_more", @@ -5596,7 +5596,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "once_cell", "reth-consensus-common", @@ -5612,7 +5612,7 @@ dependencies = [ [[package]] name = "reth-revm-inspectors" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "boa_engine", "boa_gc", @@ -5628,7 +5628,7 @@ dependencies = [ [[package]] name = "reth-revm-primitives" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "reth-primitives", "revm", @@ -5636,7 +5636,7 @@ dependencies = [ [[package]] name = "reth-rlp" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ 
"arrayvec", "auto_impl", @@ -5654,7 +5654,7 @@ dependencies = [ [[package]] name = "reth-rlp-derive" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "proc-macro2 1.0.63", "quote 1.0.28", @@ -5663,7 +5663,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "assert_matches", "async-trait", @@ -5709,7 +5709,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "jsonrpsee", "reth-primitives", @@ -5719,7 +5719,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "async-trait", "futures", @@ -5733,7 +5733,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "hyper", "jsonrpsee", @@ -5763,7 +5763,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "assert_matches", "async-trait", @@ -5783,7 +5783,7 @@ dependencies = [ [[package]] name = "reth-rpc-types" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "assert_matches", "jsonrpsee-types", @@ -5799,7 +5799,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "aquamarine", "assert_matches", @@ -5835,7 +5835,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "dyn-clone", "futures-util", @@ -5848,7 +5848,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "tracing", "tracing-appender", @@ -5858,7 +5858,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "aquamarine", "async-trait", @@ -5884,7 +5884,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" dependencies = [ "criterion", "derive_more", diff --git a/Cargo.toml b/Cargo.toml index 560282518493..c73f2f87aba3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -54,7 +54,7 @@ default-members = ["bin/reth"] resolver = "2" [workspace.package] -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" edition = "2021" rust-version = "1.70" # Remember to update .clippy.toml and README.md license = "MIT OR Apache-2.0" From 91dd782243e42e17532b143209b0925320e3e19e Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 10 Jul 2023 12:07:13 +0100 Subject: [PATCH 118/722] feat(config, primitives): validate Receipts prune part (#3587) --- crates/config/src/config.rs | 7 ++- crates/primitives/src/serde_helper/mod.rs | 2 + crates/primitives/src/serde_helper/prune.rs | 49 +++++++++++++++++++++ 3 files changed, 56 insertions(+), 2 deletions(-) create mode 100644 crates/primitives/src/serde_helper/prune.rs diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index f0dfb4ac2bbb..40836b8c02c5 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -5,7 +5,7 @@ use reth_downloaders::{ headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; use reth_network::{NetworkConfigBuilder, PeersConfig, SessionsConfig}; -use reth_primitives::PruneMode; +use reth_primitives::{serde_helper::deserialize_opt_prune_mode_with_min_distance, PruneMode}; use secp256k1::SecretKey; use serde::{Deserialize, Serialize}; 
use std::path::PathBuf;
@@ -305,7 +305,10 @@ pub struct PruneParts {
     #[serde(skip_serializing_if = "Option::is_none")]
     pub transaction_lookup: Option<PruneMode>,
     /// Receipts pruning configuration.
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(
+        skip_serializing_if = "Option::is_none",
+        deserialize_with = "deserialize_opt_prune_mode_with_min_distance::<64, _>"
+    )]
     pub receipts: Option<PruneMode>,
     /// Account History pruning configuration.
     #[serde(skip_serializing_if = "Option::is_none")]
diff --git a/crates/primitives/src/serde_helper/mod.rs b/crates/primitives/src/serde_helper/mod.rs
index c63e7f62756f..7cd85f892f98 100644
--- a/crates/primitives/src/serde_helper/mod.rs
+++ b/crates/primitives/src/serde_helper/mod.rs
@@ -10,6 +10,8 @@ use crate::H256;
 pub use jsonu256::*;
 
 pub mod num;
+mod prune;
+pub use prune::deserialize_opt_prune_mode_with_min_distance;
 
 /// serde functions for handling primitive `u64` as [U64](crate::U64)
 pub mod u64_hex {
diff --git a/crates/primitives/src/serde_helper/prune.rs b/crates/primitives/src/serde_helper/prune.rs
new file mode 100644
index 000000000000..7dffafdf8f7f
--- /dev/null
+++ b/crates/primitives/src/serde_helper/prune.rs
@@ -0,0 +1,49 @@
+use crate::PruneMode;
+use serde::{Deserialize, Deserializer};
+
+/// Deserializes [`Option<PruneMode>`] and validates that the value contained in
+/// [PruneMode::Distance] (if any) is not less than the const generic parameter `MIN_DISTANCE`.
+pub fn deserialize_opt_prune_mode_with_min_distance<
+    'de,
+    const MIN_DISTANCE: u64,
+    D: Deserializer<'de>,
+>(
+    deserializer: D,
+) -> Result<Option<PruneMode>, D::Error> {
+    let prune_mode = Option::<PruneMode>::deserialize(deserializer)?;
+
+    match prune_mode {
+        Some(PruneMode::Distance(distance)) if distance < MIN_DISTANCE => {
+            Err(serde::de::Error::invalid_value(
+                serde::de::Unexpected::Unsigned(distance),
+                // This message should have "expected" wording, so we say "not less than"
+                &format!("prune mode distance not less than {MIN_DISTANCE} blocks").as_str(),
+            ))
+        }
+        _ => Ok(prune_mode),
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use crate::PruneMode;
+    use assert_matches::assert_matches;
+    use serde::Deserialize;
+
+    #[test]
+    fn deserialize_opt_prune_mode_with_min_distance() {
+        #[derive(Debug, Deserialize, PartialEq, Eq)]
+        struct V(
+            #[serde(
+                deserialize_with = "super::deserialize_opt_prune_mode_with_min_distance::<10, _>"
+            )]
+            Option<PruneMode>,
+        );
+
+        assert!(serde_json::from_str::<V>(r#"{"distance": 10}"#).is_ok());
+        assert_matches!(
+            serde_json::from_str::<V>(r#"{"distance": 9}"#),
+            Err(err) if err.to_string() == "invalid value: integer `9`, expected prune mode distance not less than 10 blocks"
+        );
+    }
+}
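For reference, the validated deserializer introduced above can be exercised on its own. A minimal sketch, assuming reth-primitives and serde_json as dependencies; the `ReceiptsSection` struct is illustrative, only `deserialize_opt_prune_mode_with_min_distance` and `PruneMode` (with its `{"distance": N}` JSON shape, taken from the test above) come from this patch:

    use reth_primitives::{serde_helper::deserialize_opt_prune_mode_with_min_distance, PruneMode};
    use serde::Deserialize;

    /// Illustrative config section mirroring how `PruneParts.receipts` wires up the helper.
    #[derive(Debug, Deserialize)]
    struct ReceiptsSection {
        /// Distances below 64 blocks are rejected at deserialization time.
        #[serde(deserialize_with = "deserialize_opt_prune_mode_with_min_distance::<64, _>")]
        receipts: Option<PruneMode>,
    }

    fn main() {
        // 100 >= 64, so this parses into `PruneMode::Distance(100)`.
        let ok: ReceiptsSection = serde_json::from_str(r#"{"receipts":{"distance":100}}"#).unwrap();
        assert_eq!(ok.receipts, Some(PruneMode::Distance(100)));

        // 10 < 64, so this fails with the "prune mode distance not less than 64 blocks" message.
        assert!(serde_json::from_str::<ReceiptsSection>(r#"{"receipts":{"distance":10}}"#).is_err());
    }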
From c8d0e7e9b34e1ff3658084a34fa21a759d32928b Mon Sep 17 00:00:00 2001
From: Dan Cline <6798349+Rjected@users.noreply.github.com>
Date: Mon, 10 Jul 2023 07:23:24 -0400
Subject: [PATCH 119/722] feat: run pipeline if latest finalized is far from pipeline progress (#3662)

---
 crates/consensus/beacon/src/engine/mod.rs | 71 ++++++++++++++++-------
 1 file changed, 51 insertions(+), 20 deletions(-)

diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs
index 175f6c5dca74..f8b95d4682ce 100644
--- a/crates/consensus/beacon/src/engine/mod.rs
+++ b/crates/consensus/beacon/src/engine/mod.rs
@@ -335,7 +335,9 @@ where
     }
 
     /// Returns true if the distance from the local tip to the block is greater than the configured
-    /// threshold
+    /// threshold.
+    ///
+    /// If the `local_tip` is greater than the `block`, then this will return false.
     #[inline]
     fn exceeds_pipeline_run_threshold(&self, local_tip: u64, block: u64) -> bool {
         block > local_tip && block - local_tip > self.pipeline_run_threshold
@@ -1311,27 +1313,56 @@ where
                 )
                 .is_none()
                 {
-                    // Update the state and hashes of the blockchain tree if possible.
-                    match self
-                        .update_tree_on_finished_pipeline(sync_target_state.finalized_block_hash)
+                    let newest_finalized = self
+                        .forkchoice_state_tracker
+                        .sync_target_state()
+                        .map(|s| s.finalized_block_hash)
+                        .and_then(|h| self.blockchain.buffered_header_by_hash(h))
+                        .map(|header| header.number);
+
+                    // The block number that the pipeline finished at - if the progress or newest
+                    // finalized is None then we can't check the distance anyways.
+                    //
+                    // If both are Some, we perform another distance check and return the desired
+                    // pipeline target
+                    let pipeline_target = if let (Some(progress), Some(finalized_number)) =
+                        (ctrl.progress(), newest_finalized)
                     {
-                        Ok(synced) => {
-                            if synced {
-                                // we're consider this synced and transition to live sync
-                                self.sync_state_updater.update_sync_state(SyncState::Idle);
-                            } else {
-                                // We don't have the finalized block in the database, so
-                                // we need to run another pipeline.
-                                self.sync.set_pipeline_sync_target(
-                                    sync_target_state.finalized_block_hash,
-                                );
-                            }
-                        }
-                        Err(error) => {
-                            error!(target: "consensus::engine", ?error, "Error restoring blockchain tree state");
-                            return Some(Err(error.into()))
-                        }
+                        // Determines whether we should run the pipeline again, in case the
+                        // new gap is large enough to warrant running the pipeline.
+                        self.can_pipeline_sync_to_finalized(progress, finalized_number, None)
+                    } else {
+                        None
                    };
+
+                    // If the distance is large enough, we should run the pipeline again to prevent
+                    // the tree update from executing too many blocks and blocking.
+                    if let Some(target) = pipeline_target {
+                        // run the pipeline to the target since the distance is sufficient
+                        self.sync.set_pipeline_sync_target(target);
+                    } else {
+                        // Update the state and hashes of the blockchain tree if possible.
+                        match self.update_tree_on_finished_pipeline(
+                            sync_target_state.finalized_block_hash,
+                        ) {
+                            Ok(synced) => {
+                                if synced {
+                                    // we consider this synced and transition to live sync
+                                    self.sync_state_updater.update_sync_state(SyncState::Idle);
+                                } else {
+                                    // We don't have the finalized block in the database, so
+                                    // we need to run another pipeline.
+                                    self.sync.set_pipeline_sync_target(
+                                        sync_target_state.finalized_block_hash,
+                                    );
+                                }
+                            }
+                            Err(error) => {
+                                error!(target: "consensus::engine", ?error, "Error restoring blockchain tree state");
+                                return Some(Err(error.into()))
+                            }
+                        };
+                    }
                 }
             }
             // Any pipeline error at this point is fatal.
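The distance check that drives this decision is self-contained enough to show in isolation. A minimal sketch of the same logic; the free function is illustrative, mirroring `exceeds_pipeline_run_threshold` above with the threshold passed explicitly:

    /// A gap only warrants another pipeline run when the target is ahead of the
    /// local tip and the distance between them exceeds the configured threshold.
    fn exceeds_pipeline_run_threshold(threshold: u64, local_tip: u64, block: u64) -> bool {
        block > local_tip && block - local_tip > threshold
    }

    fn main() {
        let threshold = 32;
        // Finalized block far ahead of pipeline progress: run the pipeline again.
        assert!(exceeds_pipeline_run_threshold(threshold, 100, 200));
        // Small gap: let the blockchain tree execute the remaining blocks instead.
        assert!(!exceeds_pipeline_run_threshold(threshold, 100, 120));
        // Local tip at or past the target: never triggers a pipeline run.
        assert!(!exceeds_pipeline_run_threshold(threshold, 200, 100));
    }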
From 3910babb913e17debd2479c385a48799d7fa6157 Mon Sep 17 00:00:00 2001
From: fomotrader <82184770+fomotrader@users.noreply.github.com>
Date: Mon, 10 Jul 2023 07:34:12 -0400
Subject: [PATCH 120/722] feat: add full pending txs to stream (#3649)

Co-authored-by: Matthias Seitz
---
 crates/rpc/rpc-types/src/eth/pubsub.rs | 20 ++++++++---
 crates/rpc/rpc/src/eth/pubsub.rs       | 46 ++++++++++++++++++++----
 2 files changed, 55 insertions(+), 11 deletions(-)

diff --git a/crates/rpc/rpc-types/src/eth/pubsub.rs b/crates/rpc/rpc-types/src/eth/pubsub.rs
index 63f152b66283..f027037c176a 100644
--- a/crates/rpc/rpc-types/src/eth/pubsub.rs
+++ b/crates/rpc/rpc-types/src/eth/pubsub.rs
@@ -1,6 +1,10 @@
 //! Ethereum types for pub-sub
-use crate::{eth::Filter, Log, RichHeader};
+use crate::{
+    eth::{Filter, Transaction},
+    Log, RichHeader,
+};
+
 use reth_primitives::H256;
 use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer};
@@ -14,6 +18,8 @@ pub enum SubscriptionResult {
     Log(Box<Log>),
     /// Transaction hash
     TransactionHash(H256),
+    /// Full Transaction
+    FullTransaction(Box<Transaction>),
     /// SyncStatus
     SyncState(PubSubSyncStatus),
 }
@@ -49,6 +55,7 @@ impl Serialize for SubscriptionResult {
             SubscriptionResult::Header(ref header) => header.serialize(serializer),
             SubscriptionResult::Log(ref log) => log.serialize(serializer),
             SubscriptionResult::TransactionHash(ref hash) => hash.serialize(serializer),
+            SubscriptionResult::FullTransaction(ref tx) => tx.serialize(serializer),
             SubscriptionResult::SyncState(ref sync) => sync.serialize(serializer),
         }
     }
@@ -76,10 +83,10 @@ pub enum SubscriptionKind {
     Logs,
     /// New Pending Transactions subscription.
     ///
-    /// Returns the hash for all transactions that are added to the pending state and are signed
-    /// with a key that is available in the node. When a transaction that was previously part of
-    /// the canonical chain isn't part of the new canonical chain after a reorganization its again
-    /// emitted.
+    /// Returns the hash or full tx for all transactions that are added to the pending state and
+    /// are signed with a key that is available in the node. When a transaction that was
+    /// previously part of the canonical chain isn't part of the new canonical chain after a
+    /// reorganization, it is emitted again.
     NewPendingTransactions,
     /// Node syncing status subscription.
     ///
@@ -97,6 +104,8 @@ pub enum Params {
     None,
     /// Log parameters.
     Logs(Box<Filter>),
+    /// New pending transaction parameters.
+    NewPendingTransactions(bool),
 }
 
 impl Serialize for Params {
@@ -107,6 +116,7 @@
         match self {
             Params::None => (&[] as &[serde_json::Value]).serialize(serializer),
             Params::Logs(logs) => logs.serialize(serializer),
+            Params::NewPendingTransactions(full) => full.serialize(serializer),
         }
     }
 }
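On the wire, the new variant keeps the existing positional-parameter shapes. A minimal sketch of the expected serialization, assuming the `Params` type above is available as `reth_rpc_types::pubsub::Params` and serde_json as a dependency:

    use reth_rpc_types::pubsub::Params;

    fn main() {
        // `true` asks for full transaction objects instead of hashes.
        let full = Params::NewPendingTransactions(true);
        assert_eq!(serde_json::to_string(&full).unwrap(), "true");

        // Omitted parameters serialize as an empty positional list.
        assert_eq!(serde_json::to_string(&Params::None).unwrap(), "[]");
    }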
diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs
index f474426037e9..aa5dd7363f54 100644
--- a/crates/rpc/rpc/src/eth/pubsub.rs
+++ b/crates/rpc/rpc/src/eth/pubsub.rs
@@ -3,21 +3,22 @@ use crate::eth::logs_utils;
 use futures::StreamExt;
 use jsonrpsee::{server::SubscriptionMessage, PendingSubscriptionSink, SubscriptionSink};
 use reth_network_api::NetworkInfo;
-use reth_primitives::TxHash;
+use reth_primitives::{IntoRecoveredTransaction, TxHash};
 use reth_provider::{BlockReader, CanonStateSubscriptions, EvmEnvProvider};
 use reth_rpc_api::EthPubSubApiServer;
 use reth_rpc_types::FilteredParams;
 use std::sync::Arc;
 
+use crate::result::invalid_params_rpc_err;
 use reth_rpc_types::{
     pubsub::{
         Params, PubSubSyncStatus, SubscriptionKind, SubscriptionResult as EthSubscriptionResult,
         SyncStatusMetadata,
     },
-    Header, Log,
+    Header, Log, Transaction,
 };
 use reth_tasks::{TaskSpawner, TokioTaskExecutor};
-use reth_transaction_pool::TransactionPool;
+use reth_transaction_pool::{NewTransactionEvent, TransactionPool};
 use serde::Serialize;
 use tokio_stream::{
     wrappers::{BroadcastStream, ReceiverStream},
@@ -121,8 +122,34 @@ where
                 pipe_from_stream(accepted_sink, stream).await
             }
             SubscriptionKind::NewPendingTransactions => {
-                let stream =
-                    pubsub.pending_transaction_stream().map(EthSubscriptionResult::TransactionHash);
+                if let Some(params) = params {
+                    match params {
+                        Params::NewPendingTransactions(true) => {
+                            // full transaction objects requested
+                            let stream = pubsub.full_pending_transaction_stream().map(|tx| {
+                                EthSubscriptionResult::FullTransaction(Box::new(
+                                    Transaction::from_recovered(
+                                        tx.transaction.to_recovered_transaction(),
+                                    ),
+                                ))
+                            });
+                            return pipe_from_stream(accepted_sink, stream).await
+                        }
+                        Params::NewPendingTransactions(false) | Params::None => {
+                            // only hashes requested
+                        }
+                        Params::Logs(_) => {
+                            return Err(invalid_params_rpc_err(
+                                "Invalid params for newPendingTransactions",
+                            )
+                            .into())
+                        }
+                    }
+                }
+
+                let stream = pubsub
+                    .pending_transaction_hashes_stream()
+                    .map(EthSubscriptionResult::TransactionHash);
                 pipe_from_stream(accepted_sink, stream).await
             }
             SubscriptionKind::Syncing => {
@@ -241,9 +268,16 @@ where
     Pool: TransactionPool + 'static,
 {
     /// Returns a stream that yields the hashes of all transactions emitted by the txpool.
-    fn pending_transaction_stream(&self) -> impl Stream<Item = TxHash> {
+    fn pending_transaction_hashes_stream(&self) -> impl Stream<Item = TxHash> {
         ReceiverStream::new(self.pool.pending_transactions_listener())
     }
+
+    /// Returns a stream that yields all full transactions emitted by the txpool.
+    fn full_pending_transaction_stream(
+        &self,
+    ) -> impl Stream<Item = NewTransactionEvent<<Pool as TransactionPool>::Transaction>> {
+        self.pool.new_pending_pool_transactions_listener()
+    }
 }
 
 impl EthPubSubInner

From 7a1a48a8e13df8c3268d071ac68609660a6c16c5 Mon Sep 17 00:00:00 2001
From: Sanket Shanbhag
Date: Mon, 10 Jul 2023 17:53:41 +0530
Subject: [PATCH 121/722] test: add payload status error serde tests (#2803)

Co-authored-by: Georgios Konstantopoulos
Co-authored-by: Matthias Seitz
---
 .../rpc/rpc-types/src/eth/engine/payload.rs | 50 ++++++++++++++++++-
 1 file changed, 49 insertions(+), 1 deletion(-)

diff --git a/crates/rpc/rpc-types/src/eth/engine/payload.rs b/crates/rpc/rpc-types/src/eth/engine/payload.rs
index 7b593d802600..5b4da25aab15 100644
--- a/crates/rpc/rpc-types/src/eth/engine/payload.rs
+++ b/crates/rpc/rpc-types/src/eth/engine/payload.rs
@@ -413,7 +413,7 @@ pub enum PayloadValidationError {
     #[error("invalid block number")]
     InvalidBlockNumber,
     /// Thrown when a new payload contains a wrong state root
-    #[error("invalid merkle root (remote: {remote:?} local: {local:?})")]
+    #[error("invalid merkle root: (remote: {remote:?} local: {local:?})")]
     InvalidStateRoot {
         /// The state root of the payload we received from remote (CL)
         remote: H256,
@@ -568,6 +568,54 @@ mod tests {
         assert_eq!(serde_json::to_string(&status).unwrap(), full);
     }
 
+    #[test]
+    fn serde_payload_status_error_deserialize() {
+        let s = r#"{"status":"INVALID","latestValidHash":null,"validationError":"Failed to decode block"}"#;
+        let q = PayloadStatus {
+            latest_valid_hash: None,
+            status: PayloadStatusEnum::Invalid {
+                validation_error: "Failed to decode block".to_string(),
+            },
+        };
+        assert_eq!(q, serde_json::from_str(s).unwrap());
+
+        let s = r#"{"status":"INVALID","latestValidHash":null,"validationError":"links to previously rejected block"}"#;
+        let q = PayloadStatus {
+            latest_valid_hash: None,
+            status: PayloadStatusEnum::Invalid {
+                validation_error: PayloadValidationError::LinksToRejectedPayload.to_string(),
+            },
+        };
+        assert_eq!(q, serde_json::from_str(s).unwrap());
+
+        let s = r#"{"status":"INVALID","latestValidHash":null,"validationError":"invalid block number"}"#;
+        let q = PayloadStatus {
+            latest_valid_hash: None,
+            status: PayloadStatusEnum::Invalid {
+                validation_error: PayloadValidationError::InvalidBlockNumber.to_string(),
+            },
+        };
+        assert_eq!(q, serde_json::from_str(s).unwrap());
+
+        let s = r#"{"status":"INVALID","latestValidHash":null,"validationError":
merkle root: (remote: 0x3f77fb29ce67436532fee970e1add8f5cc80e8878c79b967af53b1fd92a0cab7 local: 0x603b9628dabdaadb442a3bb3d7e0360efc110e1948472909230909f1690fed17)"}"#; + let q = PayloadStatus { + latest_valid_hash: None, + status: PayloadStatusEnum::Invalid { + validation_error: PayloadValidationError::InvalidStateRoot { + remote: "0x3f77fb29ce67436532fee970e1add8f5cc80e8878c79b967af53b1fd92a0cab7" + .parse() + .unwrap(), + local: "0x603b9628dabdaadb442a3bb3d7e0360efc110e1948472909230909f1690fed17" + .parse() + .unwrap(), + } + .to_string(), + }, + }; + similar_asserts::assert_eq!(q, serde_json::from_str(s).unwrap()); + } + #[test] fn serde_roundtrip_legacy_txs_payload() { // pulled from hive tests From ab2dc70368d953dc0df8d1b87f6af1421af53e22 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 10 Jul 2023 15:47:29 +0200 Subject: [PATCH 122/722] chore(deps): bump smolstr hex-literal (#3693) --- Cargo.lock | 6 +++--- crates/net/eth-wire/Cargo.toml | 2 +- crates/rlp/Cargo.toml | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 53fd07832bed..fd96bf6bdbe2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5644,7 +5644,7 @@ dependencies = [ "criterion", "ethereum-types", "ethnum", - "hex-literal 0.3.4", + "hex-literal 0.4.1", "pprof", "reth-rlp", "reth-rlp-derive", @@ -6693,9 +6693,9 @@ checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "smol_str" -version = "0.1.24" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fad6c857cbab2627dcf01ec85a623ca4e7dcb5691cbaa3d7fb7653671f0d09c9" +checksum = "74212e6bbe9a4352329b2f68ba3130c15a3f26fe88ff22dbdc6cdd58fa85e99c" dependencies = [ "serde", ] diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index de43afd7130b..987f7505d9c3 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -36,7 +36,7 @@ tokio-stream = { workspace = true } pin-project = { workspace = true } tracing = { workspace = true } snap = "1.0.5" -smol_str = "0.1" +smol_str = "0.2" async-trait = { workspace = true } # arbitrary utils diff --git a/crates/rlp/Cargo.toml b/crates/rlp/Cargo.toml index 234e9a52c983..bae5cf288271 100644 --- a/crates/rlp/Cargo.toml +++ b/crates/rlp/Cargo.toml @@ -13,7 +13,7 @@ arrayvec = { version = "0.7", default-features = false } auto_impl = "1" bytes.workspace = true ethnum = { version = "1", default-features = false, optional = true } -smol_str = { version = "0.1", default-features = false, optional = true } +smol_str = { version = "0.2", default-features = false, optional = true } ethereum-types = { version = "0.14", features = ["codec"], optional = true } revm-primitives = { workspace = true, features = ["serde"] } reth-rlp-derive = { path = "./rlp-derive", optional = true } @@ -27,7 +27,7 @@ reth-rlp = { workspace = true, features = [ "smol_str", ] } criterion = "0.4.0" -hex-literal = "0.3" +hex-literal = "0.4" pprof = { version = "0.11", features = ["flamegraph", "frame-pointer", "criterion"] } [features] From ad2a8a5a1754321741b48381f11a7d65a3cd3470 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 10 Jul 2023 17:31:43 +0200 Subject: [PATCH 123/722] chore(deps): bump pprof criterion (#3698) --- Cargo.lock | 87 +++++++--------------------- crates/primitives/Cargo.toml | 4 +- crates/rlp/Cargo.toml | 4 +- crates/stages/Cargo.toml | 4 +- crates/storage/db/Cargo.toml | 4 +- crates/storage/libmdbx-rs/Cargo.toml | 4 +- crates/trie/Cargo.toml | 2 +- 7 files 
changed, 31 insertions(+), 78 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fd96bf6bdbe2..8c6da07c2819 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -268,17 +268,6 @@ dependencies = [ "wildmatch", ] -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi 0.1.19", - "libc", - "winapi", -] - [[package]] name = "auto_impl" version = "1.1.0" @@ -852,18 +841,6 @@ dependencies = [ "libloading", ] -[[package]] -name = "clap" -version = "3.2.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5" -dependencies = [ - "bitflags 1.3.2", - "clap_lex 0.2.4", - "indexmap 1.9.3", - "textwrap", -] - [[package]] name = "clap" version = "4.1.8" @@ -872,7 +849,7 @@ checksum = "c3d7ae14b20b94cb02149ed21a86c423859cbe18dc7ed69845cace50e52b40a5" dependencies = [ "bitflags 1.3.2", "clap_derive", - "clap_lex 0.3.2", + "clap_lex", "is-terminal", "once_cell", "strsim 0.10.0", @@ -892,15 +869,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "clap_lex" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" -dependencies = [ - "os_str_bytes", -] - [[package]] name = "clap_lex" version = "0.3.2" @@ -1075,9 +1043,9 @@ checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" [[package]] name = "cpp_demangle" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b446fd40bcc17eddd6a4a78f24315eb90afdb3334999ddfd4909985c47722442" +checksum = "ee34052ee3d93d6d8f3e6f81d85c47921f6653a19a7b70e939e3e602d893a674" dependencies = [ "cfg-if", ] @@ -1117,20 +1085,20 @@ dependencies = [ [[package]] name = "criterion" -version = "0.4.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7c76e09c1aae2bc52b3d2f29e13c6572553b30c4aa1b8a49fd70de6412654cb" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" dependencies = [ "anes", - "atty", "cast", "ciborium", - "clap 3.2.23", + "clap", "criterion-plot", "futures", + "is-terminal", "itertools", - "lazy_static", "num-traits", + "once_cell", "oorandom", "plotters", "rayon", @@ -2691,15 +2659,6 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - [[package]] name = "hermit-abi" version = "0.2.6" @@ -3194,14 +3153,14 @@ dependencies = [ [[package]] name = "is-terminal" -version = "0.4.4" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b6b32576413a8e69b90e952e4a026476040d81017b80445deda5f2d3921857" +checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f" dependencies = [ "hermit-abi 0.3.1", "io-lifetimes", - "rustix 0.36.11", - "windows-sys 0.45.0", + "rustix 0.37.11", + "windows-sys 0.48.0", ] [[package]] @@ -4442,9 +4401,9 @@ dependencies = [ [[package]] name = "pprof" -version = "0.11.1" +version = "0.12.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "196ded5d4be535690899a4631cc9f18cdc41b7ebf24a79400f46f48e49a11059" +checksum = "6b90f8560ad8bd57b207b8293bc5226e48e89039a6e590c12a297d91b84c7e60" dependencies = [ "backtrace", "cfg-if", @@ -4960,7 +4919,7 @@ name = "reth" version = "0.1.0-alpha.3" dependencies = [ "backon", - "clap 4.1.8", + "clap", "comfy-table", "confy", "const-str", @@ -5327,7 +5286,7 @@ dependencies = [ "arbitrary", "async-trait", "auto_impl", - "clap 4.1.8", + "clap", "futures", "hex-literal 0.3.4", "modular-bitfield", @@ -6855,9 +6814,9 @@ dependencies = [ [[package]] name = "symbolic-common" -version = "10.2.1" +version = "12.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b55cdc318ede251d0957f07afe5fed912119b8c1bc5a7804151826db999e737" +checksum = "38f7afd8bcd36190409e6b71d89928f7f09d918a7aa3460d847bc49a538d672e" dependencies = [ "debugid", "memmap2", @@ -6867,9 +6826,9 @@ dependencies = [ [[package]] name = "symbolic-demangle" -version = "10.2.1" +version = "12.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79be897be8a483a81fff6a3a4e195b4ac838ef73ca42d348b3f722da9902e489" +checksum = "ec64922563a36e3fe686b6d99f06f25dacad2a202ac7502ed642930a188fb20a" dependencies = [ "cpp_demangle", "rustc-demangle", @@ -7024,12 +6983,6 @@ dependencies = [ "test-fuzz-internal", ] -[[package]] -name = "textwrap" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" - [[package]] name = "thin-vec" version = "0.2.12" diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 12822404dde0..0f0573363d1f 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -84,8 +84,8 @@ toml = "0.7.4" # necessary so we don't hit a "undeclared 'std'": # https://github.com/paradigmxyz/reth/pull/177#discussion_r1021172198 secp256k1 = { workspace = true } -criterion = "0.4.0" -pprof = { version = "0.11", features = ["flamegraph", "frame-pointer", "criterion"] } +criterion = "0.5" +pprof = { version = "0.12", features = ["flamegraph", "frame-pointer", "criterion"] } [features] default = [] diff --git a/crates/rlp/Cargo.toml b/crates/rlp/Cargo.toml index bae5cf288271..653543b51b94 100644 --- a/crates/rlp/Cargo.toml +++ b/crates/rlp/Cargo.toml @@ -26,9 +26,9 @@ reth-rlp = { workspace = true, features = [ "ethereum-types", "smol_str", ] } -criterion = "0.4.0" hex-literal = "0.4" -pprof = { version = "0.11", features = ["flamegraph", "frame-pointer", "criterion"] } +criterion = "0.5.0" +pprof = { version = "0.12", features = ["flamegraph", "frame-pointer", "criterion"] } [features] alloc = [] diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index bfe2b62884e1..3bfa592616da 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -63,8 +63,8 @@ rand = { workspace = true } paste = "1.0" # Stage benchmarks -pprof = { version = "0.11", features = ["flamegraph", "frame-pointer", "criterion"] } -criterion = { version = "0.4.0", features = ["async_futures"] } +pprof = { version = "0.12", features = ["flamegraph", "frame-pointer", "criterion"] } +criterion = { version = "0.5", features = ["async_futures"] } # io serde_json = { workspace = true } diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index 5b5987b31569..4141cabef87b 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -54,8 +54,8 @@ 
reth-interfaces = { workspace = true }
 tempfile = "3.3.0"
 test-fuzz = "4"
-pprof = { version = "0.11", features = ["flamegraph", "frame-pointer", "criterion"] }
-criterion = "0.4.0"
+pprof = { version = "0.12", features = ["flamegraph", "frame-pointer", "criterion"] }
+criterion = "0.5"
 iai = "0.1.1"
 tokio = { workspace = true, features = ["full"] }
 reth-db = { path = ".", features = ["test-utils", "bench"] }
diff --git a/crates/storage/libmdbx-rs/Cargo.toml b/crates/storage/libmdbx-rs/Cargo.toml
index bf67c037f968..669b63a54eab 100644
--- a/crates/storage/libmdbx-rs/Cargo.toml
+++ b/crates/storage/libmdbx-rs/Cargo.toml
@@ -29,8 +29,8 @@ default = []
 return-borrowed = []
 
 [dev-dependencies]
-pprof = { version = "0.11", features = ["flamegraph", "frame-pointer", "criterion"] }
-criterion = "0.4"
+pprof = { version = "0.12", features = ["flamegraph", "frame-pointer", "criterion"] }
+criterion = "0.5"
 rand = { workspace = true }
 rand_xorshift = "0.3"
 tempfile = "3"
diff --git a/crates/trie/Cargo.toml b/crates/trie/Cargo.toml
index 1c9369413bd6..9664d3192c27 100644
--- a/crates/trie/Cargo.toml
+++ b/crates/trie/Cargo.toml
@@ -44,7 +44,7 @@ triehash = "0.8"
 proptest = "1.0"
 tokio = { workspace = true, default-features = false, features = ["sync", "rt", "macros"] }
 tokio-stream = { workspace = true }
-criterion = "0.4"
+criterion = "0.5"
 
 [features]
 test-utils = ["triehash"]

From 4b261cef45768ef2bc597e799c0ffb80e4583168 Mon Sep 17 00:00:00 2001
From: ChosunOne
Date: Tue, 11 Jul 2023 01:15:40 +0900
Subject: [PATCH 124/722] style: replace next_sync_target Receiver loop with call to `wait_for` (#3618)

---
 crates/stages/src/stages/headers.rs | 20 +++++++++-----------
 1 file changed, 9 insertions(+), 11 deletions(-)

diff --git a/crates/stages/src/stages/headers.rs b/crates/stages/src/stages/headers.rs
index a1b7e26f05a7..a56e408ab990 100644
--- a/crates/stages/src/stages/headers.rs
+++ b/crates/stages/src/stages/headers.rs
@@ -119,27 +119,25 @@ where
         // reverse from there. Else, it should use whatever the forkchoice state reports.
         let target = match next_header {
             Some(header) if checkpoint + 1 != header.number => SyncTarget::Gap(header),
-            None => self.next_sync_target(head_num).await,
+            None => self
+                .next_sync_target(head_num)
+                .await
+                .ok_or(StageError::StageCheckpoint(checkpoint))?,
             _ => return Err(StageError::StageCheckpoint(checkpoint)),
         };
 
         Ok(SyncGap { local_head, target })
     }
 
-    async fn next_sync_target(&mut self, head: BlockNumber) -> SyncTarget {
+    async fn next_sync_target(&mut self, head: BlockNumber) -> Option<SyncTarget> {
         match self.mode {
             HeaderSyncMode::Tip(ref mut rx) => {
-                loop {
-                    let _ = rx.changed().await; // TODO: remove this await?
- let tip = rx.borrow(); - if !tip.is_zero() { - return SyncTarget::Tip(*tip) - } - } + let tip = rx.wait_for(|tip| !tip.is_zero()).await.ok()?; + Some(SyncTarget::Tip(*tip)) } HeaderSyncMode::Continuous => { - tracing::trace!(target: "sync::stages::headers", head, "No next header found, using continuous sync strategy"); - SyncTarget::TipNum(head + 1) + trace!(target: "sync::stages::headers", head, "No next header found, using continuous sync strategy"); + Some(SyncTarget::TipNum(head + 1)) } } } From 139ab93586effe48834eb9ba577f47c49d2db794 Mon Sep 17 00:00:00 2001 From: "Supernovahs.eth" <91280922+supernovahs@users.noreply.github.com> Date: Mon, 10 Jul 2023 23:00:22 +0530 Subject: [PATCH 125/722] test: `eth_getProof` without storage proof (#3643) Co-authored-by: Matthias Seitz --- crates/rpc/rpc-types/src/eth/account.rs | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/crates/rpc/rpc-types/src/eth/account.rs b/crates/rpc/rpc-types/src/eth/account.rs index a863d1c4d35c..205e3c41f5a9 100644 --- a/crates/rpc/rpc-types/src/eth/account.rs +++ b/crates/rpc/rpc-types/src/eth/account.rs @@ -61,3 +61,26 @@ pub struct RecoveredAccount { /// matches the current chain this would be true, otherwise false. pub is_valid_for_current_chain: bool, } + +#[test] +fn test_eip_1186_account_without_storage_proof() { + let response = r#"{ + "address":"0xc36442b4a4522e871399cd717abdd847ab11fe88", + "accountProof":["0xf90211a0a3deb2d4417de23e3c64a80ab58fa1cf4b62d7f193e36e507c8cf3794477b5fba0fc7ce8769dcfa9ae8d9d9537098c5cc5477b5920ed494e856049f5783c843c50a0f7d083f1e79a4c0ba1686b97a0e27c79c3a49432d333dc3574d5879cad1ca897a0cd36cf391201df64a786187d99013bdbaf5f0da6bfb8f5f2d6f0f60504f76ad9a03a9f09c92c3cefe87840938dc15fe68a3586d3b28b0f47c7037b6413c95a9feda0decb7e1969758d401af2d1cab14c0951814c094a3da108dd9f606a96840bae2ba060bf0c44ccc3ccbb5ab674841858cc5ea16495529442061295f1cecefd436659a039f8b307e0a295d6d03df089ee8211b52c5ae510d071f17ae5734a7055858002a0508040aef23dfe9c8ab16813258d95c4e765b4a557c2987fb7f3751693f34f4fa0c07e58aa6cd257695cdf147acd800c6197c235e2b5242c22e9da5d86b169d56aa00f2e89ddd874d28e62326ba365fd4f26a86cbd9f867ec0b3de69441ef8870f4ea06c1eb5455e43a36ec41a0372bde915f889cee070b8c8b8a78173d4d7df3ccebaa0cee4848c4119ed28e165e963c5b46ffa6dbeb0b14c8c51726124e7d26ff3f27aa0fc5b82dce2ee5a1691aa92b91dbeec7b2ba94df8116ea985dd7d3f4d5b8292c0a03675e148c987494e22a9767b931611fb1b7c7c287af128ea23aa70b88a1c458ba04f269f556f0f8d9cb2a9a6de52d35cf5a9098f7bb8badb1dc1d496096236aed880", + 
"0xf90211a0715ed9b0b002d050084eaecb878f457a348ccd47c7a597134766a7d705303de9a0c49f0fe23b0ca61892d75aebaf7277f00fdfd2022e746bab94de5d049a96edfca0b01f9c91f2bc1373862d7936198a5d11efaf370e2b9bb1dac2134b8e256ecdafa0888395aa7e0f699bb632215f08cdf92840b01e5d8e9a61d18355098cdfd50283a0ba748d609b0018667d311527a2302267209a38b08378f7d833fdead048de0defa098878e5d1461ceddeddf62bd8277586b120b5097202aa243607bc3fc8f30fc0ba0ad4111ee1952b6db0939a384986ee3fb34e0a5fc522955588fc22e159949196fa00fc948964dff427566bad468d62b0498c59df7ca7ae799ab29555d5d829d3742a0766922a88ebc6db7dfb06b03a5b17d0773094e46e42e7f2ba6a0b8567d9f1000a0db25676c4a36591f37c5e16f7199ab16559d82a2bed8c0c6a35f528a3c166bfda0149a5d50d238722e7d44c555169ed32a7f182fcb487ea378b4410a46a63a4e66a06b2298bbfe4972113e7e18cac0a8a39792c1a940ea128218343b8f88057d90aea096b2adb84105ae2aca8a7edf937e91e40872070a8641a74891e64db94d059df0a0ddbb162125ecfbd42edad8d8ef5d5e97ca7c72f54ddc404a61ae318bad0d2108a00e9a68f3e2b0c793d5fcd607edc5c55226d53fdfacd713077d6e01cb38d00d5ba05dc099f1685b2a4b7308e063e8e7905994f5c36969b1c6bfe3780c9878a4d85c80", + "0xf90211a05fc921be4d63ee07fe47a509e1abf2d69b00b6ea582a755467bf4371c2d2bd1fa0d552faa477e95f4631e2f7247aeb58693d90b03b2eee57e3fe8a9ddbd19ee42da028682c15041aa6ced1a5306aff311f5dbb8bbf7e77615994305ab3132e7842b5a0e5e0316b5046bde22d09676210885c5bea6a71703bf3b4dbac2a7199910f54faa0527fccccef17df926ccfb608f76d3c259848ed43cd24857a59c2a9352b6f1fa4a02b3863355b927b78c80ca379a4f7165bbe1644aaefed8a0bfa2001ae6284b392a09964c73eccc3d12e44dba112e31d8bd3eacbc6a42b4f17985d5b99dff968f24ea0cc426479c7ff0573629dcb2872e57f7438a28bd112a5c3fb2241bdda8031432ba04987fe755f260c2f7218640078af5f6ac4d98c2d0c001e398debc30221b14668a0e811d046c21c6cbaee464bf55553cbf88e70c2bda6951800c75c3896fdeb8e13a04aa8d0ab4946ac86e784e29000a0842cd6eebddaf8a82ece8aa69b72c98cfff5a0dfc010051ddceeec55e4146027c0eb4c72d7c242a103bf1977033ebe00a57b5da039e4da79576281284bf46ce6ca90d47832e4aefea4846615d7a61a7b976c8e3ea0dad1dfff731f7dcf37c499f4afbd5618247289c2e8c14525534b826a13b0a5a6a025f356cbc0469cb4dc326d98479e3b756e4418a67cbbb8ffb2d1abab6b1910e9a03f4082bf1da27b2a76f6bdc930eaaaf1e3f0e4d3135c2a9fb85e301f47f5174d80", + "0xf90211a0df6448f21c4e19da33f9c64c90bbcc02a499866d344c73576f63e3b4cbd4c000a010efb3b0f1d6365e2e4a389965e114e2a508ef8901f7d6c7564ba88793ff974aa0295bef2313a4f603614a5d5af3c659f63edfaa5b59a6ea2ac1da05f69ff4657ba0d8f16d5ddf4ba09616008148d2993dc50658accc2edf9111b6f464112db5d369a084604d9e06ddb53aeb7b13bb70fbe91f60df6bdc30f59bc7dc57ff37b6fe3325a04c64bd1dbeaecc54f18b23ab1ade2200970757f437e75e285f79a8c405315a14a0868075fc7f73b13863fc653c806f9a20f8e52dce44c15d2c4f94d6711021b985a01e85c49da7a8c91068468779e79b267d93d4fad01f44183353a381207304723ea05fcf186d55c53413f6988b16aa34721f0539f1cf0917f02e9d1a6ec8d3e191ffa00ad581842eab665351913e0afb3bfc070b9e4fad4d354c073f44c4f2a0c425c9a0000cb2066d81bf07f80703a40a5c5012e2c4b387bc53d381d37ee1d0f0a6643ba061f221d01c98721e79c525af5fc2eb9cc648c2ca54bb70520b868e2bdc037967a0e580f297c477df46362eb8e20371d8f0528091454bb5ad00d40368ca3ffdbd1fa079a13d35f79699f9e51d4fa07d03cd9b9dec4de9906559c0470629a663181652a0dbb402183633dbaa73e6e6a6b66bfffc4570763b264d3a702de165032298b858a065d5321015531309bb3abe0235f825d5be4270d2e511dca3b984d1e70ef308d880", + 
"0xf90211a06d0adafe89896724704275a42a8a63f0910dce83188add0073f621b8ca1167aaa00de7d4efad36d08f5a0320cdfd964484eba803d9933efae12c292d3ff2d06a20a083341fc12fffccf4b11df314b14f7bcead154525a097493fdf15dde4ec0c0d2aa088b7759fe3aef617828e7abd9e554add2e84ef3e2e024b1a0e2f537fce7d37f9a01e73c28722d825063304c6b51be3a8c7b6312ba8be4c6e99602e623993c014c0a0e50fbe12ddbaf184f3ba0cda971675a55abbf44c73f771bc5824b393262e5255a0b1a937d4c50528cb6aeb80aa5fe83bcfa8c294124a086302caf42cead1f99f96a04c4376b13859af218b5b09ffb33e3465288837c37fa254a46f8d0e75afecae10a0f158c0171bdb454eab6bb6dc5e276e749b6aa550f53b497492c0a392425035c3a0ac496050db1fbb1d34180ee7fd7bed18efa4cf43299390a72dcf530cc3422630a02cacb30ac3b4bab293d31833be4865cd1d1de8db8630edac4af056979cc903aea090cbb538f0f4601289db4cf49485ab3a178044daeae325c525bc3978714a7219a0542021427adbe890896fcc888418a747a555b2a7121fe3c683e07dcf5012e96ca006569c5e3715f52f62dd856dec2136e60c49bbadc1cf9fb625930da3e8f1c16ea0a2539ebb66a2c10c3809626181a2389f043e0b54867cd356eb5f20daaeb521b4a0ab49972dced10010275f2604e6182722dbc426ca1b0ae128defe80c0baefd3c080", + "0xf90211a006c1d8a7c5deeb435ea0b080aea8b7acb58d2d898e12e3560d399594a77863a1a088105243bc96e1f10baa73d670929a834c51eb7f695cf43f4fab94e73c9a5b8da0fce3a21f09b62d65607bbdabb8d675d58a5f3bfb19ae46510a4ea2205070aa03a0039ae7a999ed83bfdb49b6df7074589059ba6c2eed22bfc6dac8ff5241c71bd7a09feca6f7331b6c147f4fd7bd94de496144b85543d868f47be6345330b3f8ccd3a00e55c30d16438567979c92d387a2b99e51a4026192ccfda2ac87a190c3aee511a0a86c5bb52651e490203c63670b569b2337e838e4d80d455cc83e64571e2552f1a0cfb31ae59b691c15ffd97658bab646ff4b90dbc72a81ec52731b3fbd38d0dd5ba0d83936fc4143cc885be5fa420ef22fb97f6a8dd24e9ece9af965792565a7b2c8a0abb179481f4b29578adb8768aa4f6ba6ed6bd43c7572d7c3405c879a362f1ab1a0506651daa07d44901dfd76c12d302b2242e5ceac385f95ea928f20a0336eccf6a010e8a7f461231438987fb26adc4c5004721dc401dc2b77e9b79d26b1308d0079a09174afa82e6d27dfdde74f556d0e782ae6222dc66104d84ea0f1e21e093578c4a0391e24ed0033cc58f149af753b485de3c8b9e4b3c8e145c308db60e51cabbefca03b0991359019197dd53e3798e55a14c8795d655b0693efd37404cf8f8d979cfba0594d95bbfe8e2ea5040b571010549a233bc33bf959792e1e41c515c65abac14480", + "0xf90151a0e8ed81735d358657020dd6bc4bc58cf751cc037fa57e1d0c668bf24049e720d280a03e8bf7abdd8a4190a0ee5f92a78bf1dba529312ed66dd7ead7c9be55c81a2db480a006312425a007cda585740355f52db74d0ae43c21d562c599112546e3ffe22f01a023bbbb0ffb33c7a5477ab514c0f4f3c94ba1748a5ea1dc3edc7c4b5330cd70fe80a03ed45ab6045a10fa00b2fba662914f4dedbf3f3a5f2ce1e6e53a12ee3ea21235a01e02c98684cea92a7c0b04a01658530a09d268b395840a66263923e44b93d2b5a0a585db4a911fe6452a4540bf7dc143981ca31035ccb2c51d02eccd021a6163a480a06032919dcb44e22852b6367473bbc3f43311226ac28991a90b9c9da669f9e08a80a0146aee58a46c30bc84f6e99cd76bf29b3bd238053102679498a3ea15d4ff6d53a04cf57cfdc046c135004b9579059c84b2d902a51fb6feaed51ea272f0ca1cdc648080", + "0xf871a059ce2e1f470580853d88511bf8672f9ffaefadd80bc07b2e3d5a18c3d7812007a0867e978faf3461d2238ccf8d6a138406cb6d8bd36dfa60caddb62af14447a6f880808080a0fc6209fdaa57d224ee35f73e96469a7f95760a54d5de3da07953430b001aee6980808080808080808080", + "0xf8669d20852b2b985cd8c252fddae2acb4f798d0fecdcb1e2da53726332eb559b846f8440180a079fe22fe88fc4b45db10ce94d975e02e8a42b57dc190f8ae15e321f72bbc08eaa0692e658b31cbe3407682854806658d315d61a58c7e4933a2f91d383dc00736c6"], + "balance":"0x0", + "codeHash":"0x692e658b31cbe3407682854806658d315d61a58c7e4933a2f91d383dc00736c6", + "nonce":"0x1", + "storageHash":"0x79fe22fe88fc4b45db10ce94d975e02e8a42b57dc190f8ae15e321f72bbc08ea", + "storageProof":[] + }"#; + let val = 
serde_json::from_str::<EIP1186AccountProofResponse>(response).unwrap();
+    serde_json::to_value(val).unwrap();
+}

From 2bc5e19edb49bd9a4a8c441f65958ae99cc01d02 Mon Sep 17 00:00:00 2001
From: Alexey Shekhirin
Date: Mon, 10 Jul 2023 20:45:21 +0100
Subject: [PATCH 126/722] chore(prometheus): add host.docker.internal:9001 scrape target (#3689)

---
 etc/prometheus/prometheus.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/etc/prometheus/prometheus.yml b/etc/prometheus/prometheus.yml
index 483dcaa050e5..4906c3979671 100644
--- a/etc/prometheus/prometheus.yml
+++ b/etc/prometheus/prometheus.yml
@@ -3,7 +3,7 @@ scrape_configs:
     metrics_path: "/"
     scrape_interval: 5s
     static_configs:
-      - targets: ['reth:9001', 'localhost:9001']
+      - targets: ['reth:9001', 'localhost:9001', 'host.docker.internal:9001']
   - job_name: ethereum-metrics-exporter
     metrics_path: "/metrics"
     scrape_interval: 5s

From 467f6f919995a6f74ff4460cbece584ba5af1935 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Tue, 11 Jul 2023 02:55:09 +0200
Subject: [PATCH 127/722] fix: check if value is bool (#3708)

---
 crates/rpc/rpc-types/src/eth/pubsub.rs | 10 +++++++---
 crates/rpc/rpc/src/eth/pubsub.rs       |  4 ++--
 2 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/crates/rpc/rpc-types/src/eth/pubsub.rs b/crates/rpc/rpc-types/src/eth/pubsub.rs
index f027037c176a..e363cb754c39 100644
--- a/crates/rpc/rpc-types/src/eth/pubsub.rs
+++ b/crates/rpc/rpc-types/src/eth/pubsub.rs
@@ -104,8 +104,8 @@ pub enum Params {
     None,
     /// Log parameters.
     Logs(Box<Filter>),
-    /// New pending transaction parameters.
-    NewPendingTransactions(bool),
+    /// Boolean parameter for new pending transactions.
+    Bool(bool),
 }
 
 impl Serialize for Params {
@@ -116,7 +116,7 @@
         match self {
             Params::None => (&[] as &[serde_json::Value]).serialize(serializer),
             Params::Logs(logs) => logs.serialize(serializer),
-            Params::NewPendingTransactions(full) => full.serialize(serializer),
+            Params::Bool(full) => full.serialize(serializer),
         }
     }
 }
@@ -132,6 +132,10 @@ impl<'a> Deserialize<'a> for Params {
             return Ok(Params::None)
         }
 
+        if let Some(val) = v.as_bool() {
+            return Ok(Params::Bool(val))
+        }
+
         serde_json::from_value(v)
             .map(|f| Params::Logs(Box::new(f)))
             .map_err(|e| D::Error::custom(format!("Invalid Pub-Sub parameters: {e}")))
diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs
index aa5dd7363f54..a66930011369 100644
--- a/crates/rpc/rpc/src/eth/pubsub.rs
+++ b/crates/rpc/rpc/src/eth/pubsub.rs
@@ -124,7 +124,7 @@ where
             SubscriptionKind::NewPendingTransactions => {
                 if let Some(params) = params {
                     match params {
-                        Params::NewPendingTransactions(true) => {
+                        Params::Bool(true) => {
                             // full transaction objects requested
                             let stream = pubsub.full_pending_transaction_stream().map(|tx| {
                                 EthSubscriptionResult::FullTransaction(Box::new(
@@ -135,7 +135,7 @@ where
                             });
                             return pipe_from_stream(accepted_sink, stream).await
                         }
-                        Params::NewPendingTransactions(false) | Params::None => {
+                        Params::Bool(false) | Params::None => {
                             // only hashes requested
                         }
                         Params::Logs(_) => {

From 65b07b981e8ffa0e7cdcb39dd5eb6438c1750a01 Mon Sep 17 00:00:00 2001
From: joshieDo <93316087+joshieDo@users.noreply.github.com>
Date: Tue, 11 Jul 2023 11:51:34 +0100
Subject: [PATCH 128/722] feat(pruning): prune `Receipts` during pipeline (#3585)

---
 Cargo.lock                                    |   1 +
 bin/reth/src/chain/import.rs                  |   4 +-
 bin/reth/src/debug_cmd/execution.rs           |   1 +
 bin/reth/src/debug_cmd/merkle.rs              |   3 +-
 bin/reth/src/init.rs                          |   2 +-
 bin/reth/src/node/mod.rs                      |   1 +
 bin/reth/src/stage/dump/merkle.rs             |   3 +-
bin/reth/src/stage/run.rs | 1 + crates/config/src/config.rs | 30 +---- crates/primitives/Cargo.toml | 1 + crates/primitives/src/lib.rs | 2 +- crates/primitives/src/prune/mod.rs | 2 + crates/primitives/src/prune/mode.rs | 2 +- crates/primitives/src/prune/target.rs | 82 ++++++++++++++ crates/stages/src/stage.rs | 30 +---- crates/stages/src/stages/execution.rs | 104 +++++++++++++++++- crates/storage/provider/src/post_state/mod.rs | 56 +++++++--- .../src/providers/database/provider.rs | 2 +- 18 files changed, 241 insertions(+), 86 deletions(-) create mode 100644 crates/primitives/src/prune/target.rs diff --git a/Cargo.lock b/Cargo.lock index 8c6da07c2819..e8b62e26467c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5504,6 +5504,7 @@ dependencies = [ "impl-serde", "modular-bitfield", "once_cell", + "paste", "plain_hasher", "pprof", "proptest", diff --git a/bin/reth/src/chain/import.rs b/bin/reth/src/chain/import.rs index 38bed8996f81..e08f7164b585 100644 --- a/bin/reth/src/chain/import.rs +++ b/bin/reth/src/chain/import.rs @@ -159,10 +159,11 @@ impl ImportCommand { let (tip_tx, tip_rx) = watch::channel(H256::zero()); let factory = reth_revm::Factory::new(self.chain.clone()); + let max_block = file_client.max_block().unwrap_or(0); let mut pipeline = Pipeline::builder() .with_tip_sender(tip_tx) // we want to sync all blocks the file client provides or 0 if empty - .with_max_block(file_client.max_block().unwrap_or(0)) + .with_max_block(max_block) .add_stages( DefaultStages::new( HeaderSyncMode::Tip(tip_rx), @@ -184,6 +185,7 @@ impl ImportCommand { max_blocks: config.stages.execution.max_blocks, max_changes: config.stages.execution.max_changes, }, + config.prune.map(|prune| prune.parts).unwrap_or_default(), )), ) .build(db, self.chain.clone()); diff --git a/bin/reth/src/debug_cmd/execution.rs b/bin/reth/src/debug_cmd/execution.rs index 7a6f61334626..88481b1cc147 100644 --- a/bin/reth/src/debug_cmd/execution.rs +++ b/bin/reth/src/debug_cmd/execution.rs @@ -142,6 +142,7 @@ impl Command { .set(ExecutionStage::new( factory, ExecutionStageThresholds { max_blocks: None, max_changes: None }, + config.prune.map(|prune| prune.parts).unwrap_or_default(), )), ) .build(db, self.chain.clone()); diff --git a/bin/reth/src/debug_cmd/merkle.rs b/bin/reth/src/debug_cmd/merkle.rs index 1188dbfcb675..e672ec3e027e 100644 --- a/bin/reth/src/debug_cmd/merkle.rs +++ b/bin/reth/src/debug_cmd/merkle.rs @@ -8,7 +8,7 @@ use reth_db::{cursor::DbCursorRO, init_db, tables, transaction::DbTx}; use reth_primitives::{ fs, stage::{StageCheckpoint, StageId}, - ChainSpec, + ChainSpec, PruneTargets, }; use reth_provider::{ProviderFactory, StageCheckpointReader}; use reth_stages::{ @@ -96,6 +96,7 @@ impl Command { let mut execution_stage = ExecutionStage::new( factory, ExecutionStageThresholds { max_blocks: Some(1), max_changes: None }, + PruneTargets::all(), ); let mut account_hashing_stage = AccountHashingStage::default(); diff --git a/bin/reth/src/init.rs b/bin/reth/src/init.rs index 30373b3100be..10b4fa14d883 100644 --- a/bin/reth/src/init.rs +++ b/bin/reth/src/init.rs @@ -109,7 +109,7 @@ pub fn insert_genesis_state( state.change_storage(0, *address, storage_changes); } } - state.write_to_db(tx)?; + state.write_to_db(tx, 0)?; Ok(()) } diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index b7bf59466be0..529d67713ac7 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -719,6 +719,7 @@ impl Command { max_blocks: stage_config.execution.max_blocks, max_changes: stage_config.execution.max_changes, }, + 
config.prune.map(|prune| prune.parts).unwrap_or_default(),
             )
             .with_metrics_tx(metrics_tx),
         )
diff --git a/bin/reth/src/stage/dump/merkle.rs b/bin/reth/src/stage/dump/merkle.rs
index 601abd569cf3..dd5c8f75809f 100644
--- a/bin/reth/src/stage/dump/merkle.rs
+++ b/bin/reth/src/stage/dump/merkle.rs
@@ -2,7 +2,7 @@ use super::setup;
 use crate::utils::DbTool;
 use eyre::Result;
 use reth_db::{database::Database, table::TableImporter, tables, DatabaseEnv};
-use reth_primitives::{stage::StageCheckpoint, BlockNumber, ChainSpec};
+use reth_primitives::{stage::StageCheckpoint, BlockNumber, ChainSpec, PruneTargets};
 use reth_provider::ProviderFactory;
 use reth_stages::{
@@ -70,6 +70,7 @@ async fn unwind_and_copy(
     let mut exec_stage = ExecutionStage::new(
         reth_revm::Factory::new(db_tool.chain.clone()),
         ExecutionStageThresholds { max_blocks: Some(u64::MAX), max_changes: None },
+        PruneTargets::all(),
     );
 
     exec_stage
diff --git a/bin/reth/src/stage/run.rs b/bin/reth/src/stage/run.rs
index 23166e595a28..f53412c3dbbf 100644
--- a/bin/reth/src/stage/run.rs
+++ b/bin/reth/src/stage/run.rs
@@ -202,6 +202,7 @@ impl Command {
                             max_blocks: Some(batch_size),
                             max_changes: None,
                         },
+                        config.prune.map(|prune| prune.parts).unwrap_or_default(),
                     )),
                     None,
                 )
diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs
index 40836b8c02c5..b622ed47ee3d 100644
--- a/crates/config/src/config.rs
+++ b/crates/config/src/config.rs
@@ -5,7 +5,7 @@ use reth_downloaders::{
     headers::reverse_headers::ReverseHeadersDownloaderBuilder,
 };
 use reth_network::{NetworkConfigBuilder, PeersConfig, SessionsConfig};
-use reth_primitives::{serde_helper::deserialize_opt_prune_mode_with_min_distance, PruneMode};
+use reth_primitives::PruneTargets;
 use secp256k1::SecretKey;
 use serde::{Deserialize, Serialize};
 use std::path::PathBuf;
@@ -285,39 +285,15 @@ pub struct PruneConfig {
     /// Minimum pruning interval measured in blocks.
     pub block_interval: u64,
     /// Pruning configuration for every part of the data that can be pruned.
-    pub parts: PruneParts,
+    pub parts: PruneTargets,
 }
 
 impl Default for PruneConfig {
     fn default() -> Self {
-        Self { block_interval: 10, parts: PruneParts::default() }
+        Self { block_interval: 10, parts: PruneTargets::default() }
     }
 }
 
-/// Pruning configuration for every part of the data that can be pruned.
-#[derive(Debug, Clone, Default, Copy, Deserialize, PartialEq, Serialize)]
-#[serde(default)]
-pub struct PruneParts {
-    /// Sender Recovery pruning configuration.
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub sender_recovery: Option<PruneMode>,
-    /// Transaction Lookup pruning configuration.
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub transaction_lookup: Option<PruneMode>,
-    /// Receipts pruning configuration.
-    #[serde(
-        skip_serializing_if = "Option::is_none",
-        deserialize_with = "deserialize_opt_prune_mode_with_min_distance::<64, _>"
-    )]
-    pub receipts: Option<PruneMode>,
-    /// Account History pruning configuration.
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub account_history: Option<PruneMode>,
-    /// Storage History pruning configuration.
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub storage_history: Option<PruneMode>,
-}
-
 #[cfg(test)]
 mod tests {
     use super::Config;
diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml
index 0f0573363d1f..34039aa85448 100644
--- a/crates/primitives/Cargo.toml
+++ b/crates/primitives/Cargo.toml
@@ -56,6 +56,7 @@ url = "2.3"
 impl-serde = "0.4.0"
 once_cell = "1.17.0"
 zstd = { version = "0.12", features = ["experimental"] }
+paste = "1.0"
 
 # proof related
 triehash = "0.8"
diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs
index a76bcba902bf..6e96a36b8db5 100644
--- a/crates/primitives/src/lib.rs
+++ b/crates/primitives/src/lib.rs
@@ -78,7 +78,7 @@ pub use net::{
     SEPOLIA_BOOTNODES,
 };
 pub use peer::{PeerId, WithPeerId};
-pub use prune::{PruneCheckpoint, PruneMode};
+pub use prune::{PruneCheckpoint, PruneMode, PruneTargets};
 pub use receipt::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef};
 pub use revm_primitives::JumpMap;
 pub use serde_helper::JsonU256;
diff --git a/crates/primitives/src/prune/mod.rs b/crates/primitives/src/prune/mod.rs
index 2814f8bc4b71..18cb943fb975 100644
--- a/crates/primitives/src/prune/mod.rs
+++ b/crates/primitives/src/prune/mod.rs
@@ -1,5 +1,7 @@
 mod checkpoint;
 mod mode;
+mod target;
 
 pub use checkpoint::PruneCheckpoint;
 pub use mode::PruneMode;
+pub use target::PruneTargets;
diff --git a/crates/primitives/src/prune/mode.rs b/crates/primitives/src/prune/mode.rs
index 47e6778e8018..3fba10fe59af 100644
--- a/crates/primitives/src/prune/mode.rs
+++ b/crates/primitives/src/prune/mode.rs
@@ -8,7 +8,7 @@ use reth_codecs::{main_codec, Compact};
 pub enum PruneMode {
     /// Prune all blocks.
     Full,
-    /// Prune blocks before the `head-N` block number. In other words, keep last N blocks.
+    /// Prune blocks before the `head-N` block number. In other words, keep last N + 1 blocks.
     Distance(u64),
     /// Prune blocks before the specified block number. The specified block number is not pruned.
     Before(BlockNumber),
diff --git a/crates/primitives/src/prune/target.rs b/crates/primitives/src/prune/target.rs
new file mode 100644
index 000000000000..987d0883a30a
--- /dev/null
+++ b/crates/primitives/src/prune/target.rs
@@ -0,0 +1,82 @@
+use crate::{serde_helper::deserialize_opt_prune_mode_with_min_distance, BlockNumber, PruneMode};
+use paste::paste;
+use serde::{Deserialize, Serialize};
+
+/// Pruning configuration for every part of the data that can be pruned.
+#[derive(Debug, Clone, Default, Copy, Deserialize, Eq, PartialEq, Serialize)]
+#[serde(default)]
+pub struct PruneTargets {
+    /// Sender Recovery pruning configuration.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub sender_recovery: Option<PruneMode>,
+    /// Transaction Lookup pruning configuration.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub transaction_lookup: Option<PruneMode>,
+    /// Receipts pruning configuration.
+    #[serde(
+        skip_serializing_if = "Option::is_none",
+        deserialize_with = "deserialize_opt_prune_mode_with_min_distance::<64, _>"
+    )]
+    pub receipts: Option<PruneMode>,
+    /// Account History pruning configuration.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub account_history: Option<PruneMode>,
+    /// Storage History pruning configuration.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub storage_history: Option<PruneMode>,
+}
+
+macro_rules! should_prune_method {
+    ($($config:ident),+) => {
+        $(
+            paste! {
+                #[allow(missing_docs)]
+                pub fn [<should_prune_ $config>](&self, block: BlockNumber, tip: BlockNumber) -> bool {
+                    if let Some(config) = &self.$config {
+                        return self.should_prune(config, block, tip)
+                    }
+                    false
+                }
+            }
+        )+
+
+        /// Sets pruning to all targets.
+        pub fn all() -> Self {
+            PruneTargets {
+                $(
+                    $config: Some(PruneMode::Full),
+                )+
+            }
+        }
+
+    };
+}
+
+impl PruneTargets {
+    /// Sets pruning to no target.
+    pub fn none() -> Self {
+        PruneTargets::default()
+    }
+
+    /// Check if target block should be pruned
+    pub fn should_prune(&self, target: &PruneMode, block: BlockNumber, tip: BlockNumber) -> bool {
+        match target {
+            PruneMode::Full => true,
+            PruneMode::Distance(distance) => {
+                if *distance > tip {
+                    return false
+                }
+                block < tip - *distance
+            }
+            PruneMode::Before(n) => *n > block,
+        }
+    }
+
+    should_prune_method!(
+        sender_recovery,
+        transaction_lookup,
+        receipts,
+        account_history,
+        storage_history
+    );
+}
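As a sanity check on the semantics of the new type, a minimal sketch of how `PruneTargets` is meant to answer pruning questions; it assumes the types above and that the `paste!` macro expands the helpers to `should_prune_<part>` names, as reconstructed here:

    use reth_primitives::{PruneMode, PruneTargets};

    fn main() {
        // Keep receipts for the last 64 + 1 blocks, prune everything older.
        let targets = PruneTargets {
            receipts: Some(PruneMode::Distance(64)),
            ..PruneTargets::none()
        };

        let tip = 1000;
        assert!(targets.should_prune_receipts(935, tip)); // older than tip - 64
        assert!(!targets.should_prune_receipts(936, tip)); // within the kept window
        // Parts without a configured mode are never pruned.
        assert!(!targets.should_prune_sender_recovery(0, tip));
    }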
{
+                #[allow(missing_docs)]
+                pub fn [<should_prune_ $config>](&self, block: BlockNumber, tip: BlockNumber) -> bool {
+                    if let Some(config) = &self.$config {
+                        return self.should_prune(config, block, tip)
+                    }
+                    false
+                }
+            }
+        )+
+
+        /// Sets pruning to all targets.
+        pub fn all() -> Self {
+            PruneTargets {
+                $(
+                    $config: Some(PruneMode::Full),
+                )+
+            }
+        }
+
+    };
+}
+
+impl PruneTargets {
+    /// Sets pruning to no target.
+    pub fn none() -> Self {
+        PruneTargets::default()
+    }
+
+    /// Check if the target block should be pruned.
+    pub fn should_prune(&self, target: &PruneMode, block: BlockNumber, tip: BlockNumber) -> bool {
+        match target {
+            PruneMode::Full => true,
+            PruneMode::Distance(distance) => {
+                if *distance > tip {
+                    return false
+                }
+                block < tip - *distance
+            }
+            PruneMode::Before(n) => *n > block,
+        }
+    }
+
+    should_prune_method!(
+        sender_recovery,
+        transaction_lookup,
+        receipts,
+        account_history,
+        storage_history
+    );
+}
diff --git a/crates/stages/src/stage.rs b/crates/stages/src/stage.rs
index 71c5f1ad8c26..7580c6bab422 100644
--- a/crates/stages/src/stage.rs
+++ b/crates/stages/src/stage.rs
@@ -3,7 +3,7 @@ use async_trait::async_trait;
 use reth_db::{cursor::DbCursorRO, database::Database, tables, transaction::DbTx};
 use reth_primitives::{
     stage::{StageCheckpoint, StageId},
-    BlockNumber, PruneMode, TxNumber,
+    BlockNumber, TxNumber,
 };
 use reth_provider::{BlockReader, DatabaseProviderRW, ProviderError};
 use std::{
@@ -194,31 +194,3 @@ pub trait Stage<DB: Database>: Send + Sync {
         input: UnwindInput,
     ) -> Result<UnwindOutput, StageError>;
 }
-
-/// Prune target.
-#[derive(Debug, Clone, Copy)]
-pub enum PruneTarget {
-    /// Prune all blocks, i.e. not save any data.
-    All,
-    /// Prune blocks up to the specified block number, inclusive.
-    Block(BlockNumber),
-}
-
-impl PruneTarget {
-    /// Returns new target to prune towards, according to stage prune mode [PruneMode]
-    /// and current head [BlockNumber].
-    pub fn new(prune_mode: PruneMode, head: BlockNumber) -> Self {
-        match prune_mode {
-            PruneMode::Full => PruneTarget::All,
-            PruneMode::Distance(distance) => {
-                Self::Block(head.saturating_sub(distance).saturating_sub(1))
-            }
-            PruneMode::Before(before_block) => Self::Block(before_block.saturating_sub(1)),
-        }
-    }
-
-    /// Returns true if the target is [PruneTarget::All], i.e. prune all blocks.
-    pub fn is_all(&self) -> bool {
-        matches!(self, Self::All)
-    }
-}
diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs
index 38284b1d125d..70d07f8d736b 100644
--- a/crates/stages/src/stages/execution.rs
+++ b/crates/stages/src/stages/execution.rs
@@ -14,7 +14,7 @@ use reth_primitives::{
     stage::{
         CheckpointBlockRange, EntitiesCheckpoint, ExecutionCheckpoint, StageCheckpoint, StageId,
     },
-    BlockNumber, Header, U256,
+    BlockNumber, Header, PruneTargets, U256,
 };
 use reth_provider::{
     post_state::PostState, BlockExecutor, BlockReader, DatabaseProviderRW, ExecutorFactory,
@@ -59,19 +59,25 @@ pub struct ExecutionStage<EF: ExecutorFactory> {
     executor_factory: EF,
     /// The commit thresholds of the execution stage.
     thresholds: ExecutionStageThresholds,
+    /// Pruning configuration.
+    prune_targets: PruneTargets,
 }

 impl<EF: ExecutorFactory> ExecutionStage<EF> {
     /// Create new execution stage with specified config.
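Editorial aside before the `ExecutionStage` constructor hunk continues: a minimal sketch (not part of the patch) of what the `should_prune_*` methods generated by `should_prune_method!` return under each `PruneMode`, assuming the `PruneTargets` API introduced above.

```rust
use reth_primitives::{PruneMode, PruneTargets};

fn main() {
    // Distance(N) keeps the last N + 1 blocks: with tip = 10 and N = 2,
    // blocks 8, 9 and 10 survive and everything below 8 is pruned.
    let targets = PruneTargets { receipts: Some(PruneMode::Distance(2)), ..PruneTargets::none() };
    assert!(targets.should_prune_receipts(7, 10));
    assert!(!targets.should_prune_receipts(8, 10));

    // Before(n) prunes strictly below n; block n itself is kept.
    let targets = PruneTargets { receipts: Some(PruneMode::Before(5)), ..PruneTargets::none() };
    assert!(targets.should_prune_receipts(4, 10));
    assert!(!targets.should_prune_receipts(5, 10));

    // An unset part never prunes.
    assert!(!PruneTargets::none().should_prune_receipts(0, 10));
}
```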
-    pub fn new(executor_factory: EF, thresholds: ExecutionStageThresholds) -> Self {
-        Self { metrics_tx: None, executor_factory, thresholds }
+    pub fn new(
+        executor_factory: EF,
+        thresholds: ExecutionStageThresholds,
+        prune_targets: PruneTargets,
+    ) -> Self {
+        Self { metrics_tx: None, executor_factory, thresholds, prune_targets }
     }

     /// Create an execution stage with the provided executor factory.
     ///
     /// The commit threshold will be set to 10_000.
     pub fn new_with_factory(executor_factory: EF) -> Self {
-        Self::new(executor_factory, ExecutionStageThresholds::default())
+        Self::new(executor_factory, ExecutionStageThresholds::default(), PruneTargets::default())
     }

     /// Set the metric events sender.
@@ -104,6 +110,8 @@ impl<EF: ExecutorFactory> ExecutionStage<EF> {
         // Execute block range
         let mut state = PostState::default();
+        state.add_prune_targets(self.prune_targets);
+
         for block_number in start_block..=max_block {
             let td = provider
                 .header_td_by_number(block_number)?
@@ -145,7 +153,7 @@ impl<EF: ExecutorFactory> ExecutionStage<EF> {
         // Write remaining changes
         trace!(target: "sync::stages::execution", accounts = state.accounts().len(), "Writing updated state to database");
         let start = Instant::now();
-        state.write_to_db(provider.tx_ref())?;
+        state.write_to_db(provider.tx_ref(), max_block)?;
         trace!(target: "sync::stages::execution", took = ?start.elapsed(), "Wrote state");

         let done = stage_progress == max_block;
@@ -417,7 +425,8 @@ mod tests {
     use reth_db::{models::AccountBeforeTx, test_utils::create_test_rw_db};
     use reth_primitives::{
         hex_literal::hex, keccak256, stage::StageUnitCheckpoint, Account, Bytecode,
-        ChainSpecBuilder, SealedBlock, StorageEntry, H160, H256, MAINNET, U256,
+        ChainSpecBuilder, PruneMode, PruneTargets, SealedBlock, StorageEntry, H160, H256, MAINNET,
+        U256,
     };
     use reth_provider::{AccountReader, BlockWriter, ProviderFactory, ReceiptProvider};
     use reth_revm::Factory;
@@ -430,6 +439,7 @@ mod tests {
         ExecutionStage::new(
             factory,
             ExecutionStageThresholds { max_blocks: Some(100), max_changes: None },
+            PruneTargets::none(),
         )
     }
@@ -884,4 +894,86 @@ mod tests {
         ]
         );
     }
+
+    #[tokio::test]
+    async fn test_prune() {
+        let test_tx = TestTransaction::default();
+        let factory = Arc::new(ProviderFactory::new(test_tx.tx.as_ref(), MAINNET.clone()));
+
+        let provider = factory.provider_rw().unwrap();
+        let input = ExecInput {
+            target: Some(1),
+            // The progress of this stage the last time it was executed.
+ checkpoint: None, + }; + let mut genesis_rlp = hex!("f901faf901f5a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa045571b40ae66ca7480791bbb2887286e4e4c4b1b298b191c889d6959023a32eda056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808502540be400808000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0").as_slice(); + let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); + let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); + let block = SealedBlock::decode(&mut block_rlp).unwrap(); + provider.insert_block(genesis, None).unwrap(); + provider.insert_block(block.clone(), None).unwrap(); + provider.commit().unwrap(); + + // insert pre state + let provider = factory.provider_rw().unwrap(); + let code = hex!("5a465a905090036002900360015500"); + let code_hash = keccak256(hex!("5a465a905090036002900360015500")); + provider + .tx_ref() + .put::( + H160(hex!("1000000000000000000000000000000000000000")), + Account { nonce: 0, balance: U256::ZERO, bytecode_hash: Some(code_hash) }, + ) + .unwrap(); + provider + .tx_ref() + .put::( + H160(hex!("a94f5374fce5edbc8e2a8697c15331677e6ebf0b")), + Account { + nonce: 0, + balance: U256::from(0x3635c9adc5dea00000u128), + bytecode_hash: None, + }, + ) + .unwrap(); + provider + .tx_ref() + .put::(code_hash, Bytecode::new_raw(code.to_vec().into())) + .unwrap(); + provider.commit().unwrap(); + + let check_pruning = |factory: Arc>, + prune_targets: PruneTargets, + expect_num_receipts: usize| async move { + let provider = factory.provider_rw().unwrap(); + + let mut execution_stage = ExecutionStage::new( + 
Factory::new(Arc::new(ChainSpecBuilder::mainnet().berlin_activated().build())), + ExecutionStageThresholds { max_blocks: Some(100), max_changes: None }, + prune_targets, + ); + + execution_stage.execute(&provider, input).await.unwrap(); + assert_eq!( + provider.receipts_by_block(1.into()).unwrap().unwrap().len(), + expect_num_receipts + ); + }; + + let mut prune = PruneTargets::none(); + + check_pruning(factory.clone(), prune, 1).await; + + prune.receipts = Some(PruneMode::Full); + check_pruning(factory.clone(), prune, 0).await; + + prune.receipts = Some(PruneMode::Before(1)); + check_pruning(factory.clone(), prune, 1).await; + + prune.receipts = Some(PruneMode::Before(2)); + check_pruning(factory.clone(), prune, 0).await; + + prune.receipts = Some(PruneMode::Distance(0)); + check_pruning(factory.clone(), prune, 1).await; + } } diff --git a/crates/storage/provider/src/post_state/mod.rs b/crates/storage/provider/src/post_state/mod.rs index 6fb6b02989f4..9e8aa189ef1a 100644 --- a/crates/storage/provider/src/post_state/mod.rs +++ b/crates/storage/provider/src/post_state/mod.rs @@ -8,7 +8,7 @@ use reth_db::{ }; use reth_primitives::{ bloom::logs_bloom, keccak256, proofs::calculate_receipt_root_ref, Account, Address, - BlockNumber, Bloom, Bytecode, Log, Receipt, StorageEntry, H256, U256, + BlockNumber, Bloom, Bytecode, Log, PruneMode, PruneTargets, Receipt, StorageEntry, H256, U256, }; use reth_trie::{ hashed_cursor::{HashedPostState, HashedPostStateCursorFactory, HashedStorage}, @@ -78,6 +78,8 @@ pub struct PostState { bytecode: BTreeMap, /// The receipt(s) of the executed transaction(s). receipts: BTreeMap>, + /// Pruning configuration. + prune_targets: PruneTargets, } impl PostState { @@ -91,6 +93,11 @@ impl PostState { Self { receipts: BTreeMap::from([(block, Vec::with_capacity(txs))]), ..Default::default() } } + /// Add a pruning configuration. + pub fn add_prune_targets(&mut self, prune_targets: PruneTargets) { + self.prune_targets = prune_targets; + } + /// Return the current size of the poststate. /// /// Size is the sum of individual changes to accounts, storage, bytecode and receipts. @@ -319,6 +326,7 @@ impl PostState { } self.receipts.extend(other.receipts); + self.bytecode.extend(other.bytecode); } @@ -579,7 +587,11 @@ impl PostState { } /// Write the post state to the database. 
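Editorial aside before the `write_to_db` hunk continues below: the new receipt gating reads as a small predicate. A sketch assuming the `PruneTargets` API from the primitives changes above; `skip_receipts` is a hypothetical helper, not part of the patch.

```rust
use reth_primitives::{BlockNumber, PruneMode, PruneTargets};

/// True if the receipts of `block` should not be written, given the chain `tip`.
fn skip_receipts(targets: &PruneTargets, block: BlockNumber, tip: BlockNumber) -> bool {
    // A blanket `Full` mode short-circuits the whole table write; otherwise
    // every block is checked individually against the tip.
    targets.receipts == Some(PruneMode::Full) || targets.should_prune_receipts(block, tip)
}
```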
- pub fn write_to_db<'a, TX: DbTxMut<'a> + DbTx<'a>>(mut self, tx: &TX) -> Result<(), DbError> { + pub fn write_to_db<'a, TX: DbTxMut<'a> + DbTx<'a>>( + mut self, + tx: &TX, + tip: BlockNumber, + ) -> Result<(), DbError> { self.write_history_to_db(tx)?; // Write new storage state @@ -630,16 +642,24 @@ impl PostState { bytecodes_cursor.upsert(hash, bytecode)?; } - // Write the receipts of the transactions + // Write the receipts of the transactions if not pruned tracing::trace!(target: "provider::post_state", len = self.receipts.len(), "Writing receipts"); - let mut bodies_cursor = tx.cursor_read::()?; - let mut receipts_cursor = tx.cursor_write::()?; - for (block, receipts) in self.receipts { - let (_, body_indices) = bodies_cursor.seek_exact(block)?.expect("body indices exist"); - let tx_range = body_indices.tx_num_range(); - assert_eq!(receipts.len(), tx_range.clone().count(), "Receipt length mismatch"); - for (tx_num, receipt) in tx_range.zip(receipts) { - receipts_cursor.append(tx_num, receipt)?; + if !self.receipts.is_empty() && self.prune_targets.receipts != Some(PruneMode::Full) { + let mut bodies_cursor = tx.cursor_read::()?; + let mut receipts_cursor = tx.cursor_write::()?; + + for (block, receipts) in self.receipts { + if self.prune_targets.should_prune_receipts(block, tip) { + continue + } + + let (_, body_indices) = + bodies_cursor.seek_exact(block)?.expect("body indices exist"); + let tx_range = body_indices.tx_num_range(); + assert_eq!(receipts.len(), tx_range.clone().count(), "Receipt length mismatch"); + for (tx_num, receipt) in tx_range.zip(receipts) { + receipts_cursor.append(tx_num, receipt)?; + } } } @@ -1091,7 +1111,7 @@ mod tests { post_state.create_account(1, address_a, account_a); // 0x11.. is changed (balance + 1, nonce + 1) post_state.change_account(1, address_b, account_b, account_b_changed); - post_state.write_to_db(provider.tx_ref()).expect("Could not write post state to DB"); + post_state.write_to_db(provider.tx_ref(), 0).expect("Could not write post state to DB"); // Check plain state assert_eq!( @@ -1124,7 +1144,9 @@ mod tests { let mut post_state = PostState::new(); // 0x11.. 
is destroyed post_state.destroy_account(2, address_b, account_b_changed); - post_state.write_to_db(provider.tx_ref()).expect("Could not write second post state to DB"); + post_state + .write_to_db(provider.tx_ref(), 0) + .expect("Could not write second post state to DB"); // Check new plain state for account B assert_eq!( @@ -1163,7 +1185,7 @@ mod tests { post_state.change_storage(1, address_a, storage_a_changeset); post_state.change_storage(1, address_b, storage_b_changeset); - post_state.write_to_db(&tx).expect("Could not write post state to DB"); + post_state.write_to_db(&tx, 0).expect("Could not write post state to DB"); // Check plain storage state let mut storage_cursor = tx @@ -1246,7 +1268,7 @@ mod tests { // Delete account A let mut post_state = PostState::new(); post_state.destroy_account(2, address_a, Account::default()); - post_state.write_to_db(&tx).expect("Could not write post state to DB"); + post_state.write_to_db(&tx, 0).expect("Could not write post state to DB"); assert_eq!( storage_cursor.seek_exact(address_a).unwrap(), @@ -1296,7 +1318,7 @@ mod tests { (U256::from(1), (U256::ZERO, U256::from(2))), ]), ); - init_state.write_to_db(&tx).expect("Could not write init state to DB"); + init_state.write_to_db(&tx, 0).expect("Could not write init state to DB"); let mut post_state = PostState::new(); post_state.change_storage( @@ -1339,7 +1361,7 @@ mod tests { BTreeMap::from([(U256::from(0), (U256::ZERO, U256::from(9)))]), ); - post_state.write_to_db(&tx).expect("Could not write post state to DB"); + post_state.write_to_db(&tx, 0).expect("Could not write post state to DB"); let mut storage_changeset_cursor = tx .cursor_dup_read::() diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 5650b85e3dde..2e0fe5a4b9ff 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1774,7 +1774,7 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> BlockWriter for DatabaseProvider<' // Write state and changesets to the database. // Must be written after blocks because of the receipt lookup. 
- state.write_to_db(self.tx_ref())?; + state.write_to_db(self.tx_ref(), new_tip_number)?; self.insert_hashes(first_number..=last_block_number, last_block_hash, expected_state_root)?; From b07dfb55259db022455e267900a02e599869b78b Mon Sep 17 00:00:00 2001 From: Josh Stevens Date: Tue, 11 Jul 2023 12:17:46 +0100 Subject: [PATCH 129/722] fix: transaction calls on the reth-provider should not generate a hash by default (#3675) --- crates/storage/provider/src/providers/database/mod.rs | 4 ++++ crates/storage/provider/src/providers/database/provider.rs | 4 ++++ crates/storage/provider/src/providers/mod.rs | 4 ++++ crates/storage/provider/src/test_utils/mock.rs | 7 ++++++- crates/storage/provider/src/test_utils/noop.rs | 7 ++++++- crates/storage/provider/src/traits/transactions.rs | 5 ++++- 6 files changed, 28 insertions(+), 3 deletions(-) diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 686257c10368..fde0ff2865c1 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -228,6 +228,10 @@ impl TransactionsProvider for ProviderFactory { self.provider()?.transaction_by_id(id) } + fn transaction_by_id_no_hash(&self, id: TxNumber) -> Result> { + self.provider()?.transaction_by_id_no_hash(id) + } + fn transaction_by_hash(&self, hash: TxHash) -> Result> { self.provider()?.transaction_by_hash(hash) } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 2e0fe5a4b9ff..28a00f45e20a 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -934,6 +934,10 @@ impl<'this, TX: DbTx<'this>> TransactionsProvider for DatabaseProvider<'this, TX Ok(self.tx.get::(id)?.map(Into::into)) } + fn transaction_by_id_no_hash(&self, id: TxNumber) -> Result> { + Ok(self.tx.get::(id)?.map(Into::into)) + } + fn transaction_by_hash(&self, hash: TxHash) -> Result> { if let Some(id) = self.transaction_id(hash)? { Ok(self.transaction_by_id(id)?) 
diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs
index 7dfeae75f21d..81c123323fa4 100644
--- a/crates/storage/provider/src/providers/mod.rs
+++ b/crates/storage/provider/src/providers/mod.rs
@@ -271,6 +271,10 @@ where
         self.database.provider()?.transaction_by_id(id)
     }

+    fn transaction_by_id_no_hash(&self, id: TxNumber) -> Result<Option<TransactionSignedNoHash>> {
+        self.database.provider()?.transaction_by_id_no_hash(id)
+    }
+
     fn transaction_by_hash(&self, hash: TxHash) -> Result<Option<TransactionSigned>> {
         self.database.provider()?.transaction_by_hash(hash)
     }
diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs
index 98a7ee7f1731..d679a7ed5446 100644
--- a/crates/storage/provider/src/test_utils/mock.rs
+++ b/crates/storage/provider/src/test_utils/mock.rs
@@ -11,7 +11,8 @@ use reth_interfaces::{provider::ProviderError, Result};
 use reth_primitives::{
     keccak256, Account, Address, Block, BlockHash, BlockHashOrNumber, BlockId, BlockNumber,
     BlockWithSenders, Bytecode, Bytes, ChainInfo, Header, Receipt, SealedBlock, SealedHeader,
-    StorageKey, StorageValue, TransactionMeta, TransactionSigned, TxHash, TxNumber, H256, U256,
+    StorageKey, StorageValue, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash,
+    TxNumber, H256, U256,
 };
 use reth_revm_primitives::primitives::{BlockEnv, CfgEnv};
 use std::{
@@ -168,6 +169,10 @@ impl TransactionsProvider for MockEthProvider {
         Ok(None)
     }

+    fn transaction_by_id_no_hash(&self, _id: TxNumber) -> Result<Option<TransactionSignedNoHash>> {
+        Ok(None)
+    }
+
     fn transaction_by_hash(&self, hash: TxHash) -> Result<Option<TransactionSigned>> {
         Ok(self
             .blocks
diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs
index b330697b8f84..8fa349b11b5b 100644
--- a/crates/storage/provider/src/test_utils/noop.rs
+++ b/crates/storage/provider/src/test_utils/noop.rs
@@ -11,7 +11,8 @@ use reth_primitives::{
     stage::{StageCheckpoint, StageId},
     Account, Address, Block, BlockHash, BlockHashOrNumber, BlockId, BlockNumber, Bytecode, Bytes,
     ChainInfo, ChainSpec, Header, Receipt, SealedBlock, SealedHeader, StorageKey, StorageValue,
-    TransactionMeta, TransactionSigned, TxHash, TxNumber, H256, KECCAK_EMPTY, MAINNET, U256,
+    TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, H256,
+    KECCAK_EMPTY, MAINNET, U256,
 };
 use reth_revm_primitives::primitives::{BlockEnv, CfgEnv};
 use std::{ops::RangeBounds, sync::Arc};
@@ -130,6 +131,10 @@ impl TransactionsProvider for NoopProvider {
         Ok(None)
     }

+    fn transaction_by_id_no_hash(&self, _id: TxNumber) -> Result<Option<TransactionSignedNoHash>> {
+        Ok(None)
+    }
+
     fn transaction_by_hash(&self, _hash: TxHash) -> Result<Option<TransactionSigned>> {
         Ok(None)
     }
diff --git a/crates/storage/provider/src/traits/transactions.rs b/crates/storage/provider/src/traits/transactions.rs
index 6bbd17454443..d9da859534d8 100644
--- a/crates/storage/provider/src/traits/transactions.rs
+++ b/crates/storage/provider/src/traits/transactions.rs
@@ -15,9 +15,12 @@ pub trait TransactionsProvider: BlockNumReader + Send + Sync {
     /// Returns None if the transaction is not found.
     fn transaction_id(&self, tx_hash: TxHash) -> Result<Option<TxNumber>>;

-    /// Get transaction by id.
+    /// Get transaction by id; this computes the hash every time, so it is more expensive.
     fn transaction_by_id(&self, id: TxNumber) -> Result<Option<TransactionSigned>>;

+    /// Get transaction by id without computing the hash.
+    fn transaction_by_id_no_hash(&self, id: TxNumber) -> Result<Option<TransactionSignedNoHash>>;
+
     /// Get transaction by transaction hash.
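Editorial usage note on the new trait method (a sketch, not part of the patch): callers that only need data derivable without the transaction hash, such as the sender, can take the cheaper path and skip the keccak256 over the encoded transaction. The helper below is hypothetical and assumes `TransactionSignedNoHash` exposes `recover_signer`.

```rust
use reth_interfaces::Result;
use reth_primitives::{Address, TxNumber};
use reth_provider::TransactionsProvider;

/// Hypothetical helper: recover a transaction's sender without ever
/// materializing its hash.
fn sender_of<P: TransactionsProvider>(provider: &P, id: TxNumber) -> Result<Option<Address>> {
    Ok(provider.transaction_by_id_no_hash(id)?.and_then(|tx| tx.recover_signer()))
}
```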
     fn transaction_by_hash(&self, hash: TxHash) -> Result<Option<TransactionSigned>>;

From a39ee4a23199575a44b978e5c435378293b78427 Mon Sep 17 00:00:00 2001
From: fomotrader <82184770+fomotrader@users.noreply.github.com>
Date: Tue, 11 Jul 2023 09:05:58 -0400
Subject: [PATCH 130/722] docs: how to enable JSON-RPC endpoints for http/ws
 (#3711)

---
 book/jsonrpc/intro.md | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/book/jsonrpc/intro.md b/book/jsonrpc/intro.md
index 4f3e72bb3e18..1c602f6d2b5b 100644
--- a/book/jsonrpc/intro.md
+++ b/book/jsonrpc/intro.md
@@ -58,6 +58,16 @@ To enable JSON-RPC namespaces on the HTTP server, pass each namespace separated
 reth node --http --http.api eth,net,trace
 ```

+You can pass the `all` option, which is a convenient wrapper for all the JSON-RPC namespaces `admin,debug,eth,net,trace,txpool,web3,rpc`, on the HTTP server:
+
+```bash
+reth node --http --http.api all
+```
+
+The option is case-insensitive, so the following is equivalent:
+
+```bash
+reth node --http --http.api All
+```
+
 You can also restrict who can access the HTTP server by specifying a domain for Cross-Origin requests. This is important, since any application local to your node will be able to access the RPC server:

 ```bash
From 8f2e0cd7f0569865ca7508c568942256c58c00e2 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Tue, 11 Jul 2023 15:56:24 +0200
Subject: [PATCH 131/722] feat: add txpool maintain metrics (#3715)

---
 crates/transaction-pool/src/maintain.rs | 16 +++++++++++-----
 crates/transaction-pool/src/metrics.rs  | 23 +++++++++++++++++++++++
 2 files changed, 34 insertions(+), 5 deletions(-)

diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs
index 0c619f6d2bc6..a55340ed55ab 100644
--- a/crates/transaction-pool/src/maintain.rs
+++ b/crates/transaction-pool/src/maintain.rs
@@ -1,6 +1,7 @@
 //! Support for maintaining the state of the transaction pool

 use crate::{
+    metrics::MaintainPoolMetrics,
     traits::{CanonicalStateUpdate, ChangedAccount, TransactionPoolExt},
     BlockInfo, TransactionPool,
 };
@@ -45,6 +46,7 @@ where
     P: TransactionPoolExt + 'static,
     St: Stream<Item = CanonStateNotification> + Send + Unpin + 'static,
 {
+    let mut metrics = MaintainPoolMetrics::default();
     // ensure the pool points to latest state
     if let Ok(Some(latest)) = client.block_by_number_or_tag(BlockNumberOrTag::Latest) {
         let latest = latest.seal_slow();
@@ -63,7 +65,11 @@ where
     let mut maintained_state = MaintainedPoolState::InSync;

     // Listen for new chain events and derive the update action for the pool
-    while let Some(event) = events.next().await {
+    loop {
+        metrics.set_dirty_accounts_len(dirty_addresses.len());
+
+        let Some(event) = events.next().await else { break };
+
         let pool_info = pool.block_info();

         // TODO from time to time re-check the unique accounts in the pool and remove and resync
@@ -136,7 +142,7 @@ where
                     .filter(|tx| !new_mined_transactions.contains(&tx.hash))
                     .filter_map(|tx| tx.clone().into_ecrecovered())
                     .map(
<P as TransactionPool>
::Transaction::from_recovered_transaction) - .collect(); + .collect::>(); // update the pool first let update = CanonicalStateUpdate { @@ -153,8 +159,8 @@ where // to be re-injected // // Note: we no longer know if the tx was local or external + metrics.inc_reinserted_transactions(pruned_old_transactions.len()); let _ = pool.add_external_transactions(pruned_old_transactions).await; - // TODO: metrics } CanonStateNotification::Revert { old } => { // this similar to the inverse of a commit where we need to insert the transactions @@ -193,13 +199,13 @@ where .transactions() .filter_map(|tx| tx.clone().into_ecrecovered()) .map(
<P as TransactionPool>
::Transaction::from_recovered_transaction) - .collect(); + .collect::>(); // all transactions that were mined in the old chain need to be re-injected // // Note: we no longer know if the tx was local or external + metrics.inc_reinserted_transactions(pruned_old_transactions.len()); let _ = pool.add_external_transactions(pruned_old_transactions).await; - // TODO: metrics } CanonStateNotification::Commit { new } => { let (blocks, state) = new.inner(); diff --git a/crates/transaction-pool/src/metrics.rs b/crates/transaction-pool/src/metrics.rs index 91e31992bf2f..5be12634e574 100644 --- a/crates/transaction-pool/src/metrics.rs +++ b/crates/transaction-pool/src/metrics.rs @@ -34,3 +34,26 @@ pub struct TxPoolMetrics { /// Number of all transactions of all sub-pools: pending + basefee + queued pub(crate) total_transactions: Gauge, } + +/// Transaction pool maintenance metrics +#[derive(Metrics)] +#[metrics(scope = "transaction_pool")] +pub struct MaintainPoolMetrics { + /// Number of currently dirty addresses that need to be updated in the pool by fetching account + /// info + pub(crate) dirty_accounts: Gauge, + /// Number of transaction reinserted into the pool after reorg. + pub(crate) reinserted_transactions: Counter, +} + +impl MaintainPoolMetrics { + #[inline] + pub(crate) fn set_dirty_accounts_len(&self, count: usize) { + self.dirty_accounts.set(count as f64); + } + + #[inline] + pub(crate) fn inc_reinserted_transactions(&self, count: usize) { + self.reinserted_transactions.increment(count as u64); + } +} From 2effa942e1150b9dc6df4375e746e97cb59d2474 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 11 Jul 2023 15:56:33 +0200 Subject: [PATCH 132/722] feat: add performed pool state updates metric (#3714) --- crates/transaction-pool/src/metrics.rs | 3 +++ crates/transaction-pool/src/pool/txpool.rs | 2 ++ 2 files changed, 5 insertions(+) diff --git a/crates/transaction-pool/src/metrics.rs b/crates/transaction-pool/src/metrics.rs index 5be12634e574..1a29e4b75f33 100644 --- a/crates/transaction-pool/src/metrics.rs +++ b/crates/transaction-pool/src/metrics.rs @@ -33,6 +33,9 @@ pub struct TxPoolMetrics { /// Number of all transactions of all sub-pools: pending + basefee + queued pub(crate) total_transactions: Gauge, + + /// How often the pool was updated after the canonical state changed + pub(crate) performed_state_updates: Counter, } /// Transaction pool maintenance metrics diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 9793da32a30b..6b1bad86442f 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -261,6 +261,8 @@ impl TxPool { // update the metrics after the update self.update_size_metrics(); + self.metrics.performed_state_updates.increment(1); + OnNewCanonicalStateOutcome { block_hash, mined: mined_transactions, promoted, discarded } } From 1763b5ea7a936c0deff36380838bee4858ee7758 Mon Sep 17 00:00:00 2001 From: Jay Miller <3744812+jaylmiller@users.noreply.github.com> Date: Tue, 11 Jul 2023 10:07:13 -0400 Subject: [PATCH 133/722] perf: handle engine API range request in a new task (#3685) Co-authored-by: Matthias Seitz --- Cargo.lock | 1 + bin/reth/src/node/mod.rs | 1 + crates/rpc/rpc-builder/tests/it/utils.rs | 1 + crates/rpc/rpc-engine-api/Cargo.toml | 1 + crates/rpc/rpc-engine-api/src/engine_api.rs | 95 ++++++++++++++------- 5 files changed, 70 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e8b62e26467c..8bb5dd010534 100644 --- a/Cargo.lock +++ 
b/Cargo.lock @@ -5736,6 +5736,7 @@ dependencies = [ "reth-provider", "reth-rpc-api", "reth-rpc-types", + "reth-tasks", "thiserror", "tokio", "tracing", diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 529d67713ac7..13fd00d78e81 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -400,6 +400,7 @@ impl Command { self.chain.clone(), beacon_engine_handle, payload_builder.into(), + Box::new(ctx.task_executor.clone()), ); info!(target: "reth::cli", "Engine API handler initialized"); diff --git a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index e7c4a4eb88ae..0b111b33cc8d 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -30,6 +30,7 @@ pub async fn launch_auth(secret: JwtSecret) -> AuthServerHandle { MAINNET.clone(), beacon_engine_handle, spawn_test_payload_service().into(), + Box::new(TokioTaskExecutor::default()), ); let module = AuthRpcModule::new(engine_api); module.start_server(config).await.unwrap() diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index 0f417a3a65fa..219edaca6f4b 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -17,6 +17,7 @@ reth-rpc-types = { workspace = true } reth-rpc-api = { path = "../rpc-api" } reth-beacon-consensus = { path = "../../consensus/beacon" } reth-payload-builder = { workspace = true } +reth-tasks = { workspace = true } # async tokio = { workspace = true, features = ["sync"] } diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 4803c4d2d715..f461f4928dcd 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -11,6 +11,7 @@ use reth_rpc_types::engine::{ ExecutionPayload, ExecutionPayloadBodies, ExecutionPayloadEnvelope, ForkchoiceUpdated, PayloadAttributes, PayloadId, PayloadStatus, TransitionConfiguration, CAPABILITIES, }; +use reth_tasks::TaskSpawner; use std::sync::Arc; use tokio::sync::oneshot; use tracing::trace; @@ -24,6 +25,10 @@ const MAX_PAYLOAD_BODIES_LIMIT: u64 = 1024; /// The Engine API implementation that grants the Consensus layer access to data and /// functions in the Execution layer that are crucial for the consensus process. pub struct EngineApi { + inner: Arc>, +} + +struct EngineApiInner { /// The provider to interact with the chain. provider: Provider, /// Consensus configuration @@ -32,6 +37,8 @@ pub struct EngineApi { beacon_consensus: BeaconConsensusEngineHandle, /// The type that can communicate with the payload service to retrieve payloads. payload_store: PayloadStore, + /// For spawning and executing async tasks + task_spawner: Box, } impl EngineApi @@ -44,8 +51,16 @@ where chain_spec: Arc, beacon_consensus: BeaconConsensusEngineHandle, payload_store: PayloadStore, + task_spawner: Box, ) -> Self { - Self { provider, chain_spec, beacon_consensus, payload_store } + let inner = Arc::new(EngineApiInner { + provider, + chain_spec, + beacon_consensus, + payload_store, + task_spawner, + }); + Self { inner } } /// See also @@ -59,7 +74,7 @@ where payload.timestamp.as_u64(), payload.withdrawals.is_some(), )?; - Ok(self.beacon_consensus.new_payload(payload).await?) + Ok(self.inner.beacon_consensus.new_payload(payload).await?) } /// See also @@ -72,7 +87,7 @@ where payload.timestamp.as_u64(), payload.withdrawals.is_some(), )?; - Ok(self.beacon_consensus.new_payload(payload).await?) 
+ Ok(self.inner.beacon_consensus.new_payload(payload).await?) } /// Sends a message to the beacon consensus engine to update the fork choice _without_ @@ -93,7 +108,7 @@ where attrs.withdrawals.is_some(), )?; } - Ok(self.beacon_consensus.fork_choice_updated(state, payload_attrs).await?) + Ok(self.inner.beacon_consensus.fork_choice_updated(state, payload_attrs).await?) } /// Sends a message to the beacon consensus engine to update the fork choice _with_ withdrawals, @@ -112,7 +127,7 @@ where attrs.withdrawals.is_some(), )?; } - Ok(self.beacon_consensus.fork_choice_updated(state, payload_attrs).await?) + Ok(self.inner.beacon_consensus.fork_choice_updated(state, payload_attrs).await?) } /// Returns the most recent version of the payload that is available in the corresponding @@ -126,6 +141,7 @@ where /// > Provider software MAY stop the corresponding build process after serving this call. pub async fn get_payload_v1(&self, payload_id: PayloadId) -> EngineApiResult { Ok(self + .inner .payload_store .resolve(payload_id) .await @@ -145,6 +161,7 @@ where payload_id: PayloadId, ) -> EngineApiResult { Ok(self + .inner .payload_store .resolve(payload_id) .await @@ -162,31 +179,44 @@ where /// Implementors should take care when acting on the input to this method, specifically /// ensuring that the range is limited properly, and that the range boundaries are computed /// correctly and without panics. - pub fn get_payload_bodies_by_range( + pub async fn get_payload_bodies_by_range( &self, start: BlockNumber, count: u64, ) -> EngineApiResult { - if count > MAX_PAYLOAD_BODIES_LIMIT { - return Err(EngineApiError::PayloadRequestTooLarge { len: count }) - } + let (tx, rx) = oneshot::channel(); + let inner = self.inner.clone(); - if start == 0 || count == 0 { - return Err(EngineApiError::InvalidBodiesRange { start, count }) - } + self.inner.task_spawner.spawn_blocking(Box::pin(async move { + if count > MAX_PAYLOAD_BODIES_LIMIT { + tx.send(Err(EngineApiError::PayloadRequestTooLarge { len: count })).ok(); + return + } - let mut result = Vec::with_capacity(count as usize); + if start == 0 || count == 0 { + tx.send(Err(EngineApiError::InvalidBodiesRange { start, count })).ok(); + return + } - let end = start.saturating_add(count); - for num in start..end { - let block = self - .provider - .block(BlockHashOrNumber::Number(num)) - .map_err(|err| EngineApiError::Internal(Box::new(err)))?; - result.push(block.map(Into::into)); - } + let mut result = Vec::with_capacity(count as usize); - Ok(result) + let end = start.saturating_add(count); + for num in start..end { + let block_result = inner.provider.block(BlockHashOrNumber::Number(num)); + match block_result { + Ok(block) => { + result.push(block.map(Into::into)); + } + Err(err) => { + tx.send(Err(EngineApiError::Internal(Box::new(err)))).ok(); + return + } + }; + } + tx.send(Ok(result)).ok(); + })); + + rx.await.map_err(|err| EngineApiError::Internal(Box::new(err)))? } /// Called to retrieve execution payload bodies by hashes. 
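Editorial aside on the offloading pattern used in `get_payload_bodies_by_range` above (a sketch, not part of the patch): the request runs on a spawned blocking task and the result travels back over a oneshot channel, so the RPC future itself stays cheap to poll. Here `expensive_db_walk` is a hypothetical stand-in for the block-range loop.

```rust
use reth_tasks::TaskSpawner;
use tokio::sync::oneshot;

/// Hypothetical stand-in for the block-range walk performed above.
fn expensive_db_walk() -> Result<Vec<u64>, String> {
    Ok(vec![1, 2, 3])
}

/// Runs `expensive_db_walk` on a spawned blocking task and awaits the result
/// through a oneshot channel, keeping the calling future lightweight.
async fn offloaded(task_spawner: &dyn TaskSpawner) -> Result<Vec<u64>, String> {
    let (tx, rx) = oneshot::channel();
    task_spawner.spawn_blocking(Box::pin(async move {
        // Ignore send failures: the receiver may have been dropped meanwhile.
        let _ = tx.send(expensive_db_walk());
    }));
    // A channel error means the task dropped the sender without responding.
    rx.await.map_err(|_| "task dropped the sender".to_string())?
}
```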
@@ -202,6 +232,7 @@ where let mut result = Vec::with_capacity(hashes.len()); for hash in hashes { let block = self + .inner .provider .block(BlockHashOrNumber::Hash(hash)) .map_err(|err| EngineApiError::Internal(Box::new(err)))?; @@ -224,6 +255,7 @@ where } = config; let merge_terminal_td = self + .inner .chain_spec .fork(Hardfork::Paris) .ttd() @@ -237,7 +269,7 @@ where }) } - self.beacon_consensus.transition_configuration_exchanged().await; + self.inner.beacon_consensus.transition_configuration_exchanged().await; // Short circuit if communicated block hash is zero if terminal_block_hash.is_zero() { @@ -249,6 +281,7 @@ where // Attempt to look up terminal block hash let local_hash = self + .inner .provider .block_hash(terminal_block_number.as_u64()) .map_err(|err| EngineApiError::Internal(Box::new(err)))?; @@ -276,7 +309,8 @@ where timestamp: u64, has_withdrawals: bool, ) -> EngineApiResult<()> { - let is_shanghai = self.chain_spec.fork(Hardfork::Shanghai).active_at_timestamp(timestamp); + let is_shanghai = + self.inner.chain_spec.fork(Hardfork::Shanghai).active_at_timestamp(timestamp); match version { EngineApiMessageVersion::V1 => { @@ -404,7 +438,7 @@ where count: U64, ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_getPayloadBodiesByRangeV1"); - Ok(EngineApi::get_payload_bodies_by_range(self, start.as_u64(), count.as_u64())?) + Ok(EngineApi::get_payload_bodies_by_range(self, start.as_u64(), count.as_u64()).await?) } /// Handler for `engine_exchangeTransitionConfigurationV1` @@ -439,6 +473,7 @@ mod tests { use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::{SealedBlock, H256, MAINNET}; use reth_provider::test_utils::MockEthProvider; + use reth_tasks::TokioTaskExecutor; use std::sync::Arc; use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver}; @@ -447,11 +482,13 @@ mod tests { let provider = Arc::new(MockEthProvider::default()); let payload_store = spawn_test_payload_service(); let (to_engine, engine_rx) = unbounded_channel(); + let task_executor = Box::new(TokioTaskExecutor::default()); let api = EngineApi::new( provider.clone(), chain_spec.clone(), BeaconConsensusEngineHandle::new(to_engine), payload_store.into(), + task_executor, ); let handle = EngineApiTestHandle { chain_spec, provider, from_api: engine_rx }; (handle, api) @@ -491,7 +528,7 @@ mod tests { // test [EngineApiMessage::GetPayloadBodiesByRange] for (start, count) in by_range_tests { - let res = api.get_payload_bodies_by_range(start, count); + let res = api.get_payload_bodies_by_range(start, count).await; assert_matches!(res, Err(EngineApiError::InvalidBodiesRange { .. })); } } @@ -501,7 +538,7 @@ mod tests { let (_, api) = setup_engine_api(); let request_count = MAX_PAYLOAD_BODIES_LIMIT + 1; - let res = api.get_payload_bodies_by_range(0, request_count); + let res = api.get_payload_bodies_by_range(0, request_count).await; assert_matches!(res, Err(EngineApiError::PayloadRequestTooLarge { .. 
})); } @@ -518,7 +555,7 @@ mod tests { let expected = blocks.iter().cloned().map(|b| Some(b.unseal().into())).collect::>(); - let res = api.get_payload_bodies_by_range(start, count).unwrap(); + let res = api.get_payload_bodies_by_range(start, count).await.unwrap(); assert_eq!(res, expected); } @@ -558,7 +595,7 @@ mod tests { }) .collect::>(); - let res = api.get_payload_bodies_by_range(start, count).unwrap(); + let res = api.get_payload_bodies_by_range(start, count).await.unwrap(); assert_eq!(res, expected); let hashes = blocks.iter().map(|b| b.hash()).collect(); From 94129631cb51711641a519439f1ebaef9af3c6ca Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 11 Jul 2023 16:12:20 +0100 Subject: [PATCH 134/722] feat(primitives, storage): save prune checkpoints in database (#3628) --- crates/primitives/src/lib.rs | 2 +- crates/primitives/src/prune/mod.rs | 2 ++ crates/primitives/src/prune/mode.rs | 2 +- crates/primitives/src/prune/part.rs | 24 ++++++++++++++++++++++ crates/storage/db/src/abstraction/table.rs | 2 +- crates/storage/db/src/tables/mod.rs | 9 ++++++-- crates/storage/db/src/tables/models/mod.rs | 19 ++++++++++++++++- 7 files changed, 54 insertions(+), 6 deletions(-) create mode 100644 crates/primitives/src/prune/part.rs diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 6e96a36b8db5..753c1435fe73 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -78,7 +78,7 @@ pub use net::{ SEPOLIA_BOOTNODES, }; pub use peer::{PeerId, WithPeerId}; -pub use prune::{PruneCheckpoint, PruneMode, PruneTargets}; +pub use prune::{PruneCheckpoint, PruneMode, PrunePart, PruneTargets}; pub use receipt::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef}; pub use revm_primitives::JumpMap; pub use serde_helper::JsonU256; diff --git a/crates/primitives/src/prune/mod.rs b/crates/primitives/src/prune/mod.rs index 18cb943fb975..510bc40b6e5d 100644 --- a/crates/primitives/src/prune/mod.rs +++ b/crates/primitives/src/prune/mod.rs @@ -1,7 +1,9 @@ mod checkpoint; mod mode; +mod part; mod target; pub use checkpoint::PruneCheckpoint; pub use mode::PruneMode; +pub use part::PrunePart; pub use target::PruneTargets; diff --git a/crates/primitives/src/prune/mode.rs b/crates/primitives/src/prune/mode.rs index 3fba10fe59af..b62a39041b8c 100644 --- a/crates/primitives/src/prune/mode.rs +++ b/crates/primitives/src/prune/mode.rs @@ -17,7 +17,7 @@ pub enum PruneMode { #[cfg(test)] impl Default for PruneMode { fn default() -> Self { - Self::Distance(0) + Self::Full } } diff --git a/crates/primitives/src/prune/part.rs b/crates/primitives/src/prune/part.rs new file mode 100644 index 000000000000..caa176b86a28 --- /dev/null +++ b/crates/primitives/src/prune/part.rs @@ -0,0 +1,24 @@ +use reth_codecs::{main_codec, Compact}; + +/// Part of the data that can be pruned. +#[main_codec] +#[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd)] +pub enum PrunePart { + /// Prune part responsible for the `TxSenders` table. + SenderRecovery, + /// Prune part responsible for the `TxHashNumber` table. + TransactionLookup, + /// Prune part responsible for the `Receipts` table. + Receipts, + /// Prune part responsible for the `AccountChangeSet` and `AccountHistory` tables. + AccountHistory, + /// Prune part responsible for the `StorageChangeSet` and `StorageHistory` tables. 
+ StorageHistory, +} + +#[cfg(test)] +impl Default for PrunePart { + fn default() -> Self { + Self::SenderRecovery + } +} diff --git a/crates/storage/db/src/abstraction/table.rs b/crates/storage/db/src/abstraction/table.rs index 65d611f86856..18e66fe0e179 100644 --- a/crates/storage/db/src/abstraction/table.rs +++ b/crates/storage/db/src/abstraction/table.rs @@ -49,7 +49,7 @@ pub trait Encode: Send + Sync + Sized + Debug { /// Trait that will transform the data to be read from the DB. pub trait Decode: Send + Sync + Sized + Debug { /// Decodes data coming from the database. - fn decode>(key: B) -> Result; + fn decode>(value: B) -> Result; } /// Generic trait that enforces the database key to implement [`Encode`] and [`Decode`]. diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index ba44ce1ea032..924095ca71df 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -37,8 +37,8 @@ use crate::{ use reth_primitives::{ stage::StageCheckpoint, trie::{BranchNodeCompact, StorageTrieEntry, StoredNibbles, StoredNibblesSubKey}, - Account, Address, BlockHash, BlockNumber, Bytecode, Header, IntegerList, Receipt, StorageEntry, - TransactionSignedNoHash, TxHash, TxNumber, H256, + Account, Address, BlockHash, BlockNumber, Bytecode, Header, IntegerList, PruneCheckpoint, + PrunePart, Receipt, StorageEntry, TransactionSignedNoHash, TxHash, TxNumber, H256, }; /// Enum for the types of tables present in libmdbx. @@ -415,6 +415,11 @@ table!( ( SyncStageProgress ) StageId | Vec ); +table!( + /// Stores the highest pruned block number and prune mode of each prune part. + ( PruneParts ) PrunePart | PruneCheckpoint +); + /// Alias Types /// List with transaction numbers. diff --git a/crates/storage/db/src/tables/models/mod.rs b/crates/storage/db/src/tables/models/mod.rs index da746efda686..3bfd0fbfe95f 100644 --- a/crates/storage/db/src/tables/models/mod.rs +++ b/crates/storage/db/src/tables/models/mod.rs @@ -6,7 +6,7 @@ use crate::{ use reth_codecs::Compact; use reth_primitives::{ trie::{StoredNibbles, StoredNibblesSubKey}, - Address, H256, + Address, PrunePart, H256, }; pub mod accounts; @@ -135,3 +135,20 @@ impl Decode for StoredNibblesSubKey { Ok(Self::from_compact(buf, buf.len()).0) } } + +impl Encode for PrunePart { + type Encoded = [u8; 1]; + + fn encode(self) -> Self::Encoded { + let mut buf = [0u8]; + self.to_compact(&mut buf.as_mut()); + buf + } +} + +impl Decode for PrunePart { + fn decode>(value: B) -> Result { + let buf = value.as_ref(); + Ok(Self::from_compact(buf, buf.len()).0) + } +} From fbdea303753ff361a753926cadc1c1ba20638897 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 11 Jul 2023 17:13:06 +0200 Subject: [PATCH 135/722] fix: treat bool params as invalid in logs subscription (#3716) --- crates/rpc/rpc-types/src/eth/pubsub.rs | 29 +++++++++++++++++++++++++- crates/rpc/rpc/src/eth/pubsub.rs | 3 +++ 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/crates/rpc/rpc-types/src/eth/pubsub.rs b/crates/rpc/rpc-types/src/eth/pubsub.rs index e363cb754c39..d54f6f849a86 100644 --- a/crates/rpc/rpc-types/src/eth/pubsub.rs +++ b/crates/rpc/rpc-types/src/eth/pubsub.rs @@ -96,7 +96,7 @@ pub enum SubscriptionKind { Syncing, } -/// Subscription kind. +/// Any additional parameters for a subscription. #[derive(Debug, Clone, PartialEq, Eq, Hash, Default)] pub enum Params { /// No parameters passed. @@ -108,6 +108,20 @@ pub enum Params { Bool(bool), } +impl Params { + /// Returns true if it's a bool parameter. 
+ #[inline] + pub fn is_bool(&self) -> bool { + matches!(self, Params::Bool(_)) + } + + /// Returns true if it's a log parameter. + #[inline] + pub fn is_logs(&self) -> bool { + matches!(self, Params::Logs(_)) + } +} + impl Serialize for Params { fn serialize(&self, serializer: S) -> Result where @@ -141,3 +155,16 @@ impl<'a> Deserialize<'a> for Params { .map_err(|e| D::Error::custom(format!("Invalid Pub-Sub parameters: {e}"))) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn params_serde() { + let s: Params = serde_json::from_str("true").unwrap(); + assert_eq!(s, Params::Bool(true)); + let s: Params = serde_json::from_str("null").unwrap(); + assert_eq!(s, Params::None); + } +} diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index a66930011369..5b823ea40cbd 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -115,6 +115,9 @@ where // if no params are provided, used default filter params let filter = match params { Some(Params::Logs(filter)) => FilteredParams::new(Some(*filter)), + Some(Params::Bool(_)) => { + return Err(invalid_params_rpc_err("Invalid params for logs").into()) + } _ => FilteredParams::default(), }; let stream = From 65c7c1c4f9db388f7f1ad0f6084ceb845e67c646 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 11 Jul 2023 16:49:33 +0100 Subject: [PATCH 136/722] chore: use `transaction_by_id_no_hash` to avoid hash computation (#3718) --- crates/stages/src/stages/sender_recovery.rs | 10 ++++++++-- .../provider/src/providers/database/provider.rs | 15 ++++++++++++--- 2 files changed, 20 insertions(+), 5 deletions(-) diff --git a/crates/stages/src/stages/sender_recovery.rs b/crates/stages/src/stages/sender_recovery.rs index 482ed1eb43f5..e6294b3a861b 100644 --- a/crates/stages/src/stages/sender_recovery.rs +++ b/crates/stages/src/stages/sender_recovery.rs @@ -453,8 +453,14 @@ mod tests { while let Some((_, body)) = body_cursor.next()? { for tx_id in body.tx_num_range() { - let transaction: TransactionSigned = - provider.transaction_by_id(tx_id)?.expect("no transaction entry"); + let transaction: TransactionSigned = provider + .transaction_by_id_no_hash(tx_id)? + .map(|tx| TransactionSigned { + hash: Default::default(), // we don't require the hash + signature: tx.signature, + transaction: tx.transaction, + }) + .expect("no transaction entry"); let signer = transaction.recover_signer().expect("failed to recover signer"); assert_eq!(Some(signer), provider.transaction_sender(tx_id)?) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 28a00f45e20a..2d494c40e26d 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -935,12 +935,16 @@ impl<'this, TX: DbTx<'this>> TransactionsProvider for DatabaseProvider<'this, TX } fn transaction_by_id_no_hash(&self, id: TxNumber) -> Result> { - Ok(self.tx.get::(id)?.map(Into::into)) + Ok(self.tx.get::(id)?) } fn transaction_by_hash(&self, hash: TxHash) -> Result> { if let Some(id) = self.transaction_id(hash)? { - Ok(self.transaction_by_id(id)?) 
+ Ok(self.transaction_by_id_no_hash(id)?.map(|tx| TransactionSigned { + hash, + signature: tx.signature, + transaction: tx.transaction, + })) } else { Ok(None) } @@ -953,7 +957,12 @@ impl<'this, TX: DbTx<'this>> TransactionsProvider for DatabaseProvider<'this, TX ) -> Result> { let mut transaction_cursor = self.tx.cursor_read::()?; if let Some(transaction_id) = self.transaction_id(tx_hash)? { - if let Some(transaction) = self.transaction_by_id(transaction_id)? { + if let Some(tx) = self.transaction_by_id_no_hash(transaction_id)? { + let transaction = TransactionSigned { + hash: tx_hash, + signature: tx.signature, + transaction: tx.transaction, + }; if let Some(block_number) = transaction_cursor.seek(transaction_id).map(|b| b.map(|(_, bn)| bn))? { From 718d811ed716374ddbbd4f372f435bd45a5f14c7 Mon Sep 17 00:00:00 2001 From: Siyuan Han <47173566+hsyodyssey@users.noreply.github.com> Date: Wed, 12 Jul 2023 00:48:33 +0800 Subject: [PATCH 137/722] Chore(book): correct the grafana dashboards json path (#3724) --- book/run/observability.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/book/run/observability.md b/book/run/observability.md index 43a241999175..9eb3e99a5e8d 100644 --- a/book/run/observability.md +++ b/book/run/observability.md @@ -51,7 +51,7 @@ Once you've logged in, click on the gear icon in the lower left, and select "Dat As this might be a point of confusion, `localhost:9001`, which we supplied to `--metrics`, is the endpoint that Reth exposes, from which Prometheus collects metrics. Prometheus then exposes `localhost:9090` (by default) for other services (such as Grafana) to consume Prometheus metrics. -To configure the dashboard in Grafana, click on the squares icon in the upper left, and click on "New", then "Import". From there, click on "Upload JSON file", and select the example file in [`reth/etc/grafana/overview.json`](https://github.com/paradigmxyz/reth/blob/main/etc/grafana/dashboards/overview.json). Finally, select the Prometheus data source you just created, and click "Import". +To configure the dashboard in Grafana, click on the squares icon in the upper left, and click on "New", then "Import". From there, click on "Upload JSON file", and select the example file in [`reth/etc/grafana/dashboards/overview.json`](https://github.com/paradigmxyz/reth/blob/main/etc/grafana/dashboards/overview.json). Finally, select the Prometheus data source you just created, and click "Import". And voilá, you should see your dashboard! If you're not yet connected to any peers, the dashboard will look like it's in an empty state, but once you are, you should see it start populating with data. From 5609a39050be9bce49c2b77ba8e3fe1c6a9c2b6a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 11 Jul 2023 19:38:13 +0200 Subject: [PATCH 138/722] perf: only update finalized safe if changed (#3725) --- .../consensus/beacon/src/engine/forkchoice.rs | 2 + crates/consensus/beacon/src/engine/mod.rs | 83 ++++++++++++++----- 2 files changed, 63 insertions(+), 22 deletions(-) diff --git a/crates/consensus/beacon/src/engine/forkchoice.rs b/crates/consensus/beacon/src/engine/forkchoice.rs index f41200e39615..51c67f7eef33 100644 --- a/crates/consensus/beacon/src/engine/forkchoice.rs +++ b/crates/consensus/beacon/src/engine/forkchoice.rs @@ -39,6 +39,8 @@ impl ForkchoiceStateTracker { } /// Returns the [ForkchoiceStatus] of the latest received FCU. + /// + /// Caution: this can be invalid. 
pub(crate) fn latest_status(&self) -> Option { self.latest.as_ref().map(|s| s.status) } diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index f8b95d4682ce..b69d3bfee704 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -25,7 +25,8 @@ use reth_primitives::{ Head, Header, SealedBlock, SealedHeader, H256, U256, }; use reth_provider::{ - BlockReader, BlockSource, CanonChainTracker, ProviderError, StageCheckpointReader, + BlockIdReader, BlockReader, BlockSource, CanonChainTracker, ProviderError, + StageCheckpointReader, }; use reth_rpc_types::engine::{ ExecutionPayload, PayloadAttributes, PayloadStatus, PayloadStatusEnum, PayloadValidationError, @@ -147,7 +148,11 @@ pub struct BeaconConsensusEngine where DB: Database, Client: HeadersClient + BodiesClient, - BT: BlockchainTreeEngine + BlockReader + CanonChainTracker + StageCheckpointReader, + BT: BlockchainTreeEngine + + BlockReader + + BlockIdReader + + CanonChainTracker + + StageCheckpointReader, { /// Controls syncing triggered by engine updates. sync: EngineSyncController, @@ -187,7 +192,12 @@ where impl BeaconConsensusEngine where DB: Database + Unpin + 'static, - BT: BlockchainTreeEngine + BlockReader + CanonChainTracker + StageCheckpointReader + 'static, + BT: BlockchainTreeEngine + + BlockReader + + BlockIdReader + + CanonChainTracker + + StageCheckpointReader + + 'static, Client: HeadersClient + BodiesClient + Clone + Unpin + 'static, { /// Create a new instance of the [BeaconConsensusEngine]. @@ -657,25 +667,8 @@ where // we update the the tracked header first self.blockchain.set_canonical_head(head); - if !update.finalized_block_hash.is_zero() { - let finalized = self - .blockchain - .find_block_by_hash(update.finalized_block_hash, BlockSource::Any)? - .ok_or_else(|| { - Error::Provider(ProviderError::UnknownBlockHash(update.finalized_block_hash)) - })?; - self.blockchain.set_finalized(finalized.header.seal(update.finalized_block_hash)); - } - - if !update.safe_block_hash.is_zero() { - let safe = self - .blockchain - .find_block_by_hash(update.safe_block_hash, BlockSource::Any)? - .ok_or_else(|| { - Error::Provider(ProviderError::UnknownBlockHash(update.safe_block_hash)) - })?; - self.blockchain.set_safe(safe.header.seal(update.safe_block_hash)); - } + self.update_finalized_block(update.finalized_block_hash)?; + self.update_safe_block(update.safe_block_hash)?; head_block.total_difficulty = self.blockchain.header_td_by_number(head_block.number)?.ok_or_else(|| { @@ -688,6 +681,51 @@ where Ok(()) } + /// Updates the tracked safe block if we have it + /// + /// Returns an error if the block is not found. + #[inline] + fn update_safe_block(&self, safe_block_hash: H256) -> Result<(), reth_interfaces::Error> { + if !safe_block_hash.is_zero() { + if self.blockchain.safe_block_hash()? == Some(safe_block_hash) { + // nothing to update + return Ok(()) + } + + let safe = self + .blockchain + .find_block_by_hash(safe_block_hash, BlockSource::Any)? + .ok_or_else(|| Error::Provider(ProviderError::UnknownBlockHash(safe_block_hash)))?; + self.blockchain.set_safe(safe.header.seal(safe_block_hash)); + } + Ok(()) + } + + /// Updates the tracked finalized block if we have it + /// + /// Returns an error if the block is not found. + #[inline] + fn update_finalized_block( + &self, + finalized_block_hash: H256, + ) -> Result<(), reth_interfaces::Error> { + if !finalized_block_hash.is_zero() { + if self.blockchain.finalized_block_hash()? 
== Some(finalized_block_hash) { + // nothing to update + return Ok(()) + } + + let finalized = self + .blockchain + .find_block_by_hash(finalized_block_hash, BlockSource::Any)? + .ok_or_else(|| { + Error::Provider(ProviderError::UnknownBlockHash(finalized_block_hash)) + })?; + self.blockchain.set_finalized(finalized.header.seal(finalized_block_hash)); + } + Ok(()) + } + /// Handler for a failed a forkchoice update due to a canonicalization error. /// /// This will determine if the state's head is invalid, and if so, return immediately. @@ -1386,6 +1424,7 @@ where Client: HeadersClient + BodiesClient + Clone + Unpin + 'static, BT: BlockchainTreeEngine + BlockReader + + BlockIdReader + CanonChainTracker + StageCheckpointReader + Unpin From 7fa032f9c957fcc30a1b8c60877a3328ea812a44 Mon Sep 17 00:00:00 2001 From: int88 <106391185+int88@users.noreply.github.com> Date: Wed, 12 Jul 2023 03:15:05 +0800 Subject: [PATCH 139/722] test: fix engine hive of `Invalid Transition Payload Sync` (#3710) --- crates/consensus/beacon/src/engine/mod.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index b69d3bfee704..ad1346220730 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -887,8 +887,15 @@ where let block_hash = block.hash(); let block_num_hash = block.num_hash(); + let mut lowest_buffered_ancestor = self.lowest_buffered_ancestor_or(block.hash); + if lowest_buffered_ancestor == block.hash { + lowest_buffered_ancestor = block.parent_hash; + } + // now check the block itself - if let Some(status) = self.check_invalid_ancestor_with_head(block.parent_hash, block.hash) { + if let Some(status) = + self.check_invalid_ancestor_with_head(lowest_buffered_ancestor, block.hash) + { return Ok(status) } From 94ba83f6353cc8d8e25c6acc118b8f6e90145511 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 11 Jul 2023 22:39:51 +0200 Subject: [PATCH 140/722] docs: add a few more docs to ControlFlow (#3603) --- crates/consensus/beacon/src/engine/mod.rs | 4 ++-- crates/stages/src/pipeline/ctrl.rs | 8 +++++--- crates/stages/src/pipeline/mod.rs | 5 +++++ 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index ad1346220730..10e53323291a 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1306,7 +1306,7 @@ where // update the canon chain if continuous is enabled if self.sync.run_pipeline_continuously() { - let max_block = ctrl.progress().unwrap_or_default(); + let max_block = ctrl.block_number().unwrap_or_default(); let max_header = match self.blockchain.sealed_header(max_block) { Ok(header) => match header { Some(header) => header, @@ -1371,7 +1371,7 @@ where // If both are Some, we perform another distance check and return the desired // pipeline target let pipeline_target = if let (Some(progress), Some(finalized_number)) = - (ctrl.progress(), newest_finalized) + (ctrl.block_number(), newest_finalized) { // Determines whether or not we should run the pipeline again, in case the // new gap is large enough to warrant running the pipeline. 
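Editorial aside on the renamed accessor in the `ctrl.rs` hunk below (a sketch, not part of the patch; `resume_from` and `handle_unwind` are hypothetical): a consumer of the pipeline result reads `block_number()` as progress, with `None` signalling a requested unwind.

```rust
use reth_primitives::BlockNumber;
use reth_stages::ControlFlow;

fn resume_from(_block: BlockNumber) { /* hypothetical follow-up */ }
fn handle_unwind() { /* hypothetical follow-up */ }

fn on_pipeline_result(ctrl: ControlFlow) {
    // `block_number()` returns the block the pipeline reached, and `None`
    // when the run ended in a requested unwind.
    match ctrl.block_number() {
        Some(reached) => resume_from(reached),
        None => handle_unwind(),
    }
}
```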
diff --git a/crates/stages/src/pipeline/ctrl.rs b/crates/stages/src/pipeline/ctrl.rs index deece92d25fa..820df423b631 100644 --- a/crates/stages/src/pipeline/ctrl.rs +++ b/crates/stages/src/pipeline/ctrl.rs @@ -1,6 +1,8 @@ use reth_primitives::{BlockNumber, SealedHeader}; /// Determines the control flow during pipeline execution. +/// +/// See [Pipeline::run_loop](crate::Pipeline::run_loop) for more information. #[derive(Debug, Eq, PartialEq)] pub enum ControlFlow { /// An unwind was requested and must be performed before continuing. @@ -10,7 +12,7 @@ pub enum ControlFlow { /// The block that caused the unwind. bad_block: SealedHeader, }, - /// The pipeline is allowed to continue executing stages. + /// The pipeline made progress. Continue { /// Block number reached by the stage. block_number: BlockNumber, @@ -33,8 +35,8 @@ impl ControlFlow { matches!(self, ControlFlow::Unwind { .. }) } - /// Returns the pipeline progress, if the state is not `Unwind`. - pub fn progress(&self) -> Option { + /// Returns the pipeline block number the stage reached, if the state is not `Unwind`. + pub fn block_number(&self) -> Option { match self { ControlFlow::Unwind { .. } => None, ControlFlow::Continue { block_number } => Some(*block_number), diff --git a/crates/stages/src/pipeline/mod.rs b/crates/stages/src/pipeline/mod.rs index fa2ec285e0ad..a108695cfade 100644 --- a/crates/stages/src/pipeline/mod.rs +++ b/crates/stages/src/pipeline/mod.rs @@ -206,6 +206,11 @@ where /// If any stage is unsuccessful at execution, we proceed to /// unwind. This will undo the progress across the entire pipeline /// up to the block that caused the error. + /// + /// Returns the control flow after it ran the pipeline. + /// This will be [ControlFlow::Continue] or [ControlFlow::NoProgress] of the _last_ stage in the + /// pipeline (for example the `Finish` stage). Or [ControlFlow::Unwind] of the stage that caused + /// the unwind. pub async fn run_loop(&mut self) -> Result { let mut previous_stage = None; for stage_index in 0..self.stages.len() { From 5dd4bc6bee887590b5d4287ad157b25c9ea73e17 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 11 Jul 2023 22:59:21 +0200 Subject: [PATCH 141/722] fix: use tx gas limit for root trace (#3719) --- .../revm/revm-inspectors/src/tracing/mod.rs | 22 ++++++++++++------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/crates/revm/revm-inspectors/src/tracing/mod.rs b/crates/revm/revm-inspectors/src/tracing/mod.rs index b064e0dd692c..bf3e746c90f9 100644 --- a/crates/revm/revm-inspectors/src/tracing/mod.rs +++ b/crates/revm/revm-inspectors/src/tracing/mod.rs @@ -146,15 +146,15 @@ impl TracingInspector { /// /// Invoked on [Inspector::call]. 
#[allow(clippy::too_many_arguments)] - fn start_trace_on_call( + fn start_trace_on_call( &mut self, - depth: usize, + data: &EVMData<'_, DB>, address: Address, - data: Bytes, + input_data: Bytes, value: U256, kind: CallKind, caller: Address, - gas_limit: u64, + mut gas_limit: u64, maybe_precompile: Option, ) { // This will only be true if the inspector is configured to exclude precompiles and the call @@ -166,14 +166,20 @@ impl TracingInspector { PushTraceKind::PushAndAttachToParent }; + if self.trace_stack.is_empty() { + // this is the root call which should get the original gas limit of the transaction, + // because initialization costs are already subtracted from gas_limit + gas_limit = data.env.tx.gas_limit; + } + self.trace_stack.push(self.traces.push_trace( 0, push_kind, CallTrace { - depth, + depth: data.journaled_state.depth() as usize, address, kind, - data, + data: input_data, value, status: InstructionResult::Continue, caller, @@ -421,7 +427,7 @@ where self.config.exclude_precompile_calls.then(|| self.is_precompile_call(data, &to, value)); self.start_trace_on_call( - data.journaled_state.depth() as usize, + data, to, inputs.input.clone(), value, @@ -460,7 +466,7 @@ where let _ = data.journaled_state.load_account(inputs.caller, data.db); let nonce = data.journaled_state.account(inputs.caller).info.nonce; self.start_trace_on_call( - data.journaled_state.depth() as usize, + data, get_create_address(inputs, nonce), inputs.init_code.clone(), inputs.value, From e2218bea37aa455b4fd6602fe71a8fbe0974f12b Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Wed, 12 Jul 2023 11:19:43 +0300 Subject: [PATCH 142/722] fix(provider): update checkpoints only for known stages (#3624) --- crates/storage/provider/src/providers/database/provider.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 2d494c40e26d..597909db4ce1 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1227,14 +1227,15 @@ impl<'this, TX: DbTxMut<'this>> StageCheckpointWriter for DatabaseProvider<'this ) -> Result<()> { // iterate over all existing stages in the table and update its progress. let mut cursor = self.tx.cursor_write::()?; - while let Some((stage_name, checkpoint)) = cursor.next()? { + for stage_id in StageId::ALL { + let (_, checkpoint) = cursor.seek_exact(stage_id.to_string())?.unwrap_or_default(); cursor.upsert( - stage_name, + stage_id.to_string(), StageCheckpoint { block_number, ..if drop_stage_checkpoint { Default::default() } else { checkpoint } }, - )? 
+            )?;
         }
 
         Ok(())

From a7eae8cfc37966d10496d9072f2b58187e94d104 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Wed, 12 Jul 2023 12:13:07 +0200
Subject: [PATCH 143/722] fix: register precompiles correctly (#3720)

---
 .../revm-inspectors/src/tracing/js/mod.rs | 35 +++++++++++--------
 1 file changed, 21 insertions(+), 14 deletions(-)

diff --git a/crates/revm/revm-inspectors/src/tracing/js/mod.rs b/crates/revm/revm-inspectors/src/tracing/js/mod.rs
index e9bc8f390046..b79b41f648f5 100644
--- a/crates/revm/revm-inspectors/src/tracing/js/mod.rs
+++ b/crates/revm/revm-inspectors/src/tracing/js/mod.rs
@@ -16,6 +16,7 @@ use revm::{
     interpreter::{
         return_revert, CallInputs, CallScheme, CreateInputs, Gas, InstructionResult, Interpreter,
     },
+    precompile::Precompiles,
     primitives::{Env, ExecutionResult, Output, ResultAndState, TransactTo, B160, B256},
     Database, EVMData, Inspector,
 };
@@ -48,6 +49,8 @@ pub struct JsInspector {
     call_stack: Vec,
     /// sender half of a channel to communicate with the database service.
     to_db_service: mpsc::Sender,
+    /// Marker to track whether the precompiles have been registered.
+    precompiles_registered: bool,
 }
 
 impl JsInspector {
@@ -130,6 +133,7 @@ impl JsInspector {
             step_fn,
             call_stack: Default::default(),
             to_db_service,
+            precompiles_registered: false,
         })
    }
 
@@ -264,26 +268,25 @@ impl JsInspector {
     fn pop_call(&mut self) {
         self.call_stack.pop();
     }
-}
 
-impl Inspector for JsInspector
-where
-    DB: Database,
-{
-    fn initialize_interp(
-        &mut self,
-        _interp: &mut Interpreter,
-        data: &mut EVMData<'_, DB>,
-        _is_static: bool,
-    ) -> InstructionResult {
+    /// Registers the precompiles in the JS context
+    fn register_precompiles(&mut self, precompiles: &Precompiles) {
+        if self.precompiles_registered {
+            return
+        }
         let precompiles =
-            PrecompileList(data.precompiles.addresses().into_iter().map(Into::into).collect());
+            PrecompileList(precompiles.addresses().into_iter().map(Into::into).collect());
 
         let _ = precompiles.register_callable(&mut self.ctx);
 
-        InstructionResult::Continue
+        self.precompiles_registered = true
     }
+}
 
+impl Inspector for JsInspector
+where
+    DB: Database,
+{
     fn step(
         &mut self,
         interp: &mut Interpreter,
@@ -361,10 +364,12 @@ where
 
     fn call(
         &mut self,
-        _data: &mut EVMData<'_, DB>,
+        data: &mut EVMData<'_, DB>,
         inputs: &mut CallInputs,
         _is_static: bool,
     ) -> (InstructionResult, Gas, Bytes) {
+        self.register_precompiles(&data.precompiles);
+
         // determine correct `from` and `to` based on the call scheme
         let (from, to) = match inputs.context.scheme {
            CallScheme::DelegateCall | CallScheme::CallCode => {
@@ -425,6 +430,8 @@ where
         data: &mut EVMData<'_, DB>,
         inputs: &mut CreateInputs,
     ) -> (InstructionResult, Option, Gas, Bytes) {
+        self.register_precompiles(&data.precompiles);
+
         let _ = data.journaled_state.load_account(inputs.caller, data.db);
         let nonce = data.journaled_state.account(inputs.caller).info.nonce;
         let address = get_create_address(inputs, nonce);

From dbafe23cce45917a43f6640d71a0216fe3268428 Mon Sep 17 00:00:00 2001
From: Alexey Shekhirin
Date: Wed, 12 Jul 2023 13:03:08 +0100
Subject: [PATCH 144/722] feat(bin, engine, prune): spawn pruning task from the
 engine (#3566)

---
 Cargo.lock                                    |  11 +
 Cargo.toml                                    |   1 +
 bin/reth/Cargo.toml                           |   1 +
 bin/reth/src/node/mod.rs                      |   6 +
 crates/blockchain-tree/src/blockchain_tree.rs |  13 +-
 crates/blockchain-tree/src/config.rs          |   2 +-
 crates/blockchain-tree/src/shareable.rs       |  15 +-
 crates/consensus/beacon/Cargo.toml            |   1 +
 crates/consensus/beacon/src/engine/error.rs   |   7 +
 crates/consensus/beacon/src/engine/metrics.rs |   2 +
crates/consensus/beacon/src/engine/mod.rs | 194 ++++++++++++++---- crates/consensus/beacon/src/engine/prune.rs | 146 +++++++++++++ crates/consensus/beacon/src/engine/sync.rs | 8 +- crates/interfaces/src/blockchain_tree/mod.rs | 12 +- crates/primitives/src/stage/checkpoints.rs | 2 +- crates/prune/Cargo.toml | 20 ++ crates/prune/src/error.rs | 4 + crates/prune/src/lib.rs | 5 + crates/prune/src/pruner.rs | 83 ++++++++ crates/storage/provider/src/providers/mod.rs | 11 +- 20 files changed, 494 insertions(+), 50 deletions(-) create mode 100644 crates/consensus/beacon/src/engine/prune.rs create mode 100644 crates/prune/Cargo.toml create mode 100644 crates/prune/src/error.rs create mode 100644 crates/prune/src/lib.rs create mode 100644 crates/prune/src/pruner.rs diff --git a/Cargo.lock b/Cargo.lock index 8bb5dd010534..48cec8dd50fd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4956,6 +4956,7 @@ dependencies = [ "reth-payload-builder", "reth-primitives", "reth-provider", + "reth-prune", "reth-revm", "reth-revm-inspectors", "reth-rlp", @@ -5029,6 +5030,7 @@ dependencies = [ "reth-payload-builder", "reth-primitives", "reth-provider", + "reth-prune", "reth-rpc-types", "reth-stages", "reth-tasks", @@ -5554,6 +5556,15 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-prune" +version = "0.1.0-alpha.3" +dependencies = [ + "reth-primitives", + "thiserror", + "tracing", +] + [[package]] name = "reth-revm" version = "0.1.0-alpha.3" diff --git a/Cargo.toml b/Cargo.toml index c73f2f87aba3..94e6ca26fa33 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,6 +21,7 @@ members = [ "crates/net/downloaders", "crates/payload/basic", "crates/primitives", + "crates/prune", "crates/revm", "crates/revm/revm-primitives", "crates/revm/revm-inspectors", diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 1a2fc9591254..eb81a41931fc 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -36,6 +36,7 @@ reth-payload-builder = { workspace = true } reth-basic-payload-builder = { path = "../../crates/payload/basic" } reth-discv4 = { path = "../../crates/net/discv4" } reth-metrics = { workspace = true } +reth-prune = { path = "../../crates/prune" } jemallocator = { version = "0.5.0", optional = true } jemalloc-ctl = { version = "0.5.0", optional = true } diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 13fd00d78e81..d2ed2825610e 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -360,6 +360,11 @@ impl Command { None }; + let pruner = config.prune.map(|prune_config| { + info!(target: "reth::cli", "Pruner initialized"); + reth_prune::Pruner::new(prune_config.block_interval, tree_config.max_reorg_depth()) + }); + // Configure the consensus engine let (beacon_consensus_engine, beacon_engine_handle) = BeaconConsensusEngine::with_channel( client, @@ -374,6 +379,7 @@ impl Command { MIN_BLOCKS_FOR_PIPELINE_RUN, consensus_engine_tx, consensus_engine_rx, + pruner, )?; info!(target: "reth::cli", "Consensus engine initialized"); diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 92c821064c1a..43fe1be6fa52 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -750,12 +750,21 @@ impl BlockchainTree /// /// This finalizes `last_finalized_block` prior to reading the canonical hashes (using /// [`BlockchainTree::finalize_block`]). 
- pub fn restore_canonical_hashes( + pub fn restore_canonical_hashes_and_finalize( &mut self, last_finalized_block: BlockNumber, ) -> Result<(), Error> { self.finalize_block(last_finalized_block); + self.restore_canonical_hashes() + } + + /// Reads the last `N` canonical hashes from the database and updates the block indices of the + /// tree. + /// + /// `N` is the `max_reorg_depth` plus the number of block hashes needed to satisfy the + /// `BLOCKHASH` opcode in the EVM. + pub fn restore_canonical_hashes(&mut self) -> Result<(), Error> { let num_of_canonical_hashes = self.config.max_reorg_depth() + self.config.num_of_additional_canonical_block_hashes(); @@ -1578,7 +1587,7 @@ mod tests { .assert(&tree); // update canonical block to b2, this would make b2a be removed - assert_eq!(tree.restore_canonical_hashes(12), Ok(())); + assert_eq!(tree.restore_canonical_hashes_and_finalize(12), Ok(())); assert_eq!(tree.is_block_known(block2.num_hash()).unwrap(), Some(BlockStatus::Valid)); diff --git a/crates/blockchain-tree/src/config.rs b/crates/blockchain-tree/src/config.rs index 681c8f61b3d9..3c56acc56574 100644 --- a/crates/blockchain-tree/src/config.rs +++ b/crates/blockchain-tree/src/config.rs @@ -1,7 +1,7 @@ //! Blockchain tree configuration /// The configuration for the blockchain tree. -#[derive(Clone, Debug)] +#[derive(Clone, Copy, Debug)] pub struct BlockchainTreeConfig { /// Number of blocks after the last finalized block that we are storing. /// diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index a63b18dd2657..d99901dc8e08 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -66,10 +66,21 @@ impl BlockchainTreeEngine tree.update_chains_metrics(); } - fn restore_canonical_hashes(&self, last_finalized_block: BlockNumber) -> Result<(), Error> { + fn restore_canonical_hashes_and_finalize( + &self, + last_finalized_block: BlockNumber, + ) -> Result<(), Error> { trace!(target: "blockchain_tree", ?last_finalized_block, "Restoring canonical hashes for last finalized block"); let mut tree = self.tree.write(); - let res = tree.restore_canonical_hashes(last_finalized_block); + let res = tree.restore_canonical_hashes_and_finalize(last_finalized_block); + tree.update_chains_metrics(); + res + } + + fn restore_canonical_hashes(&self) -> Result<(), Error> { + trace!(target: "blockchain_tree", "Restoring canonical hashes"); + let mut tree = self.tree.write(); + let res = tree.restore_canonical_hashes(); tree.update_chains_metrics(); res } diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 64845e971d84..5f7ba35dd225 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -19,6 +19,7 @@ reth-rpc-types = { workspace = true } reth-tasks = { workspace = true } reth-payload-builder = { workspace = true } reth-metrics = { workspace = true } +reth-prune = { path = "../../prune" } # async tokio = { workspace = true, features = ["sync"] } diff --git a/crates/consensus/beacon/src/engine/error.rs b/crates/consensus/beacon/src/engine/error.rs index 355197576a52..b78b3828bdbd 100644 --- a/crates/consensus/beacon/src/engine/error.rs +++ b/crates/consensus/beacon/src/engine/error.rs @@ -1,3 +1,4 @@ +use reth_prune::PrunerError; use reth_rpc_types::engine::ForkchoiceUpdateError; use reth_stages::PipelineError; @@ -16,6 +17,12 @@ pub enum BeaconConsensusEngineError { /// Pipeline error. 
#[error(transparent)] Pipeline(#[from] Box), + /// Pruner channel closed. + #[error("Pruner channel closed")] + PrunerChannelClosed, + /// Pruner error. + #[error(transparent)] + Pruner(#[from] PrunerError), /// Common error. Wrapper around [reth_interfaces::Error]. #[error(transparent)] Common(#[from] reth_interfaces::Error), diff --git a/crates/consensus/beacon/src/engine/metrics.rs b/crates/consensus/beacon/src/engine/metrics.rs index 14d68f4dd7a5..04080e93be55 100644 --- a/crates/consensus/beacon/src/engine/metrics.rs +++ b/crates/consensus/beacon/src/engine/metrics.rs @@ -13,6 +13,8 @@ pub(crate) struct EngineMetrics { pub(crate) forkchoice_updated_messages: Counter, /// The total count of new payload messages received. pub(crate) new_payload_messages: Counter, + /// The number of times the pruner was run. + pub(crate) pruner_runs: Counter, } /// Metrics for the `EngineSyncController`. diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 10e53323291a..9403597278f1 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -3,6 +3,7 @@ use crate::{ forkchoice::{ForkchoiceStateHash, ForkchoiceStateTracker}, message::OnForkChoiceUpdated, metrics::EngineMetrics, + prune::{EnginePruneController, EnginePruneEvent}, }, sync::{EngineSyncController, EngineSyncEvent}, }; @@ -28,6 +29,7 @@ use reth_provider::{ BlockIdReader, BlockReader, BlockSource, CanonChainTracker, ProviderError, StageCheckpointReader, }; +use reth_prune::Pruner; use reth_rpc_types::engine::{ ExecutionPayload, PayloadAttributes, PayloadStatus, PayloadStatusEnum, PayloadValidationError, }; @@ -60,13 +62,15 @@ use invalid_headers::InvalidHeaderCache; mod event; pub use event::BeaconConsensusEngineEvent; -mod forkchoice; -mod metrics; -pub(crate) mod sync; mod handle; pub use handle::BeaconConsensusEngineHandle; +mod forkchoice; +mod metrics; +pub(crate) mod prune; +pub(crate) mod sync; + /// The maximum number of invalid headers that can be tracked by the engine. const MAX_INVALID_HEADERS: u32 = 512u32; @@ -187,6 +191,8 @@ where /// blocks using the pipeline. Otherwise, the engine, sync controller, and blockchain tree will /// be used to download and execute the missing blocks. pipeline_run_threshold: u64, + /// Controls pruning triggered by engine updates. 
+ prune: Option, } impl BeaconConsensusEngine @@ -213,7 +219,8 @@ where payload_builder: PayloadBuilderHandle, target: Option, pipeline_run_threshold: u64, - ) -> Result<(Self, BeaconConsensusEngineHandle), reth_interfaces::Error> { + pruner: Option, + ) -> Result<(Self, BeaconConsensusEngineHandle), Error> { let (to_engine, rx) = mpsc::unbounded_channel(); Self::with_channel( client, @@ -228,6 +235,7 @@ where pipeline_run_threshold, to_engine, rx, + pruner, ) } @@ -257,15 +265,17 @@ where pipeline_run_threshold: u64, to_engine: UnboundedSender, rx: UnboundedReceiver, - ) -> Result<(Self, BeaconConsensusEngineHandle), reth_interfaces::Error> { + pruner: Option, + ) -> Result<(Self, BeaconConsensusEngineHandle), Error> { let handle = BeaconConsensusEngineHandle { to_engine }; let sync = EngineSyncController::new( pipeline, client, - task_spawner, + task_spawner.clone(), run_pipeline_continuously, max_block, ); + let prune = pruner.map(|pruner| EnginePruneController::new(pruner, task_spawner)); let mut this = Self { sync, blockchain, @@ -278,6 +288,7 @@ where invalid_headers: InvalidHeaderCache::new(MAX_INVALID_HEADERS), metrics: EngineMetrics::default(), pipeline_run_threshold, + prune, }; let maybe_pipeline_target = match target { @@ -304,7 +315,7 @@ where /// # Returns /// /// A target block hash if the pipeline is inconsistent, otherwise `None`. - fn check_pipeline_consistency(&self) -> Result, reth_interfaces::Error> { + fn check_pipeline_consistency(&self) -> Result, Error> { // If no target was provided, check if the stages are congruent - check if the // checkpoint of the last stage matches the checkpoint of the first. let first_stage_checkpoint = self @@ -532,7 +543,7 @@ where &mut self, state: ForkchoiceState, attrs: Option, - tx: oneshot::Sender>, + tx: oneshot::Sender>, ) -> bool { self.metrics.forkchoice_updated_messages.increment(1); self.blockchain.on_forkchoice_update_received(&state); @@ -583,7 +594,7 @@ where &mut self, state: ForkchoiceState, attrs: Option, - ) -> Result { + ) -> Result { trace!(target: "consensus::engine", ?state, "Received new forkchoice state update"); if state.head_block_hash.is_zero() { return Ok(OnForkChoiceUpdated::invalid_state()) @@ -602,6 +613,17 @@ where return Ok(OnForkChoiceUpdated::syncing()) } + if self.is_prune_active() { + // We can only process new forkchoice updates if the pruner is idle, since it requires + // exclusive access to the database + warn!( + target: "consensus::engine", + "Pruning is in progress, skipping forkchoice update. \ + This may affect the performance of your node as a validator." 
+ ); + return Ok(OnForkChoiceUpdated::syncing()) + } + let status = match self.blockchain.make_canonical(&state.head_block_hash) { Ok(outcome) => { if !outcome.is_already_canonical() { @@ -654,7 +676,7 @@ where &self, head: SealedHeader, update: &ForkchoiceState, - ) -> Result<(), reth_interfaces::Error> { + ) -> Result<(), Error> { let mut head_block = Head { number: head.number, hash: head.hash, @@ -899,11 +921,14 @@ where return Ok(status) } - let res = if self.sync.is_pipeline_idle() { - // we can only insert new payloads if the pipeline is _not_ running, because it holds - // exclusive access to the database + let res = if self.sync.is_pipeline_idle() && self.is_prune_idle() { + // we can only insert new payloads if the pipeline and the pruner are _not_ running, + // because they hold exclusive access to the database self.try_insert_new_payload(block) } else { + if self.is_prune_active() { + warn!(target: "consensus::engine", "Pruning is in progress, buffering new payload."); + } self.try_buffer_payload(block) }; @@ -964,12 +989,12 @@ where Ok(block) } - /// When the pipeline is actively syncing the tree is unable to commit any additional blocks - /// since the pipeline holds exclusive access to the database. + /// When the pipeline or the pruner is active, the tree is unable to commit any additional + /// blocks since the pipeline holds exclusive access to the database. /// /// In this scenario we buffer the payload in the tree if the payload is valid, once the - /// pipeline finished syncing the tree is then able to also use the buffered payloads to commit - /// to a (newer) canonical chain. + /// pipeline or pruner is finished, the tree is then able to also use the buffered payloads to + /// commit to a (newer) canonical chain. /// /// This will return `SYNCING` if the block was buffered successfully, and an error if an error /// occurred while buffering the block. @@ -984,7 +1009,7 @@ where /// Attempts to insert a new payload into the tree. /// - /// Caution: This expects that the pipeline is idle. + /// Caution: This expects that the pipeline and the pruner are idle. #[instrument(level = "trace", skip_all, target = "consensus::engine", ret)] fn try_insert_new_payload( &mut self, @@ -1063,14 +1088,11 @@ where /// /// If the given block is missing from the database, this will return `false`. Otherwise, `true` /// is returned: the database contains the hash and the tree was updated. - fn update_tree_on_finished_pipeline( - &mut self, - block_hash: H256, - ) -> Result { + fn update_tree_on_finished_pipeline(&mut self, block_hash: H256) -> Result { let synced_to_finalized = match self.blockchain.block_number(block_hash)? { Some(number) => { // Attempt to restore the tree. - self.blockchain.restore_canonical_hashes(number)?; + self.blockchain.restore_canonical_hashes_and_finalize(number)?; true } None => false, @@ -1078,6 +1100,14 @@ where Ok(synced_to_finalized) } + /// Attempt to restore the tree. + /// + /// This is invoked after a pruner run to update the tree with the most recent canonical + /// hashes. + fn update_tree_on_finished_pruner(&mut self) -> Result<(), Error> { + self.blockchain.restore_canonical_hashes() + } + /// Invoked if we successfully downloaded a new block from the network. /// /// This will attempt to insert the block into the tree. @@ -1226,9 +1256,7 @@ where // it's part of the canonical chain: if it's the safe or the finalized block if matches!( err, - reth_interfaces::Error::Execution( - BlockExecutionError::BlockHashNotFoundInChain { .. 
} - ) + Error::Execution(BlockExecutionError::BlockHashNotFoundInChain { .. }) ) { // if the inserted block is the currently targeted `finalized` or `safe` // block, we will attempt to make them canonical, @@ -1250,9 +1278,9 @@ where /// This returns a result to indicate whether the engine future should resolve (fatal error). fn on_sync_event( &mut self, - ev: EngineSyncEvent, + event: EngineSyncEvent, ) -> Option> { - match ev { + match event { EngineSyncEvent::FetchedFullBlock(block) => { self.on_downloaded_block(block); } @@ -1416,6 +1444,55 @@ where None } + + /// Event handler for events emitted by the [EnginePruneController]. + /// + /// This returns a result to indicate whether the engine future should resolve (fatal error). + fn on_prune_event( + &mut self, + event: EnginePruneEvent, + ) -> Option> { + match event { + EnginePruneEvent::NotReady => {} + EnginePruneEvent::Started(tip_block_number) => { + trace!(target: "consensus::engine", %tip_block_number, "Pruner started"); + self.metrics.pruner_runs.increment(1); + } + EnginePruneEvent::TaskDropped => { + error!(target: "consensus::engine", "Failed to receive spawned pruner"); + return Some(Err(BeaconConsensusEngineError::PrunerChannelClosed)) + } + EnginePruneEvent::Finished { result } => { + trace!(target: "consensus::engine", ?result, "Pruner finished"); + match result { + Ok(_) => { + // Update the state and hashes of the blockchain tree if possible. + match self.update_tree_on_finished_pruner() { + Ok(()) => {} + Err(error) => { + error!(target: "consensus::engine", ?error, "Error restoring blockchain tree state"); + return Some(Err(error.into())) + } + }; + } + // Any pruner error at this point is fatal. + Err(error) => return Some(Err(error.into())), + }; + } + }; + + None + } + + /// Returns `true` if the prune controller's pruner is idle. + fn is_prune_idle(&self) -> bool { + self.prune.as_ref().map(|prune| prune.is_pruner_idle()).unwrap_or(true) + } + + /// Returns `true` if the prune controller's pruner is active. + fn is_prune_active(&self) -> bool { + !self.is_prune_idle() + } } /// On initialization, the consensus engine will poll the message receiver and return @@ -1446,6 +1523,7 @@ where // SyncController, hence they are polled first, and they're also time sensitive. 
loop { let mut engine_messages_pending = false; + let mut sync_pending = false; // handle next engine message match this.engine_message_rx.poll_next_unpin(cx) { @@ -1484,10 +1562,28 @@ where } } Poll::Pending => { - if engine_messages_pending { - // both the sync and the engine message receiver are pending - return Poll::Pending + // no more sync events to process + sync_pending = true; + } + } + + // check prune events if pipeline is idle AND (pruning is running and we need to + // prioritize checking its events OR no engine and sync messages are pending and we may + // start pruning) + if this.sync.is_pipeline_idle() && + (this.is_prune_active() || engine_messages_pending & sync_pending) + { + if let Some(ref mut prune) = this.prune { + match prune.poll(cx, this.blockchain.canonical_tip().number) { + Poll::Ready(prune_event) => { + if let Some(res) = this.on_prune_event(prune_event) { + return Poll::Ready(res) + } + } + Poll::Pending => return Poll::Pending, } + } else { + return Poll::Pending } } } @@ -1680,6 +1776,9 @@ mod tests { let shareable_db = ProviderFactory::new(db.clone(), self.chain_spec.clone()); let latest = self.chain_spec.genesis_header().seal_slow(); let blockchain_provider = BlockchainProvider::with_latest(shareable_db, tree, latest); + + let pruner = Pruner::new(5, 0); + let (mut engine, handle) = BeaconConsensusEngine::new( NoopFullBlockClient::default(), pipeline, @@ -1691,6 +1790,7 @@ mod tests { payload_builder, None, self.pipeline_run_threshold.unwrap_or(MIN_BLOCKS_FOR_PIPELINE_RUN), + Some(pruner), ) .expect("failed to create consensus engine"); @@ -1767,21 +1867,41 @@ mod tests { std::thread::sleep(Duration::from_millis(100)); assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); - // consensus engine is still idle + // consensus engine is still idle because no FCUs were received let _ = env.send_new_payload(SealedBlock::default().into()).await; assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); - // consensus engine receives a forkchoice state and triggers the pipeline + // consensus engine is still idle because pruning is running let _ = env .send_forkchoice_updated(ForkchoiceState { head_block_hash: H256::random(), ..Default::default() }) .await; - assert_matches!( - rx.await, - Ok(Err(BeaconConsensusEngineError::Pipeline(n))) if matches!(*n.as_ref(),PipelineError::Stage(StageError::ChannelClosed)) - ); + assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); + + // consensus engine receives a forkchoice state and triggers the pipeline when pruning is + // finished + loop { + match rx.try_recv() { + Ok(result) => { + assert_matches!( + result, + Err(BeaconConsensusEngineError::Pipeline(n)) if matches!(*n.as_ref(), PipelineError::Stage(StageError::ChannelClosed)) + ); + break + } + Err(TryRecvError::Empty) => { + let _ = env + .send_forkchoice_updated(ForkchoiceState { + head_block_hash: H256::random(), + ..Default::default() + }) + .await; + } + Err(err) => panic!("receive error: {err}"), + } + } } // Test that the consensus engine runs the pipeline again if the tree cannot be restored. diff --git a/crates/consensus/beacon/src/engine/prune.rs b/crates/consensus/beacon/src/engine/prune.rs new file mode 100644 index 000000000000..855456f3543b --- /dev/null +++ b/crates/consensus/beacon/src/engine/prune.rs @@ -0,0 +1,146 @@ +//! Prune management for the engine implementation. 
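The poll loop above only reaches the pruner under a specific gate. A minimal standalone sketch of that predicate, with an assumed function name; the logic mirrors the loop's condition:

// Illustrative sketch (assumed name): the pruner is polled only while the
// pipeline is idle, and either a prune run is already in flight (so its events
// must be drained) or both other event sources reported Pending (so a new run
// may safely be started).
fn should_poll_pruner(
    pipeline_idle: bool,
    prune_active: bool,
    engine_messages_pending: bool,
    sync_pending: bool,
) -> bool {
    pipeline_idle && (prune_active || (engine_messages_pending && sync_pending))
}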
+ +use futures::FutureExt; +use reth_primitives::BlockNumber; +use reth_prune::{Pruner, PrunerError, PrunerWithResult}; +use reth_tasks::TaskSpawner; +use std::task::{ready, Context, Poll}; +use tokio::sync::oneshot; + +/// Manages pruning under the control of the engine. +/// +/// This type controls the [Pruner]. +pub(crate) struct EnginePruneController { + /// The current state of the pruner. + pruner_state: PrunerState, + /// The type that can spawn the pruner task. + pruner_task_spawner: Box, +} + +impl EnginePruneController { + /// Create a new instance + pub(crate) fn new(pruner: Pruner, pruner_task_spawner: Box) -> Self { + Self { pruner_state: PrunerState::Idle(Some(pruner)), pruner_task_spawner } + } + + /// Returns `true` if the pruner is idle. + pub(crate) fn is_pruner_idle(&self) -> bool { + self.pruner_state.is_idle() + } + + /// Advances the pruner state. + /// + /// This checks for the result in the channel, or returns pending if the pruner is idle. + fn poll_pruner(&mut self, cx: &mut Context<'_>) -> Poll { + let res = match self.pruner_state { + PrunerState::Idle(_) => return Poll::Pending, + PrunerState::Running(ref mut fut) => { + ready!(fut.poll_unpin(cx)) + } + }; + let ev = match res { + Ok((pruner, result)) => { + self.pruner_state = PrunerState::Idle(Some(pruner)); + EnginePruneEvent::Finished { result } + } + Err(_) => { + // failed to receive the pruner + EnginePruneEvent::TaskDropped + } + }; + Poll::Ready(ev) + } + + /// This will try to spawn the pruner if it is idle: + /// 1. Check if pruning is needed through [Pruner::is_pruning_needed]. + /// 2a. If pruning is needed, pass tip block number to the [Pruner::run] and spawn it in a + /// separate task. Set pruner state to [PrunerState::Running]. + /// 2b. If pruning is not needed, set pruner state back to [PrunerState::Idle]. + /// + /// If pruner is already running, do nothing. + fn try_spawn_pruner(&mut self, tip_block_number: BlockNumber) -> Option { + match &mut self.pruner_state { + PrunerState::Idle(pruner) => { + let mut pruner = pruner.take()?; + + // Check tip for pruning + if pruner.is_pruning_needed(tip_block_number) { + let (tx, rx) = oneshot::channel(); + self.pruner_task_spawner.spawn_critical_blocking( + "pruner task", + Box::pin(async move { + let result = pruner.run(tip_block_number); + let _ = tx.send((pruner, result)); + }), + ); + self.pruner_state = PrunerState::Running(rx); + + Some(EnginePruneEvent::Started(tip_block_number)) + } else { + self.pruner_state = PrunerState::Idle(Some(pruner)); + Some(EnginePruneEvent::NotReady) + } + } + PrunerState::Running(_) => None, + } + } + + /// Advances the prune process with the tip block number. + pub(crate) fn poll( + &mut self, + cx: &mut Context<'_>, + tip_block_number: BlockNumber, + ) -> Poll { + // Try to spawn a pruner + match self.try_spawn_pruner(tip_block_number) { + Some(EnginePruneEvent::NotReady) => return Poll::Pending, + Some(event) => return Poll::Ready(event), + None => (), + } + + // Poll pruner and check its status + self.poll_pruner(cx) + } +} + +/// The event type emitted by the [EnginePruneController]. +#[derive(Debug)] +pub(crate) enum EnginePruneEvent { + /// Pruner is not ready + NotReady, + /// Pruner started with tip block number + Started(BlockNumber), + /// Pruner finished + /// + /// If this is returned, the pruner is idle. + Finished { + /// Final result of the pruner run. + result: Result<(), PrunerError>, + }, + /// Pruner task was dropped after it was started, unable to receive it because channel + /// closed. 
This would indicate a panicked pruner task
+    TaskDropped,
+}
+
+/// The possible pruner states within the prune controller.
+///
+/// [PrunerState::Idle] means that the pruner is currently idle.
+/// [PrunerState::Running] means that the pruner is currently running.
+///
+/// NOTE: The differentiation between these two states is important, because when the pruner is
+/// running, it acquires the write lock over the database. This means that we cannot forward to the
+/// blockchain tree any messages that would result in database writes, since it would result in a
+/// deadlock.
+enum PrunerState {
+    /// Pruner is idle.
+    Idle(Option<Pruner>),
+    /// Pruner is running and waiting for a response
+    Running(oneshot::Receiver<PrunerWithResult>),
+}
+
+impl PrunerState {
+    /// Returns `true` if the state matches idle.
+    fn is_idle(&self) -> bool {
+        matches!(self, PrunerState::Idle(_))
+    }
+}
diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs
index b422583f2dbb..02da63d5aec1 100644
--- a/crates/consensus/beacon/src/engine/sync.rs
+++ b/crates/consensus/beacon/src/engine/sync.rs
@@ -166,9 +166,9 @@ where
             return false
         }
         trace!(
-            target: "consensus::engine",
+            target: "consensus::engine::sync",
             ?hash,
-            "start downloading full block."
+            "Start downloading full block"
         );
         let request = self.full_block_client.get_full_block(hash);
         self.inflight_full_block_requests.push(request);
@@ -191,10 +191,10 @@ where
             self.max_block.map(|target| progress >= target).unwrap_or_default();
         if has_reached_max_block {
             trace!(
-                target: "consensus::engine",
+                target: "consensus::engine::sync",
                 ?progress,
                 max_block = ?self.max_block,
-                "Consensus engine reached max block."
+                "Consensus engine reached max block"
             );
         }
         has_reached_max_block
diff --git a/crates/interfaces/src/blockchain_tree/mod.rs b/crates/interfaces/src/blockchain_tree/mod.rs
index 63f628c49df7..ce844ee0b376 100644
--- a/crates/interfaces/src/blockchain_tree/mod.rs
+++ b/crates/interfaces/src/blockchain_tree/mod.rs
@@ -62,7 +62,17 @@ pub trait BlockchainTreeEngine: BlockchainTreeViewer + Send + Sync {
     ///
     /// This finalizes `last_finalized_block` prior to reading the canonical hashes (using
     /// [`BlockchainTreeEngine::finalize_block`]).
-    fn restore_canonical_hashes(&self, last_finalized_block: BlockNumber) -> Result<(), Error>;
+    fn restore_canonical_hashes_and_finalize(
+        &self,
+        last_finalized_block: BlockNumber,
+    ) -> Result<(), Error>;
+
+    /// Reads the last `N` canonical hashes from the database and updates the block indices of the
+    /// tree.
+    ///
+    /// `N` is the `max_reorg_depth` plus the number of block hashes needed to satisfy the
+    /// `BLOCKHASH` opcode in the EVM.
+    fn restore_canonical_hashes(&self) -> Result<(), Error>;
 
     /// Make a block and its parent chain part of the canonical chain by committing it to the
     /// database.
diff --git a/crates/primitives/src/stage/checkpoints.rs b/crates/primitives/src/stage/checkpoints.rs
index c6de28ff63d4..4d7311dd06db 100644
--- a/crates/primitives/src/stage/checkpoints.rs
+++ b/crates/primitives/src/stage/checkpoints.rs
@@ -220,7 +220,7 @@ impl StageCheckpoint {
     /// Get the underlying [`EntitiesCheckpoint`], if any, to determine the number of entities
     /// processed, and the number of total entities to process.
    pub fn entities(&self) -> Option<EntitiesCheckpoint> {
-        let Some(stage_checkpoint) = self.stage_checkpoint else { return None };
+        let stage_checkpoint = self.stage_checkpoint?;
 
         match stage_checkpoint {
             StageUnitCheckpoint::Account(AccountHashingCheckpoint {
diff --git a/crates/prune/Cargo.toml b/crates/prune/Cargo.toml
new file mode 100644
index 000000000000..56b0c49a017a
--- /dev/null
+++ b/crates/prune/Cargo.toml
@@ -0,0 +1,20 @@
+[package]
+name = "reth-prune"
+version.workspace = true
+edition.workspace = true
+rust-version.workspace = true
+license.workspace = true
+homepage.workspace = true
+repository.workspace = true
+description = """
+Pruning implementation
+"""
+
+[dependencies]
+# reth
+reth-primitives = { workspace = true }
+
+# misc
+tracing = { workspace = true }
+thiserror = { workspace = true }
+
diff --git a/crates/prune/src/error.rs b/crates/prune/src/error.rs
new file mode 100644
index 000000000000..96f0a25b7d72
--- /dev/null
+++ b/crates/prune/src/error.rs
@@ -0,0 +1,4 @@
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+pub enum PrunerError {}
diff --git a/crates/prune/src/lib.rs b/crates/prune/src/lib.rs
new file mode 100644
index 000000000000..6d133030fcd7
--- /dev/null
+++ b/crates/prune/src/lib.rs
@@ -0,0 +1,5 @@
+mod error;
+mod pruner;
+
+pub use error::PrunerError;
+pub use pruner::{Pruner, PrunerResult, PrunerWithResult};
diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs
new file mode 100644
index 000000000000..9f3a60b86cc3
--- /dev/null
+++ b/crates/prune/src/pruner.rs
@@ -0,0 +1,83 @@
+//! Support for pruning.
+
+use crate::PrunerError;
+use reth_primitives::BlockNumber;
+use tracing::debug;
+
+/// Result of [Pruner::run] execution
+pub type PrunerResult = Result<(), PrunerError>;
+
+/// The pruner type itself with the result of [Pruner::run]
+pub type PrunerWithResult = (Pruner, PrunerResult);
+
+/// Pruning routine. Main pruning logic happens in [Pruner::run].
+pub struct Pruner {
+    /// Minimum pruning interval measured in blocks. All prune parts are checked and, if needed,
+    /// pruned, when the chain advances by the specified number of blocks.
+    min_block_interval: u64,
+    /// Maximum prune depth. Used to determine the pruning target for parts that are needed during
+    /// the reorg, e.g. changesets.
+    #[allow(dead_code)]
+    max_prune_depth: u64,
+    /// Last pruned block number. Used in conjunction with `min_block_interval` to determine
+    /// when the pruning needs to be initiated.
+    last_pruned_block_number: Option<BlockNumber>,
+}
+
+impl Pruner {
+    /// Creates a new [Pruner].
+    pub fn new(min_block_interval: u64, max_prune_depth: u64) -> Self {
+        Self { min_block_interval, max_prune_depth, last_pruned_block_number: None }
+    }
+
+    /// Run the pruner
+    pub fn run(&mut self, tip_block_number: BlockNumber) -> PrunerResult {
+        // Pruning logic
+
+        self.last_pruned_block_number = Some(tip_block_number);
+        Ok(())
+    }
+
+    /// Returns `true` if the pruning is needed at the provided tip block number.
+    /// This is determined by the check against minimum pruning interval and last pruned block
+    /// number.
+    pub fn is_pruning_needed(&self, tip_block_number: BlockNumber) -> bool {
+        if self.last_pruned_block_number.map_or(true, |last_pruned_block_number| {
+            // Saturating subtraction is needed for the case when the chain was reverted, meaning
+            // current block number might be less than the previously pruned block number. If
+            // that's the case, no pruning is needed as outdated data is also reverted.
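+            // Worked example (illustrative comment, values assumed): with
+            // min_block_interval = 5, tip = 15 and last pruned = 10 gives
+            // 15 - 10 = 5 >= 5, so pruning runs; after a revert to tip = 8,
+            // 8.saturating_sub(10) = 0 < 5, so pruning is skipped.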
+ tip_block_number.saturating_sub(last_pruned_block_number) >= self.min_block_interval + }) { + debug!( + target: "pruner", + last_pruned_block_number = ?self.last_pruned_block_number, + %tip_block_number, + "Minimum pruning interval reached" + ); + true + } else { + false + } + } +} + +#[cfg(test)] +mod tests { + use crate::Pruner; + + #[test] + fn pruner_is_pruning_needed() { + let pruner = Pruner::new(5, 0); + + // No last pruned block number was set before + let first_block_number = 1; + assert!(pruner.is_pruning_needed(first_block_number)); + + // Delta is not less than min block interval + let second_block_number = first_block_number + pruner.min_block_interval; + assert!(pruner.is_pruning_needed(second_block_number)); + + // Delta is less than min block interval + let third_block_number = second_block_number; + assert!(pruner.is_pruning_needed(third_block_number)); + } +} diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 81c123323fa4..a7c3ecbe4fe4 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -594,8 +594,15 @@ where self.tree.finalize_block(finalized_block) } - fn restore_canonical_hashes(&self, last_finalized_block: BlockNumber) -> Result<()> { - self.tree.restore_canonical_hashes(last_finalized_block) + fn restore_canonical_hashes_and_finalize( + &self, + last_finalized_block: BlockNumber, + ) -> Result<()> { + self.tree.restore_canonical_hashes_and_finalize(last_finalized_block) + } + + fn restore_canonical_hashes(&self) -> Result<()> { + self.tree.restore_canonical_hashes() } fn make_canonical(&self, block_hash: &BlockHash) -> Result { From 6799fc3600601d4aadd73ea808734d8bd7f11cda Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 12 Jul 2023 10:03:40 -0400 Subject: [PATCH 145/722] fix: use engine responses to progress autoseal mining task (#3727) Co-authored-by: Matthias Seitz --- crates/consensus/auto-seal/src/task.rs | 63 ++++++++++--------- crates/consensus/beacon/src/engine/message.rs | 2 +- crates/consensus/beacon/src/engine/mod.rs | 1 + 3 files changed, 37 insertions(+), 29 deletions(-) diff --git a/crates/consensus/auto-seal/src/task.rs b/crates/consensus/auto-seal/src/task.rs index 06f637d38905..5217661c4cd6 100644 --- a/crates/consensus/auto-seal/src/task.rs +++ b/crates/consensus/auto-seal/src/task.rs @@ -1,12 +1,10 @@ use crate::{mode::MiningMode, Storage}; -use futures_util::{future::BoxFuture, FutureExt, StreamExt}; -use reth_beacon_consensus::BeaconEngineMessage; +use futures_util::{future::BoxFuture, FutureExt}; +use reth_beacon_consensus::{BeaconEngineMessage, ForkchoiceStatus}; use reth_interfaces::consensus::ForkchoiceState; use reth_primitives::{ constants::{EMPTY_RECEIPTS, EMPTY_TRANSACTIONS, ETHEREUM_BLOCK_GAS_LIMIT}, - proofs, - stage::StageId, - Block, BlockBody, ChainSpec, Header, IntoRecoveredTransaction, ReceiptWithBloom, + proofs, Block, BlockBody, ChainSpec, Header, IntoRecoveredTransaction, ReceiptWithBloom, SealedBlockWithSenders, EMPTY_OMMER_ROOT, U256, }; use reth_provider::{CanonChainTracker, CanonStateNotificationSender, Chain, StateProviderFactory}; @@ -26,7 +24,7 @@ use std::{ }; use tokio::sync::{mpsc::UnboundedSender, oneshot}; use tokio_stream::wrappers::UnboundedReceiverStream; -use tracing::{debug, trace, warn}; +use tracing::{debug, error, trace, warn}; /// A Future that listens for new ready transactions and puts new blocks into storage pub struct MiningTask { @@ 
-117,7 +115,7 @@ where let client = this.client.clone(); let chain_spec = Arc::clone(&this.chain_spec); let pool = this.pool.clone(); - let mut events = this.pipe_line_events.take(); + let events = this.pipe_line_events.take(); let canon_state_notification = this.canon_state_notification.clone(); // Create the mining future that creates a block, notifies the engine that drives @@ -226,29 +224,38 @@ where }; drop(storage); - // send the new update to the engine, this will trigger the pipeline to - // download the block, execute it and store it in the database. - let (tx, _rx) = oneshot::channel(); - let _ = to_engine.send(BeaconEngineMessage::ForkchoiceUpdated { - state, - payload_attrs: None, - tx, - }); - debug!(target: "consensus::auto", ?state, "sent fork choice update"); - - // wait for the pipeline to finish - if let Some(events) = events.as_mut() { - debug!(target: "consensus::auto", "waiting for finish stage event..."); - // wait for the finish stage to - loop { - if let Some(PipelineEvent::Running { stage_id, .. }) = - events.next().await - { - if stage_id == StageId::Finish { - debug!(target: "consensus::auto", "received finish stage event"); - break + // TODO: make this a future + // await the fcu call rx for SYNCING, then wait for a VALID response + loop { + // send the new update to the engine, this will trigger the engine + // to download and execute the block we just inserted + let (tx, rx) = oneshot::channel(); + let _ = to_engine.send(BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs: None, + tx, + }); + debug!(target: "consensus::auto", ?state, "Sent fork choice update"); + + match rx.await.unwrap() { + Ok(fcu_response) => { + match fcu_response.forkchoice_status() { + ForkchoiceStatus::Valid => break, + ForkchoiceStatus::Invalid => { + error!(target: "consensus::auto", ?fcu_response, "Forkchoice update returned invalid response"); + return None + } + ForkchoiceStatus::Syncing => { + debug!(target: "consensus::auto", ?fcu_response, "Forkchoice update returned SYNCING, waiting for VALID"); + // wait for the next fork choice update + continue + } } } + Err(err) => { + error!(target: "consensus::auto", ?err, "Autoseal fork choice update failed"); + return None + } } } diff --git a/crates/consensus/beacon/src/engine/message.rs b/crates/consensus/beacon/src/engine/message.rs index 7223cb621f7d..2e5e542aab17 100644 --- a/crates/consensus/beacon/src/engine/message.rs +++ b/crates/consensus/beacon/src/engine/message.rs @@ -40,7 +40,7 @@ impl OnForkChoiceUpdated { } /// Returns the determined status of the received ForkchoiceState. 
- pub(crate) fn forkchoice_status(&self) -> ForkchoiceStatus { + pub fn forkchoice_status(&self) -> ForkchoiceStatus { self.forkchoice_status } diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 9403597278f1..f1f38cb0fb8a 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -67,6 +67,7 @@ mod handle; pub use handle::BeaconConsensusEngineHandle; mod forkchoice; +pub use forkchoice::ForkchoiceStatus; mod metrics; pub(crate) mod prune; pub(crate) mod sync; From 99240906a844ae7a8b6b6032fc3b0f64dce22f25 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 12 Jul 2023 10:12:33 -0400 Subject: [PATCH 146/722] fix: perform forkchoice update consistency checks (#3730) Co-authored-by: Matthias Seitz --- crates/blockchain-tree/src/shareable.rs | 5 + crates/consensus/beacon/src/engine/mod.rs | 112 ++++++++++++++++++- crates/interfaces/src/blockchain_tree/mod.rs | 3 + crates/storage/provider/src/providers/mod.rs | 4 + 4 files changed, 119 insertions(+), 5 deletions(-) diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index d99901dc8e08..ab191385ec9a 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -158,6 +158,11 @@ impl BlockchainTreeViewer self.tree.read().block_indices().canonical_tip() } + fn is_canonical(&self, hash: BlockHash) -> Result { + trace!(target: "blockchain_tree", ?hash, "Checking if block is canonical"); + self.tree.read().is_block_hash_canonical(&hash) + } + fn pending_blocks(&self) -> (BlockNumber, Vec) { trace!(target: "blockchain_tree", "Returning all pending blocks"); self.tree.read().block_indices().pending_blocks() diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index f1f38cb0fb8a..76a4abaa9123 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -631,12 +631,20 @@ where debug!(target: "consensus::engine", hash=?state.head_block_hash, number=outcome.header().number, "canonicalized new head"); // new VALID update that moved the canonical chain forward - let _ = self.update_canon_chain(outcome.header().clone(), &state); + let _ = self.update_head(outcome.header().clone()); } else { debug!(target: "consensus::engine", fcu_head_num=?outcome.header().number, current_head_num=?self.blockchain.canonical_tip().number, "Ignoring beacon update to old head"); } if let Some(attrs) = attrs { + // if we return early then we wouldn't perform these consistency checks, so we + // need to do them here, and should do them before we process any payload + // attributes + if let Some(invalid_fcu_response) = self.ensure_consistent_state(state)? { + trace!(target: "consensus::engine", ?state, head=?state.head_block_hash, "Forkchoice state is inconsistent, returning invalid response"); + return Ok(invalid_fcu_response) + } + // the CL requested to build a new payload on top of this new VALID head let payload_response = self.process_payload_attributes( attrs, @@ -662,22 +670,119 @@ where } }; + if let Some(invalid_fcu_response) = + self.ensure_consistent_state_with_status(state, &status)? 
+        {
+            trace!(target: "consensus::engine", ?status, ?state, "Forkchoice state is inconsistent, returning invalid response");
+            return Ok(invalid_fcu_response)
+        }
+
         trace!(target: "consensus::engine", ?status, ?state, "Returning forkchoice status");
         Ok(OnForkChoiceUpdated::valid(status))
     }
 
+    /// Ensures that the given forkchoice state is consistent, assuming the head block has been
+    /// made canonical. This takes a status as input, and will only perform consistency checks if
+    /// the input status is VALID.
+    ///
+    /// If the forkchoice state is consistent, this will return Ok(None). Otherwise, this will
+    /// return an instance of [OnForkChoiceUpdated] that is INVALID.
+    ///
+    /// This also updates the safe and finalized blocks in the [CanonChainTracker], if they are
+    /// consistent with the head block.
+    fn ensure_consistent_state_with_status(
+        &mut self,
+        state: ForkchoiceState,
+        status: &PayloadStatus,
+    ) -> Result<Option<OnForkChoiceUpdated>, reth_interfaces::Error> {
+        // We only perform consistency checks if the status is VALID because if the status is
+        // INVALID, we want to return the correct _type_ of error to the CL so we can properly
+        // describe the reason it is invalid. For example, it's possible that the status is invalid
+        // because the safe block has an invalid state root. In that case, we want to preserve the
+        // correct `latestValidHash`, instead of returning a generic "invalid state" error that
+        // does not contain a `latestValidHash`.
+        //
+        // We also should not perform these checks if the status is SYNCING, because in that case
+        // we likely do not have the finalized or safe blocks, and would return an incorrect
+        // INVALID status instead.
+        if status.is_valid() {
+            return self.ensure_consistent_state(state)
+        }
+
+        Ok(None)
+    }
+
+    /// Ensures that the given forkchoice state is consistent, assuming the head block has been
+    /// made canonical.
+    ///
+    /// If the forkchoice state is consistent, this will return Ok(None). Otherwise, this will
+    /// return an instance of [OnForkChoiceUpdated] that is INVALID.
+    ///
+    /// This also updates the safe and finalized blocks in the [CanonChainTracker], if they are
+    /// consistent with the head block.
+    fn ensure_consistent_state(
+        &mut self,
+        state: ForkchoiceState,
+    ) -> Result<Option<OnForkChoiceUpdated>, reth_interfaces::Error> {
+        // Ensure that the finalized block, if not zero, is known and in the canonical chain
+        // after the head block is canonicalized.
+        //
+        // This ensures that the finalized block is consistent with the head block, i.e. the
+        // finalized block is an ancestor of the head block.
+        if !state.finalized_block_hash.is_zero() &&
+            !self.blockchain.is_canonical(state.finalized_block_hash)?
+        {
+            return Ok(Some(OnForkChoiceUpdated::invalid_state()))
+        }
+
+        // Finalized block is consistent, so update it in the canon chain tracker.
+        self.update_finalized_block(state.finalized_block_hash)?;
+
+        // Also ensure that the safe block, if not zero, is known and in the canonical chain
+        // after the head block is canonicalized.
+        //
+        // This ensures that the safe block is consistent with the head block, i.e. the safe
+        // block is an ancestor of the head block.
+        if !state.safe_block_hash.is_zero() &&
+            !self.blockchain.is_canonical(state.safe_block_hash)?
+        {
+            return Ok(Some(OnForkChoiceUpdated::invalid_state()))
+        }
+
+        // Safe block is consistent, so update it in the canon chain tracker.
+        self.update_safe_block(state.safe_block_hash)?;
+
+        Ok(None)
+    }
+
     /// Sets the state of the canon chain tracker based on the given head.
///
    /// This expects the given head to be the new canonical head.
    ///
    /// Additionally, updates the head used for p2p handshakes.
    ///
-    /// This should be called before issuing a VALID forkchoice update.
+    /// This also updates the tracked safe and finalized blocks, and should be called before
+    /// returning a VALID forkchoice update response
     fn update_canon_chain(
         &self,
         head: SealedHeader,
         update: &ForkchoiceState,
     ) -> Result<(), Error> {
+        self.update_head(head)?;
+        self.update_finalized_block(update.finalized_block_hash)?;
+        self.update_safe_block(update.safe_block_hash)?;
+
+        Ok(())
+    }
+
+    /// Updates the state of the canon chain tracker based on the given head.
+    ///
+    /// This expects the given head to be the new canonical head.
+    /// Additionally, updates the head used for p2p handshakes.
+    ///
+    /// This should be called before returning a VALID forkchoice update response
+    #[inline]
+    fn update_head(&self, head: SealedHeader) -> Result<(), reth_interfaces::Error> {
         let mut head_block = Head {
             number: head.number,
             hash: head.hash,
         // we update the tracked header first
         self.blockchain.set_canonical_head(head);
 
-        self.update_finalized_block(update.finalized_block_hash)?;
-        self.update_safe_block(update.safe_block_hash)?;
-
         head_block.total_difficulty =
             self.blockchain.header_td_by_number(head_block.number)?.ok_or_else(|| {
                 Error::Provider(ProviderError::TotalDifficultyNotFound {
diff --git a/crates/interfaces/src/blockchain_tree/mod.rs b/crates/interfaces/src/blockchain_tree/mod.rs
index ce844ee0b376..8025c7901129 100644
--- a/crates/interfaces/src/blockchain_tree/mod.rs
+++ b/crates/interfaces/src/blockchain_tree/mod.rs
@@ -217,6 +217,9 @@ pub trait BlockchainTreeViewer: Send + Sync {
     /// Note: this could be the given `parent_hash` if it's already canonical.
     fn find_canonical_ancestor(&self, parent_hash: BlockHash) -> Option<BlockHash>;
 
+    /// Return whether or not the block is known and in the canonical chain.
+    fn is_canonical(&self, hash: BlockHash) -> Result<bool, Error>;
+
     /// Given the hash of a block, this checks the buffered blocks for the lowest ancestor in the
     /// buffer.
     ///
diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs
index a7c3ecbe4fe4..083f15f98874 100644
--- a/crates/storage/provider/src/providers/mod.rs
+++ b/crates/storage/provider/src/providers/mod.rs
@@ -655,6 +655,10 @@ where
         self.tree.canonical_tip()
     }
 
+    fn is_canonical(&self, hash: BlockHash) -> std::result::Result<bool, Error> {
+        self.tree.is_canonical(hash)
+    }
+
     fn pending_blocks(&self) -> (BlockNumber, Vec) {
         self.tree.pending_blocks()
     }

From 637506f17fdb98484bd20317d0978468beb7fcf5 Mon Sep 17 00:00:00 2001
From: Sabnock <24715302+Sabnock01@users.noreply.github.com>
Date: Wed, 12 Jul 2023 12:39:08 -0500
Subject: [PATCH 147/722] fix(consensus): call `finalize_block` when finalized
 block changes.
(#3731) --- crates/consensus/beacon/src/engine/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 76a4abaa9123..63d0a014d042 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -846,6 +846,7 @@ where .ok_or_else(|| { Error::Provider(ProviderError::UnknownBlockHash(finalized_block_hash)) })?; + self.blockchain.finalize_block(finalized.number); self.blockchain.set_finalized(finalized.header.seal(finalized_block_hash)); } Ok(()) From f0cf93e0f9f8ef675d2d5cd0e892c4d6fea2112b Mon Sep 17 00:00:00 2001 From: N Date: Wed, 12 Jul 2023 14:35:00 -0400 Subject: [PATCH 148/722] feat: complete vm and statediff tracers (#3529) Co-authored-by: N Co-authored-by: Matthias Seitz --- .../src/tracing/builder/mod.rs | 3 + .../src/tracing/builder/parity.rs | 173 ++++++++++++++++-- .../src/tracing/builder/walker.rs | 39 ++++ .../revm/revm-inspectors/src/tracing/mod.rs | 5 + .../revm/revm-inspectors/src/tracing/types.rs | 4 +- 5 files changed, 211 insertions(+), 13 deletions(-) create mode 100644 crates/revm/revm-inspectors/src/tracing/builder/walker.rs diff --git a/crates/revm/revm-inspectors/src/tracing/builder/mod.rs b/crates/revm/revm-inspectors/src/tracing/builder/mod.rs index 677ae88da5e0..e6e58d8c2d92 100644 --- a/crates/revm/revm-inspectors/src/tracing/builder/mod.rs +++ b/crates/revm/revm-inspectors/src/tracing/builder/mod.rs @@ -5,3 +5,6 @@ pub mod geth; /// Parity style trace builders for `trace_` namespace pub mod parity; + +/// Walker types used for traversing various callgraphs +mod walker; diff --git a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs index 3798d19f8697..ba73f6cef0b7 100644 --- a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs +++ b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs @@ -1,11 +1,16 @@ -use crate::tracing::{types::CallTraceNode, TracingInspectorConfig}; +use super::walker::CallTraceNodeWalkerBF; +use crate::tracing::{ + types::{CallTraceNode, CallTraceStep}, + TracingInspectorConfig, +}; use reth_primitives::{Address, U64}; use reth_rpc_types::{trace::parity::*, TransactionInfo}; use revm::{ db::DatabaseRef, - primitives::{AccountInfo, ExecutionResult, ResultAndState}, + interpreter::opcode, + primitives::{AccountInfo, ExecutionResult, ResultAndState, KECCAK_EMPTY}, }; -use std::collections::HashSet; +use std::collections::{HashSet, VecDeque}; /// A type for creating parity style traces /// @@ -14,6 +19,7 @@ use std::collections::HashSet; pub struct ParityTraceBuilder { /// Recorded trace nodes nodes: Vec, + /// How the traces were recorded _config: TracingInspectorConfig, } @@ -154,7 +160,18 @@ impl ParityTraceBuilder { DB: DatabaseRef, { let ResultAndState { result, state } = res; + + let breadth_first_addresses = if trace_types.contains(&TraceType::VmTrace) { + CallTraceNodeWalkerBF::new(&self.nodes) + .map(|node| node.trace.address) + .collect::>() + } else { + vec![] + }; + let mut trace_res = self.into_trace_results(result, trace_types); + + // check the state diff case if let Some(ref mut state_diff) = trace_res.state_diff { populate_account_balance_nonce_diffs( state_diff, @@ -162,6 +179,12 @@ impl ParityTraceBuilder { state.into_iter().map(|(addr, acc)| (addr, acc.info)), )?; } + + // check the vm trace case + if let Some(ref mut vm_trace) = trace_res.vm_trace { + populate_vm_trace_bytecodes(&db, vm_trace, 
breadth_first_addresses)?; + } + Ok(trace_res) } @@ -177,11 +200,8 @@ impl ParityTraceBuilder { let with_traces = trace_types.contains(&TraceType::Trace); let with_diff = trace_types.contains(&TraceType::StateDiff); - let vm_trace = if trace_types.contains(&TraceType::VmTrace) { - Some(vm_trace(&self.nodes)) - } else { - None - }; + let vm_trace = + if trace_types.contains(&TraceType::VmTrace) { Some(self.vm_trace()) } else { None }; let mut traces = Vec::with_capacity(if with_traces { self.nodes.len() } else { 0 }); let mut diff = StateDiff::default(); @@ -218,13 +238,142 @@ impl ParityTraceBuilder { pub fn into_transaction_traces(self) -> Vec { self.into_transaction_traces_iter().collect() } + + /// Creates a VM trace by walking over `CallTraceNode`s + /// + /// does not have the code fields filled in + pub fn vm_trace(&self) -> VmTrace { + match self.nodes.get(0) { + Some(current) => self.make_vm_trace(current), + None => VmTrace { code: Default::default(), ops: Vec::new() }, + } + } + + /// returns a VM trace without the code filled in + /// + /// iteratively creaters a VM trace by traversing an arena + fn make_vm_trace(&self, start: &CallTraceNode) -> VmTrace { + let mut child_idx_stack: Vec = Vec::with_capacity(self.nodes.len()); + let mut sub_stack: VecDeque> = VecDeque::with_capacity(self.nodes.len()); + + let mut current = start; + let mut child_idx: usize = 0; + + // finds the deepest nested calls of each call frame and fills them up bottom to top + let instructions = loop { + match current.children.get(child_idx) { + Some(child) => { + child_idx_stack.push(child_idx + 1); + + child_idx = 0; + current = self.nodes.get(*child).expect("there should be a child"); + } + None => { + let mut instructions: Vec = + Vec::with_capacity(current.trace.steps.len()); + + for step in ¤t.trace.steps { + let maybe_sub = match step.op.u8() { + opcode::CALL | + opcode::CALLCODE | + opcode::DELEGATECALL | + opcode::STATICCALL | + opcode::CREATE | + opcode::CREATE2 => { + sub_stack.pop_front().expect("there should be a sub trace") + } + _ => None, + }; + + instructions.push(Self::make_instruction(step, maybe_sub)); + } + + match current.parent { + Some(parent) => { + sub_stack.push_back(Some(VmTrace { + code: Default::default(), + ops: instructions, + })); + + child_idx = child_idx_stack.pop().expect("there should be a child idx"); + + current = self.nodes.get(parent).expect("there should be a parent"); + } + None => break instructions, + } + } + } + }; + + VmTrace { code: Default::default(), ops: instructions } + } + + /// Creates a VM instruction from a [CallTraceStep] and a [VmTrace] for the subcall if there is + /// one + fn make_instruction(step: &CallTraceStep, maybe_sub: Option) -> VmInstruction { + let maybe_storage = step.storage_change.map(|storage_change| StorageDelta { + key: storage_change.key, + val: storage_change.value, + }); + + let maybe_memory = match step.memory.len() { + 0 => None, + _ => { + Some(MemoryDelta { off: step.memory_size, data: step.memory.data().clone().into() }) + } + }; + + let maybe_execution = Some(VmExecutedOperation { + used: step.gas_cost, + push: step.new_stack.map(|new_stack| new_stack.into()), + mem: maybe_memory, + store: maybe_storage, + }); + + VmInstruction { + pc: step.pc, + cost: 0, // TODO: use op gas cost + ex: maybe_execution, + sub: maybe_sub, + } + } } -/// Construct the vmtrace for the entire callgraph -fn vm_trace(nodes: &[CallTraceNode]) -> VmTrace { - // TODO: populate vm trace +/// addresses are presorted via breadth first walk thru 
[CallTraceNode]s, this can be done by a +/// walker in [crate::tracing::builder::walker] +/// +/// iteratively fill the [VmTrace] code fields +pub(crate) fn populate_vm_trace_bytecodes( + db: &DB, + trace: &mut VmTrace, + breadth_first_addresses: I, +) -> Result<(), DB::Error> +where + DB: DatabaseRef, + I: IntoIterator, +{ + let mut stack: VecDeque<&mut VmTrace> = VecDeque::new(); + stack.push_back(trace); + + let mut addrs = breadth_first_addresses.into_iter(); + + while let Some(curr_ref) = stack.pop_front() { + for op in curr_ref.ops.iter_mut() { + if let Some(sub) = op.sub.as_mut() { + stack.push_back(sub); + } + } + + let addr = addrs.next().expect("there should be an address"); - VmTrace { code: nodes[0].trace.data.clone().into(), ops: vec![] } + let db_acc = db.basic(addr)?.unwrap_or_default(); + + let code_hash = if db_acc.code_hash != KECCAK_EMPTY { db_acc.code_hash } else { continue }; + + curr_ref.code = db.code_by_hash(code_hash)?.bytecode.into(); + } + + Ok(()) } /// Loops over all state accounts in the accounts diff that contains all accounts that are included diff --git a/crates/revm/revm-inspectors/src/tracing/builder/walker.rs b/crates/revm/revm-inspectors/src/tracing/builder/walker.rs new file mode 100644 index 000000000000..4d88a2af4684 --- /dev/null +++ b/crates/revm/revm-inspectors/src/tracing/builder/walker.rs @@ -0,0 +1,39 @@ +use crate::tracing::types::CallTraceNode; +use std::collections::VecDeque; + +/// Traverses Reths internal tracing structure breadth-first +/// +/// This is a lazy iterator +pub(crate) struct CallTraceNodeWalkerBF<'trace> { + /// the entire arena + nodes: &'trace Vec, + + /// holds indexes of nodes to visit as we traverse + queue: VecDeque, +} + +impl<'trace> CallTraceNodeWalkerBF<'trace> { + pub(crate) fn new(nodes: &'trace Vec) -> Self { + let mut queue = VecDeque::with_capacity(nodes.len()); + queue.push_back(0); + + Self { nodes, queue } + } +} + +impl<'trace> Iterator for CallTraceNodeWalkerBF<'trace> { + type Item = &'trace CallTraceNode; + + fn next(&mut self) -> Option { + match self.queue.pop_front() { + Some(idx) => { + let curr = self.nodes.get(idx).expect("there should be a node"); + + self.queue.extend(curr.children.iter()); + + Some(curr) + } + None => None, + } + } +} diff --git a/crates/revm/revm-inspectors/src/tracing/mod.rs b/crates/revm/revm-inspectors/src/tracing/mod.rs index bf3e746c90f9..53a26f765a3c 100644 --- a/crates/revm/revm-inspectors/src/tracing/mod.rs +++ b/crates/revm/revm-inspectors/src/tracing/mod.rs @@ -265,6 +265,7 @@ impl TracingInspector { op, contract: interp.contract.address, stack, + new_stack: None, memory, memory_size: interp.memory.len(), gas_remaining: self.gas_inspector.gas_remaining(), @@ -290,6 +291,10 @@ impl TracingInspector { self.step_stack.pop().expect("can't fill step without starting a step first"); let step = &mut self.traces.arena[trace_idx].trace.steps[step_idx]; + if interp.stack.len() > step.stack.len() { + step.new_stack = interp.stack.data().last().copied(); + } + if self.config.record_memory_snapshots { // resize memory so opcodes that allocated memory is correctly displayed if interp.memory.len() > step.memory.len() { diff --git a/crates/revm/revm-inspectors/src/tracing/types.rs b/crates/revm/revm-inspectors/src/tracing/types.rs index b66bd67261bc..548ae72b8789 100644 --- a/crates/revm/revm-inspectors/src/tracing/types.rs +++ b/crates/revm/revm-inspectors/src/tracing/types.rs @@ -463,11 +463,13 @@ pub(crate) struct CallTraceStep { pub(crate) contract: Address, /// Stack before step 
execution pub(crate) stack: Stack, + /// The new stack item placed by this step if any + pub(crate) new_stack: Option, /// All allocated memory in a step /// /// This will be empty if memory capture is disabled pub(crate) memory: Memory, - /// Size of memory + /// Size of memory at the beginning of the step pub(crate) memory_size: usize, /// Remaining gas before step execution pub(crate) gas_remaining: u64, From bebc2b433ff7200422014e68bf008ea4b5ade32b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 12 Jul 2023 20:54:50 +0200 Subject: [PATCH 149/722] fix: serialize selfdestruct as suicide (#3736) --- crates/rpc/rpc-types/src/eth/trace/parity.rs | 64 ++++++++++++++++++-- 1 file changed, 60 insertions(+), 4 deletions(-) diff --git a/crates/rpc/rpc-types/src/eth/trace/parity.rs b/crates/rpc/rpc-types/src/eth/trace/parity.rs index 3c4b0bc57c7b..1f9e73f5fed6 100644 --- a/crates/rpc/rpc-types/src/eth/trace/parity.rs +++ b/crates/rpc/rpc-types/src/eth/trace/parity.rs @@ -99,11 +99,38 @@ impl DerefMut for StateDiff { pub enum Action { Call(CallAction), Create(CreateAction), + /// Parity style traces never renamed suicide to selfdestruct: + /// + /// For compatibility reasons, this is serialized as `suicide`: + #[serde(rename = "suicide", alias = "selfdestruct")] Selfdestruct(SelfdestructAction), Reward(RewardAction), } +impl Action { + /// Returns true if this is a call action + pub fn is_call(&self) -> bool { + matches!(self, Action::Call(_)) + } + + /// Returns true if this is a create action + pub fn is_create(&self) -> bool { + matches!(self, Action::Call(_)) + } + + /// Returns true if this is a selfdestruct action + pub fn is_selfdestruct(&self) -> bool { + matches!(self, Action::Selfdestruct(_)) + } + /// Returns true if this is a reward action + pub fn is_reward(&self) -> bool { + matches!(self, Action::Reward(_)) + } +} + /// An external action type. +/// +/// Used as enum identifier for [Action] #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] pub enum ActionType { @@ -112,6 +139,7 @@ pub enum ActionType { /// Contract creation. Create, /// Contract suicide/selfdestruct. + #[serde(rename = "suicide", alias = "selfdestruct")] Selfdestruct, /// A block reward. 
Reward, @@ -318,8 +346,6 @@ mod tests { "to": "0x160f5f00288e9e1cc8655b327e081566e580a71d", "value": "0x244b" }, - "blockHash": "0xbca9ee244882bd00a19737a66f24002a4562a949c4d5ebd03c32e04111cff536", - "blockNumber": 17600209, "error": "Reverted", "result": { "gasUsed": "0x9daf", @@ -327,11 +353,41 @@ mod tests { }, "subtraces": 3, "traceAddress": [], - "transactionHash": "0x0e48a8d4419efaa2d3a9b8f625a1c559a4179fd19ddd10c02842965f3a7e7b63", - "transactionPosition": 0, "type": "call" }"#; let val = serde_json::from_str::(s).unwrap(); serde_json::to_value(val).unwrap(); } + + #[test] + fn test_selfdestruct_suicide() { + let input = r#"{ + "action": { + "address": "0x66e29f0b6b1b07071f2fde4345d512386cb66f5f", + "refundAddress": "0x66e29f0b6b1b07071f2fde4345d512386cb66f5f", + "balance": "0x244b" + }, + "error": "Reverted", + "result": { + "gasUsed": "0x9daf", + "output": "0x000000000000000000000000000000000000000000000000011c37937e080000" + }, + "subtraces": 3, + "traceAddress": [], + "type": "suicide" + }"#; + let val = serde_json::from_str::(input).unwrap(); + assert!(val.action.is_selfdestruct()); + + let json = serde_json::to_value(val.clone()).unwrap(); + let expect = serde_json::from_str::(input).unwrap(); + similar_asserts::assert_eq!(json, expect); + let s = serde_json::to_string(&val).unwrap(); + let json = serde_json::from_str::(&s).unwrap(); + similar_asserts::assert_eq!(json, expect); + + let input = input.replace("suicide", "selfdestruct"); + let val = serde_json::from_str::(&input).unwrap(); + assert!(val.action.is_selfdestruct()); + } } From 9e330f111dc8d70ccb4764d8099d0ac371bbec61 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 12 Jul 2023 22:26:51 +0100 Subject: [PATCH 150/722] chore(storage): transactions -> receipts in `receipts_by_block` (#3744) --- .../storage/provider/src/providers/database/provider.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 597909db4ce1..16f25d9792f7 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1090,12 +1090,12 @@ impl<'this, TX: DbTx<'this>> ReceiptProvider for DatabaseProvider<'this, TX> { return if tx_range.is_empty() { Ok(Some(Vec::new())) } else { - let mut tx_cursor = self.tx.cursor_read::()?; - let transactions = tx_cursor + let mut receipts_cursor = self.tx.cursor_read::()?; + let receipts = receipts_cursor .walk_range(tx_range)? 
- .map(|result| result.map(|(_, tx)| tx)) + .map(|result| result.map(|(_, receipt)| receipt)) .collect::, _>>()?; - Ok(Some(transactions)) + Ok(Some(receipts)) } } } From f6646aa4520df61ed03c4982a8d33e7b5dddce8a Mon Sep 17 00:00:00 2001 From: Max Wolff Date: Wed, 12 Jul 2023 15:51:43 -0700 Subject: [PATCH 151/722] #3667 Add Dial Success Metric (#3729) --- crates/net/network/src/metrics.rs | 8 ++++++++ crates/net/network/src/session/mod.rs | 13 +++++++++++++ 2 files changed, 21 insertions(+) diff --git a/crates/net/network/src/metrics.rs b/crates/net/network/src/metrics.rs index d7969da80af1..085b1f093b2d 100644 --- a/crates/net/network/src/metrics.rs +++ b/crates/net/network/src/metrics.rs @@ -45,6 +45,14 @@ pub struct NetworkMetrics { pub(crate) total_dropped_eth_requests_at_full_capacity: Counter, } +/// Metrics for SessionManager +#[derive(Metrics)] +#[metrics(scope = "network")] +pub struct SesssionManagerMetrics { + /// Number of dials that resulted in a peer being added to the peerset + pub(crate) total_dial_successes: Counter, +} + /// Metrics for the TransactionsManager #[derive(Metrics)] #[metrics(scope = "network")] diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index 7dda07cdd9c9..b0d628d48c42 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -1,6 +1,7 @@ //! Support for handling peer sessions. use crate::{ message::PeerMessage, + metrics::SesssionManagerMetrics, session::{ active::ActiveSession, config::SessionCounter, @@ -101,6 +102,8 @@ pub(crate) struct SessionManager { active_session_rx: ReceiverStream, /// Used to measure inbound & outbound bandwidth across all managed streams bandwidth_meter: BandwidthMeter, + /// Metrics for the session manager. + metrics: SesssionManagerMetrics, } // === impl SessionManager === @@ -137,6 +140,7 @@ impl SessionManager { active_session_tx: MeteredSender::new(active_session_tx, "network_active_session"), active_session_rx: ReceiverStream::new(active_session_rx), bandwidth_meter, + metrics: Default::default(), } } @@ -473,6 +477,10 @@ impl SessionManager { self.active_sessions.insert(peer_id, handle); self.counter.inc_active(&direction); + if direction.is_outgoing() { + self.metrics.total_dial_successes.increment(1); + } + Poll::Ready(SessionEvent::SessionEstablished { peer_id, remote_addr, @@ -695,6 +703,11 @@ impl Direction { pub(crate) fn is_incoming(&self) -> bool { matches!(self, Direction::Incoming) } + + /// Returns `true` if this an outgoing connection. 
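+    /// (the counterpart of `is_incoming` above; exactly one of the two is `true` for
+    /// any given `Direction` value)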
+ pub(crate) fn is_outgoing(&self) -> bool { + matches!(self, Direction::Outgoing(_)) + } } impl std::fmt::Display for Direction { From 8bfc3d093ed27ff00b3f03ebd898da1b782e0321 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 13 Jul 2023 15:56:41 +0200 Subject: [PATCH 152/722] test: ignore another flaky geth test (#3757) --- crates/net/network/tests/it/connect.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/net/network/tests/it/connect.rs b/crates/net/network/tests/it/connect.rs index 5669a5acdc90..b6156ea71884 100644 --- a/crates/net/network/tests/it/connect.rs +++ b/crates/net/network/tests/it/connect.rs @@ -409,6 +409,7 @@ async fn test_incoming_connect_with_single_geth() { #[tokio::test(flavor = "multi_thread")] #[serial_test::serial] #[cfg_attr(not(feature = "geth-tests"), ignore)] +#[ignore] // TODO: Re-enable once we figure out why this test is flakey async fn test_outgoing_connect_with_single_geth() { reth_tracing::init_test_tracing(); tokio::time::timeout(GETH_TIMEOUT, async move { @@ -455,6 +456,7 @@ async fn test_outgoing_connect_with_single_geth() { #[tokio::test(flavor = "multi_thread")] #[serial_test::serial] #[cfg_attr(not(feature = "geth-tests"), ignore)] +#[ignore] // TODO: Re-enable once we figure out why this test is flakey async fn test_geth_disconnect() { reth_tracing::init_test_tracing(); tokio::time::timeout(GETH_TIMEOUT, async move { From f3e13db71e764b74ac59ae678478d4c1b908c691 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 13 Jul 2023 17:09:39 +0200 Subject: [PATCH 153/722] chore: reorder call action fields (#3758) --- crates/rpc/rpc-types/src/eth/trace/parity.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/crates/rpc/rpc-types/src/eth/trace/parity.rs b/crates/rpc/rpc-types/src/eth/trace/parity.rs index 1f9e73f5fed6..7fbea1bbac87 100644 --- a/crates/rpc/rpc-types/src/eth/trace/parity.rs +++ b/crates/rpc/rpc-types/src/eth/trace/parity.rs @@ -168,16 +168,16 @@ pub enum CallType { pub struct CallAction { /// Address of the sending account. pub from: Address, - /// Address of the destination/target account. - pub to: Address, - /// Value transferred to the destination account. - pub value: U256, + /// The type of the call. + pub call_type: CallType, /// The gas available for executing the call. pub gas: U64, /// The input data provided to the call. pub input: Bytes, - /// The type of the call. - pub call_type: CallType, + /// Address of the destination/target account. + pub to: Address, + /// Value transferred to the destination account. + pub value: U256, } /// Represents a _create_ action, either a `CREATE` operation or a CREATE transaction. 
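The field reorder above is observable in RPC output: serde derives serialize struct fields in declaration order, so the JSON key order of a serialized `CallAction` changes with it. A minimal, self-contained sketch of that behavior (the `CallActionDemo` type and its values are illustrative stand-ins, not part of the patch):

    use serde::Serialize;

    #[derive(Serialize)]
    #[serde(rename_all = "camelCase")]
    struct CallActionDemo {
        from: &'static str,
        call_type: &'static str,
        gas: u64,
        input: &'static str,
        to: &'static str,
        value: u64,
    }

    fn main() {
        let action = CallActionDemo {
            from: "0x66e29f0b6b1b07071f2fde4345d512386cb66f5f",
            call_type: "call",
            gas: 21_000,
            input: "0x",
            to: "0x160f5f00288e9e1cc8655b327e081566e580a71d",
            value: 0,
        };
        // Keys come out in declaration order:
        // {"from":"0x66e2...","callType":"call","gas":21000,"input":"0x","to":"0x160f...","value":0}
        println!("{}", serde_json::to_string(&action).unwrap());
    }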
From 4c7f980c774c77db48abdb119028d6a94cf24952 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 13 Jul 2023 17:26:02 +0200 Subject: [PATCH 154/722] chore: add network example (#3753) --- Cargo.lock | 1 + Cargo.toml | 1 + crates/net/network/src/config.rs | 21 +++++++++++++++- crates/net/network/src/state.rs | 3 +++ examples/Cargo.toml | 31 ++++++++++++++---------- examples/network.rs | 41 ++++++++++++++++++++++++++++++++ 6 files changed, 84 insertions(+), 14 deletions(-) create mode 100644 examples/network.rs diff --git a/Cargo.lock b/Cargo.lock index 48cec8dd50fd..0ee5b47fcf76 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2162,6 +2162,7 @@ dependencies = [ "reth-beacon-consensus", "reth-blockchain-tree", "reth-db", + "reth-network", "reth-network-api", "reth-primitives", "reth-provider", diff --git a/Cargo.toml b/Cargo.toml index 94e6ca26fa33..c6b79a2c2164 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -101,6 +101,7 @@ reth-revm = { path = "./crates/revm" } reth-payload-builder = { path = "./crates/payload/builder" } reth-transaction-pool = { path = "./crates/transaction-pool" } reth-tasks = { path = "./crates/tasks" } +reth-network = { path = "./crates/net/network" } reth-network-api = { path = "./crates/net/network-api" } ## eth diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index dbfc570873a5..9da1eed9345a 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -11,7 +11,9 @@ use reth_discv4::{Discv4Config, Discv4ConfigBuilder, DEFAULT_DISCOVERY_PORT}; use reth_dns_discovery::DnsDiscoveryConfig; use reth_ecies::util::pk2id; use reth_eth_wire::{HelloMessage, Status}; -use reth_primitives::{ChainSpec, ForkFilter, Head, NodeRecord, PeerId, MAINNET}; +use reth_primitives::{ + mainnet_nodes, sepolia_nodes, ChainSpec, ForkFilter, Head, NodeRecord, PeerId, MAINNET, +}; use reth_provider::{BlockReader, HeaderProvider}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use secp256k1::SECP256K1; @@ -31,6 +33,9 @@ pub fn rng_secret_key() -> SecretKey { /// All network related initialization settings. pub struct NetworkConfig { /// The client type that can interact with the chain. + /// + /// This type is used to fetch the block number after we established a session and received the + /// [Status] block hash. pub client: C, /// The node's secret key, from which the node's identity is derived. pub secret_key: SecretKey, @@ -278,6 +283,16 @@ impl NetworkConfigBuilder { self } + /// Convenience function for setting [Self::boot_nodes] to the mainnet boot nodes. + pub fn mainnet_boot_nodes(self) -> Self { + self.boot_nodes(mainnet_nodes()) + } + + /// Convenience function for setting [Self::boot_nodes] to the sepolia boot nodes. + pub fn sepolia_boot_nodes(self) -> Self { + self.boot_nodes(sepolia_nodes()) + } + /// Sets the boot nodes. pub fn boot_nodes(mut self, nodes: impl IntoIterator) -> Self { self.boot_nodes = nodes.into_iter().collect(); @@ -330,6 +345,10 @@ impl NetworkConfigBuilder { /// Consumes the type and creates the actual [`NetworkConfig`] /// for the given client type that can interact with the chain. + /// + /// The given client is to be used for interacting with the chain, for example fetching the + /// corresponding block for a given block hash we receive from a peer in the status message when + /// establishing a connection. 
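+    ///
+    /// A minimal sketch (mirroring `examples/network.rs` below; `NoopProvider` is a
+    /// test-only client):
+    ///
+    /// ```ignore
+    /// let config = NetworkConfig::<NoopProvider>::builder(rng_secret_key())
+    ///     .mainnet_boot_nodes()
+    ///     .build(NoopProvider::default());
+    /// ```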
pub fn build(self, client: C) -> NetworkConfig { let peer_id = self.get_peer_id(); let Self { diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index 05f423a73843..104f263f4b71 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -51,6 +51,9 @@ pub struct NetworkState { /// Buffered messages until polled. queued_messages: VecDeque, /// The client type that can interact with the chain. + /// + /// This type is used to fetch the block number after we established a session and received the + /// [Status] block hash. client: C, /// Network discovery. discovery: Discovery, diff --git a/examples/Cargo.toml b/examples/Cargo.toml index c77a2d6879db..6114a5732777 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -6,25 +6,26 @@ edition.workspace = true license.workspace = true [dev-dependencies] -reth-primitives = { workspace = true } +reth-primitives.workspace = true -reth-db = { workspace = true } -reth-provider = { workspace = true } +reth-db.workspace = true +reth-provider.workspace = true -reth-rpc-builder = { workspace = true } -reth-rpc-types = { workspace = true } +reth-rpc-builder.workspace = true +reth-rpc-types.workspace = true -reth-revm = { workspace = true } -reth-blockchain-tree = { workspace = true } -reth-beacon-consensus = { workspace = true } -reth-network-api = { workspace = true } -reth-transaction-pool = { workspace = true } -reth-tasks = { workspace = true } +reth-revm.workspace = true +reth-blockchain-tree.workspace = true +reth-beacon-consensus.workspace = true +reth-network-api.workspace = true +reth-network.workspace = true +reth-transaction-pool.workspace = true +reth-tasks.workspace = true eyre = "0.6.8" -futures = "0.3.0" -tokio = { workspace = true } +futures.workspace = true +tokio.workspace = true [[example]] name = "rpc-db" @@ -33,3 +34,7 @@ path = "rpc-db.rs" [[example]] name = "db-access" path = "db-access.rs" + +[[example]] +name = "network" +path = "network.rs" diff --git a/examples/network.rs b/examples/network.rs new file mode 100644 index 000000000000..09a20f3118e8 --- /dev/null +++ b/examples/network.rs @@ -0,0 +1,41 @@ +//! Example of how to use the network as a standalone component +//! +//! Run with +//! +//! ```not_rust +//! cargo run --example network +//! ``` + +use futures::StreamExt; +use reth_network::{config::rng_secret_key, NetworkConfig, NetworkManager}; +use reth_provider::test_utils::NoopProvider; + +#[tokio::main] +async fn main() -> eyre::Result<()> { + // This block provider implementation is used for testing purposes. + let client = NoopProvider::default(); + + // The key that's used for encrypting sessions and to identify our node. 
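+    // (a fresh key is sampled on every run, so the node's `PeerId` changes across
+    // restarts; a long-lived node would typically persist and reload this key instead)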
+ let local_key = rng_secret_key(); + + // Configure the network + let config = + NetworkConfig::::builder(local_key).mainnet_boot_nodes().build(client); + + // create the network instance + let network = NetworkManager::new(config).await?; + + // get a handle to the network to interact with it + let handle = network.handle().clone(); + + // spawn the network + tokio::task::spawn(network); + + // interact with the network + let mut events = handle.event_listener(); + while let Some(event) = events.next().await { + println!("Received event: {:?}", event); + } + + Ok(()) +} From 7edab9748127c79e153fdea7da043aca9d638a0f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 13 Jul 2023 18:32:09 +0200 Subject: [PATCH 155/722] fix: poll logic when pipeline active (#3761) --- crates/consensus/beacon/src/engine/mod.rs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 63d0a014d042..e42bc3c71709 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1671,12 +1671,13 @@ where } } + // we're pending if both engine messages and sync events are pending (fully drained) + let is_pending = engine_messages_pending && sync_pending; + // check prune events if pipeline is idle AND (pruning is running and we need to // prioritize checking its events OR no engine and sync messages are pending and we may // start pruning) - if this.sync.is_pipeline_idle() && - (this.is_prune_active() || engine_messages_pending & sync_pending) - { + if this.sync.is_pipeline_idle() && (this.is_prune_active() || is_pending) { if let Some(ref mut prune) = this.prune { match prune.poll(cx, this.blockchain.canonical_tip().number) { Poll::Ready(prune_event) => { @@ -1684,12 +1685,16 @@ where return Poll::Ready(res) } } - Poll::Pending => return Poll::Pending, + Poll::Pending => {} } - } else { - return Poll::Pending } } + + if is_pending { + // incoming engine messages and sync events are drained, so we can yield back + // control + return Poll::Pending + } } } } From 99f00eb8c2a475aeb67cdee33263060ad9596c19 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 13 Jul 2023 20:22:01 +0200 Subject: [PATCH 156/722] fix: return null withdrawals (#3762) --- crates/rpc/rpc-api/src/engine.rs | 6 +++--- crates/rpc/rpc-engine-api/src/engine_api.rs | 14 +++++++------ .../rpc/rpc-types/src/eth/engine/payload.rs | 20 +++++++++++-------- 3 files changed, 23 insertions(+), 17 deletions(-) diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index be4825a5b6f0..756cb5473c7c 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -2,7 +2,7 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use reth_primitives::{Address, BlockHash, BlockId, BlockNumberOrTag, Bytes, H256, U256, U64}; use reth_rpc_types::{ engine::{ - ExecutionPayload, ExecutionPayloadBodies, ExecutionPayloadEnvelope, ForkchoiceState, + ExecutionPayload, ExecutionPayloadBodiesV1, ExecutionPayloadEnvelope, ForkchoiceState, ForkchoiceUpdated, PayloadAttributes, PayloadId, PayloadStatus, TransitionConfiguration, }, state::StateOverride, @@ -64,7 +64,7 @@ pub trait EngineApi { async fn get_payload_bodies_by_hash_v1( &self, block_hashes: Vec, - ) -> RpcResult; + ) -> RpcResult; /// See also /// @@ -83,7 +83,7 @@ pub trait EngineApi { &self, start: U64, count: U64, - ) -> RpcResult; + ) -> RpcResult; /// See also #[method(name = 
"exchangeTransitionConfigurationV1")] diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index f461f4928dcd..2a0581595f49 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -8,7 +8,7 @@ use reth_primitives::{BlockHash, BlockHashOrNumber, BlockNumber, ChainSpec, Hard use reth_provider::{BlockReader, EvmEnvProvider, HeaderProvider, StateProviderFactory}; use reth_rpc_api::EngineApiServer; use reth_rpc_types::engine::{ - ExecutionPayload, ExecutionPayloadBodies, ExecutionPayloadEnvelope, ForkchoiceUpdated, + ExecutionPayload, ExecutionPayloadBodiesV1, ExecutionPayloadEnvelope, ForkchoiceUpdated, PayloadAttributes, PayloadId, PayloadStatus, TransitionConfiguration, CAPABILITIES, }; use reth_tasks::TaskSpawner; @@ -183,7 +183,7 @@ where &self, start: BlockNumber, count: u64, - ) -> EngineApiResult { + ) -> EngineApiResult { let (tx, rx) = oneshot::channel(); let inner = self.inner.clone(); @@ -223,7 +223,7 @@ where pub fn get_payload_bodies_by_hash( &self, hashes: Vec, - ) -> EngineApiResult { + ) -> EngineApiResult { let len = hashes.len() as u64; if len > MAX_PAYLOAD_BODIES_LIMIT { return Err(EngineApiError::PayloadRequestTooLarge { len }) @@ -414,7 +414,7 @@ where async fn get_payload_bodies_by_hash_v1( &self, block_hashes: Vec, - ) -> RpcResult { + ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_getPayloadBodiesByHashV1"); Ok(EngineApi::get_payload_bodies_by_hash(self, block_hashes)?) } @@ -432,11 +432,13 @@ where /// Implementors should take care when acting on the input to this method, specifically /// ensuring that the range is limited properly, and that the range boundaries are computed /// correctly and without panics. + /// + /// Note: If a block is pre shanghai, `withdrawals` field will be `null async fn get_payload_bodies_by_range_v1( &self, start: U64, count: U64, - ) -> RpcResult { + ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_getPayloadBodiesByRangeV1"); Ok(EngineApi::get_payload_bodies_by_range(self, start.as_u64(), count.as_u64()).await?) } @@ -482,7 +484,7 @@ mod tests { let provider = Arc::new(MockEthProvider::default()); let payload_store = spawn_test_payload_service(); let (to_engine, engine_rx) = unbounded_channel(); - let task_executor = Box::new(TokioTaskExecutor::default()); + let task_executor = Box::::default(); let api = EngineApi::new( provider.clone(), chain_spec.clone(), diff --git a/crates/rpc/rpc-types/src/eth/engine/payload.rs b/crates/rpc/rpc-types/src/eth/engine/payload.rs index 5b4da25aab15..5b63e1b8cc5c 100644 --- a/crates/rpc/rpc-types/src/eth/engine/payload.rs +++ b/crates/rpc/rpc-types/src/eth/engine/payload.rs @@ -8,7 +8,7 @@ use reth_rlp::Decodable; use serde::{ser::SerializeMap, Deserialize, Serialize, Serializer}; /// The execution payload body response that allows for `null` values. -pub type ExecutionPayloadBodies = Vec>; +pub type ExecutionPayloadBodiesV1 = Vec>; /// And 8-byte identifier for an execution payload. #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize, Hash)] @@ -220,21 +220,25 @@ impl PayloadError { /// /// See also: #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct ExecutionPayloadBody { +pub struct ExecutionPayloadBodyV1 { + /// Enveloped encoded transactions. pub transactions: Vec, - pub withdrawals: Vec, + /// All withdrawals in the block. + /// + /// Will always be `None` if pre shanghai. 
+ pub withdrawals: Option>, } -impl From for ExecutionPayloadBody { +impl From for ExecutionPayloadBodyV1 { fn from(value: Block) -> Self { let transactions = value.body.into_iter().map(|tx| { let mut out = Vec::new(); tx.encode_enveloped(&mut out); out.into() }); - ExecutionPayloadBody { + ExecutionPayloadBodyV1 { transactions: transactions.collect(), - withdrawals: value.withdrawals.unwrap_or_default(), + withdrawals: value.withdrawals, } } } @@ -456,7 +460,7 @@ mod tests { let mut rng = generators::rng(); for block in random_block_range(&mut rng, 0..=99, H256::default(), 0..2) { let unsealed = block.clone().unseal(); - let payload_body: ExecutionPayloadBody = unsealed.into(); + let payload_body: ExecutionPayloadBodyV1 = unsealed.into(); assert_eq!( Ok(block.body), @@ -467,7 +471,7 @@ mod tests { .collect::, _>>(), ); - assert_eq!(block.withdrawals.unwrap_or_default(), payload_body.withdrawals); + assert_eq!(block.withdrawals, payload_body.withdrawals); } } From ac12cc47315b4c0c8012d30008a881f456d73c36 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 13 Jul 2023 14:46:56 -0400 Subject: [PATCH 157/722] chore: fix typo in RlpDecodableWrapper derive (#3763) --- crates/rlp/rlp-derive/src/de.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/rlp/rlp-derive/src/de.rs b/crates/rlp/rlp-derive/src/de.rs index 8ab670cafa7f..aab545fd86d3 100644 --- a/crates/rlp/rlp-derive/src/de.rs +++ b/crates/rlp/rlp-derive/src/de.rs @@ -72,12 +72,12 @@ pub(crate) fn impl_decodable(ast: &syn::DeriveInput) -> Result { } pub(crate) fn impl_decodable_wrapper(ast: &syn::DeriveInput) -> Result { - let body = parse_struct(ast, "RlpEncodableWrapper")?; + let body = parse_struct(ast, "RlpDecodableWrapper")?; assert_eq!( body.fields.iter().count(), 1, - "#[derive(RlpEncodableWrapper)] is only defined for structs with one field." + "#[derive(RlpDecodableWrapper)] is only defined for structs with one field." ); let name = &ast.ident; From 307fc10abb2a89944745ee85da0ff601ae65d2e0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 14 Jul 2023 02:09:26 +0200 Subject: [PATCH 158/722] chore: add some txs helpers (#3767) --- crates/primitives/src/transaction/mod.rs | 25 ++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 055402c302f1..796baf2986a9 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -780,6 +780,8 @@ impl Decodable for TransactionKind { } /// Signed transaction without its Hash. Used type for inserting into the DB. +/// +/// This can by converted to [`TransactionSigned`] by calling [`TransactionSignedNoHash::hash`]. #[derive_arbitrary(compact)] #[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref, Default, Serialize, Deserialize)] pub struct TransactionSignedNoHash { @@ -800,6 +802,14 @@ impl TransactionSignedNoHash { keccak256(&buf) } + /// Recover signer from signature and hash. + /// + /// Returns `None` if the transaction's signature is invalid, see also [Self::recover_signer]. + pub fn recover_signer(&self) -> Option
<Address>
{ + let signature_hash = self.signature_hash(); + self.signature.recover_signer(signature_hash) + } + /// Converts into a transaction type with its hash: [`TransactionSigned`]. pub fn with_hash(self) -> TransactionSigned { self.into() @@ -938,7 +948,7 @@ impl TransactionSigned { self.signature.recover_signer(signature_hash) } - /// Devour Self, recover signer and return [`TransactionSignedEcRecovered`] + /// Consumes the type, recover signer and return [`TransactionSignedEcRecovered`] /// /// Returns `None` if the transaction's signature is invalid, see also [Self::recover_signer]. pub fn into_ecrecovered(self) -> Option { @@ -946,12 +956,23 @@ impl TransactionSigned { Some(TransactionSignedEcRecovered { signed_transaction: self, signer }) } - /// try to recover signer and return [`TransactionSignedEcRecovered`] + /// Tries to recover signer and return [`TransactionSignedEcRecovered`] by cloning the type. pub fn try_ecrecovered(&self) -> Option { let signer = self.recover_signer()?; Some(TransactionSignedEcRecovered { signed_transaction: self.clone(), signer }) } + /// Tries to recover signer and return [`TransactionSignedEcRecovered`]. + /// + /// Returns `Err(Self)` if the transaction's signature is invalid, see also + /// [Self::recover_signer]. + pub fn try_into_ecrecovered(self) -> Result { + match self.recover_signer() { + None => Err(self), + Some(signer) => Ok(TransactionSignedEcRecovered { signed_transaction: self, signer }), + } + } + /// Returns the enveloped encoded transactions. /// /// See also [TransactionSigned::encode_enveloped] From f78f0302f10e4c087b306daf92aba40540bad881 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 14 Jul 2023 09:59:57 +0200 Subject: [PATCH 159/722] fix(rpc): serialize traces always as vec (#3770) --- .../revm/revm-inspectors/src/tracing/builder/parity.rs | 7 ++++++- crates/rpc/rpc-types/src/eth/trace/parity.rs | 9 +++++---- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs index ba73f6cef0b7..cce86c43445c 100644 --- a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs +++ b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs @@ -138,7 +138,12 @@ impl ParityTraceBuilder { let (trace, vm_trace, state_diff) = self.into_trace_type_traces(trace_types); - TraceResults { output: output.into(), trace, vm_trace, state_diff } + TraceResults { + output: output.into(), + trace: trace.unwrap_or_default(), + vm_trace, + state_diff, + } } /// Consumes the inspector and returns the trace results according to the configured trace diff --git a/crates/rpc/rpc-types/src/eth/trace/parity.rs b/crates/rpc/rpc-types/src/eth/trace/parity.rs index 7fbea1bbac87..fcc5e54bbbb8 100644 --- a/crates/rpc/rpc-types/src/eth/trace/parity.rs +++ b/crates/rpc/rpc-types/src/eth/trace/parity.rs @@ -30,12 +30,13 @@ pub enum TraceType { pub struct TraceResults { /// Output of the trace pub output: Bytes, - /// Enabled if [TraceType::Trace] is provided - pub trace: Option>, - /// Enabled if [TraceType::VmTrace] is provided - pub vm_trace: Option, /// Enabled if [TraceType::StateDiff] is provided pub state_diff: Option, + /// Enabled if [TraceType::Trace] is provided, otherwise an empty vec + #[serde(default)] + pub trace: Vec, + /// Enabled if [TraceType::VmTrace] is provided + pub vm_trace: Option, } /// A `FullTrace` with an additional transaction hash From ad4c590b655bb3cceec334e8713ca3415b7cd207 Mon Sep 17 00:00:00 2001 From: Matthias 
Seitz Date: Fri, 14 Jul 2023 10:13:56 +0200 Subject: [PATCH 160/722] fix(rpc): make trace filter req field hex or decimal (#3772) --- crates/primitives/src/serde_helper/num.rs | 27 ++++++++++++++++++++ crates/rpc/rpc-types/src/eth/trace/filter.rs | 25 ++++++++++++++---- 2 files changed, 47 insertions(+), 5 deletions(-) diff --git a/crates/primitives/src/serde_helper/num.rs b/crates/primitives/src/serde_helper/num.rs index 312f287124be..820174985138 100644 --- a/crates/primitives/src/serde_helper/num.rs +++ b/crates/primitives/src/serde_helper/num.rs @@ -88,6 +88,33 @@ pub mod u64_hex_or_decimal { U64HexOrNumber::from(*value).serialize(s) } } + +/// serde functions for handling primitive optional `u64` as [U64](crate::U64) +pub mod u64_hex_or_decimal_opt { + use crate::serde_helper::num::U64HexOrNumber; + use serde::{Deserialize, Deserializer, Serialize, Serializer}; + + /// Deserializes an `u64` accepting a hex quantity string with optional 0x prefix or + /// a number + pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> + where + D: Deserializer<'de>, + { + match Option::::deserialize(deserializer)? { + Some(val) => Ok(Some(val.into())), + None => Ok(None), + } + } + + /// Serializes u64 as hex string + pub fn serialize(value: &Option, s: S) -> Result { + match value { + Some(val) => U64HexOrNumber::from(*val).serialize(s), + None => s.serialize_none(), + } + } +} + /// Deserializes the input into an `Option`, using [`from_int_or_hex`] to deserialize the /// inner value. pub fn from_int_or_hex_opt<'de, D>(deserializer: D) -> Result, D::Error> diff --git a/crates/rpc/rpc-types/src/eth/trace/filter.rs b/crates/rpc/rpc-types/src/eth/trace/filter.rs index bde828cab5a5..3121335daeb9 100644 --- a/crates/rpc/rpc-types/src/eth/trace/filter.rs +++ b/crates/rpc/rpc-types/src/eth/trace/filter.rs @@ -1,5 +1,5 @@ //! `trace_filter` types and support -use reth_primitives::{Address, BlockNumber}; +use reth_primitives::{serde_helper::num::u64_hex_or_decimal_opt, Address}; use serde::{Deserialize, Serialize}; /// Trace filter. 
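The `#[serde(with = "...")]` attribute used in the hunk below routes a single field through the `serialize`/`deserialize` functions of the named module, which is how `u64_hex_or_decimal_opt` above gets applied. A self-contained sketch of the same pattern (simplified: this demo accepts only hex or decimal strings, whereas the real `U64HexOrNumber` also accepts plain JSON numbers):

    use serde::{Deserialize, Deserializer, Serialize, Serializer};

    mod u64_hex_or_decimal_demo {
        use super::*;

        pub fn serialize<S: Serializer>(v: &Option<u64>, s: S) -> Result<S::Ok, S::Error> {
            match v {
                Some(n) => s.serialize_str(&format!("{n:#x}")),
                None => s.serialize_none(),
            }
        }

        pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Option<u64>, D::Error> {
            Option::<String>::deserialize(d)?
                .map(|raw| match raw.strip_prefix("0x") {
                    Some(hex) => u64::from_str_radix(hex, 16),
                    None => raw.parse(),
                })
                .transpose()
                .map_err(serde::de::Error::custom)
        }
    }

    #[derive(Serialize, Deserialize)]
    #[serde(rename_all = "camelCase")]
    struct TraceFilterDemo {
        #[serde(default, with = "u64_hex_or_decimal_demo")]
        from_block: Option<u64>,
    }

    fn main() {
        let f: TraceFilterDemo = serde_json::from_str(r#"{"fromBlock": "0x3"}"#).unwrap();
        assert_eq!(f.from_block, Some(3));
    }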
@@ -8,15 +8,30 @@ use serde::{Deserialize, Serialize}; #[serde(rename_all = "camelCase")] pub struct TraceFilter { /// From block - pub from_block: Option, + #[serde(with = "u64_hex_or_decimal_opt")] + pub from_block: Option, /// To block - pub to_block: Option, + #[serde(with = "u64_hex_or_decimal_opt")] + pub to_block: Option, /// From address pub from_address: Option>, /// To address pub to_address: Option>, /// Output offset - pub after: Option, + pub after: Option, /// Output amount - pub count: Option, + pub count: Option, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parse_filter() { + let s = r#"{"fromBlock": "0x3","toBlock": "0x5"}"#; + let filter: TraceFilter = serde_json::from_str(s).unwrap(); + assert_eq!(filter.from_block, Some(3)); + assert_eq!(filter.to_block, Some(5)); + } } From e3ac77a2314d33f62272bf477640b8bd6cbcebd1 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 14 Jul 2023 11:06:23 +0200 Subject: [PATCH 161/722] perf: only lookup in db in cache layer (#3773) --- crates/rpc/rpc/src/eth/cache/mod.rs | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/crates/rpc/rpc/src/eth/cache/mod.rs b/crates/rpc/rpc/src/eth/cache/mod.rs index f5e758f86b70..8209a77ec303 100644 --- a/crates/rpc/rpc/src/eth/cache/mod.rs +++ b/crates/rpc/rpc/src/eth/cache/mod.rs @@ -3,7 +3,9 @@ use futures::{future::Either, Stream, StreamExt}; use reth_interfaces::{provider::ProviderError, Result}; use reth_primitives::{Block, Receipt, SealedBlock, TransactionSigned, H256}; -use reth_provider::{BlockReader, CanonStateNotification, EvmEnvProvider, StateProviderFactory}; +use reth_provider::{ + BlockReader, BlockSource, CanonStateNotification, EvmEnvProvider, StateProviderFactory, +}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use revm::primitives::{BlockEnv, CfgEnv}; use schnellru::{ByLength, Limiter}; @@ -307,7 +309,10 @@ where let provider = this.provider.clone(); let action_tx = this.action_tx.clone(); this.action_task_spawner.spawn_blocking(Box::pin(async move { - let res = provider.block_by_hash(block_hash); + // Only look in the database to prevent situations where we + // looking up the tree is blocking + let res = provider + .find_block_by_hash(block_hash, BlockSource::Database); let _ = action_tx .send(CacheAction::BlockResult { block_hash, res }); })); @@ -325,7 +330,10 @@ where let provider = this.provider.clone(); let action_tx = this.action_tx.clone(); this.action_task_spawner.spawn_blocking(Box::pin(async move { - let res = provider.block_by_hash(block_hash); + // Only look in the database to prevent situations where we + // looking up the tree is blocking + let res = provider + .find_block_by_hash(block_hash, BlockSource::Database); let _ = action_tx .send(CacheAction::BlockResult { block_hash, res }); })); From 3b16a6b46176f50d6133dd5f3c86a44465af41c9 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 14 Jul 2023 13:38:24 +0200 Subject: [PATCH 162/722] fix: add missing null check (#3766) --- crates/rpc/rpc-types/src/eth/trace/geth/mod.rs | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/crates/rpc/rpc-types/src/eth/trace/geth/mod.rs b/crates/rpc/rpc-types/src/eth/trace/geth/mod.rs index 092efa438559..eb9a33acd28c 100644 --- a/crates/rpc/rpc-types/src/eth/trace/geth/mod.rs +++ b/crates/rpc/rpc-types/src/eth/trace/geth/mod.rs @@ -209,6 +209,9 @@ impl GethDebugTracerConfig { /// Returns the [CallConfig] if it is a call config. 
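+    /// (with the null check added below, an explicitly `null` tracer config falls back
+    /// to the default config instead of erroring)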
pub fn into_call_config(self) -> Result { + if self.0.is_null() { + return Ok(Default::default()) + } self.from_value() } @@ -219,6 +222,9 @@ impl GethDebugTracerConfig { /// Returns the [PreStateConfig] if it is a call config. pub fn into_pre_state_config(self) -> Result { + if self.0.is_null() { + return Ok(Default::default()) + } self.from_value() } } @@ -370,6 +376,18 @@ fn serialize_string_storage_map_opt( mod tests { use super::*; + #[test] + fn test_tracer_config() { + let s = "{\"tracer\": \"callTracer\"}"; + let opts = serde_json::from_str::(s).unwrap(); + assert_eq!( + opts.tracer, + Some(GethDebugTracerType::BuiltInTracer(GethDebugBuiltInTracerType::CallTracer)) + ); + let _call_config = opts.tracer_config.clone().into_call_config().unwrap(); + let _prestate_config = opts.tracer_config.into_pre_state_config().unwrap(); + } + #[test] fn test_memory_capture() { let mut config = GethDefaultTracingOptions::default(); From 715d41dbc76c5b6a21cc9ec8a84d32e895cebbe0 Mon Sep 17 00:00:00 2001 From: Aditya Pandey Date: Fri, 14 Jul 2023 18:14:33 +0530 Subject: [PATCH 163/722] adding row for total db size in db stats command (#3777) Co-authored-by: Matthias Seitz --- bin/reth/src/db/mod.rs | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/bin/reth/src/db/mod.rs b/bin/reth/src/db/mod.rs index ee2e0d12ab53..4670683c43d5 100644 --- a/bin/reth/src/db/mod.rs +++ b/bin/reth/src/db/mod.rs @@ -104,6 +104,7 @@ impl Command { let mut tables = Tables::ALL.iter().map(|table| table.name()).collect::>(); tables.sort(); + let mut total_size = 0; for table in tables { let table_db = tx.inner.open_db(Some(table)).wrap_err("Could not open db.")?; @@ -123,6 +124,7 @@ impl Command { let num_pages = leaf_pages + branch_pages + overflow_pages; let table_size = page_size * num_pages; + total_size += table_size; let mut row = Row::new(); row.add_cell(Cell::new(table)) .add_cell(Cell::new(stats.entries())) @@ -132,6 +134,24 @@ impl Command { .add_cell(Cell::new(human_bytes(table_size as f64))); stats_table.add_row(row); } + + let max_widths = stats_table.column_max_content_widths(); + + let mut seperator = Row::new(); + for width in max_widths { + seperator.add_cell(Cell::new("-".repeat(width as usize))); + } + stats_table.add_row(seperator); + + let mut row = Row::new(); + row.add_cell(Cell::new("Total DB size")) + .add_cell(Cell::new("")) + .add_cell(Cell::new("")) + .add_cell(Cell::new("")) + .add_cell(Cell::new("")) + .add_cell(Cell::new(human_bytes(total_size as f64))); + stats_table.add_row(row); + Ok::<(), eyre::Report>(()) })??; From d1610f3df9c8649de405ea139df366aa4863e635 Mon Sep 17 00:00:00 2001 From: suneal Date: Fri, 14 Jul 2023 22:23:37 +0800 Subject: [PATCH 164/722] fix: add unknown block error (#3779) Co-authored-by: suneal --- crates/rpc/rpc-types/src/eth/error.rs | 4 ++++ crates/rpc/rpc/src/eth/error.rs | 12 ++++++++++-- crates/storage/provider/src/traits/block_id.rs | 12 +++++++++--- 3 files changed, 23 insertions(+), 5 deletions(-) diff --git a/crates/rpc/rpc-types/src/eth/error.rs b/crates/rpc/rpc-types/src/eth/error.rs index c20e61f0c409..f580a4dbe8da 100644 --- a/crates/rpc/rpc-types/src/eth/error.rs +++ b/crates/rpc/rpc-types/src/eth/error.rs @@ -13,6 +13,9 @@ pub enum EthRpcErrorCode { /// > If the block is not found, the callee SHOULD raise a JSON-RPC error (the recommended /// > error code is -32001: Resource not found). 
ResourceNotFound, + /// Thrown when querying for `finalized` or `safe` block before the merge transition is + /// finalized, + UnknownBlock, } impl EthRpcErrorCode { @@ -23,6 +26,7 @@ impl EthRpcErrorCode { EthRpcErrorCode::ExecutionError => 3, EthRpcErrorCode::InvalidInput => -32000, EthRpcErrorCode::ResourceNotFound => -32001, + EthRpcErrorCode::UnknownBlock => -39001, } } } diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index 1818f4aea87c..023e76740610 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -26,6 +26,10 @@ pub enum EthApiError { PoolError(RpcPoolError), #[error("Unknown block number")] UnknownBlockNumber, + /// Thrown when querying for `finalized` or `safe` block before the merge transition is + /// finalized, + #[error("Unknown block")] + UnknownSafeOrFinalizedBlock, #[error("Unknown block or tx index")] UnknownBlockOrTxIndex, #[error("Invalid block range")] @@ -101,6 +105,9 @@ impl From for ErrorObject<'static> { EthApiError::UnknownBlockNumber | EthApiError::UnknownBlockOrTxIndex => { rpc_error_with_code(EthRpcErrorCode::ResourceNotFound.code(), error.to_string()) } + EthApiError::UnknownSafeOrFinalizedBlock => { + rpc_error_with_code(EthRpcErrorCode::UnknownBlock.code(), error.to_string()) + } EthApiError::Unsupported(msg) => internal_rpc_err(msg), EthApiError::InternalJsTracerError(msg) => internal_rpc_err(msg), EthApiError::InvalidParams(msg) => invalid_params_rpc_err(msg), @@ -143,11 +150,12 @@ impl From for EthApiError { ProviderError::HeaderNotFound(_) | ProviderError::BlockHashNotFound(_) | ProviderError::BestBlockNotFound | - ProviderError::FinalizedBlockNotFound | - ProviderError::SafeBlockNotFound | ProviderError::BlockNumberForTransactionIndexNotFound | ProviderError::TotalDifficultyNotFound { .. } | ProviderError::UnknownBlockHash(_) => EthApiError::UnknownBlockNumber, + ProviderError::FinalizedBlockNotFound | ProviderError::SafeBlockNotFound => { + EthApiError::UnknownSafeOrFinalizedBlock + } err => EthApiError::Internal(err.into()), } } diff --git a/crates/storage/provider/src/traits/block_id.rs b/crates/storage/provider/src/traits/block_id.rs index 4fd3ae4a1fd7..296d7e1c1890 100644 --- a/crates/storage/provider/src/traits/block_id.rs +++ b/crates/storage/provider/src/traits/block_id.rs @@ -1,5 +1,5 @@ use super::BlockHashReader; -use reth_interfaces::Result; +use reth_interfaces::{provider::ProviderError, Result}; use reth_primitives::{BlockHashOrNumber, BlockId, BlockNumber, BlockNumberOrTag, ChainInfo, H256}; /// Client trait for getting important block numbers (such as the latest block number), converting @@ -61,8 +61,14 @@ pub trait BlockIdReader: BlockNumReader + Send + Sync { .map(|res_opt| res_opt.map(|num_hash| num_hash.number)) } BlockNumberOrTag::Number(num) => num, - BlockNumberOrTag::Finalized => return self.finalized_block_number(), - BlockNumberOrTag::Safe => return self.safe_block_number(), + BlockNumberOrTag::Finalized => match self.finalized_block_number()? { + Some(block_number) => block_number, + None => return Err(ProviderError::FinalizedBlockNotFound.into()), + }, + BlockNumberOrTag::Safe => match self.safe_block_number()? 
{ + Some(block_number) => block_number, + None => return Err(ProviderError::SafeBlockNotFound.into()), + }, }; Ok(Some(num)) } From c0cafc7a4f54f66ec450551f7da0d0561b7ad43a Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Fri, 14 Jul 2023 19:35:18 +0100 Subject: [PATCH 165/722] chore: enable jemalloc by default on unix (#3735) --- bin/reth/Cargo.toml | 7 +++++-- bin/reth/src/lib.rs | 2 +- bin/reth/src/main.rs | 2 +- bin/reth/src/prometheus_exporter.rs | 8 ++++---- 4 files changed, 11 insertions(+), 8 deletions(-) diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index eb81a41931fc..ac7aca802737 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -37,8 +37,6 @@ reth-basic-payload-builder = { path = "../../crates/payload/basic" } reth-discv4 = { path = "../../crates/net/discv4" } reth-metrics = { workspace = true } reth-prune = { path = "../../crates/prune" } -jemallocator = { version = "0.5.0", optional = true } -jemalloc-ctl = { version = "0.5.0", optional = true } # crypto secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } @@ -88,7 +86,12 @@ pretty_assertions = "1.3.0" humantime = "2.1.0" const-str = "0.5.6" +[target.'cfg(not(windows))'.dependencies] +jemallocator = { version = "0.5.0", optional = true } +jemalloc-ctl = { version = "0.5.0", optional = true } + [features] +default = ["jemalloc"] jemalloc = ["dep:jemallocator", "dep:jemalloc-ctl"] jemalloc-prof = ["jemalloc", "jemallocator?/profiling"] min-error-logs = ["tracing/release_max_level_error"] diff --git a/bin/reth/src/lib.rs b/bin/reth/src/lib.rs index 46d2c76a16e0..60fb98df12cd 100644 --- a/bin/reth/src/lib.rs +++ b/bin/reth/src/lib.rs @@ -40,5 +40,5 @@ pub mod test_vectors; pub mod utils; pub mod version; -#[cfg(feature = "jemalloc")] +#[cfg(all(feature = "jemalloc", unix))] use jemallocator as _; diff --git a/bin/reth/src/main.rs b/bin/reth/src/main.rs index d7832b51c5da..220b04d9d0e7 100644 --- a/bin/reth/src/main.rs +++ b/bin/reth/src/main.rs @@ -1,5 +1,5 @@ // We use jemalloc for performance reasons -#[cfg(feature = "jemalloc")] +#[cfg(all(feature = "jemalloc", unix))] #[global_allocator] static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc; diff --git a/bin/reth/src/prometheus_exporter.rs b/bin/reth/src/prometheus_exporter.rs index f5635c88d28e..c1cea799d918 100644 --- a/bin/reth/src/prometheus_exporter.rs +++ b/bin/reth/src/prometheus_exporter.rs @@ -121,7 +121,7 @@ pub(crate) async fn initialize( Ok(()) } -#[cfg(feature = "jemalloc")] +#[cfg(all(feature = "jemalloc", unix))] fn collect_memory_stats() { use jemalloc_ctl::{epoch, stats}; use reth_metrics::metrics::gauge; @@ -169,7 +169,7 @@ fn collect_memory_stats() { } } -#[cfg(feature = "jemalloc")] +#[cfg(all(feature = "jemalloc", unix))] fn describe_memory_stats() { use reth_metrics::metrics::describe_gauge; @@ -206,8 +206,8 @@ fn describe_memory_stats() { ); } -#[cfg(not(feature = "jemalloc"))] +#[cfg(not(all(feature = "jemalloc", unix)))] fn collect_memory_stats() {} -#[cfg(not(feature = "jemalloc"))] +#[cfg(not(all(feature = "jemalloc", unix)))] fn describe_memory_stats() {} From bc91cafabf1d216d74dce252336df191280457c6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 14 Jul 2023 20:38:09 +0200 Subject: [PATCH 166/722] fix: remove single body response check (#3783) --- crates/net/downloaders/src/bodies/request.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index 
a82b216cc32c..a71b81f33e73 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ b/crates/net/downloaders/src/bodies/request.rs @@ -119,11 +119,11 @@ where // Increment total downloaded metric self.metrics.total_downloaded.increment(response_len as u64); - // Malicious peers often return a single block. Mark responses with single - // block when more than 1 were requested invalid. - // TODO: Instead of marking single block responses invalid, calculate - // soft response size lower limit and use that for filtering. - if bodies.is_empty() || (request_len != 1 && response_len == 1) { + // TODO: Malicious peers often return a single block even if it does not exceed the soft + // response limit (2MB). this could be penalized by checking if this block and the + // next one exceed the soft response limit, if not then peer either does not have the next + // block or deliberately sent a single block. + if bodies.is_empty() { return Err(DownloadError::EmptyResponse) } From 77faa04ca6682c033ecfe391a0e5672692ba0adb Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 14 Jul 2023 20:50:59 +0200 Subject: [PATCH 167/722] feat: remove peers after several unsuccessful attempts (#3780) --- crates/config/src/config.rs | 2 +- crates/net/network/src/error.rs | 18 ++++- crates/net/network/src/peers/manager.rs | 100 ++++++++++++++++++++++-- 3 files changed, 111 insertions(+), 9 deletions(-) diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index b622ed47ee3d..659df006b5e3 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -313,7 +313,7 @@ mod tests { fn test_store_config() { with_tempdir("config-store-test", |config_path| { let config = Config::default(); - confy::store_path(config_path, config).unwrap(); + confy::store_path(config_path, config).expect("Failed to store config"); }) } diff --git a/crates/net/network/src/error.rs b/crates/net/network/src/error.rs index 55d5e14a8dfb..7362f7094181 100644 --- a/crates/net/network/src/error.rs +++ b/crates/net/network/src/error.rs @@ -177,8 +177,18 @@ impl SessionError for EthStreamError { return match err { DisconnectReason::TooManyPeers | DisconnectReason::AlreadyConnected | + DisconnectReason::PingTimeout | + DisconnectReason::DisconnectRequested | DisconnectReason::TcpSubsystemError => Some(BackoffKind::Low), - _ => { + + DisconnectReason::ProtocolBreach | + DisconnectReason::UselessPeer | + DisconnectReason::IncompatibleP2PProtocolVersion | + DisconnectReason::NullNodeIdentity | + DisconnectReason::ClientQuitting | + DisconnectReason::UnexpectedHandshakeIdentity | + DisconnectReason::ConnectedToSelf | + DisconnectReason::SubprotocolSpecific => { // These are considered fatal, and are handled by the // [`SessionError::is_fatal_protocol_error`] Some(BackoffKind::High) @@ -245,8 +255,10 @@ impl SessionError for io::Error { // these usually happen when the remote instantly drops the connection, for example // if the previous connection isn't properly cleaned up yet and the peer is temp. // banned. - ErrorKind::ConnectionRefused | ErrorKind::ConnectionReset | ErrorKind::BrokenPipe => { - Some(BackoffKind::Low) + ErrorKind::ConnectionReset | ErrorKind::BrokenPipe => Some(BackoffKind::Low), + ErrorKind::ConnectionRefused => { + // peer is unreachable, e.g. 
port not open or down + Some(BackoffKind::High) } _ => Some(BackoffKind::Medium), } diff --git a/crates/net/network/src/peers/manager.rs b/crates/net/network/src/peers/manager.rs index 63a89a24682c..8b192f1d79a3 100644 --- a/crates/net/network/src/peers/manager.rs +++ b/crates/net/network/src/peers/manager.rs @@ -110,6 +110,8 @@ pub(crate) struct PeersManager { connect_trusted_nodes_only: bool, /// Timestamp of the last time [Self::tick] was called. last_tick: Instant, + /// Maximum number of backoff attempts before we give up on a peer and dropping. + max_backoff_count: u32, } impl PeersManager { @@ -125,7 +127,7 @@ impl PeersManager { trusted_nodes, connect_trusted_nodes_only, basic_nodes, - .. + max_backoff_count, } = config; let (manager_tx, handle_rx) = mpsc::unbounded_channel(); let now = Instant::now(); @@ -161,6 +163,7 @@ impl PeersManager { backoff_durations, connect_trusted_nodes_only, last_tick: Instant::now(), + max_backoff_count, } } @@ -294,7 +297,7 @@ impl PeersManager { self.ban_list.ban_ip_until(ip, std::time::Instant::now() + self.ban_duration); } - /// Temporarily puts the peer in timeout + /// Temporarily puts the peer in timeout by inserting it into the backedoff peers set fn backoff_peer_until(&mut self, peer_id: PeerId, until: std::time::Instant) { trace!(target: "net::peers", ?peer_id, "backing off"); @@ -448,9 +451,9 @@ impl PeersManager { trace!(target: "net::peers", ?remote_addr, ?peer_id, ?err, "fatal connection error"); // remove the peer to which we can't establish a connection due to protocol related // issues. - if let Some(peer) = self.peers.remove(peer_id) { + if let Some((peer_id, peer)) = self.peers.remove_entry(peer_id) { self.connection_info.decr_state(peer.state); - self.queued_actions.push_back(PeerAction::PeerRemoved(*peer_id)); + self.queued_actions.push_back(PeerAction::PeerRemoved(peer_id)); } // ban the peer @@ -465,6 +468,7 @@ impl PeersManager { } } else { let mut backoff_until = None; + let mut remove_peer = false; if let Some(peer) = self.peers.get_mut(peer_id) { if let Some(kind) = err.should_backoff() { @@ -488,8 +492,20 @@ impl PeersManager { self.connection_info.decr_state(peer.state); peer.state = PeerConnectionState::Idle; + + if peer.severe_backoff_counter > self.max_backoff_count && !peer.is_trusted() { + // mark peer for removal if it has been backoff too many times and is _not_ + // trusted + remove_peer = true; + } } - if let Some(backoff_until) = backoff_until { + + // remove peer if it has been marked for removal + if remove_peer { + let (peer_id, _) = self.peers.remove_entry(peer_id).expect("peer must exist"); + self.queued_actions.push_back(PeerAction::PeerRemoved(peer_id)); + } else if let Some(backoff_until) = backoff_until { + // otherwise, backoff the peer if marked as such self.backoff_peer_until(*peer_id, backoff_until); } } @@ -1052,6 +1068,16 @@ pub struct PeersConfig { pub trusted_nodes: HashSet, /// Connect to trusted nodes only? pub connect_trusted_nodes_only: bool, + /// Maximum number of backoff attempts before we give up on a peer and dropping. + /// + /// The max time spent of a peer before it's removed from the set is determined by the + /// configured backoff duration and the max backoff count. + /// + /// With a backoff counter of 5 and a backoff duration of 1h, the minimum time spent of the + /// peer in the table is the sum of all backoffs (1h + 2h + 3h + 4h + 5h = 15h). + /// + /// Note: this does not apply to trusted peers. + pub max_backoff_count: u32, /// Basic nodes to connect to. 
#[cfg_attr(feature = "serde", serde(skip))] pub basic_nodes: HashSet, @@ -1067,6 +1093,8 @@ pub struct PeersConfig { pub reputation_weights: ReputationChangeWeights, /// How long to backoff peers that are we failed to connect to for non-fatal reasons, such as /// [`DisconnectReason::TooManyPeers`]. + /// + /// The backoff duration increases with number of backoff attempts. pub backoff_durations: PeerBackoffDurations, } @@ -1083,6 +1111,7 @@ impl Default for PeersConfig { trusted_nodes: Default::default(), connect_trusted_nodes_only: false, basic_nodes: Default::default(), + max_backoff_count: 5, } } } @@ -1134,6 +1163,12 @@ impl PeersConfig { self } + /// Configures the max allowed backoff count. + pub fn with_max_backoff_count(mut self, max_backoff_count: u32) -> Self { + self.max_backoff_count = max_backoff_count; + self + } + /// Read from file nodes available at launch. Ignored if None. pub fn with_basic_nodes_from_file( self, @@ -1575,6 +1610,61 @@ mod test { assert!(peers.peers.get(&peer).is_none()); } + #[tokio::test] + async fn test_remove_on_max_backoff_count() { + let peer = PeerId::random(); + let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); + let config = PeersConfig::default(); + let mut peers = PeersManager::new(config.clone()); + peers.add_peer(peer, socket_addr, None); + let peer_struct = peers.peers.get_mut(&peer).unwrap(); + + // Simulate a peer that was already backed off once + peer_struct.severe_backoff_counter = config.max_backoff_count; + + match event!(peers) { + PeerAction::PeerAdded(peer_id) => { + assert_eq!(peer_id, peer); + } + _ => unreachable!(), + } + match event!(peers) { + PeerAction::Connect { peer_id, .. } => { + assert_eq!(peer_id, peer); + } + _ => unreachable!(), + } + + poll_fn(|cx| { + assert!(peers.poll(cx).is_pending()); + Poll::Ready(()) + }) + .await; + + peers.on_pending_session_dropped( + &socket_addr, + &peer, + &PendingSessionHandshakeError::Eth( + io::Error::new(io::ErrorKind::ConnectionRefused, "peer unreachable").into(), + ), + ); + + match event!(peers) { + PeerAction::PeerRemoved(peer_id) => { + assert_eq!(peer_id, peer); + } + _ => unreachable!(), + } + + poll_fn(|cx| { + assert!(peers.poll(cx).is_pending()); + Poll::Ready(()) + }) + .await; + + assert!(peers.peers.get(&peer).is_none()); + } + #[tokio::test] async fn test_ban_on_pending_drop() { let peer = PeerId::random(); From c1b1eac50505307cfe38fefd6bf3d40c84860d04 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 14 Jul 2023 16:23:00 -0400 Subject: [PATCH 168/722] chore: make some session types pub (#3666) --- crates/net/network/src/lib.rs | 6 +- crates/net/network/src/session/handle.rs | 104 ++++++++++++++++++++--- crates/net/network/src/session/mod.rs | 72 ++++++++++------ 3 files changed, 147 insertions(+), 35 deletions(-) diff --git a/crates/net/network/src/lib.rs b/crates/net/network/src/lib.rs index 766ac6684995..25733cc11001 100644 --- a/crates/net/network/src/lib.rs +++ b/crates/net/network/src/lib.rs @@ -151,6 +151,10 @@ pub use manager::{NetworkEvent, NetworkManager}; pub use message::PeerRequest; pub use network::NetworkHandle; pub use peers::PeersConfig; -pub use session::{PeerInfo, SessionsConfig}; +pub use session::{ + ActiveSessionHandle, ActiveSessionMessage, Direction, PeerInfo, PendingSessionEvent, + PendingSessionHandle, PendingSessionHandshakeError, SessionCommand, SessionEvent, SessionId, + SessionLimits, SessionManager, SessionsConfig, +}; pub use reth_eth_wire::{DisconnectReason, 
HelloBuilder, HelloMessage}; diff --git a/crates/net/network/src/session/handle.rs b/crates/net/network/src/session/handle.rs index 8d233039dede..1b4f893b6eea 100644 --- a/crates/net/network/src/session/handle.rs +++ b/crates/net/network/src/session/handle.rs @@ -14,7 +14,10 @@ use reth_primitives::PeerId; use std::{io, net::SocketAddr, sync::Arc, time::Instant}; use tokio::{ net::TcpStream, - sync::{mpsc, oneshot}, + sync::{ + mpsc::{self, error::SendError}, + oneshot, + }, }; /// A handler attached to a peer session that's not authenticated yet, pending Handshake and hello @@ -22,7 +25,7 @@ use tokio::{ /// /// This session needs to wait until it is authenticated. #[derive(Debug)] -pub(crate) struct PendingSessionHandle { +pub struct PendingSessionHandle { /// Can be used to tell the session to disconnect the connection/abort the handshake process. pub(crate) disconnect_tx: Option>, /// The direction of the session @@ -33,11 +36,16 @@ pub(crate) struct PendingSessionHandle { impl PendingSessionHandle { /// Sends a disconnect command to the pending session. - pub(crate) fn disconnect(&mut self) { + pub fn disconnect(&mut self) { if let Some(tx) = self.disconnect_tx.take() { let _ = tx.send(()); } } + + /// Returns the direction of the pending session (inbound or outbound). + pub fn direction(&self) -> Direction { + self.direction + } } /// An established session with a remote peer. @@ -46,7 +54,7 @@ impl PendingSessionHandle { /// be performed: chain synchronization, block propagation and transaction exchange. #[derive(Debug)] #[allow(unused)] -pub(crate) struct ActiveSessionHandle { +pub struct ActiveSessionHandle { /// The direction of the session pub(crate) direction: Direction, /// The assigned id for this session @@ -71,10 +79,59 @@ pub(crate) struct ActiveSessionHandle { impl ActiveSessionHandle { /// Sends a disconnect command to the session. - pub(crate) fn disconnect(&self, reason: Option) { + pub fn disconnect(&self, reason: Option) { // Note: we clone the sender which ensures the channel has capacity to send the message let _ = self.commands_to_session.clone().try_send(SessionCommand::Disconnect { reason }); } + + /// Sends a disconnect command to the session, awaiting the command channel for available + /// capacity. + pub async fn try_disconnect( + &self, + reason: Option, + ) -> Result<(), SendError> { + self.commands_to_session.clone().send(SessionCommand::Disconnect { reason }).await + } + + /// Returns the direction of the active session (inbound or outbound). + pub fn direction(&self) -> Direction { + self.direction + } + + /// Returns the assigned session id for this session. + pub fn session_id(&self) -> SessionId { + self.session_id + } + + /// Returns the negotiated eth version for this session. + pub fn version(&self) -> EthVersion { + self.version + } + + /// Returns the identifier of the remote peer. + pub fn remote_id(&self) -> PeerId { + self.remote_id + } + + /// Returns the timestamp when the session has been established. + pub fn established(&self) -> Instant { + self.established + } + + /// Returns the announced capabilities of the peer. + pub fn capabilities(&self) -> Arc { + self.capabilities.clone() + } + + /// Returns the client's name and version. + pub fn client_version(&self) -> Arc { + self.client_version.clone() + } + + /// Returns the address we're connected to. + pub fn remote_addr(&self) -> SocketAddr { + self.remote_addr + } } /// Info about an active peer session. 
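With the handle types and their accessors now public, embedders can inspect a live session directly. A hypothetical consumer sketch (only the accessor calls are taken from this patch; the handle itself comes from the network's session bookkeeping, and the usual `Debug` impls are assumed):

    use reth_network::ActiveSessionHandle;

    // `handle` would be obtained from reth's session management internals.
    fn describe_session(handle: &ActiveSessionHandle) {
        println!(
            "peer={:?} direction={:?} eth-version={:?} uptime={:?}",
            handle.remote_id(),
            handle.direction(),
            handle.version(),
            handle.established().elapsed(),
        );
    }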
@@ -98,46 +155,66 @@ pub struct PeerInfo { /// /// A session starts with a `Handshake`, followed by a `Hello` message which #[derive(Debug)] -pub(crate) enum PendingSessionEvent { +pub enum PendingSessionEvent { /// Represents a successful `Hello` and `Status` exchange: Established { + /// An internal identifier for the established session session_id: SessionId, + /// The remote node's socket address remote_addr: SocketAddr, /// The remote node's public key peer_id: PeerId, + /// All capabilities the peer announced capabilities: Arc, + /// The Status message the peer sent for the `eth` handshake status: Status, + /// The actual connection stream which can be used to send and receive `eth` protocol + /// messages conn: EthStream>>>, + /// The direction of the session, either `Inbound` or `Outgoing` direction: Direction, + /// The remote node's user agent, usually containing the client name and version client_id: String, }, /// Handshake unsuccessful, session was disconnected. Disconnected { + /// The remote node's socket address remote_addr: SocketAddr, + /// The internal identifier for the disconnected session session_id: SessionId, + /// The direction of the session, either `Inbound` or `Outgoing` direction: Direction, + /// The error that caused the disconnect error: Option, }, /// Thrown when unable to establish a [`TcpStream`]. OutgoingConnectionError { + /// The remote node's socket address remote_addr: SocketAddr, + /// The internal identifier for the disconnected session session_id: SessionId, + /// The remote node's public key peer_id: PeerId, + /// The error that caused the outgoing connection failure error: io::Error, }, - /// Thrown when authentication via Ecies failed. + /// Thrown when authentication via ECIES failed. EciesAuthError { + /// The remote node's socket address remote_addr: SocketAddr, + /// The internal identifier for the disconnected session session_id: SessionId, + /// The error that caused the ECIES session to fail error: ECIESError, + /// The direction of the session, either `Inbound` or `Outgoing` direction: Direction, }, } /// Commands that can be sent to the spawned session. #[derive(Debug)] -pub(crate) enum SessionCommand { +pub enum SessionCommand { /// Disconnect the connection Disconnect { /// Why the disconnect was initiated @@ -150,12 +227,19 @@ pub(crate) enum SessionCommand { /// Message variants an active session can produce and send back to the /// [`SessionManager`](crate::session::SessionManager) #[derive(Debug)] -pub(crate) enum ActiveSessionMessage { +pub enum ActiveSessionMessage { /// Session was gracefully disconnected. 
- Disconnected { peer_id: PeerId, remote_addr: SocketAddr }, + Disconnected { + /// The remote node's public key + peer_id: PeerId, + /// The remote node's socket address + remote_addr: SocketAddr, + }, /// Session was closed due an error ClosedOnConnectionError { + /// The remote node's public key peer_id: PeerId, + /// The remote node's socket address remote_addr: SocketAddr, /// The error that caused the session to close error: EthStreamError, diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index b0d628d48c42..d31d2c635bc1 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -2,14 +2,7 @@ use crate::{ message::PeerMessage, metrics::SesssionManagerMetrics, - session::{ - active::ActiveSession, - config::SessionCounter, - handle::{ - ActiveSessionHandle, ActiveSessionMessage, PendingSessionEvent, PendingSessionHandle, - SessionCommand, - }, - }, + session::{active::ActiveSession, config::SessionCounter}, }; pub use crate::{message::PeerRequestSender, session::handle::PeerInfo}; use fnv::FnvHashMap; @@ -47,7 +40,11 @@ use tracing::{instrument, trace}; mod active; mod config; mod handle; -pub use config::SessionsConfig; +pub use config::{SessionLimits, SessionsConfig}; +pub use handle::{ + ActiveSessionHandle, ActiveSessionMessage, PendingSessionEvent, PendingSessionHandle, + SessionCommand, +}; /// Internal identifier for active sessions. #[derive(Debug, Clone, Copy, PartialOrd, PartialEq, Eq, Hash)] @@ -56,7 +53,7 @@ pub struct SessionId(usize); /// Manages a set of sessions. #[must_use = "Session Manager must be polled to process session events."] #[derive(Debug)] -pub(crate) struct SessionManager { +pub struct SessionManager { /// Tracks the identifier for the next session. next_id: usize, /// Keeps track of all sessions @@ -110,7 +107,7 @@ pub(crate) struct SessionManager { impl SessionManager { /// Creates a new empty [`SessionManager`]. - pub(crate) fn new( + pub fn new( secret_key: SecretKey, config: SessionsConfig, executor: Box, @@ -146,7 +143,7 @@ impl SessionManager { /// Check whether the provided [`ForkId`] is compatible based on the validation rules in /// `EIP-2124`. - pub(crate) fn is_valid_fork_id(&self, fork_id: ForkId) -> bool { + pub fn is_valid_fork_id(&self, fork_id: ForkId) -> bool { self.fork_filter.validate(fork_id).is_ok() } @@ -158,12 +155,12 @@ impl SessionManager { } /// Returns the current status of the session. - pub(crate) fn status(&self) -> Status { + pub fn status(&self) -> Status { self.status } /// Returns the session hello message. - pub(crate) fn hello_message(&self) -> HelloMessage { + pub fn hello_message(&self) -> HelloMessage { self.hello_message.clone() } @@ -235,7 +232,7 @@ impl SessionManager { } /// Starts a new pending session from the local node to the given remote node. - pub(crate) fn dial_outbound(&mut self, remote_addr: SocketAddr, remote_peer_id: PeerId) { + pub fn dial_outbound(&mut self, remote_addr: SocketAddr, remote_peer_id: PeerId) { // The error can be dropped because no dial will be made if it would exceed the limit if self.counter.ensure_pending_outbound().is_ok() { let session_id = self.next_id(); @@ -272,7 +269,7 @@ impl SessionManager { /// /// This will trigger the disconnect on the session task to gracefully terminate. The result /// will be picked up by the receiver. 
- pub(crate) fn disconnect(&self, node: PeerId, reason: Option) { + pub fn disconnect(&self, node: PeerId, reason: Option) { if let Some(session) = self.active_sessions.get(&node) { session.disconnect(reason); } @@ -297,21 +294,21 @@ impl SessionManager { /// /// It will trigger the disconnect on all the session tasks to gracefully terminate. The result /// will be picked by the receiver. - pub(crate) fn disconnect_all(&self, reason: Option) { + pub fn disconnect_all(&self, reason: Option) { for (_, session) in self.active_sessions.iter() { session.disconnect(reason); } } /// Disconnects all pending sessions. - pub(crate) fn disconnect_all_pending(&mut self) { + pub fn disconnect_all_pending(&mut self) { for (_, session) in self.pending_sessions.iter_mut() { session.disconnect(); } } /// Sends a message to the peer's session - pub(crate) fn send_message(&mut self, peer_id: &PeerId, msg: PeerMessage) { + pub fn send_message(&mut self, peer_id: &PeerId, msg: PeerMessage) { if let Some(session) = self.active_sessions.get_mut(peer_id) { let _ = session.commands_to_session.try_send(SessionCommand::Message(msg)); } @@ -565,7 +562,7 @@ impl SessionManager { } /// Returns [`PeerInfo`] for all connected peers - pub(crate) fn get_peer_info(&self) -> Vec { + pub fn get_peer_info(&self) -> Vec { self.active_sessions .values() .map(|session| PeerInfo { @@ -581,7 +578,7 @@ impl SessionManager { /// Returns [`PeerInfo`] for a given peer. /// /// Returns `None` if there's no active session to the peer. - pub(crate) fn get_peer_info_by_id(&self, peer_id: PeerId) -> Option { + pub fn get_peer_info_by_id(&self, peer_id: PeerId) -> Option { self.active_sessions.get(&peer_id).map(|session| PeerInfo { remote_id: session.remote_id, direction: session.direction, @@ -594,35 +591,50 @@ impl SessionManager { /// Events produced by the [`SessionManager`] #[derive(Debug)] -pub(crate) enum SessionEvent { +pub enum SessionEvent { /// A new session was successfully authenticated. /// /// This session is now able to exchange data. SessionEstablished { + /// The remote node's public key peer_id: PeerId, + /// The remote node's socket address remote_addr: SocketAddr, + /// The user agent of the remote node, usually containing the client name and version client_version: Arc, + /// The capabilities the remote node has announced capabilities: Arc, /// negotiated eth version version: EthVersion, + /// The Status message the peer sent during the `eth` handshake status: Status, + /// The channel for sending messages to the peer with the session messages: PeerRequestSender, + /// The direction of the session, either `Inbound` or `Outgoing` direction: Direction, + /// The maximum time that the session waits for a response from the peer before timing out + /// the connection timeout: Arc, }, + /// The peer was already connected with another session. AlreadyConnected { + /// The remote node's public key peer_id: PeerId, + /// The remote node's socket address remote_addr: SocketAddr, + /// The direction of the session, either `Inbound` or `Outgoing` direction: Direction, }, /// A session received a valid message via RLPx. ValidMessage { + /// The remote node's public key peer_id: PeerId, /// Message received from the peer. message: PeerMessage, }, /// Received a message that does not match the announced capabilities of the peer. InvalidMessage { + /// The remote node's public key peer_id: PeerId, /// Announced capabilities of the remote peer. 
capabilities: Arc, @@ -641,19 +653,27 @@ pub(crate) enum SessionEvent { }, /// Closed an incoming pending session during handshaking. IncomingPendingSessionClosed { + /// The remote node's socket address remote_addr: SocketAddr, + /// The pending handshake session error that caused the session to close error: Option, }, /// Closed an outgoing pending session during handshaking. OutgoingPendingSessionClosed { + /// The remote node's socket address remote_addr: SocketAddr, + /// The remote node's public key peer_id: PeerId, + /// The pending handshake session error that caused the session to close error: Option, }, /// Failed to establish a tcp stream OutgoingConnectionError { + /// The remote node's socket address remote_addr: SocketAddr, + /// The remote node's public key peer_id: PeerId, + /// The error that caused the outgoing connection to fail error: io::Error, }, /// Session was closed due to an error @@ -667,15 +687,19 @@ pub(crate) enum SessionEvent { }, /// Active session was gracefully disconnected. Disconnected { + /// The remote node's public key peer_id: PeerId, + /// The remote node's socket address that we were connected to remote_addr: SocketAddr, }, } /// Errors that can occur during handshaking/authenticating the underlying streams. #[derive(Debug)] -pub(crate) enum PendingSessionHandshakeError { +pub enum PendingSessionHandshakeError { + /// The pending session failed due to an error while establishing the `eth` stream Eth(EthStreamError), + /// The pending session failed due to an error while establishing the ECIES stream Ecies(ECIESError), } @@ -700,7 +724,7 @@ pub enum Direction { impl Direction { /// Returns `true` if this an incoming connection. - pub(crate) fn is_incoming(&self) -> bool { + pub fn is_incoming(&self) -> bool { matches!(self, Direction::Incoming) } From 2748bed86b0ab6df6d6fac5785d87bde4c5333d4 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Sat, 15 Jul 2023 12:02:14 +0300 Subject: [PATCH 169/722] chore(downloader): simplify bodies task polling (#3788) --- crates/net/downloaders/src/bodies/task.rs | 45 +++++++++-------------- 1 file changed, 18 insertions(+), 27 deletions(-) diff --git a/crates/net/downloaders/src/bodies/task.rs b/crates/net/downloaders/src/bodies/task.rs index 30aabe3a6ab1..456f8a637cc4 100644 --- a/crates/net/downloaders/src/bodies/task.rs +++ b/crates/net/downloaders/src/bodies/task.rs @@ -115,35 +115,26 @@ impl Future for SpawnedDownloader { let this = self.get_mut(); loop { - loop { - match this.updates.poll_next_unpin(cx) { - Poll::Pending => break, - Poll::Ready(None) => { - // channel closed, this means [TaskDownloader] was dropped, so we can also - // exit - return Poll::Ready(()) - } - Poll::Ready(Some(range)) => { - if let Err(err) = this.downloader.set_download_range(range) { - tracing::error!(target: "downloaders::bodies", ?err, "Failed to set bodies download range"); - - match ready!(this.bodies_tx.poll_reserve(cx)) { - Ok(()) => { - if this.bodies_tx.send_item(Err(err)).is_err() { - // channel closed, this means [TaskDownloader] was dropped, - // so we can also - // exit - return Poll::Ready(()) - } - } - Err(_) => { - // channel closed, this means [TaskDownloader] was dropped, so - // we can also exit - return Poll::Ready(()) - } - } + while let Poll::Ready(update) = this.updates.poll_next_unpin(cx) { + if let Some(range) = update { + if let Err(err) = this.downloader.set_download_range(range) { + tracing::error!(target: "downloaders::bodies", ?err, "Failed to set bodies download range"); + + // Clone the sender 
ensure its availability. See [PollSender::clone]. + let mut bodies_tx = this.bodies_tx.clone(); + + let forward_error_result = ready!(bodies_tx.poll_reserve(cx)) + .and_then(|_| bodies_tx.send_item(Err(err))); + if forward_error_result.is_err() { + // channel closed, this means [TaskDownloader] was dropped, + // so we can also exit + return Poll::Ready(()) } } + } else { + // channel closed, this means [TaskDownloader] was dropped, so we can also + // exit + return Poll::Ready(()) } } From 300b496686ca8811412c30b88aa5b7b372977b89 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Sat, 15 Jul 2023 12:27:19 +0300 Subject: [PATCH 170/722] chore(tree): remove revert notification (#3645) --- crates/storage/provider/src/traits/chain.rs | 18 ++------- crates/transaction-pool/src/maintain.rs | 45 --------------------- 2 files changed, 4 insertions(+), 59 deletions(-) diff --git a/crates/storage/provider/src/traits/chain.rs b/crates/storage/provider/src/traits/chain.rs index dc5c8a168d78..93f5a61a4b78 100644 --- a/crates/storage/provider/src/traits/chain.rs +++ b/crates/storage/provider/src/traits/chain.rs @@ -64,14 +64,11 @@ impl Stream for CanonStateNotificationStream { #[derive(Clone, Debug)] #[allow(missing_docs)] pub enum CanonStateNotification { - /// Chain reorgs and both old and new chain are returned. - Reorg { old: Arc, new: Arc }, - /// Chain got reverted without reorg and only old chain is returned. - /// - /// This reverts the chain's tip to the first block of the chain. - Revert { old: Arc }, /// Chain got extended without reorg and only new chain is returned. Commit { new: Arc }, + /// Chain reorgs and both old and new chain are returned. + /// Revert is just a subset of reorg where the new chain is empty. + Reorg { old: Arc, new: Arc }, } // For one reason or another, the compiler can't derive PartialEq for CanonStateNotification. @@ -82,7 +79,6 @@ impl PartialEq for CanonStateNotification { (Self::Reorg { old: old1, new: new1 }, Self::Reorg { old: old2, new: new2 }) => { old1 == old2 && new1 == new2 } - (Self::Revert { old: old1 }, Self::Revert { old: old2 }) => old1 == old2, (Self::Commit { new: new1 }, Self::Commit { new: new2 }) => new1 == new2, _ => false, } @@ -94,7 +90,6 @@ impl CanonStateNotification { pub fn reverted(&self) -> Option> { match self { Self::Reorg { old, .. } => Some(old.clone()), - Self::Revert { old } => Some(old.clone()), Self::Commit { .. } => None, } } @@ -102,12 +97,9 @@ impl CanonStateNotification { /// Get the new chain if any. /// /// Returns the new committed [Chain] for [Self::Reorg] and [Self::Commit] variants. - /// - /// Returns None for [Self::Revert] variant. pub fn committed(&self) -> Option> { match self { Self::Reorg { new, .. } => Some(new.clone()), - Self::Revert { .. } => None, Self::Commit { new } => Some(new.clone()), } } @@ -115,12 +107,10 @@ impl CanonStateNotification { /// Returns the new tip of the chain. /// /// Returns the new tip for [Self::Reorg] and [Self::Commit] variants which commit at least 1 - /// new block. Returns the first block of the chain for [Self::Revert] variant, which is the - /// block that the chain reverted to. + /// new block. pub fn tip(&self) -> &SealedBlockWithSenders { match self { Self::Reorg { new, .. 
} => new.tip(), - Self::Revert { old } => old.first(), Self::Commit { new } => new.tip(), } } diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index a55340ed55ab..11d9871f4a1f 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -162,51 +162,6 @@ where metrics.inc_reinserted_transactions(pruned_old_transactions.len()); let _ = pool.add_external_transactions(pruned_old_transactions).await; } - CanonStateNotification::Revert { old } => { - // this similar to the inverse of a commit where we need to insert the transactions - // back into the pool and update the pool's state accordingly - - let (blocks, state) = old.inner(); - let first_block = blocks.first(); - - if first_block.hash == pool_info.last_seen_block_hash { - // nothing to update - continue - } - - // base fee for the next block: `first_block+1` - let pending_block_base_fee = - first_block.next_block_base_fee().unwrap_or_default() as u128; - - let mut changed_accounts = Vec::with_capacity(state.accounts().len()); - for acc in changed_accounts_iter(state) { - // we can always clear the dirty flag for this account - dirty_addresses.remove(&acc.address); - changed_accounts.push(acc); - } - - let update = CanonicalStateUpdate { - hash: first_block.hash, - number: first_block.number, - pending_block_base_fee, - changed_accounts, - // no tx to prune in the reverted chain - mined_transactions: vec![], - }; - pool.on_canonical_state_change(update); - - let pruned_old_transactions = blocks - .transactions() - .filter_map(|tx| tx.clone().into_ecrecovered()) - .map(
<P as TransactionPool>
::Transaction::from_recovered_transaction) - .collect::>(); - - // all transactions that were mined in the old chain need to be re-injected - // - // Note: we no longer know if the tx was local or external - metrics.inc_reinserted_transactions(pruned_old_transactions.len()); - let _ = pool.add_external_transactions(pruned_old_transactions).await; - } CanonStateNotification::Commit { new } => { let (blocks, state) = new.inner(); let tip = blocks.tip(); From be656c239ab186640a1b98db33110c03ffe7fe98 Mon Sep 17 00:00:00 2001 From: Panagiotis Ganelis <50522617+PanGan21@users.noreply.github.com> Date: Sat, 15 Jul 2023 13:33:33 +0300 Subject: [PATCH 171/722] feat: trim cmd args in parser (#3789) --- bin/reth/src/args/rpc_server_args.rs | 19 +++++++++++++++++++ crates/rpc/rpc-builder/src/lib.rs | 2 +- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/bin/reth/src/args/rpc_server_args.rs b/bin/reth/src/args/rpc_server_args.rs index 9e7b890e0cad..230741885bc6 100644 --- a/bin/reth/src/args/rpc_server_args.rs +++ b/bin/reth/src/args/rpc_server_args.rs @@ -526,6 +526,25 @@ mod tests { ); } + #[test] + fn test_transport_rpc_module_trim_config() { + let args = CommandParser::::parse_from([ + "reth", + "--http.api", + " eth, admin, debug", + "--http", + "--ws", + ]) + .args; + let config = args.transport_rpc_module_config(); + let expected = vec![RethRpcModule::Eth, RethRpcModule::Admin, RethRpcModule::Debug]; + assert_eq!(config.http().cloned().unwrap().into_selection(), expected); + assert_eq!( + config.ws().cloned().unwrap().into_selection(), + RpcModuleSelection::standard_modules() + ); + } + #[test] fn test_rpc_server_config() { let args = CommandParser::::parse_from([ diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index eb9f0d1fd390..7013131ec5c0 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -604,7 +604,7 @@ impl FromStr for RpcModuleSelection { type Err = ParseError; fn from_str(s: &str) -> Result { - let mut modules = s.split(',').peekable(); + let mut modules = s.split(',').map(str::trim).peekable(); let first = modules.peek().copied().ok_or(ParseError::VariantNotFound)?; match first { "all" | "All" => Ok(RpcModuleSelection::All), From e12883edef245c4cc443e2be5d83cec7f6c1fbff Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 15 Jul 2023 16:27:06 +0200 Subject: [PATCH 172/722] feat: txpool block building fallback (#3755) --- crates/rpc/rpc/src/eth/api/block.rs | 10 ++- crates/rpc/rpc/src/eth/api/mod.rs | 82 ++++++++++++++++++++- crates/rpc/rpc/src/eth/api/pending_block.rs | 67 +++++++++++++++++ crates/rpc/rpc/src/eth/api/transactions.rs | 31 ++------ 4 files changed, 160 insertions(+), 30 deletions(-) create mode 100644 crates/rpc/rpc/src/eth/api/pending_block.rs diff --git a/crates/rpc/rpc/src/eth/api/block.rs b/crates/rpc/rpc/src/eth/api/block.rs index d6d12438543e..5220f907a5ab 100644 --- a/crates/rpc/rpc/src/eth/api/block.rs +++ b/crates/rpc/rpc/src/eth/api/block.rs @@ -10,10 +10,13 @@ use crate::{ use reth_primitives::{BlockId, BlockNumberOrTag, TransactionMeta}; use reth_provider::{BlockReaderIdExt, EvmEnvProvider, StateProviderFactory}; use reth_rpc_types::{Block, Index, RichBlock, TransactionReceipt}; +use reth_transaction_pool::TransactionPool; impl EthApi where Provider: BlockReaderIdExt + StateProviderFactory + EvmEnvProvider + 'static, + Pool: TransactionPool + Clone + 'static, + Network: Send + Sync + 'static, { /// Returns the uncle headers of the given block /// @@ -121,7 +124,12 @@ 
where if block_id.is_pending() { // Pending block can be fetched directly without need for caching - return Ok(self.provider().pending_block()?) + let maybe_pending = self.provider().pending_block()?; + return if maybe_pending.is_some() { + return Ok(maybe_pending) + } else { + self.local_pending_block().await + } } let block_hash = match self.provider().block_hash_for_id(block_id)? { diff --git a/crates/rpc/rpc/src/eth/api/mod.rs b/crates/rpc/rpc/src/eth/api/mod.rs index 227e14cefb6e..b53a80993aed 100644 --- a/crates/rpc/rpc/src/eth/api/mod.rs +++ b/crates/rpc/rpc/src/eth/api/mod.rs @@ -12,22 +12,27 @@ use crate::eth::{ use async_trait::async_trait; use reth_interfaces::Result; use reth_network_api::NetworkInfo; -use reth_primitives::{Address, BlockId, BlockNumberOrTag, ChainInfo, H256, U256, U64}; +use reth_primitives::{ + Address, BlockId, BlockNumberOrTag, ChainInfo, SealedBlock, H256, U256, U64, +}; use reth_provider::{BlockReaderIdExt, EvmEnvProvider, StateProviderBox, StateProviderFactory}; use reth_rpc_types::{SyncInfo, SyncStatus}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use reth_transaction_pool::TransactionPool; -use std::{future::Future, sync::Arc}; -use tokio::sync::oneshot; +use revm_primitives::{BlockEnv, CfgEnv}; +use std::{future::Future, sync::Arc, time::Instant}; +use tokio::sync::{oneshot, Mutex}; mod block; mod call; mod fees; +mod pending_block; mod server; mod sign; mod state; mod transactions; +use crate::eth::api::pending_block::{PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}; pub use transactions::{EthTransactions, TransactionSource}; /// `Eth` API trait. @@ -115,6 +120,7 @@ where gas_oracle, starting_block: U256::from(latest_block), task_spawner, + pending_block: Default::default(), }; Self { inner: Arc::new(inner) } } @@ -201,6 +207,74 @@ where } } +impl EthApi +where + Provider: BlockReaderIdExt + StateProviderFactory + EvmEnvProvider + 'static, + Pool: TransactionPool + Clone + 'static, + Network: Send + Sync + 'static, +{ + /// Configures the [CfgEnv] and [BlockEnv] for the pending block + /// + /// If no pending block is available, this will derive it from the `latest` block + pub(crate) fn pending_block_env_and_cfg(&self) -> EthResult { + let origin = if let Some(pending) = self.provider().pending_block()? 
{ + PendingBlockEnvOrigin::ActualPending(pending) + } else { + // no pending block from the CL yet, so we use the latest block and modify the env + // values that we can + let mut latest = + self.provider().latest_header()?.ok_or_else(|| EthApiError::UnknownBlockNumber)?; + + // child block + latest.number += 1; + // assumed child block is in the next slot + latest.timestamp += 12; + // base fee of the child block + latest.base_fee_per_gas = latest.next_block_base_fee(); + + PendingBlockEnvOrigin::DerivedFromLatest(latest) + }; + + let mut cfg = CfgEnv::default(); + let mut block_env = BlockEnv::default(); + self.provider().fill_block_env_with_header(&mut block_env, origin.header())?; + self.provider().fill_cfg_env_with_header(&mut cfg, origin.header())?; + + Ok(PendingBlockEnv { cfg, block_env, origin }) + } + + /// Returns the locally built pending block + pub(crate) async fn local_pending_block(&self) -> EthResult> { + let pending = self.pending_block_env_and_cfg()?; + if pending.origin.is_actual_pending() { + return Ok(pending.origin.into_actual_pending()) + } + + // no pending block from the CL yet, so we need to build it ourselves via txpool + self.on_blocking_task(|this| async move { + let PendingBlockEnv { cfg: _, block_env, origin } = pending; + let lock = this.inner.pending_block.lock().await; + let now = Instant::now(); + // this is guaranteed to be the `latest` header + let parent_header = origin.into_header(); + + // check if the block is still good + if let Some(pending) = lock.as_ref() { + if block_env.number.to::() == pending.block.number && + pending.block.parent_hash == parent_header.parent_hash && + now <= pending.expires_at + { + return Ok(Some(pending.block.clone())) + } + } + + // TODO(mattsse): actually build the pending block + Ok(None) + }) + .await + } +} + impl std::fmt::Debug for EthApi { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("EthApi").finish_non_exhaustive() @@ -284,4 +358,6 @@ struct EthApiInner { starting_block: U256, /// The type that can spawn tasks which would otherwise block. task_spawner: Box, + /// Cached pending block if any + pending_block: Mutex>, } diff --git a/crates/rpc/rpc/src/eth/api/pending_block.rs b/crates/rpc/rpc/src/eth/api/pending_block.rs new file mode 100644 index 000000000000..8e57f893dca0 --- /dev/null +++ b/crates/rpc/rpc/src/eth/api/pending_block.rs @@ -0,0 +1,67 @@ +//! Support for building a pending block via local txpool. + +use reth_primitives::{SealedBlock, SealedHeader}; +use revm_primitives::{BlockEnv, CfgEnv}; +use std::time::Instant; + +/// Configured [BlockEnv] and [CfgEnv] for a pending block +#[derive(Debug, Clone)] +pub(crate) struct PendingBlockEnv { + /// Configured [CfgEnv] for the pending block. + pub(crate) cfg: CfgEnv, + /// Configured [BlockEnv] for the pending block. + pub(crate) block_env: BlockEnv, + /// Origin block for the config + pub(crate) origin: PendingBlockEnvOrigin, +} + +/// The origin for a configured [PendingBlockEnv] +#[derive(Clone, Debug)] +pub(crate) enum PendingBlockEnvOrigin { + /// The pending block as received from the CL. + ActualPending(SealedBlock), + /// The header of the latest block + DerivedFromLatest(SealedHeader), +} + +impl PendingBlockEnvOrigin { + /// Returns true if the origin is the actual pending block as received from the CL. + pub(crate) fn is_actual_pending(&self) -> bool { + matches!(self, PendingBlockEnvOrigin::ActualPending(_)) + } + + /// Consumes the type and returns the actual pending block. 
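When no pending block is available from the CL, the derived env above prices the synthetic child block via `next_block_base_fee`, i.e. the EIP-1559 update rule. A hedged standalone sketch of that rule (constants per the EIP, not reth's exact implementation):

    /// EIP-1559: the base fee moves by at most 1/8 per block, steering blocks
    /// toward the gas target (half the gas limit, elasticity multiplier 2).
    /// Sketch only; reth implements this on its Header type.
    fn next_base_fee(parent_base_fee: u64, gas_used: u64, gas_limit: u64) -> u64 {
        let target = gas_limit / 2;
        if gas_used == target {
            parent_base_fee
        } else if gas_used > target {
            let delta =
                (parent_base_fee as u128 * (gas_used - target) as u128 / target as u128 / 8) as u64;
            parent_base_fee + delta.max(1)
        } else {
            let delta =
                (parent_base_fee as u128 * (target - gas_used) as u128 / target as u128 / 8) as u64;
            parent_base_fee - delta
        }
    }

    fn main() {
        // a completely full block raises the base fee by 12.5%
        assert_eq!(next_base_fee(1_000_000_000, 30_000_000, 30_000_000), 1_125_000_000);
    }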
+ pub(crate) fn into_actual_pending(self) -> Option { + match self { + PendingBlockEnvOrigin::ActualPending(block) => Some(block), + _ => None, + } + } + + /// Returns the header this pending block is based on. + pub(crate) fn header(&self) -> &SealedHeader { + match self { + PendingBlockEnvOrigin::ActualPending(block) => &block.header, + PendingBlockEnvOrigin::DerivedFromLatest(header) => header, + } + } + + /// Consumes the type and returns the header this pending block is based on. + pub(crate) fn into_header(self) -> SealedHeader { + match self { + PendingBlockEnvOrigin::ActualPending(block) => block.header, + PendingBlockEnvOrigin::DerivedFromLatest(header) => header, + } + } +} + +/// In memory pending block for `pending` tag +#[derive(Debug)] +pub(crate) struct PendingBlock { + /// The cached pending block + pub(crate) block: SealedBlock, + /// Timestamp when the pending block is considered outdated + pub(crate) expires_at: Instant, +} + +impl PendingBlock {} diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 1cbd5df2ba28..c932ec825bcf 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -1,6 +1,7 @@ //! Contains RPC handler implementations specific to transactions use crate::{ eth::{ + api::pending_block::PendingBlockEnv, error::{EthApiError, EthResult, SignError}, revm_utils::{ inspect, inspect_and_return_db, prepare_call_env, replay_transactions_until, transact, @@ -239,31 +240,8 @@ where async fn evm_env_at(&self, at: BlockId) -> EthResult<(CfgEnv, BlockEnv, BlockId)> { if at.is_pending() { - let header = if let Some(pending) = self.provider().pending_header()? { - pending - } else { - // no pending block from the CL yet, so we use the latest block and modify the env - // values that we can - let mut latest = self - .provider() - .latest_header()? 
- .ok_or_else(|| EthApiError::UnknownBlockNumber)?; - - // child block - latest.number += 1; - // assumed child block is in the next slot - latest.timestamp += 12; - // base fee of the child block - latest.base_fee_per_gas = latest.next_block_base_fee(); - - latest - }; - - let mut cfg = CfgEnv::default(); - let mut block_env = BlockEnv::default(); - self.provider().fill_block_env_with_header(&mut block_env, &header)?; - self.provider().fill_cfg_env_with_header(&mut cfg, &header)?; - return Ok((cfg, block_env, header.hash.into())) + let PendingBlockEnv { cfg, block_env, origin } = self.pending_block_env_and_cfg()?; + Ok((cfg, block_env, origin.header().hash.into())) } else { // Use cached values if there is no pending block let block_hash = self @@ -652,6 +630,7 @@ where impl EthApi where Provider: BlockReaderIdExt + StateProviderFactory + EvmEnvProvider + 'static, + Network: 'static, { /// Helper function for `eth_getTransactionReceipt` /// @@ -675,7 +654,7 @@ impl EthApi where Pool: TransactionPool + 'static, Provider: BlockReaderIdExt + StateProviderFactory + EvmEnvProvider + 'static, - Network: 'static, + Network: Send + Sync + 'static, { pub(crate) fn sign_request( &self, From da13ef688bfed0e876b571fef15c6a3291c5b068 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 15 Jul 2023 16:28:26 +0200 Subject: [PATCH 173/722] fix: concurrent ipc driver impl (#3790) --- Cargo.lock | 24 -------- crates/rpc/ipc/Cargo.toml | 1 - crates/rpc/ipc/src/server/connection.rs | 82 ++++++++++++++++++++++++- crates/rpc/ipc/src/server/mod.rs | 54 +++++----------- 4 files changed, 96 insertions(+), 65 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0ee5b47fcf76..3bf50b5dcaae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5327,7 +5327,6 @@ dependencies = [ "tokio-util", "tower", "tracing", - "tracing-test", ] [[package]] @@ -7400,29 +7399,6 @@ dependencies = [ "tracing-log", ] -[[package]] -name = "tracing-test" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a2c0ff408fe918a94c428a3f2ad04e4afd5c95bbc08fcf868eff750c15728a4" -dependencies = [ - "lazy_static", - "tracing-core", - "tracing-subscriber", - "tracing-test-macro", -] - -[[package]] -name = "tracing-test-macro" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "258bc1c4f8e2e73a977812ab339d503e6feeb92700f6d07a6de4d321522d5c08" -dependencies = [ - "lazy_static", - "quote 1.0.28", - "syn 1.0.109", -] - [[package]] name = "triehash" version = "0.8.4" diff --git a/crates/rpc/ipc/Cargo.toml b/crates/rpc/ipc/Cargo.toml index 417dfd8239d0..3c6d3832b532 100644 --- a/crates/rpc/ipc/Cargo.toml +++ b/crates/rpc/ipc/Cargo.toml @@ -30,5 +30,4 @@ bytes = { workspace = true } thiserror = { workspace = true } [dev-dependencies] -tracing-test = "0.2" tokio-stream = { workspace = true, features = ["sync"] } diff --git a/crates/rpc/ipc/src/server/connection.rs b/crates/rpc/ipc/src/server/connection.rs index ff0bd4c00abf..e502a27de7ac 100644 --- a/crates/rpc/ipc/src/server/connection.rs +++ b/crates/rpc/ipc/src/server/connection.rs @@ -1,8 +1,10 @@ //! A IPC connection. 
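The core of this fix is that the connection is now driven by a dedicated future that keeps every in-flight RPC call in a `FuturesUnordered` and drains completions as they arrive, instead of awaiting each call inline before reading the next request. A minimal self-contained sketch of that pattern (toy futures standing in for the IPC service calls):

    use futures::{executor::block_on, stream::FuturesUnordered, StreamExt};

    // stand-in for a service call; all pushed futures share this one type
    async fn call(id: u32) -> String {
        format!("response {id}")
    }

    fn main() {
        block_on(async {
            let mut pending = FuturesUnordered::new();
            // new calls can be pushed while earlier ones are still running
            pending.push(call(1));
            pending.push(call(2));
            // completions are yielded in finish order, which is what makes
            // request handling concurrent rather than strictly sequential
            while let Some(resp) = pending.next().await {
                println!("{resp}");
            }
        });
    }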
use crate::stream_codec::StreamCodec; -use futures::{ready, Sink, Stream, StreamExt}; +use futures::{ready, stream::FuturesUnordered, Sink, Stream, StreamExt}; use std::{ + collections::VecDeque, + future::Future, io, marker::PhantomData, pin::Pin, @@ -10,6 +12,7 @@ use std::{ }; use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; use tokio_util::codec::Framed; +use tower::Service; pub(crate) type JsonRpcStream = Framed; @@ -113,3 +116,80 @@ where self.project().0.poll_close(cx) } } + +/// Drives an [IpcConn] forward. +/// +/// This forwards received requests from the connection to the service and sends responses to the +/// connection. +/// +/// This future terminates when the connection is closed. +#[pin_project::pin_project] +#[must_use = "futures do nothing unless you `.await` or poll them"] +pub(crate) struct IpcConnDriver { + #[pin] + pub(crate) conn: IpcConn>, + pub(crate) service: S, + #[pin] + pub(crate) pending_calls: FuturesUnordered, + pub(crate) items: VecDeque, +} + +impl IpcConnDriver { + /// Add a new item to the send queue. + pub(crate) fn push_back(&mut self, item: String) { + self.items.push_back(item); + } +} + +impl Future for IpcConnDriver +where + S: Service> + Send + 'static, + S::Error: Into>, + S::Future: Send, + T: AsyncRead + AsyncWrite + Unpin + Send + 'static, +{ + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let mut this = self.project(); + + loop { + // process calls + if !this.pending_calls.is_empty() { + while let Poll::Ready(Some(res)) = this.pending_calls.as_mut().poll_next(cx) { + let item = match res { + Ok(Some(resp)) => resp, + Ok(None) => continue, + Err(err) => err.into().to_string(), + }; + this.items.push_back(item); + } + } + + // write to the sink + while this.conn.as_mut().poll_ready(cx).is_ready() { + if let Some(item) = this.items.pop_front() { + if let Err(err) = this.conn.as_mut().start_send(item) { + tracing::warn!("IPC response failed: {:?}", err); + return Poll::Ready(()) + } + } else { + break + } + } + + // read from the stream + match ready!(this.conn.as_mut().poll_next(cx)) { + Some(Ok(item)) => { + let call = this.service.call(item); + this.pending_calls.push(call); + } + Some(Err(err)) => { + tracing::warn!("IPC request failed: {:?}", err); + return Poll::Ready(()) + } + None => return Poll::Ready(()), + } + } + } +} diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index d30a5ae76799..7911f037d991 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -4,7 +4,7 @@ use crate::server::{ connection::{Incoming, IpcConn, JsonRpcStream}, future::{ConnectionGuard, FutureDriver, StopHandle}, }; -use futures::{FutureExt, SinkExt, Stream, StreamExt}; +use futures::{FutureExt, Stream, StreamExt}; use jsonrpsee::{ core::{Error, TEN_MB_SIZE_BYTES}, server::{logger::Logger, IdProvider, RandomIntegerIdProvider, ServerHandle}, @@ -25,6 +25,7 @@ use tower::{layer::util::Identity, Service}; use tracing::{debug, trace, warn}; // re-export so can be used during builder setup +use crate::server::connection::IpcConnDriver; pub use parity_tokio_ipc::Endpoint; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; @@ -285,7 +286,7 @@ impl Service for TowerService { /// Spawns the IPC connection onto a new task async fn spawn_connection( conn: IpcConn>, - mut service: S, + service: S, mut stop_handle: StopHandle, rx: mpsc::Receiver, ) where @@ -296,51 +297,29 @@ async fn spawn_connection( { let task = tokio::task::spawn(async move { let 
rx_item = ReceiverStream::new(rx); + let conn = IpcConnDriver { + conn, + service, + pending_calls: Default::default(), + items: Default::default(), + }; tokio::pin!(conn, rx_item); loop { - let item = tokio::select! { - res = conn.next() => { - match res { - Some(Ok(request)) => { - // handle the RPC request - match service.call(request).await { - Ok(Some(resp)) => { - resp - }, - Ok(None) => { - continue - }, - Err(err) => err.into().to_string(), - } - }, - Some(Err(e)) => { - tracing::warn!("IPC request failed: {:?}", e); - break - } - None => { - return - } - } + tokio::select! { + _ = &mut conn => { + break } item = rx_item.next() => { - match item { - Some(item) => item, - None => { - continue - } + if let Some(item) = item { + conn.push_back(item); } } _ = stop_handle.shutdown() => { + // shutdown break } }; - - // send item over ipc - if let Err(err) = conn.send(item).await { - warn!("Failed to send IPC response: {:?}", err); - break - } } }); @@ -593,7 +572,6 @@ mod tests { use parity_tokio_ipc::dummy_endpoint; use tokio::sync::broadcast; use tokio_stream::wrappers::BroadcastStream; - use tracing_test::traced_test; async fn pipe_from_stream_with_bounded_buffer( pending: PendingSubscriptionSink, @@ -641,7 +619,6 @@ mod tests { } #[tokio::test] - #[traced_test] async fn test_rpc_request() { let endpoint = dummy_endpoint(); let server = Builder::default().build(&endpoint).unwrap(); @@ -672,7 +649,6 @@ mod tests { } #[tokio::test(flavor = "multi_thread")] - #[traced_test] async fn test_rpc_subscription() { let endpoint = dummy_endpoint(); let server = Builder::default().build(&endpoint).unwrap(); From 64d58456dafbd87f85af21b690b5e915732a125e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 15 Jul 2023 16:48:18 +0200 Subject: [PATCH 174/722] fix: rpc cap block range correctly (#3791) Co-authored-by: Georgios Konstantopoulos --- crates/rpc/rpc/src/eth/api/fees.rs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/crates/rpc/rpc/src/eth/api/fees.rs b/crates/rpc/rpc/src/eth/api/fees.rs index c9a1513535ac..49b0291a1195 100644 --- a/crates/rpc/rpc/src/eth/api/fees.rs +++ b/crates/rpc/rpc/src/eth/api/fees.rs @@ -67,9 +67,11 @@ where return Err(EthApiError::UnknownBlockNumber) }; - // Check that we would not be querying outside of genesis - if end_block < block_count { - return Err(EthApiError::InvalidBlockRange) + // need to add 1 to the end block to get the correct (inclusive) range + let end_block_plus = end_block + 1; + // Ensure that we would not be querying outside of genesis + if end_block_plus < block_count { + block_count = end_block_plus; } // If reward percentiles were specified, we need to validate that they are monotonically @@ -86,7 +88,8 @@ where // // Treat a request for 1 block as a request for `newest_block..=newest_block`, // otherwise `newest_block - 2 - let start_block = end_block - block_count + 1; + // SAFETY: We ensured that block count is capped + let start_block = end_block_plus - block_count; let headers = self.provider().sealed_headers_range(start_block..=end_block)?; if headers.len() != block_count as usize { return Err(EthApiError::InvalidBlockRange) From 9a00f04d77714934f25225c961cf1b5fd4de2227 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 15 Jul 2023 17:26:44 +0200 Subject: [PATCH 175/722] feat: reload dirty accounts if pool drifts (#3732) Co-authored-by: Georgios Konstantopoulos --- Cargo.lock | 4 +- bin/reth/src/node/mod.rs | 2 + crates/transaction-pool/src/lib.rs | 15 +- crates/transaction-pool/src/maintain.rs | 
182 ++++++++++++++++++--- crates/transaction-pool/src/noop.rs | 6 +- crates/transaction-pool/src/pool/mod.rs | 22 ++- crates/transaction-pool/src/pool/txpool.rs | 43 +++-- crates/transaction-pool/src/traits.rs | 15 +- 8 files changed, 239 insertions(+), 50 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3bf50b5dcaae..3aa656a4cddf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4296,9 +4296,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" +checksum = "4c40d25201921e5ff0c862a505c6557ea88568a4e3ace775ab55e93f2f4f9d57" [[package]] name = "pin-utils" diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index d2ed2825610e..1c5cd3a7fce5 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -241,6 +241,8 @@ impl Command { client, pool, chain_events, + ctx.task_executor.clone(), + Default::default(), ), ); debug!(target: "reth::cli", "Spawned txpool maintenance task"); diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index f5c92b8086d2..a87e0600752c 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -132,7 +132,7 @@ //! ); //! //! // spawn a task that listens for new blocks and updates the pool's transactions, mined transactions etc.. -//! tokio::task::spawn( maintain_transaction_pool_future(client, pool, stream)); +//! tokio::task::spawn( maintain_transaction_pool_future(client, pool, stream, TokioTaskExecutor::default(), Default::default())); //! //! # } //! ``` @@ -145,7 +145,10 @@ use crate::pool::PoolInner; use aquamarine as _; use reth_primitives::{Address, TxHash, U256}; use reth_provider::StateProviderFactory; -use std::{collections::HashMap, sync::Arc}; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; use tokio::sync::mpsc::Receiver; use tracing::{instrument, trace}; @@ -438,6 +441,10 @@ where ) -> Vec>> { self.pool.get_transactions_by_sender(sender) } + + fn unique_senders(&self) -> HashSet
<Address>
{ + self.pool.unique_senders() + } } impl TransactionPoolExt for Pool @@ -454,6 +461,10 @@ where fn on_canonical_state_change(&self, update: CanonicalStateUpdate) { self.pool.on_canonical_state_change(update); } + + fn update_accounts(&self, accounts: Vec) { + self.pool.update_accounts(accounts); + } } impl Clone for Pool { diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 11d9871f4a1f..e8b8a8f1ed87 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -5,33 +5,57 @@ use crate::{ traits::{CanonicalStateUpdate, ChangedAccount, TransactionPoolExt}, BlockInfo, TransactionPool, }; -use futures_util::{future::BoxFuture, FutureExt, Stream, StreamExt}; +use futures_util::{ + future::{BoxFuture, Fuse, FusedFuture}, + FutureExt, Stream, StreamExt, +}; use reth_primitives::{Address, BlockHash, BlockNumberOrTag, FromRecoveredTransaction}; use reth_provider::{BlockReaderIdExt, CanonStateNotification, PostState, StateProviderFactory}; +use reth_tasks::TaskSpawner; use std::{ borrow::Borrow, collections::HashSet, hash::{Hash, Hasher}, }; -use tracing::debug; +use tokio::sync::oneshot; +use tracing::{debug, trace}; + +/// Additional settings for maintaining the transaction pool +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct MaintainPoolConfig { + /// Maximum (reorg) depth we handle when updating the transaction pool: `new.number - + /// last_seen.number` + /// + /// Default: 64 (2 epochs) + pub max_update_depth: u64, + /// Maximum number of accounts to reload from state at once when updating the transaction pool. + /// + /// Default: 250 + pub max_reload_accounts: usize, +} -/// Maximum (reorg) depth we handle when updating the transaction pool: `new.number - -/// last_seen.number` -const MAX_UPDATE_DEPTH: u64 = 64; +impl Default for MaintainPoolConfig { + fn default() -> Self { + Self { max_update_depth: 64, max_reload_accounts: 250 } + } +} /// Returns a spawnable future for maintaining the state of the transaction pool. -pub fn maintain_transaction_pool_future( +pub fn maintain_transaction_pool_future( client: Client, pool: P, events: St, + task_spawner: Tasks, + config: MaintainPoolConfig, ) -> BoxFuture<'static, ()> where - Client: StateProviderFactory + BlockReaderIdExt + Send + 'static, + Client: StateProviderFactory + BlockReaderIdExt + Clone + Send + 'static, P: TransactionPoolExt + 'static, St: Stream + Send + Unpin + 'static, + Tasks: TaskSpawner + 'static, { async move { - maintain_transaction_pool(client, pool, events).await; + maintain_transaction_pool(client, pool, events, task_spawner, config).await; } .boxed() } @@ -39,14 +63,20 @@ where /// Maintains the state of the transaction pool by handling new blocks and reorgs. 
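The new knobs default to `max_update_depth: 64` (two epochs) and `max_reload_accounts: 250`. A hedged usage sketch overriding the chunk size (the import path is an assumption; only the struct and field names come from this patch):

    // hypothetical re-export location of MaintainPoolConfig
    use reth_transaction_pool::maintain::MaintainPoolConfig;

    fn main() {
        let config = MaintainPoolConfig { max_update_depth: 64, max_reload_accounts: 100 };
        // the struct derives PartialEq/Eq, so configs compare directly
        assert_ne!(config, MaintainPoolConfig::default());
    }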
/// /// This listens for any new blocks and reorgs and updates the transaction pool's state accordingly -#[allow(unused)] -pub async fn maintain_transaction_pool(client: Client, pool: P, mut events: St) -where - Client: StateProviderFactory + BlockReaderIdExt + Send + 'static, +pub async fn maintain_transaction_pool( + client: Client, + pool: P, + mut events: St, + task_spawner: Tasks, + config: MaintainPoolConfig, +) where + Client: StateProviderFactory + BlockReaderIdExt + Clone + Send + 'static, P: TransactionPoolExt + 'static, St: Stream + Send + Unpin + 'static, + Tasks: TaskSpawner + 'static, { - let mut metrics = MaintainPoolMetrics::default(); + let metrics = MaintainPoolMetrics::default(); + let MaintainPoolConfig { max_update_depth, max_reload_accounts } = config; // ensure the pool points to latest state if let Ok(Some(latest)) = client.block_by_number_or_tag(BlockNumberOrTag::Latest) { let latest = latest.seal_slow(); @@ -64,17 +94,100 @@ where // keeps track of the state of the pool wrt to blocks let mut maintained_state = MaintainedPoolState::InSync; + // the future that reloads accounts from state + let mut reload_accounts_fut = Fuse::terminated(); + + // The update loop that waits for new blocks and reorgs and performs pool updated // Listen for new chain events and derive the update action for the pool loop { + trace!(target = "txpool", state=?maintained_state, "awaiting new block or reorg"); + metrics.set_dirty_accounts_len(dirty_addresses.len()); + let pool_info = pool.block_info(); - let Some(event) = events.next().await else { break }; + // after performing a pool update after a new block we have some time to properly update + // dirty accounts and correct if the pool drifted from current state, for example after + // restart or a pipeline run + if maintained_state.is_drifted() { + // assuming all senders are dirty + dirty_addresses = pool.unique_senders(); + maintained_state = MaintainedPoolState::InSync; + } - let pool_info = pool.block_info(); + // if we have accounts that are out of sync with the pool, we reload them in chunks + if !dirty_addresses.is_empty() && reload_accounts_fut.is_terminated() { + let (tx, rx) = oneshot::channel(); + let c = client.clone(); + let at = pool_info.last_seen_block_hash; + let fut = if dirty_addresses.len() > max_reload_accounts { + // need to chunk accounts to reload + let accs_to_reload = + dirty_addresses.iter().copied().take(max_reload_accounts).collect::>(); + for acc in &accs_to_reload { + // make sure we remove them from the dirty set + dirty_addresses.remove(acc); + } + async move { + let res = load_accounts(c, at, accs_to_reload.into_iter()); + let _ = tx.send(res); + } + .boxed() + } else { + // can fetch all dirty accounts at once + let accs_to_reload = std::mem::take(&mut dirty_addresses); + async move { + let res = load_accounts(c, at, accs_to_reload.into_iter()); + let _ = tx.send(res); + } + .boxed() + }; + reload_accounts_fut = rx.fuse(); + task_spawner.spawn_blocking(fut); + } + + // outcomes of the futures we are waiting on + let mut event = None; + let mut reloaded = None; - // TODO from time to time re-check the unique accounts in the pool and remove and resync - // based on the tracked state + // select of account reloads and new canonical state updates which should arrive at the rate + // of the block time (12s) + tokio::select! 
{ + res = &mut reload_accounts_fut => { + reloaded = Some(res); + } + ev = events.next() => { + if ev.is_none() { + // the stream ended, we are done + break; + } + event = ev; + } + } + // handle the result of the account reload + match reloaded { + Some(Ok(Ok(LoadedAccounts { accounts, failed_to_load }))) => { + // reloaded accounts successfully + // extend accounts we failed to load from database + dirty_addresses.extend(failed_to_load); + // update the pool with the loaded accounts + pool.update_accounts(accounts); + } + Some(Ok(Err(res))) => { + // Failed to load accounts from state + let (accs, err) = *res; + debug!(target = "txpool", ?err, "failed to load accounts"); + dirty_addresses.extend(accs); + } + Some(Err(_)) => { + // failed to receive the accounts, sender dropped, only possible if task panicked + maintained_state = MaintainedPoolState::Drifted; + } + None => {} + } + + // handle the new block or reorg + let Some(event) = event else { continue }; match event { CanonStateNotification::Reorg { old, new } => { let (old_blocks, old_state) = old.inner(); @@ -88,7 +201,7 @@ where new_first.parent_hash == pool_info.last_seen_block_hash) { // the new block points to a higher block than the oldest block in the old chain - maintained_state = MaintainedPoolState::Drift; + maintained_state = MaintainedPoolState::Drifted; } // base fee for the next block: `new_tip+1` @@ -108,7 +221,7 @@ where // for these we need to fetch the nonce+balance from the db at the new tip let mut changed_accounts = - match load_accounts(&client, new_tip.hash, missing_changed_acc) { + match load_accounts(client.clone(), new_tip.hash, missing_changed_acc) { Ok(LoadedAccounts { accounts, failed_to_load }) => { // extend accounts we failed to load from database dirty_addresses.extend(failed_to_load); @@ -118,6 +231,7 @@ where Err(err) => { let (addresses, err) = *err; debug!( + target = "txpool", ?err, "failed to load missing changed accounts at new tip: {:?}", new_tip.hash @@ -170,13 +284,20 @@ where let pending_block_base_fee = tip.next_block_base_fee().unwrap_or_default() as u128; let first_block = blocks.first(); + trace!( + target = "txpool", + first = first_block.number, + tip = tip.number, + pool_block = pool_info.last_seen_block_number, + "update pool on new commit" + ); // check if the depth is too large and should be skipped, this could happen after // initial sync or long re-sync let depth = tip.number.abs_diff(pool_info.last_seen_block_number); - if depth > MAX_UPDATE_DEPTH { - maintained_state = MaintainedPoolState::Drift; - debug!(?depth, "skipping deep canonical update"); + if depth > max_update_depth { + maintained_state = MaintainedPoolState::Drifted; + debug!(target = "txpool", ?depth, "skipping deep canonical update"); let info = BlockInfo { last_seen_block_hash: tip.hash, last_seen_block_number: tip.number, @@ -200,7 +321,7 @@ where // we received a new canonical chain commit but the commit is not canonical with // the pool's block, this could happen after initial sync or // long re-sync - maintained_state = MaintainedPoolState::Drift; + maintained_state = MaintainedPoolState::Drifted; } // Canonical update @@ -219,12 +340,19 @@ where /// Keeps track of the pool's state, whether the accounts in the pool are in sync with the actual /// state. 
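For intuition, the chunking rule used by the reload loop above can be isolated into a small helper. This sketch is hypothetical (`Address` is a stand-in for `reth_primitives::Address`) but mirrors the take-then-remove logic:

    use std::collections::HashSet;

    type Address = [u8; 20]; // stand-in for reth_primitives::Address

    /// One reload round: drain at most `max_reload_accounts` addresses from
    /// the dirty set; whatever remains is picked up by a later round.
    fn next_reload_chunk(
        dirty: &mut HashSet<Address>,
        max_reload_accounts: usize,
    ) -> Vec<Address> {
        let chunk: Vec<Address> = dirty.iter().copied().take(max_reload_accounts).collect();
        for addr in &chunk {
            dirty.remove(addr);
        }
        chunk
    }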
@@ -219,12 +340,19 @@ where

 /// Keeps track of the pool's state, whether the accounts in the pool are in sync with the actual
 /// state.
-#[derive(Eq, PartialEq)]
+#[derive(Debug, Eq, PartialEq)]
 enum MaintainedPoolState {
-    /// Pool is assumed to be in sync with the state
+    /// Pool is assumed to be in sync with the current state
     InSync,
     /// Pool could be out of sync with the state
-    Drift,
+    Drifted,
+}
+
+impl MaintainedPoolState {
+    /// Returns `true` if the pool is assumed to be out of sync with the current state.
+    fn is_drifted(&self) -> bool {
+        matches!(self, MaintainedPoolState::Drifted)
+    }
 }

 /// A unique ChangedAccount identified by its address that can be used for deduplication
@@ -263,7 +391,7 @@ struct LoadedAccounts {
 ///
 /// Note: this expects _unique_ addresses
 fn load_accounts<Client, I>(
-    client: &Client,
+    client: Client,
     at: BlockHash,
     addresses: I,
 ) -> Result<LoadedAccounts, Box<(HashSet<Address>, reth_interfaces::Error)>>
diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs
index 0ccec210c131..a45d71662a7a 100644
--- a/crates/transaction-pool/src/noop.rs
+++ b/crates/transaction-pool/src/noop.rs
@@ -10,7 +10,7 @@ use crate::{
     TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction,
 };
 use reth_primitives::{Address, TxHash};
-use std::{marker::PhantomData, sync::Arc};
+use std::{collections::HashSet, marker::PhantomData, sync::Arc};
 use tokio::sync::{mpsc, mpsc::Receiver};

 /// A [`TransactionPool`] implementation that does nothing.
@@ -150,6 +150,10 @@ impl TransactionPool for NoopTransactionPool {
     ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
         vec![]
     }
+
+    fn unique_senders(&self) -> HashSet<Address> {
+        Default::default()
+    }
 }

 /// A [`TransactionValidator`] that does nothing.
diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs
index e7f167c82a6e..560c6ee99488 100644
--- a/crates/transaction-pool/src/pool/mod.rs
+++ b/crates/transaction-pool/src/pool/mod.rs
@@ -90,12 +90,13 @@ use std::{
     time::Instant,
 };
 use tokio::sync::mpsc;
-use tracing::debug;
+use tracing::{debug, trace};

 mod events;
 pub use events::{FullTransactionEvent, TransactionEvent};

 mod listener;
+use crate::pool::txpool::UpdateOutcome;
 pub use listener::{AllTransactionsEvents, TransactionEvents};

 mod best;
@@ -163,6 +164,11 @@ where
         self.identifiers.write().sender_id_or_create(addr)
     }

+    /// Returns all senders in the pool
+    pub(crate) fn unique_senders(&self) -> HashSet<Address>
{ + self.pool.read().unique_senders() + } + /// Converts the changed accounts to a map of sender ids to sender info (internal identifier /// used for accounts) fn changed_senders( @@ -243,6 +249,8 @@ where /// Updates the entire pool after a new block was executed. pub(crate) fn on_canonical_state_change(&self, update: CanonicalStateUpdate) { + trace!(target: "txpool", %update, "updating pool on canonical state change"); + let CanonicalStateUpdate { hash, number, @@ -264,6 +272,18 @@ where self.notify_on_new_state(outcome); } + /// Performs account updates on the pool. + /// + /// This will either promote or discard transactions based on the new account state. + pub(crate) fn update_accounts(&self, accounts: Vec) { + let changed_senders = self.changed_senders(accounts.into_iter()); + let UpdateOutcome { promoted, discarded } = + self.pool.write().update_accounts(changed_senders); + let mut listener = self.event_listener.write(); + promoted.iter().for_each(|tx| listener.pending(tx, None)); + discarded.iter().for_each(|tx| listener.discarded(tx)); + } + /// Add a single validated transaction into the pool. /// /// Note: this is only used internally by [`Self::add_transactions()`], all new transaction(s) diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 6b1bad86442f..b6a1bc174da1 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -19,11 +19,11 @@ use crate::{ use fnv::FnvHashMap; use reth_primitives::{ constants::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}, - TxHash, H256, + Address, TxHash, H256, }; use std::{ cmp::Ordering, - collections::{btree_map::Entry, hash_map, BTreeMap, HashMap}, + collections::{btree_map::Entry, hash_map, BTreeMap, HashMap, HashSet}, fmt, ops::Bound::{Excluded, Unbounded}, sync::Arc, @@ -111,6 +111,11 @@ impl TxPool { &self.all_transactions } + /// Returns all senders in the pool + pub(crate) fn unique_senders(&self) -> HashSet
{ + self.all_transactions.txs.values().map(|tx| tx.transaction.sender()).collect() + } + /// Returns stats about the size of pool. pub(crate) fn size(&self) -> PoolSize { PoolSize { @@ -227,6 +232,22 @@ impl TxPool { self.all_transactions.txs_iter(sender).map(|(_, tx)| Arc::clone(&tx.transaction)).collect() } + /// Updates the transactions for the changed senders. + pub(crate) fn update_accounts( + &mut self, + changed_senders: HashMap, + ) -> UpdateOutcome { + // track changed accounts + self.sender_info.extend(changed_senders.clone()); + // Apply the state changes to the total set of transactions which triggers sub-pool updates. + let updates = self.all_transactions.update(changed_senders); + // Process the sub-pool updates + let update = self.process_updates(updates); + // update the metrics after the update + self.update_size_metrics(); + update + } + /// Updates the entire pool after a new block was mined. /// /// This removes all mined transactions, updates according to the new base fee and rechecks @@ -237,9 +258,6 @@ impl TxPool { mined_transactions: Vec, changed_senders: HashMap, ) -> OnNewCanonicalStateOutcome { - // track changed accounts - self.sender_info.extend(changed_senders.clone()); - // update block info let block_hash = block_info.last_seen_block_hash; self.all_transactions.set_block_info(block_info); @@ -252,14 +270,7 @@ impl TxPool { } } - // Apply the state changes to the total set of transactions which triggers sub-pool updates. - let updates = self.all_transactions.update(changed_senders); - - // Process the sub-pool updates - let UpdateOutcome { promoted, discarded } = self.process_updates(updates); - - // update the metrics after the update - self.update_size_metrics(); + let UpdateOutcome { promoted, discarded } = self.update_accounts(changed_senders); self.metrics.performed_state_updates.increment(1); @@ -1281,11 +1292,11 @@ impl PoolInternalTransaction { /// Tracks the result after updating the pool #[derive(Default, Debug)] -pub struct UpdateOutcome { +pub(crate) struct UpdateOutcome { /// transactions promoted to the ready queue - promoted: Vec, + pub(crate) promoted: Vec, /// transaction that failed and became discarded - discarded: Vec, + pub(crate) discarded: Vec, } /// Represents the outcome of a prune diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index b5f5d3186341..368492a28f5c 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -11,7 +11,7 @@ use reth_primitives::{ }; use reth_rlp::Encodable; use std::{ - collections::HashMap, + collections::{HashMap, HashSet}, fmt, pin::Pin, sync::Arc, @@ -242,6 +242,9 @@ pub trait TransactionPool: Send + Sync + Clone { &self, sender: Address, ) -> Vec>>; + + /// Returns a set of all senders of transactions in the pool + fn unique_senders(&self) -> HashSet
; } /// Extension for [TransactionPool] trait that allows to set the current block info. @@ -256,6 +259,9 @@ pub trait TransactionPoolExt: TransactionPool { /// For example the base fee of the pending block is determined after a block is mined which /// affects the dynamic fee requirement of pending transactions in the pool. fn on_canonical_state_change(&self, update: CanonicalStateUpdate); + + /// Updates the accounts in the pool + fn update_accounts(&self, accounts: Vec); } /// A Helper type that bundles all transactions in the pool. @@ -389,6 +395,13 @@ pub struct CanonicalStateUpdate { pub mined_transactions: Vec, } +impl fmt::Display for CanonicalStateUpdate { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{{ hash: {}, number: {}, pending_block_base_fee: {}, changed_accounts: {}, mined_transactions: {} }}", + self.hash, self.number, self.pending_block_base_fee, self.changed_accounts.len(), self.mined_transactions.len()) + } +} + /// Represents a changed account #[derive(Clone, Copy, Debug, PartialEq, Eq, Default)] pub struct ChangedAccount { From 99a8e0f98211be6afb49e8e04ca61ddd7e352583 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 15 Jul 2023 18:48:48 +0200 Subject: [PATCH 176/722] feat(txpool): add best_with_base_fee (#3737) --- crates/transaction-pool/src/lib.rs | 7 ++++ crates/transaction-pool/src/noop.rs | 7 ++++ crates/transaction-pool/src/ordering.rs | 6 +++ crates/transaction-pool/src/pool/best.rs | 44 ++++++++++++++++++++- crates/transaction-pool/src/pool/mod.rs | 10 +++++ crates/transaction-pool/src/pool/parked.rs | 32 ++++++++++++--- crates/transaction-pool/src/pool/pending.rs | 38 ++++++++++++++++++ crates/transaction-pool/src/pool/txpool.rs | 25 ++++++++++++ crates/transaction-pool/src/traits.rs | 9 +++++ 9 files changed, 171 insertions(+), 7 deletions(-) diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index a87e0600752c..93ebe6fd1a08 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -397,6 +397,13 @@ where Box::new(self.pool.best_transactions()) } + fn best_transactions_with_base_fee( + &self, + base_fee: u128, + ) -> Box>>> { + self.pool.best_transactions_with_base_fee(base_fee) + } + fn pending_transactions(&self) -> Vec>> { self.pool.pending_transactions() } diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index a45d71662a7a..c03a475e50a7 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -110,6 +110,13 @@ impl TransactionPool for NoopTransactionPool { Box::new(std::iter::empty()) } + fn best_transactions_with_base_fee( + &self, + _: u128, + ) -> Box>>> { + Box::new(std::iter::empty()) + } + fn pending_transactions(&self) -> Vec>> { vec![] } diff --git a/crates/transaction-pool/src/ordering.rs b/crates/transaction-pool/src/ordering.rs index 34044588de2c..40ee1700661d 100644 --- a/crates/transaction-pool/src/ordering.rs +++ b/crates/transaction-pool/src/ordering.rs @@ -45,3 +45,9 @@ impl Default for GasCostOrdering { Self(Default::default()) } } + +impl Clone for GasCostOrdering { + fn clone(&self) -> Self { + Self::default() + } +} diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index c0ed8786a312..fc6ff5595666 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -1,7 +1,7 @@ use crate::{ identifier::TransactionId, pool::pending::{PendingTransaction, PendingTransactionRef}, - 
TransactionOrdering, ValidPoolTransaction, + PoolTransaction, TransactionOrdering, ValidPoolTransaction, }; use reth_primitives::H256 as TxHash; use std::{ @@ -10,6 +10,40 @@ use std::{ }; use tracing::debug; +/// An iterator that returns transactions that can be executed on the current state (*best* +/// transactions). +/// +/// This is a wrapper around [`BestTransactions`] that also enforces a specific basefee. +/// +/// This iterator guarantees that all transaction it returns satisfy the base fee. +pub(crate) struct BestTransactionsWithBasefee { + pub(crate) best: BestTransactions, + pub(crate) base_fee: u128, +} + +impl crate::traits::BestTransactions for BestTransactionsWithBasefee { + fn mark_invalid(&mut self, tx: &Self::Item) { + BestTransactions::mark_invalid(&mut self.best, tx) + } +} + +impl Iterator for BestTransactionsWithBasefee { + type Item = Arc>; + + fn next(&mut self) -> Option { + // find the next transaction that satisfies the base fee + loop { + let best = self.best.next()?; + if best.transaction.max_fee_per_gas() < self.base_fee { + // tx violates base fee, mark it as invalid and continue + crate::traits::BestTransactions::mark_invalid(self, &best); + } else { + return Some(best) + } + } + } +} + /// An iterator that returns transactions that can be executed on the current state (*best* /// transactions). /// @@ -35,6 +69,14 @@ impl BestTransactions { pub(crate) fn mark_invalid(&mut self, tx: &Arc>) { self.invalid.insert(*tx.hash()); } + + /// Returns the ancestor the given transaction, the transaction with `nonce - 1`. + /// + /// Note: for a transaction with nonce higher than the current on chain nonce this will always + /// return an ancestor since all transaction in this pool are gapless. + pub(crate) fn ancestor(&self, id: &TransactionId) -> Option<&Arc>> { + self.all.get(&id.unchecked_ancestor()?) + } } impl crate::traits::BestTransactions for BestTransactions { diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 560c6ee99488..2c055bf9f758 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -462,6 +462,16 @@ where self.pool.read().best_transactions() } + /// Returns an iterator that yields transactions that are ready to be included in the block with + /// the given base fee. + pub(crate) fn best_transactions_with_base_fee( + &self, + base_fee: u128, + ) -> Box>>> + { + self.pool.read().best_transactions_with_base_fee(base_fee) + } + /// Returns all transactions from the pending sub-pool pub(crate) fn pending_transactions(&self) -> Vec>> { self.pool.read().pending_transactions() diff --git a/crates/transaction-pool/src/pool/parked.rs b/crates/transaction-pool/src/pool/parked.rs index d6a62685cfbf..0916886d130c 100644 --- a/crates/transaction-pool/src/pool/parked.rs +++ b/crates/transaction-pool/src/pool/parked.rs @@ -110,13 +110,24 @@ impl ParkedPool { } impl ParkedPool> { - /// Removes all transactions and their dependent transaction from the subpool that no longer - /// satisfy the given basefee. + /// Returns all transactions that satisfy the given basefee. /// - /// Note: the transactions are not returned in a particular order. 
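For intuition, the skip-don't-stop behavior of `BestTransactionsWithBasefee::next` above can be modeled with a plain iterator adapter. This simplified analogue uses bare `u128` fees instead of the real pool types:

struct WithFeeFloor<I> {
    inner: I,
    floor: u128,
}

impl<I: Iterator<Item = u128>> Iterator for WithFeeFloor<I> {
    type Item = u128;

    fn next(&mut self) -> Option<u128> {
        // keep draining the inner iterator until an item satisfies the floor
        loop {
            let fee = self.inner.next()?;
            if fee >= self.floor {
                return Some(fee)
            }
            // in the real pool this is where `mark_invalid` also rules out the
            // transaction's descendants
        }
    }
}

fn main() {
    let fees = vec![120u128, 80, 150, 30, 200];
    let best = WithFeeFloor { inner: fees.into_iter(), floor: 100 };
    assert_eq!(best.collect::<Vec<_>>(), vec![120, 150, 200]);
}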
- pub(crate) fn enforce_basefee(&mut self, basefee: u128) -> Vec>> { - let mut to_remove = Vec::new(); + /// Note: this does _not_ remove the transactions + pub(crate) fn satisfy_base_fee_transactions( + &self, + basefee: u128, + ) -> Vec>> { + let ids = self.satisfy_base_fee_ids(basefee); + let mut txs = Vec::with_capacity(ids.len()); + for id in ids { + txs.push(self.by_id.get(&id).expect("transaction exists").transaction.clone().into()); + } + txs + } + /// Returns all transactions that satisfy the given basefee. + fn satisfy_base_fee_ids(&self, basefee: u128) -> Vec { + let mut transactions = Vec::new(); { let mut iter = self.by_id.iter().peekable(); @@ -130,10 +141,19 @@ impl ParkedPool> { iter.next(); } } else { - to_remove.push(*id); + transactions.push(*id); } } } + transactions + } + + /// Removes all transactions and their dependent transaction from the subpool that no longer + /// satisfy the given basefee. + /// + /// Note: the transactions are not returned in a particular order. + pub(crate) fn enforce_basefee(&mut self, basefee: u128) -> Vec>> { + let to_remove = self.satisfy_base_fee_ids(basefee); let mut removed = Vec::with_capacity(to_remove.len()); for id in to_remove { diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index 1d6d841b8f04..71b873a3022f 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -4,6 +4,7 @@ use crate::{ TransactionOrdering, ValidPoolTransaction, }; +use crate::pool::best::BestTransactionsWithBasefee; use std::{ cmp::Ordering, collections::{BTreeMap, BTreeSet}, @@ -84,6 +85,43 @@ impl PendingPool { } } + /// Same as `best` but only returns transactions that satisfy the given basefee. + pub(crate) fn best_with_basefee(&self, base_fee: u128) -> BestTransactionsWithBasefee { + BestTransactionsWithBasefee { best: self.best(), base_fee } + } + + /// Same as `best` but also includes the given unlocked transactions. + /// + /// This mimics the [Self::add_transaction] method, but does not insert the transactions into + /// pool but only into the returned iterator. + /// + /// Note: this does not insert the unlocked transactions into the pool. + /// + /// # Panics + /// + /// if the transaction is already included + pub(crate) fn best_with_unlocked( + &self, + unlocked: Vec>>, + ) -> BestTransactions { + let mut best = self.best(); + let mut submission_id = self.submission_id; + for tx in unlocked { + submission_id += 1; + debug_assert!(!best.all.contains_key(tx.id()), "transaction already included"); + let priority = self.ordering.priority(&tx.transaction); + let tx_id = *tx.id(); + let transaction = PendingTransactionRef { submission_id, transaction: tx, priority }; + if best.ancestor(&tx_id).is_none() { + best.independent.insert(transaction.clone()); + } + let transaction = Arc::new(PendingTransaction { transaction }); + best.all.insert(tx_id, transaction); + } + + best + } + /// Returns an iterator over all transactions in the pool pub(crate) fn all( &self, diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index b6a1bc174da1..e3820ee65575 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -191,6 +191,31 @@ impl TxPool { self.pending_pool.best() } + /// Returns an iterator that yields transactions that are ready to be included in the block with + /// the given base fee. 
+ pub(crate) fn best_transactions_with_base_fee( + &self, + basefee: u128, + ) -> Box>>> + { + match basefee.cmp(&self.all_transactions.pending_basefee) { + Ordering::Equal => { + // fee unchanged, nothing to shift + Box::new(self.best_transactions()) + } + Ordering::Greater => { + // base fee increased, we only need to enforces this on the pending pool + Box::new(self.pending_pool.best_with_basefee(basefee)) + } + Ordering::Less => { + // base fee decreased, we need to move transactions from the basefee pool to the + // pending pool + let unlocked = self.basefee_pool.satisfy_base_fee_transactions(basefee); + Box::new(self.pending_pool.best_with_unlocked(unlocked)) + } + } + } + /// Returns all transactions from the pending sub-pool pub(crate) fn pending_transactions(&self) -> Vec>> { self.pending_pool.all().collect() diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 368492a28f5c..8b2110353d64 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -175,6 +175,15 @@ pub trait TransactionPool: Send + Sync + Clone { &self, ) -> Box>>>; + /// Returns an iterator that yields transactions that are ready for block production with the + /// given base fee. + /// + /// Consumer: Block production + fn best_transactions_with_base_fee( + &self, + base_fee: u128, + ) -> Box>>>; + /// Returns all transactions that can be included in the next block. /// /// This is primarily used for the `txpool_` RPC namespace: which distinguishes between `pending` and `queued` transactions, where `pending` are transactions ready for inclusion in the next block and `queued` are transactions that are ready for inclusion in future blocks. From d001313f99137e3599d74629e79994f20b042bda Mon Sep 17 00:00:00 2001 From: rakita Date: Sat, 15 Jul 2023 21:36:27 +0200 Subject: [PATCH 177/722] bug(stages): TxLookup/Sender stages fix range if there is no tx (#3479) --- bin/reth/src/stage/drop.rs | 8 +++ crates/stages/src/stage.rs | 56 +++++++++++++-------- crates/stages/src/stages/sender_recovery.rs | 2 +- crates/stages/src/test_utils/test_db.rs | 20 +++++--- 4 files changed, 58 insertions(+), 28 deletions(-) diff --git a/bin/reth/src/stage/drop.rs b/bin/reth/src/stage/drop.rs index 771dff1d2049..33298d287fd5 100644 --- a/bin/reth/src/stage/drop.rs +++ b/bin/reth/src/stage/drop.rs @@ -155,6 +155,14 @@ impl Command { )?; insert_genesis_header::(tx, self.chain)?; } + StageEnum::TxLookup => { + tx.clear::()?; + tx.put::( + StageId::TransactionLookup.to_string(), + Default::default(), + )?; + insert_genesis_header::(tx, self.chain)?; + } _ => { info!("Nothing to do for stage {:?}", self.stage); return Ok(()) diff --git a/crates/stages/src/stage.rs b/crates/stages/src/stage.rs index 7580c6bab422..e369af06023c 100644 --- a/crates/stages/src/stage.rs +++ b/crates/stages/src/stage.rs @@ -1,14 +1,14 @@ use crate::error::StageError; use async_trait::async_trait; -use reth_db::{cursor::DbCursorRO, database::Database, tables, transaction::DbTx}; +use reth_db::database::Database; use reth_primitives::{ stage::{StageCheckpoint, StageId}, BlockNumber, TxNumber, }; -use reth_provider::{BlockReader, DatabaseProviderRW, ProviderError}; +use reth_provider::{BlockReader, DatabaseProviderRW, ProviderError, TransactionsProvider}; use std::{ cmp::{max, min}, - ops::RangeInclusive, + ops::{Range, RangeInclusive}, }; /// Stage execution input, see [Stage::execute]. 
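The hunks below rework `next_tx_range_with_transaction_threshold` so that a block range with no transactions yields an empty half-open tx range instead of walking a cursor. A simplified standalone model of the new calculation (hypothetical `split_range` helper; the slice plays the role of per-block `first_tx_num` markers, with a trailing end marker):

fn split_range(first_tx_of_block: &[u64], threshold: u64) -> (std::ops::Range<u64>, bool) {
    let first = first_tx_of_block[0];
    let next_after_target = *first_tx_of_block.last().unwrap();
    let all_tx_cnt = next_after_target - first;

    if all_tx_cnt == 0 {
        // no transactions in the whole range: empty range, final
        return (first..first, true)
    }
    if all_tx_cnt <= threshold {
        // everything fits below the threshold
        (first..next_after_target, true)
    } else {
        // stop at the end of the block containing tx `first + threshold`
        let end = first_tx_of_block.iter().copied().find(|&n| n > first + threshold).unwrap();
        (first..end, false)
    }
}

fn main() {
    // three blocks with 3, 2 and 4 txs; the last entry is the end marker
    let marks = [0u64, 3, 5, 9];
    assert_eq!(split_range(&marks, 10), (0..9, true));
    assert_eq!(split_range(&marks, 4), (0..5, false));
    assert_eq!(split_range(&[7, 7], 4), (7..7, true));
}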
@@ -77,30 +77,46 @@ impl ExecInput {
         &self,
         provider: &DatabaseProviderRW<'_, DB>,
         tx_threshold: u64,
-    ) -> Result<(RangeInclusive<TxNumber>, RangeInclusive<BlockNumber>, bool), StageError> {
+    ) -> Result<(Range<TxNumber>, RangeInclusive<BlockNumber>, bool), StageError> {
         let start_block = self.next_block();
+        let target_block = self.target();
+
         let start_block_body = provider
             .block_body_indices(start_block)?
             .ok_or(ProviderError::BlockBodyIndicesNotFound(start_block))?;
+        let first_tx_num = start_block_body.first_tx_num();

-        let target_block = self.target();
+        let target_block_body = provider
+            .block_body_indices(target_block)?
+            .ok_or(ProviderError::BlockBodyIndicesNotFound(target_block))?;

-        let first_tx_number = start_block_body.first_tx_num();
-        let mut last_tx_number = start_block_body.last_tx_num();
-        let mut end_block_number = start_block;
-        let mut body_indices_cursor =
-            provider.tx_ref().cursor_read::<tables::BlockBodyIndices>()?;
-        for entry in body_indices_cursor.walk_range(start_block..=target_block)? {
-            let (block, body) = entry?;
-            last_tx_number = body.last_tx_num();
-            end_block_number = block;
-            let tx_count = (first_tx_number..=last_tx_number).count() as u64;
-            if tx_count > tx_threshold {
-                break
-            }
+        // the number of transactions left to execute.
+        let all_tx_cnt = target_block_body.next_tx_num() - first_tx_num;
+
+        if all_tx_cnt == 0 {
+            // if there are no transactions to execute, return early.
+            return Ok((first_tx_num..first_tx_num, start_block..=target_block, true))
         }
-        let is_final_range = end_block_number >= target_block;
-        Ok((first_tx_number..=last_tx_number, start_block..=end_block_number, is_final_range))
+
+        // get the block of this tx
+        let (end_block, is_final_range, next_tx_num) = if all_tx_cnt <= tx_threshold {
+            (target_block, true, target_block_body.next_tx_num())
+        } else {
+            // get the tx block number. The queried tx number `first_tx_num + tx_threshold` is
+            // below the end of the range in this case, so we are sure the transaction exists.
+            let end_block_number = provider
+                .transaction_block(first_tx_num + tx_threshold)?
+                .expect("block of tx must exist");
+            // we want the range of all transactions of this block, so we fetch the block body.
+            let end_block_body = provider
+                .block_body_indices(end_block_number)?
+                .ok_or(ProviderError::BlockBodyIndicesNotFound(end_block_number))?;
+            (end_block_number, false, end_block_body.next_tx_num())
+        };
+
+        let tx_range = first_tx_num..next_tx_num;
+        Ok((tx_range, start_block..=end_block, is_final_range))
     }
 }
diff --git a/crates/stages/src/stages/sender_recovery.rs b/crates/stages/src/stages/sender_recovery.rs
index e6294b3a861b..8872c8138ffa 100644
--- a/crates/stages/src/stages/sender_recovery.rs
+++ b/crates/stages/src/stages/sender_recovery.rs
@@ -85,7 +85,7 @@ impl<DB: Database> Stage<DB> for SenderRecoveryStage {
         // Acquire the cursor over the transactions
         let mut tx_cursor = tx.cursor_read::<RawTable<tables::Transactions>>()?;
         // Walk the transactions from start to end index (inclusive)
-        let raw_tx_range = RawKey::new(*tx_range.start())..=RawKey::new(*tx_range.end());
+        let raw_tx_range = RawKey::new(tx_range.start)..RawKey::new(tx_range.end);
         let tx_walker = tx_cursor.walk_range(raw_tx_range)?;

         // Iterate over transactions in chunks
diff --git a/crates/stages/src/test_utils/test_db.rs b/crates/stages/src/test_utils/test_db.rs
index e3ad10d15c30..ddab55040734 100644
--- a/crates/stages/src/test_utils/test_db.rs
+++ b/crates/stages/src/test_utils/test_db.rs
@@ -246,13 +246,19 @@ impl TestTransaction {
         blocks.into_iter().try_for_each(|block| {
             Self::insert_header(tx, &block.header)?;
             // Insert into body tables.
- tx.put::( - block.number, - StoredBlockBodyIndices { - first_tx_num: next_tx_num, - tx_count: block.body.len() as u64, - }, - )?; + let block_body_indices = StoredBlockBodyIndices { + first_tx_num: next_tx_num, + tx_count: block.body.len() as u64, + }; + + if !block.body.is_empty() { + tx.put::( + block_body_indices.last_tx_num(), + block.number, + )?; + } + tx.put::(block.number, block_body_indices)?; + block.body.iter().try_for_each(|body_tx| { tx.put::(next_tx_num, body_tx.clone().into())?; next_tx_num += 1; From 6934428be9a8b1c3c0e41fb5749b5c8890124b2a Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Sat, 15 Jul 2023 15:36:39 -0400 Subject: [PATCH 178/722] feat: better blocksize heuristics (#3748) --- book/run/config.md | 6 +- crates/config/src/config.rs | 8 +- crates/interfaces/src/p2p/bodies/response.rs | 9 +- crates/net/downloaders/src/bodies/bodies.rs | 10 +- crates/net/downloaders/src/bodies/request.rs | 27 +++- crates/net/downloaders/src/metrics.rs | 13 ++ crates/primitives/src/block.rs | 36 +++++ crates/primitives/src/header.rs | 33 ++++- .../primitives/src/transaction/access_list.rs | 18 +++ crates/primitives/src/transaction/mod.rs | 69 +++++++++ .../primitives/src/transaction/signature.rs | 23 +++ crates/primitives/src/withdrawal.rs | 8 + etc/grafana/dashboards/overview.json | 137 ++++++++++++++++++ 13 files changed, 374 insertions(+), 23 deletions(-) diff --git a/book/run/config.md b/book/run/config.md index 43319d9d2faa..3d5c6ce5d6b9 100644 --- a/book/run/config.md +++ b/book/run/config.md @@ -89,7 +89,7 @@ downloader_request_limit = 200 # # A lower value means more frequent disk I/O (writes), but also # lowers memory usage. -downloader_stream_batch_size = 10000 +downloader_stream_batch_size = 1000 # The size of the internal block buffer in bytes. # # A bigger buffer means that bandwidth can be saturated for longer periods, @@ -98,8 +98,8 @@ downloader_stream_batch_size = 10000 # If the buffer is full, no more requests will be made to peers until # space is made for new blocks in the buffer. # -# Defaults to around 4GB. -downloader_max_buffered_blocks_size_bytes = 4294967296 +# Defaults to around 2GB. +downloader_max_buffered_blocks_size_bytes = 2147483648 # The minimum and maximum number of concurrent requests to have in flight at a time. # # The downloader uses these as best effort targets, which means that the number diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index 659df006b5e3..d46e665430c6 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -146,11 +146,11 @@ pub struct BodiesConfig { pub downloader_request_limit: u64, /// The maximum number of block bodies returned at once from the stream /// - /// Default: 10_000 + /// Default: 1_000 pub downloader_stream_batch_size: usize, /// The size of the internal block buffer in bytes. /// - /// Default: 4GB + /// Default: 2GB pub downloader_max_buffered_blocks_size_bytes: usize, /// The minimum number of requests to send concurrently. 
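All of the sizing heuristics this patch adds follow the same rule: an in-memory estimate is the sum of the element payloads plus `capacity * size_of::<Element>()` for the backing allocation, so reserved-but-unused slots are counted too. A generic sketch of the rule (illustrative helper, not part of the patch):

use std::mem;

// estimate a Vec's footprint: per-element heap payload plus allocated slots
fn vec_size<T>(v: &Vec<T>, payload: impl Fn(&T) -> usize) -> usize {
    v.iter().map(&payload).sum::<usize>() + v.capacity() * mem::size_of::<T>()
}

fn main() {
    let bodies: Vec<Vec<u8>> = vec![vec![0u8; 100], vec![0u8; 50]];
    let total = vec_size(&bodies, |b| b.len());
    assert_eq!(total, 150 + bodies.capacity() * mem::size_of::<Vec<u8>>());
}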
/// @@ -167,8 +167,8 @@ impl Default for BodiesConfig { fn default() -> Self { Self { downloader_request_limit: 200, - downloader_stream_batch_size: 10_000, - downloader_max_buffered_blocks_size_bytes: 4 * 1024 * 1024 * 1024, // ~4GB + downloader_stream_batch_size: 1_000, + downloader_max_buffered_blocks_size_bytes: 2 * 1024 * 1024 * 1024, // ~2GB downloader_min_concurrent_requests: 5, downloader_max_concurrent_requests: 100, } diff --git a/crates/interfaces/src/p2p/bodies/response.rs b/crates/interfaces/src/p2p/bodies/response.rs index a3033bd4ddf0..2b32b70097f6 100644 --- a/crates/interfaces/src/p2p/bodies/response.rs +++ b/crates/interfaces/src/p2p/bodies/response.rs @@ -18,13 +18,12 @@ impl BlockResponse { } } - /// Returns the total number of bytes of all transactions input data in the block + /// Calculates a heuristic for the in-memory size of the [BlockResponse]. + #[inline] pub fn size(&self) -> usize { match self { - BlockResponse::Full(block) => { - block.body.iter().map(|tx| tx.transaction.input().len()).sum() - } - BlockResponse::Empty(_) => 0, + BlockResponse::Full(block) => SealedBlock::size(block), + BlockResponse::Empty(header) => SealedHeader::size(header), } } diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 7a0c3aa4b742..e615520e33eb 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -19,6 +19,7 @@ use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use std::{ cmp::Ordering, collections::BinaryHeap, + mem, ops::RangeInclusive, pin::Pin, sync::Arc, @@ -225,13 +226,16 @@ where self.metrics.buffered_responses.decrement(1.); self.buffered_blocks_size_bytes -= resp.size(); self.metrics.buffered_blocks.decrement(resp.len() as f64); - self.metrics.buffered_blocks_size_bytes.set(resp.size() as f64); + self.metrics.buffered_blocks_size_bytes.set(self.buffered_blocks_size_bytes as f64); Some(resp) } /// Adds a new response to the internal buffer fn buffer_bodies_response(&mut self, response: Vec) { - let size = response.iter().map(|b| b.size()).sum::(); + // take into account capacity + let size = response.iter().map(BlockResponse::size).sum::() + + response.capacity() * mem::size_of::(); + let response = OrderedBodiesResponse { resp: response, size }; let response_len = response.len(); @@ -516,7 +520,7 @@ impl Default for BodiesDownloaderBuilder { Self { request_limit: 200, stream_batch_size: 10_000, - max_buffered_blocks_size_bytes: 4 * 1024 * 1024 * 1024, // ~4GB + max_buffered_blocks_size_bytes: 2 * 1024 * 1024 * 1024, // ~2GB concurrent_requests_range: 5..=100, } } diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index a71b81f33e73..8698e881ebbe 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ b/crates/net/downloaders/src/bodies/request.rs @@ -1,4 +1,4 @@ -use crate::metrics::BodyDownloaderMetrics; +use crate::metrics::{BodyDownloaderMetrics, ResponseMetrics}; use futures::{Future, FutureExt}; use reth_interfaces::{ consensus::{Consensus as ConsensusTrait, Consensus}, @@ -11,6 +11,7 @@ use reth_interfaces::{ use reth_primitives::{BlockBody, PeerId, SealedBlock, SealedHeader, WithPeerId, H256}; use std::{ collections::VecDeque, + mem, pin::Pin, sync::Arc, task::{ready, Context, Poll}, @@ -39,6 +40,9 @@ pub(crate) struct BodiesRequestFuture { client: Arc, consensus: Arc, metrics: BodyDownloaderMetrics, + /// Metrics for individual responses. 
This can be used to observe how the size (in bytes) of + /// responses change while bodies are being downloaded. + response_metrics: ResponseMetrics, // Headers to download. The collection is shrunk as responses are buffered. pending_headers: VecDeque, /// Internal buffer for all blocks @@ -62,6 +66,7 @@ where client, consensus, metrics, + response_metrics: Default::default(), pending_headers: Default::default(), buffer: Default::default(), last_request_len: None, @@ -153,8 +158,11 @@ where /// This method removes headers from the internal collection. /// If the response fails validation, then the header will be put back. fn try_buffer_blocks(&mut self, bodies: Vec) -> DownloadResult<()> { + let bodies_capacity = bodies.capacity(); + let bodies_len = bodies.len(); let mut bodies = bodies.into_iter().peekable(); + let mut total_size = bodies_capacity * mem::size_of::(); while bodies.peek().is_some() { let next_header = match self.pending_headers.pop_front() { Some(header) => header, @@ -162,15 +170,16 @@ where }; if next_header.is_empty() { + // increment empty block body metric + total_size += mem::size_of::(); self.buffer.push(BlockResponse::Empty(next_header)); } else { let next_body = bodies.next().unwrap(); - let block = SealedBlock { - header: next_header, - body: next_body.transactions, - ommers: next_body.ommers, - withdrawals: next_body.withdrawals, - }; + + // increment full block body metric + total_size += next_body.size(); + + let block = SealedBlock::new(next_header, next_body); if let Err(error) = self.consensus.validate_block(&block) { // Body is invalid, put the header back and return an error @@ -183,6 +192,10 @@ where } } + // Increment per-response metric + self.response_metrics.response_size_bytes.set(total_size as f64); + self.response_metrics.response_length.set(bodies_len as f64); + Ok(()) } } diff --git a/crates/net/downloaders/src/metrics.rs b/crates/net/downloaders/src/metrics.rs index 38fc642eb318..a227f38f8b7b 100644 --- a/crates/net/downloaders/src/metrics.rs +++ b/crates/net/downloaders/src/metrics.rs @@ -62,6 +62,19 @@ impl BodyDownloaderMetrics { } } +/// Metrics for an individual response, i.e. the size in bytes, and length (number of bodies) in the +/// response. +/// +/// These metrics will be initialized with the `downloaders.bodies.response` scope. +#[derive(Clone, Metrics)] +#[metrics(scope = "downloaders.bodies.response")] +pub struct ResponseMetrics { + /// The size (in bytes) of an individual bodies response received by the downloader. + pub response_size_bytes: Gauge, + /// The number of bodies in an individual bodies response received by the downloader. + pub response_length: Gauge, +} + /// Common header downloader metrics. /// /// These metrics will be initialized with the `downloaders.headers` scope. diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 6d652f92e476..0c9866ba28ef 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -59,6 +59,16 @@ impl Block { BlockWithSenders { block: self, senders } } + + /// Calculates a heuristic for the in-memory size of the [Block]. + #[inline] + pub fn size(&self) -> usize { + self.header.size() + + // take into account capacity + self.body.iter().map(TransactionSigned::size).sum::() + self.body.capacity() * std::mem::size_of::() + + self.ommers.iter().map(Header::size).sum::() + self.ommers.capacity() * std::mem::size_of::
() + + self.withdrawals.as_ref().map(|w| w.iter().map(Withdrawal::size).sum::() + w.capacity() * std::mem::size_of::()).unwrap_or(std::mem::size_of::>>()) + } } impl Deref for Block { @@ -178,6 +188,16 @@ impl SealedBlock { withdrawals: self.withdrawals, } } + + /// Calculates a heuristic for the in-memory size of the [SealedBlock]. + #[inline] + pub fn size(&self) -> usize { + self.header.size() + + // take into account capacity + self.body.iter().map(TransactionSigned::size).sum::() + self.body.capacity() * std::mem::size_of::() + + self.ommers.iter().map(Header::size).sum::() + self.ommers.capacity() * std::mem::size_of::
() + + self.withdrawals.as_ref().map(|w| w.iter().map(Withdrawal::size).sum::() + w.capacity() * std::mem::size_of::()).unwrap_or(std::mem::size_of::>>()) + } } impl From for Block { @@ -819,6 +839,22 @@ impl BlockBody { withdrawals_root: self.calculate_withdrawals_root(), } } + + /// Calculates a heuristic for the in-memory size of the [BlockBody]. + #[inline] + pub fn size(&self) -> usize { + self.transactions.iter().map(TransactionSigned::size).sum::() + + self.transactions.capacity() * std::mem::size_of::() + + self.ommers.iter().map(Header::size).sum::() + + self.ommers.capacity() * std::mem::size_of::
() + + self.withdrawals + .as_ref() + .map(|w| { + w.iter().map(Withdrawal::size).sum::() + + w.capacity() * std::mem::size_of::() + }) + .unwrap_or(std::mem::size_of::>>()) + } } /// A struct that represents roots associated with a block body. This can be used to correlate diff --git a/crates/primitives/src/header.rs b/crates/primitives/src/header.rs index 54b81649b84e..ba8e8f31deb4 100644 --- a/crates/primitives/src/header.rs +++ b/crates/primitives/src/header.rs @@ -9,7 +9,10 @@ use bytes::{Buf, BufMut, BytesMut}; use reth_codecs::{add_arbitrary_tests, derive_arbitrary, main_codec, Compact}; use reth_rlp::{length_of_length, Decodable, Encodable, EMPTY_STRING_CODE}; use serde::{Deserialize, Serialize}; -use std::ops::{Deref, DerefMut}; +use std::{ + mem, + ops::{Deref, DerefMut}, +}; /// Describes the current head block. /// @@ -180,6 +183,28 @@ impl Header { self.seal(hash) } + /// Calculate a heuristic for the in-memory size of the [Header]. + #[inline] + pub fn size(&self) -> usize { + mem::size_of::() + // parent hash + mem::size_of::() + // ommers hash + mem::size_of::() + // beneficiary + mem::size_of::() + // state root + mem::size_of::() + // transactions root + mem::size_of::() + // receipts root + mem::size_of::>() + // withdrawals root + mem::size_of::() + // logs bloom + mem::size_of::() + // difficulty + mem::size_of::() + // number + mem::size_of::() + // gas limit + mem::size_of::() + // gas used + mem::size_of::() + // timestamp + mem::size_of::() + // mix hash + mem::size_of::() + // nonce + mem::size_of::>() + // base fee per gas + self.extra_data.len() // extra data + } + fn header_payload_length(&self) -> usize { let mut length = 0; length += self.parent_hash.length(); @@ -331,6 +356,12 @@ impl SealedHeader { pub fn num_hash(&self) -> BlockNumHash { BlockNumHash::new(self.number, self.hash) } + + /// Calculates a heuristic for the in-memory size of the [SealedHeader]. + #[inline] + pub fn size(&self) -> usize { + self.header.size() + mem::size_of::() + } } #[cfg(any(test, feature = "arbitrary"))] diff --git a/crates/primitives/src/transaction/access_list.rs b/crates/primitives/src/transaction/access_list.rs index eaa60b2603f1..a86b33b69eb8 100644 --- a/crates/primitives/src/transaction/access_list.rs +++ b/crates/primitives/src/transaction/access_list.rs @@ -1,3 +1,5 @@ +use std::mem; + use crate::{Address, H256}; use reth_codecs::{main_codec, Compact}; use reth_rlp::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper}; @@ -22,6 +24,14 @@ pub struct AccessListItem { pub storage_keys: Vec, } +impl AccessListItem { + /// Calculates a heuristic for the in-memory size of the [AccessListItem]. + #[inline] + pub fn size(&self) -> usize { + mem::size_of::
() + self.storage_keys.capacity() * mem::size_of::() + } +} + /// AccessList as defined in EIP-2930 #[main_codec(rlp)] #[derive(Clone, Debug, PartialEq, Eq, Hash, Default, RlpDecodableWrapper, RlpEncodableWrapper)] @@ -48,6 +58,14 @@ impl AccessList { }) .collect() } + + /// Calculates a heuristic for the in-memory size of the [AccessList]. + #[inline] + pub fn size(&self) -> usize { + // take into account capacity + self.0.iter().map(AccessListItem::size).sum::() + + self.0.capacity() * mem::size_of::() + } } /// Access list with gas used appended. diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 796baf2986a9..c0371aa2a849 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1,3 +1,5 @@ +use std::mem; + use crate::{ compression::{TRANSACTION_COMPRESSOR, TRANSACTION_DECOMPRESSOR}, keccak256, Address, Bytes, ChainId, TxHash, H256, @@ -64,6 +66,20 @@ pub struct TxLegacy { pub input: Bytes, } +impl TxLegacy { + /// Calculates a heuristic for the in-memory size of the [TxLegacy] transaction. + #[inline] + fn size(&self) -> usize { + mem::size_of::>() + // chain_id + mem::size_of::() + // nonce + mem::size_of::() + // gas_price + mem::size_of::() + // gas_limit + self.to.size() + // to + mem::size_of::() + // value + self.input.len() // input + } +} + /// Transaction with an [`AccessList`] ([EIP-2930](https://eips.ethereum.org/EIPS/eip-2930)). #[main_codec] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default)] @@ -112,6 +128,21 @@ pub struct TxEip2930 { pub input: Bytes, } +impl TxEip2930 { + /// Calculates a heuristic for the in-memory size of the [TxEip2930] transaction. + #[inline] + pub fn size(&self) -> usize { + mem::size_of::() + // chain_id + mem::size_of::() + // nonce + mem::size_of::() + // gas_price + mem::size_of::() + // gas_limit + self.to.size() + // to + mem::size_of::() + // value + self.access_list.size() + // access_list + self.input.len() // input + } +} + /// A transaction with a priority fee ([EIP-1559](https://eips.ethereum.org/EIPS/eip-1559)). #[main_codec] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default)] @@ -168,6 +199,22 @@ pub struct TxEip1559 { pub input: Bytes, } +impl TxEip1559 { + /// Calculates a heuristic for the in-memory size of the [TxEip1559] transaction. + #[inline] + pub fn size(&self) -> usize { + mem::size_of::() + // chain_id + mem::size_of::() + // nonce + mem::size_of::() + // gas_limit + mem::size_of::() + // max_fee_per_gas + mem::size_of::() + // max_priority_fee_per_gas + self.to.size() + // to + mem::size_of::() + // value + self.access_list.size() + // access_list + self.input.len() // input + } +} + /// A raw transaction. /// /// Transaction types were introduced in [EIP-2718](https://eips.ethereum.org/EIPS/eip-2718). @@ -251,6 +298,16 @@ impl Transaction { Transaction::Eip1559(tx) => tx.input = input, } } + + /// Calculates a heuristic for the in-memory size of the [Transaction]. + #[inline] + fn size(&self) -> usize { + match self { + Transaction::Legacy(tx) => tx.size(), + Transaction::Eip2930(tx) => tx.size(), + Transaction::Eip1559(tx) => tx.size(), + } + } } impl Compact for Transaction { @@ -720,6 +777,12 @@ impl TransactionKind { TransactionKind::Call(to) => Some(to), } } + + /// Calculates a heuristic for the in-memory size of the [TransactionKind]. 
+ #[inline] + fn size(self) -> usize { + mem::size_of::() + } } impl Compact for TransactionKind { @@ -1033,6 +1096,12 @@ impl TransactionSigned { initial_tx } + /// Calculate a heuristic for the in-memory size of the [TransactionSigned]. + #[inline] + pub fn size(&self) -> usize { + mem::size_of::() + self.transaction.size() + self.signature.size() + } + /// Decodes legacy transaction from the data buffer. /// /// This expects `rlp(legacy_tx)` diff --git a/crates/primitives/src/transaction/signature.rs b/crates/primitives/src/transaction/signature.rs index faf44c2b9705..6977d8c26182 100644 --- a/crates/primitives/src/transaction/signature.rs +++ b/crates/primitives/src/transaction/signature.rs @@ -136,6 +136,12 @@ impl Signature { sig[64] = v; sig } + + /// Calculates a heuristic for the in-memory size of the [Signature]. + #[inline] + pub fn size(&self) -> usize { + std::mem::size_of::() + } } #[cfg(test)] @@ -220,4 +226,21 @@ mod tests { let expected = Address::from_str("0x9d8a62f656a8d1615c1294fd71e9cfb3e4855a4f").unwrap(); assert_eq!(expected, signer); } + + #[test] + fn ensure_size_equals_sum_of_fields() { + let signature = Signature { + r: U256::from_str( + "18515461264373351373200002665853028612451056578545711640558177340181847433846", + ) + .unwrap(), + s: U256::from_str( + "46948507304638947509940763649030358759909902576025900602547168820602576006531", + ) + .unwrap(), + odd_y_parity: false, + }; + + assert!(signature.size() >= 65); + } } diff --git a/crates/primitives/src/withdrawal.rs b/crates/primitives/src/withdrawal.rs index d8d0145b4a29..098110803c47 100644 --- a/crates/primitives/src/withdrawal.rs +++ b/crates/primitives/src/withdrawal.rs @@ -1,3 +1,5 @@ +use std::mem; + use crate::{constants::GWEI_TO_WEI, serde_helper::u64_hex, Address, U256}; use reth_codecs::{main_codec, Compact}; use reth_rlp::{RlpDecodable, RlpEncodable}; @@ -24,6 +26,12 @@ impl Withdrawal { pub fn amount_wei(&self) -> U256 { U256::from(self.amount) * U256::from(GWEI_TO_WEI) } + + /// Calculate a heuristic for the in-memory size of the [Withdrawal]. 
+ #[inline] + pub fn size(&self) -> usize { + mem::size_of::() + } } #[cfg(test)] diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 4d27f32f7e8d..4e11ec203aa3 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -2643,6 +2643,143 @@ "title": "Downloader buffer", "type": "timeseries" }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The number of blocks in a request and size in bytes of those block responses", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "lineInterpolation": "linear", + "barAlignment": 0, + "lineWidth": 1, + "fillOpacity": 0, + "gradientMode": "none", + "spanNulls": false, + "showPoints": "auto", + "pointSize": 5, + "stacking": { + "mode": "none", + "group": "A" + }, + "axisPlacement": "auto", + "axisLabel": "", + "axisColorMode": "text", + "scaleDistribution": { + "type": "linear" + }, + "axisCenteredZero": false, + "hideFrom": { + "tooltip": false, + "viz": false, + "legend": false + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "color": { + "mode": "palette-classic" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "B" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "blocks" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 5, + "y": 110 + }, + "id": 102, + "options": { + "tooltip": { + "mode": "multi", + "sort": "none" + }, + "legend": { + "showLegend": true, + "displayMode": "list", + "placement": "bottom", + "calcs": [] + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "Prometheus" + }, + "editorMode": "builder", + "expr": "reth_downloaders_bodies_response_response_size_bytes{instance=~\"$instance\"}", + "hide": false, + "legendFormat": "Response size", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "Prometheus" + }, + "editorMode": "builder", + "expr": "reth_downloaders_bodies_response_response_length{instance=~\"$instance\"}", + "hide": false, + "legendFormat": "Individual response length", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_downloaders_bodies_response_response_size_bytes / reth_downloaders_bodies_response_response_length", + "hide": false, + "instant": false, + "legendFormat": "Mean body size in response", + "range": true, + "refId": "C" + } + ], + "title": "Block body response sizes", + "type": "timeseries" + }, { "collapsed": false, "gridPos": { From 90b6d006d1fc660c4883a4335e42f8e760c4adbe Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Sat, 15 Jul 2023 21:59:03 +0100 Subject: [PATCH 179/722] fix(stages): update entities metrics on `SyncHeight` event (#3796) --- crates/stages/src/metrics/listener.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/crates/stages/src/metrics/listener.rs b/crates/stages/src/metrics/listener.rs index 8e8102d3f34c..18bd45477276 100644 --- a/crates/stages/src/metrics/listener.rs +++ b/crates/stages/src/metrics/listener.rs @@ -59,8 +59,14 @@ impl MetricsListener { match event { MetricEvent::SyncHeight { height } => { for stage_id in StageId::ALL { 
- let stage_metrics = self.sync_metrics.get_stage_metrics(stage_id); - stage_metrics.checkpoint.set(height as f64); + self.handle_event(MetricEvent::StageCheckpoint { + stage_id, + checkpoint: StageCheckpoint { + block_number: height, + stage_checkpoint: None, + }, + max_block_number: Some(height), + }); } } MetricEvent::StageCheckpoint { stage_id, checkpoint, max_block_number } => { From 07b06b4efe32dcb540ef12b8c2ee21798f7f6f73 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 17 Jul 2023 10:25:00 +0200 Subject: [PATCH 180/722] chore: use best with base fee (#3804) --- crates/payload/basic/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index c598ab53509e..2d598a270268 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -578,12 +578,12 @@ fn build_payload( let mut cumulative_gas_used = 0; let block_gas_limit: u64 = initialized_block_env.gas_limit.try_into().unwrap_or(u64::MAX); + let base_fee = initialized_block_env.basefee.to::(); let mut executed_txs = Vec::new(); - let mut best_txs = pool.best_transactions(); + let mut best_txs = pool.best_transactions_with_base_fee(base_fee as u128); let mut total_fees = U256::ZERO; - let base_fee = initialized_block_env.basefee.to::(); let block_number = initialized_block_env.number.to::(); From 6d1e8a2ecccba6b91ca3052d3ad0b0705830ec03 Mon Sep 17 00:00:00 2001 From: "Supernovahs.eth" <91280922+supernovahs@users.noreply.github.com> Date: Mon, 17 Jul 2023 15:12:28 +0530 Subject: [PATCH 181/722] feat:new discovered node record event stream (#3707) Co-authored-by: Matthias Seitz --- crates/net/network/src/discovery.rs | 43 ++++++++++++++++++++++------- crates/net/network/src/manager.rs | 10 ++++++- crates/net/network/src/network.rs | 15 ++++++++-- crates/net/network/src/state.rs | 13 ++++++++- 4 files changed, 67 insertions(+), 14 deletions(-) diff --git a/crates/net/network/src/discovery.rs b/crates/net/network/src/discovery.rs index 0bf64e7c028c..5af2579567e0 100644 --- a/crates/net/network/src/discovery.rs +++ b/crates/net/network/src/discovery.rs @@ -1,6 +1,9 @@ //! Discovery support for the network. -use crate::error::{NetworkError, ServiceKind}; +use crate::{ + error::{NetworkError, ServiceKind}, + manager::DiscoveredEvent, +}; use futures::StreamExt; use reth_discv4::{DiscoveryUpdate, Discv4, Discv4Config, EnrForkIdEntry}; use reth_dns_discovery::{ @@ -14,12 +17,14 @@ use std::{ sync::Arc, task::{Context, Poll}, }; -use tokio::task::JoinHandle; +use tokio::{sync::mpsc, task::JoinHandle}; use tokio_stream::wrappers::ReceiverStream; /// An abstraction over the configured discovery protocol. /// -/// Listens for new discovered nodes and emits events for discovered nodes and their address. +/// Listens for new discovered nodes and emits events for discovered nodes and their +/// address.#[derive(Debug, Clone)] + pub struct Discovery { /// All nodes discovered via discovery protocol. /// @@ -41,6 +46,8 @@ pub struct Discovery { _dns_disc_service: Option>, /// Events buffered until polled. queued_events: VecDeque, + /// List of listeners subscribed to discovery events. + discovery_listeners: Vec>, } impl Discovery { @@ -84,6 +91,7 @@ impl Discovery { }; Ok(Self { + discovery_listeners: Default::default(), local_enr, discv4, discv4_updates, @@ -96,6 +104,17 @@ impl Discovery { }) } + /// Registers a listener for receiving [DiscoveryEvent] updates. 
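The `add_listener`/`notify_listeners` pair that follows implements a simple fan-out: broadcast each event to every registered unbounded sender and prune the ones whose receivers are gone. A minimal sketch of that registry pattern, assuming only `tokio` (the `Event` type is a stand-in for the real `DiscoveryEvent` payload):

use tokio::sync::mpsc;

#[derive(Debug, Clone, PartialEq)]
enum Event {
    NewNode(u64),
}

struct Registry {
    listeners: Vec<mpsc::UnboundedSender<Event>>,
}

impl Registry {
    fn notify(&mut self, event: &Event) {
        // `send` only fails when the receiver was dropped; retain_mut prunes those
        self.listeners.retain_mut(|l| l.send(event.clone()).is_ok());
    }
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::unbounded_channel();
    let mut reg = Registry { listeners: vec![tx] };
    reg.notify(&Event::NewNode(7));
    assert_eq!(rx.recv().await, Some(Event::NewNode(7)));
}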
+ pub(crate) fn add_listener(&mut self, tx: mpsc::UnboundedSender) { + self.discovery_listeners.push(tx); + } + + /// Notifies all registered listeners with the provided `event`. + #[inline] + fn notify_listeners(&mut self, event: &DiscoveryEvent) { + self.discovery_listeners.retain_mut(|listener| listener.send(event.clone()).is_ok()); + } + /// Updates the `eth:ForkId` field in discv4. #[allow(unused)] pub(crate) fn update_fork_id(&self, fork_id: ForkId) { @@ -139,11 +158,9 @@ impl Discovery { Entry::Occupied(_entry) => {} Entry::Vacant(entry) => { entry.insert(addr); - self.queued_events.push_back(DiscoveryEvent::Discovered { - peer_id: id, - socket_addr: addr, - fork_id, - }); + self.queued_events.push_back(DiscoveryEvent::NewNode( + DiscoveredEvent::EventQueued { peer_id: id, socket_addr: addr, fork_id }, + )); } } } @@ -174,6 +191,7 @@ impl Discovery { loop { // Drain all buffered events first if let Some(event) = self.queued_events.pop_front() { + self.notify_listeners(&event); return Poll::Ready(event) } @@ -204,6 +222,9 @@ impl Discovery { /// /// NOTE: This instance does nothing pub(crate) fn noop() -> Self { + let (_discovery_listeners, _): (mpsc::UnboundedSender, _) = + mpsc::unbounded_channel(); + Self { discovered_nodes: Default::default(), local_enr: NodeRecord { @@ -219,14 +240,16 @@ impl Discovery { _dns_discovery: None, dns_discovery_updates: None, _dns_disc_service: None, + discovery_listeners: Default::default(), } } } /// Events produced by the [`Discovery`] manager. +#[derive(Debug, Clone)] pub enum DiscoveryEvent { - /// A new node was discovered - Discovered { peer_id: PeerId, socket_addr: SocketAddr, fork_id: Option }, + /// Discovered a node + NewNode(DiscoveredEvent), /// Retrieved a [`ForkId`] from the peer via ENR request, See EnrForkId(PeerId, ForkId), } diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index 0a997d85a60e..78ec6d332111 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -41,7 +41,7 @@ use reth_eth_wire::{ use reth_metrics::common::mpsc::UnboundedMeteredSender; use reth_net_common::bandwidth_meter::BandwidthMeter; use reth_network_api::ReputationChangeKind; -use reth_primitives::{listener::EventListeners, NodeRecord, PeerId, H256}; +use reth_primitives::{listener::EventListeners, ForkId, NodeRecord, PeerId, H256}; use reth_provider::BlockReader; use reth_rpc_types::{EthProtocolInfo, NetworkStatus}; use std::{ @@ -515,6 +515,9 @@ where NetworkHandleMessage::EventListener(tx) => { self.event_listeners.push_listener(tx); } + NetworkHandleMessage::DiscoveryListener(tx) => { + self.swarm.state_mut().discovery_mut().add_listener(tx); + } NetworkHandleMessage::AnnounceBlock(block, hash) => { if self.handle.mode().is_stake() { // See [EIP-3675](https://eips.ethereum.org/EIPS/eip-3675#devp2p) @@ -919,3 +922,8 @@ pub enum NetworkEvent { /// Event emitted when a new peer is removed PeerRemoved(PeerId), } + +#[derive(Debug, Clone)] +pub enum DiscoveredEvent { + EventQueued { peer_id: PeerId, socket_addr: SocketAddr, fork_id: Option }, +} diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 44337e1555eb..9a3c8926caf5 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -1,6 +1,6 @@ use crate::{ - config::NetworkMode, manager::NetworkEvent, message::PeerRequest, peers::PeersHandle, - session::PeerInfo, FetchClient, + config::NetworkMode, discovery::DiscoveryEvent, manager::NetworkEvent, message::PeerRequest, + 
peers::PeersHandle, session::PeerInfo, FetchClient, }; use async_trait::async_trait; use parking_lot::Mutex; @@ -83,6 +83,15 @@ impl NetworkHandle { UnboundedReceiverStream::new(rx) } + /// Returns a new [`DiscoveryEvent`] stream. + /// + /// This stream yields [`DiscoveryEvent`]s for each peer that is discovered. + pub fn discovery_listener(&self) -> UnboundedReceiverStream { + let (tx, rx) = mpsc::unbounded_channel(); + let _ = self.manager().send(NetworkHandleMessage::DiscoveryListener(tx)); + UnboundedReceiverStream::new(rx) + } + /// Returns a new [`FetchClient`] that can be cloned and shared. /// /// The [`FetchClient`] is the entrypoint for sending requests to the network. @@ -320,4 +329,6 @@ pub(crate) enum NetworkHandleMessage { GetReputationById(PeerId, oneshot::Sender>), /// Gracefully shutdown network Shutdown(oneshot::Sender<()>), + /// Add a new listener for `DiscoveryEvent`. + DiscoveryListener(UnboundedSender), } diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index 104f263f4b71..446a67962927 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -4,6 +4,7 @@ use crate::{ cache::LruCache, discovery::{Discovery, DiscoveryEvent}, fetch::{BlockResponseOutcome, FetchAction, StateFetcher}, + manager::DiscoveredEvent, message::{ BlockRequest, NewBlockMessage, PeerRequest, PeerRequestSender, PeerResponse, PeerResponseResult, @@ -11,6 +12,7 @@ use crate::{ peers::{PeerAction, PeersManager}, FetchClient, }; + use reth_eth_wire::{ capability::Capabilities, BlockHashNumber, DisconnectReason, NewBlockHashes, Status, }; @@ -95,6 +97,11 @@ where &mut self.peers_manager } + /// Returns mutable access to the [`Discovery`] + pub(crate) fn discovery_mut(&mut self) -> &mut Discovery { + &mut self.discovery + } + /// Returns access to the [`PeersManager`] pub(crate) fn peers(&self) -> &PeersManager { &self.peers_manager @@ -277,7 +284,11 @@ where /// Event hook for events received from the discovery service. 
fn on_discovery_event(&mut self, event: DiscoveryEvent) { match event { - DiscoveryEvent::Discovered { peer_id, socket_addr, fork_id } => { + DiscoveryEvent::NewNode(DiscoveredEvent::EventQueued { + peer_id, + socket_addr, + fork_id, + }) => { self.queued_messages.push_back(StateAction::DiscoveredNode { peer_id, socket_addr, From a0e125f7097ba36dbfd88b236f74c791aa6fe552 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9o=20Vincent?= <28714795+leovct@users.noreply.github.com> Date: Mon, 17 Jul 2023 12:16:25 +0200 Subject: [PATCH 182/722] feat: make rpc gas cap configurable (#3458) Co-authored-by: Alexey Shekhirin Co-authored-by: Matthias Seitz --- bin/reth/src/args/rpc_server_args.rs | 32 +++++++++++++++++++++- book/cli/node.md | 3 ++ crates/rpc/rpc-builder/src/auth.rs | 2 ++ crates/rpc/rpc-builder/src/eth.rs | 10 +++++++ crates/rpc/rpc-builder/src/lib.rs | 1 + crates/rpc/rpc/src/eth/api/mod.rs | 12 ++++++++ crates/rpc/rpc/src/eth/api/server.rs | 5 ++-- crates/rpc/rpc/src/eth/api/state.rs | 4 ++- crates/rpc/rpc/src/eth/api/transactions.rs | 3 +- 9 files changed, 67 insertions(+), 5 deletions(-) diff --git a/bin/reth/src/args/rpc_server_args.rs b/bin/reth/src/args/rpc_server_args.rs index 230741885bc6..4a1f8d2fb89a 100644 --- a/bin/reth/src/args/rpc_server_args.rs +++ b/bin/reth/src/args/rpc_server_args.rs @@ -2,11 +2,12 @@ use crate::args::GasPriceOracleArgs; use clap::{ - builder::{PossibleValue, TypedValueParser}, + builder::{PossibleValue, RangedU64ValueParser, TypedValueParser}, Arg, Args, Command, }; use futures::TryFutureExt; use reth_network_api::{NetworkInfo, Peers}; +use reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT; use reth_provider::{ BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, EvmEnvProvider, HeaderProvider, StateProviderFactory, @@ -48,6 +49,9 @@ pub(crate) const RPC_DEFAULT_MAX_CONNECTIONS: u32 = 100; /// Default number of incoming connections. pub(crate) const RPC_DEFAULT_MAX_TRACING_REQUESTS: u32 = 25; +/// Default max gas limit for `eth_call` and call tracing RPC methods. +pub(crate) const RPC_DEFAULT_GAS_CAP: u64 = ETHEREUM_BLOCK_GAS_LIMIT; + /// Parameters for configuring the rpc more granularity via CLI #[derive(Debug, Args, PartialEq, Eq, Default)] #[command(next_help_heading = "RPC")] @@ -132,6 +136,16 @@ pub struct RpcServerArgs { #[arg(long, value_name = "COUNT", default_value_t = RPC_DEFAULT_MAX_TRACING_REQUESTS)] pub rpc_max_tracing_requests: u32, + /// Maximum gas limit for `eth_call` and call tracing RPC methods. + #[arg( + long, + alias = "rpc.gascap", + value_name = "GAS_CAP", + value_parser = RangedU64ValueParser::::new().range(1..), + default_value_t = RPC_DEFAULT_GAS_CAP + )] + pub rpc_gas_cap: u64, + /// Gas price oracle configuration. 
    /// Gas price oracle configuration.
    #[clap(flatten)]
    pub gas_price_oracle: GasPriceOracleArgs,
@@ -174,6 +188,7 @@ impl RpcServerArgs {
     pub fn eth_config(&self) -> EthConfig {
         EthConfig::default()
             .max_tracing_requests(self.rpc_max_tracing_requests)
+            .rpc_gas_cap(self.rpc_gas_cap)
             .gpo_config(self.gas_price_oracle_config())
     }
@@ -495,6 +510,21 @@ mod tests {
         args: T,
     }
 
+    #[test]
+    fn test_rpc_gas_cap() {
+        let args = CommandParser::<RpcServerArgs>::parse_from(["reth"]).args;
+        let config = args.eth_config();
+        assert_eq!(config.rpc_gas_cap, RPC_DEFAULT_GAS_CAP);
+
+        let args =
+            CommandParser::<RpcServerArgs>::parse_from(["reth", "--rpc.gascap", "1000"]).args;
+        let config = args.eth_config();
+        assert_eq!(config.rpc_gas_cap, 1000);
+
+        let args = CommandParser::<RpcServerArgs>::try_parse_from(["reth", "--rpc.gascap", "0"]);
+        assert!(args.is_err());
+    }
+
     #[test]
     fn test_rpc_server_args_parser() {
         let args =
diff --git a/book/cli/node.md b/book/cli/node.md
index 4d447428a4c6..78d9b40dba2f 100644
--- a/book/cli/node.md
+++ b/book/cli/node.md
@@ -187,6 +187,9 @@ Gas Price Oracle:
           The percentile of gas prices to use for the estimate
 
           [default: 60]
+
+      --rpc.gascap
+          Maximum gas limit for `eth_call` and call tracing RPC methods
 
       --block-cache-len
           Maximum number of block cache entries
diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs
index 567f10bfca4f..758713bbdebc 100644
--- a/crates/rpc/rpc-builder/src/auth.rs
+++ b/crates/rpc/rpc-builder/src/auth.rs
@@ -2,6 +2,7 @@ use crate::{
     constants,
     error::{RpcError, ServerKind},
     eth::DEFAULT_MAX_LOGS_PER_RESPONSE,
+    EthConfig,
 };
 use hyper::header::AUTHORIZATION;
 pub use jsonrpsee::server::ServerBuilder;
@@ -61,6 +62,7 @@ where
         network,
         eth_cache.clone(),
         gas_oracle,
+        EthConfig::default().rpc_gas_cap,
         Box::new(executor.clone()),
     );
     let eth_filter = EthFilter::new(
diff --git a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs
index 3968eef6f83e..767660249e27 100644
--- a/crates/rpc/rpc-builder/src/eth.rs
+++ b/crates/rpc/rpc-builder/src/eth.rs
@@ -1,3 +1,4 @@
+use reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT;
 use reth_rpc::{
     eth::{
         cache::{EthStateCache, EthStateCacheConfig},
@@ -37,6 +38,8 @@ pub struct EthConfig {
     pub max_tracing_requests: u32,
     /// Maximum number of logs that can be returned in a single response in `eth_getLogs` calls.
     pub max_logs_per_response: usize,
+    /// Maximum gas limit for `eth_call` and call tracing RPC methods.
+ pub rpc_gas_cap: u64, } impl Default for EthConfig { @@ -46,6 +49,7 @@ impl Default for EthConfig { gas_oracle: GasPriceOracleConfig::default(), max_tracing_requests: DEFAULT_MAX_TRACING_REQUESTS, max_logs_per_response: DEFAULT_MAX_LOGS_PER_RESPONSE, + rpc_gas_cap: ETHEREUM_BLOCK_GAS_LIMIT, } } } @@ -74,4 +78,10 @@ impl EthConfig { self.max_logs_per_response = max_logs; self } + + /// Configures the maximum gas limit for `eth_call` and call tracing RPC methods + pub fn rpc_gas_cap(mut self, rpc_gas_cap: u64) -> Self { + self.rpc_gas_cap = rpc_gas_cap; + self + } } diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 7013131ec5c0..d01036f8d4b7 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -967,6 +967,7 @@ where self.network.clone(), cache.clone(), gas_oracle, + self.config.eth.rpc_gas_cap, executor.clone(), ); let filter = EthFilter::new( diff --git a/crates/rpc/rpc/src/eth/api/mod.rs b/crates/rpc/rpc/src/eth/api/mod.rs index b53a80993aed..f291bf2566fb 100644 --- a/crates/rpc/rpc/src/eth/api/mod.rs +++ b/crates/rpc/rpc/src/eth/api/mod.rs @@ -83,6 +83,7 @@ where network: Network, eth_cache: EthStateCache, gas_oracle: GasPriceOracle, + gas_cap: u64, ) -> Self { Self::with_spawner( provider, @@ -90,6 +91,7 @@ where network, eth_cache, gas_oracle, + gas_cap, Box::::default(), ) } @@ -101,6 +103,7 @@ where network: Network, eth_cache: EthStateCache, gas_oracle: GasPriceOracle, + gas_cap: u64, task_spawner: Box, ) -> Self { // get the block number of the latest block @@ -118,6 +121,7 @@ where signers: Default::default(), eth_cache, gas_oracle, + gas_cap, starting_block: U256::from(latest_block), task_spawner, pending_block: Default::default(), @@ -155,6 +159,12 @@ where &self.inner.gas_oracle } + /// Returns the configured gas limit cap for `eth_call` and tracing related calls + #[allow(unused)] + pub(crate) fn gas_cap(&self) -> u64 { + self.inner.gas_cap + } + /// Returns the inner `Provider` pub fn provider(&self) -> &Provider { &self.inner.provider @@ -354,6 +364,8 @@ struct EthApiInner { eth_cache: EthStateCache, /// The async gas oracle frontend for gas price suggestions gas_oracle: GasPriceOracle, + /// Maximum gas limit for `eth_call` and call tracing RPC methods. + gas_cap: u64, /// The block number at which the node started starting_block: U256, /// The type that can spawn tasks which would otherwise block. 
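Every `EthApi` construction site now has to pass the cap explicitly, as the updated test fixtures below illustrate. A hedged construction sketch, assuming the same no-op test utilities those fixtures use (`NoopProvider`, `testing_pool`, `EthStateCache::spawn`):

```rust
use reth_network_api::noop::NoopNetwork;
use reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT;
use reth_provider::test_utils::NoopProvider;
use reth_rpc::eth::{cache::EthStateCache, gas_oracle::GasPriceOracle, EthApi};
use reth_transaction_pool::test_utils::testing_pool;

fn demo_eth_api() {
    let provider = NoopProvider::default();
    // Cache and oracle built as in the in-tree tests below.
    let cache = EthStateCache::spawn(provider, Default::default());
    let _api = EthApi::new(
        provider,
        testing_pool(),
        NoopNetwork::default(),
        cache.clone(),
        GasPriceOracle::new(provider, Default::default(), cache),
        // The new trailing argument: the cap used for eth_call and tracing.
        // At this point in the series it defaults to the 30M block gas limit;
        // patch 186 later raises the default to 50M.
        ETHEREUM_BLOCK_GAS_LIMIT,
    );
    // Internally the cap is read back via the (crate-private, for now)
    // `gas_cap()` accessor added in this patch.
}
```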
diff --git a/crates/rpc/rpc/src/eth/api/server.rs b/crates/rpc/rpc/src/eth/api/server.rs index b1643e89671a..1cca7addc90c 100644 --- a/crates/rpc/rpc/src/eth/api/server.rs +++ b/crates/rpc/rpc/src/eth/api/server.rs @@ -398,8 +398,8 @@ mod tests { use reth_interfaces::test_utils::{generators, generators::Rng}; use reth_network_api::noop::NoopNetwork; use reth_primitives::{ - basefee::calculate_next_block_base_fee, Block, BlockNumberOrTag, Header, TransactionSigned, - H256, U256, + basefee::calculate_next_block_base_fee, constants::ETHEREUM_BLOCK_GAS_LIMIT, Block, + BlockNumberOrTag, Header, TransactionSigned, H256, U256, }; use reth_provider::{ test_utils::{MockEthProvider, NoopProvider}, @@ -427,6 +427,7 @@ mod tests { NoopNetwork::default(), cache.clone(), GasPriceOracle::new(provider, Default::default(), cache), + ETHEREUM_BLOCK_GAS_LIMIT, ) } diff --git a/crates/rpc/rpc/src/eth/api/state.rs b/crates/rpc/rpc/src/eth/api/state.rs index 281198be0796..0930bf0b6c50 100644 --- a/crates/rpc/rpc/src/eth/api/state.rs +++ b/crates/rpc/rpc/src/eth/api/state.rs @@ -147,7 +147,7 @@ where mod tests { use super::*; use crate::eth::{cache::EthStateCache, gas_oracle::GasPriceOracle}; - use reth_primitives::{StorageKey, StorageValue}; + use reth_primitives::{constants::ETHEREUM_BLOCK_GAS_LIMIT, StorageKey, StorageValue}; use reth_provider::test_utils::{ExtendedAccount, MockEthProvider, NoopProvider}; use reth_transaction_pool::test_utils::testing_pool; use std::collections::HashMap; @@ -164,6 +164,7 @@ mod tests { (), cache.clone(), GasPriceOracle::new(NoopProvider::default(), Default::default(), cache), + ETHEREUM_BLOCK_GAS_LIMIT, ); let address = Address::random(); let storage = eth_api.storage_at(address, U256::ZERO.into(), None).unwrap(); @@ -184,6 +185,7 @@ mod tests { (), cache.clone(), GasPriceOracle::new(mock_provider, Default::default(), cache), + ETHEREUM_BLOCK_GAS_LIMIT, ); let storage_key: U256 = storage_key.into(); diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index c932ec825bcf..6a5e21d5e162 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -872,7 +872,7 @@ mod tests { EthApi, }; use reth_network_api::noop::NoopNetwork; - use reth_primitives::{hex_literal::hex, Bytes}; + use reth_primitives::{constants::ETHEREUM_BLOCK_GAS_LIMIT, hex_literal::hex, Bytes}; use reth_provider::test_utils::NoopProvider; use reth_transaction_pool::{test_utils::testing_pool, TransactionPool}; @@ -890,6 +890,7 @@ mod tests { noop_network_provider, cache.clone(), GasPriceOracle::new(noop_provider, Default::default(), cache), + ETHEREUM_BLOCK_GAS_LIMIT, ); // https://etherscan.io/tx/0xa694b71e6c128a2ed8e2e0f6770bddbe52e3bb8f10e8472f9a79ab81497a8b5d From b2b2cbedb527c429128ef9302e006bc9af958fb5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 17 Jul 2023 12:27:14 +0200 Subject: [PATCH 183/722] chore: bump version .alpha4 (#3808) --- Cargo.lock | 92 +++++++++++++++++++++++++++--------------------------- Cargo.toml | 2 +- 2 files changed, 47 insertions(+), 47 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3aa656a4cddf..a4a7b44b51ba 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -886,7 +886,7 @@ checksum = "67ba02a97a2bd10f4b59b25c7973101c79642302776489e030cd13cdab09ed15" [[package]] name = "codecs-derive" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "convert_case 0.6.0", "parity-scale-codec", @@ -1716,7 +1716,7 @@ dependencies = [ [[package]] name = "ef-tests" -version 
= "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "reth-db", "reth-interfaces", @@ -4917,7 +4917,7 @@ dependencies = [ [[package]] name = "reth" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "backon", "clap", @@ -4983,7 +4983,7 @@ dependencies = [ [[package]] name = "reth-auto-seal-consensus" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "futures-util", "reth-beacon-consensus", @@ -5000,7 +5000,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "futures-core", "futures-util", @@ -5019,7 +5019,7 @@ dependencies = [ [[package]] name = "reth-beacon-consensus" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "assert_matches", "futures", @@ -5045,7 +5045,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "aquamarine", "assert_matches", @@ -5064,7 +5064,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "arbitrary", "bytes", @@ -5079,7 +5079,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "confy", "reth-discv4", @@ -5096,7 +5096,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "assert_matches", "mockall", @@ -5107,7 +5107,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "arbitrary", "assert_matches", @@ -5148,7 +5148,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "discv5", "enr", @@ -5171,7 +5171,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "async-trait", "data-encoding", @@ -5195,7 +5195,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "assert_matches", "futures", @@ -5220,7 +5220,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "aes 0.8.2", "block-padding", @@ -5251,7 +5251,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "arbitrary", "async-trait", @@ -5284,7 +5284,7 @@ dependencies = [ [[package]] name = "reth-interfaces" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "arbitrary", "async-trait", @@ -5312,7 +5312,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "async-trait", "bytes", @@ -5331,7 +5331,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "bitflags 2.3.2", "byteorder", @@ -5351,7 +5351,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "bindgen 0.65.1", "cc", @@ -5360,7 +5360,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "futures", "metrics", @@ -5370,7 +5370,7 @@ dependencies = [ [[package]] name = "reth-metrics-derive" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ 
"metrics", "once_cell", @@ -5384,7 +5384,7 @@ dependencies = [ [[package]] name = "reth-net-common" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "pin-project", "reth-primitives", @@ -5393,7 +5393,7 @@ dependencies = [ [[package]] name = "reth-net-nat" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "igd", "pin-project-lite", @@ -5407,7 +5407,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "aquamarine", "async-trait", @@ -5457,7 +5457,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "async-trait", "reth-eth-wire", @@ -5470,7 +5470,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "futures-util", "reth-interfaces", @@ -5489,7 +5489,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "arbitrary", "assert_matches", @@ -5537,7 +5537,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "auto_impl", "derive_more", @@ -5558,7 +5558,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "reth-primitives", "thiserror", @@ -5567,7 +5567,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "once_cell", "reth-consensus-common", @@ -5583,7 +5583,7 @@ dependencies = [ [[package]] name = "reth-revm-inspectors" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "boa_engine", "boa_gc", @@ -5599,7 +5599,7 @@ dependencies = [ [[package]] name = "reth-revm-primitives" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "reth-primitives", "revm", @@ -5607,7 +5607,7 @@ dependencies = [ [[package]] name = "reth-rlp" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "arrayvec", "auto_impl", @@ -5625,7 +5625,7 @@ dependencies = [ [[package]] name = "reth-rlp-derive" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "proc-macro2 1.0.63", "quote 1.0.28", @@ -5634,7 +5634,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "assert_matches", "async-trait", @@ -5680,7 +5680,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "jsonrpsee", "reth-primitives", @@ -5690,7 +5690,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "async-trait", "futures", @@ -5704,7 +5704,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "hyper", "jsonrpsee", @@ -5734,7 +5734,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "assert_matches", "async-trait", @@ -5755,7 +5755,7 @@ dependencies = [ [[package]] name = "reth-rpc-types" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "assert_matches", "jsonrpsee-types", @@ -5771,7 +5771,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "aquamarine", 
"assert_matches", @@ -5807,7 +5807,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "dyn-clone", "futures-util", @@ -5820,7 +5820,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "tracing", "tracing-appender", @@ -5830,7 +5830,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "aquamarine", "async-trait", @@ -5856,7 +5856,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" dependencies = [ "criterion", "derive_more", diff --git a/Cargo.toml b/Cargo.toml index c6b79a2c2164..523be9caa9cf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -55,7 +55,7 @@ default-members = ["bin/reth"] resolver = "2" [workspace.package] -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" edition = "2021" rust-version = "1.70" # Remember to update .clippy.toml and README.md license = "MIT OR Apache-2.0" From d095db50c4c430da61a0b3d683405fce1726651b Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 17 Jul 2023 07:06:26 -0400 Subject: [PATCH 184/722] feat: add `yParity` to rpc signatures (#3800) --- .../rpc/rpc-types/src/eth/transaction/mod.rs | 44 ++++- .../src/eth/transaction/signature.rs | 184 +++++++++++++++++- 2 files changed, 225 insertions(+), 3 deletions(-) diff --git a/crates/rpc/rpc-types/src/eth/transaction/mod.rs b/crates/rpc/rpc-types/src/eth/transaction/mod.rs index c61d6080b9b9..24005376c82f 100644 --- a/crates/rpc/rpc-types/src/eth/transaction/mod.rs +++ b/crates/rpc/rpc-types/src/eth/transaction/mod.rs @@ -182,6 +182,8 @@ impl Transaction { #[cfg(test)] mod tests { + use crate::eth::transaction::signature::Parity; + use super::*; #[test] @@ -198,7 +200,12 @@ mod tests { gas_price: Some(U128::from(9)), gas: U256::from(10), input: Bytes::from(vec![11, 12, 13]), - signature: Some(Signature { v: U256::from(14), r: U256::from(14), s: U256::from(14) }), + signature: Some(Signature { + v: U256::from(14), + r: U256::from(14), + s: U256::from(14), + y_parity: None, + }), chain_id: Some(U64::from(17)), access_list: None, transaction_type: Some(U64::from(20)), @@ -213,4 +220,39 @@ mod tests { let deserialized: Transaction = serde_json::from_str(&serialized).unwrap(); assert_eq!(transaction, deserialized); } + + #[test] + fn serde_transaction_with_parity_bit() { + let transaction = Transaction { + hash: H256::from_low_u64_be(1), + nonce: U256::from(2), + block_hash: Some(H256::from_low_u64_be(3)), + block_number: Some(U256::from(4)), + transaction_index: Some(U256::from(5)), + from: Address::from_low_u64_be(6), + to: Some(Address::from_low_u64_be(7)), + value: U256::from(8), + gas_price: Some(U128::from(9)), + gas: U256::from(10), + input: Bytes::from(vec![11, 12, 13]), + signature: Some(Signature { + v: U256::from(14), + r: U256::from(14), + s: U256::from(14), + y_parity: Some(Parity(true)), + }), + chain_id: Some(U64::from(17)), + access_list: None, + transaction_type: Some(U64::from(20)), + max_fee_per_gas: Some(U128::from(21)), + max_priority_fee_per_gas: Some(U128::from(22)), + }; + let serialized = serde_json::to_string(&transaction).unwrap(); + assert_eq!( + serialized, + 
r#"{"hash":"0x0000000000000000000000000000000000000000000000000000000000000001","nonce":"0x2","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000003","blockNumber":"0x4","transactionIndex":"0x5","from":"0x0000000000000000000000000000000000000006","to":"0x0000000000000000000000000000000000000007","value":"0x8","gasPrice":"0x9","gas":"0xa","maxFeePerGas":"0x15","maxPriorityFeePerGas":"0x16","input":"0x0b0c0d","r":"0xe","s":"0xe","v":"0xe","yParity":"0x1","chainId":"0x11","type":"0x14"}"# + ); + let deserialized: Transaction = serde_json::from_str(&serialized).unwrap(); + assert_eq!(transaction, deserialized); + } } diff --git a/crates/rpc/rpc-types/src/eth/transaction/signature.rs b/crates/rpc/rpc-types/src/eth/transaction/signature.rs index 6538636b40b5..3c31a126002e 100644 --- a/crates/rpc/rpc-types/src/eth/transaction/signature.rs +++ b/crates/rpc/rpc-types/src/eth/transaction/signature.rs @@ -9,6 +9,8 @@ pub struct Signature { pub r: U256, /// The S field of the signature; the point on the curve. pub s: U256, + // TODO: change these fields to an untagged enum for `v` XOR `y_parity` if/when CLs support it. + // See for more information /// For EIP-155, EIP-2930 and Blob transactions this is set to the parity (0 for even, 1 for /// odd) of the y-value of the secp256k1 signature. /// @@ -16,6 +18,9 @@ pub struct Signature { /// /// See also and pub v: U256, + /// The y parity of the signature. This is only used for typed (non-legacy) transactions. + #[serde(default, rename = "yParity", skip_serializing_if = "Option::is_none")] + pub y_parity: Option, } impl Signature { @@ -28,14 +33,24 @@ impl Signature { signature: PrimitiveSignature, chain_id: Option, ) -> Self { - Self { r: signature.r, s: signature.s, v: U256::from(signature.v(chain_id)) } + Self { + r: signature.r, + s: signature.s, + v: U256::from(signature.v(chain_id)), + y_parity: None, + } } /// Creates a new rpc signature from a non-legacy [primitive /// signature](reth_primitives::Signature). This sets the `v` value to `0` or `1` depending on /// the signature's `odd_y_parity`. pub(crate) fn from_typed_primitive_signature(signature: PrimitiveSignature) -> Self { - Self { r: signature.r, s: signature.s, v: U256::from(signature.odd_y_parity as u8) } + Self { + r: signature.r, + s: signature.s, + v: U256::from(signature.odd_y_parity as u8), + y_parity: Some(Parity(signature.odd_y_parity)), + } } /// Creates a new rpc signature from a legacy [primitive @@ -58,3 +73,168 @@ impl Signature { } } } + +/// Type that represents the signature parity byte, meant for use in RPC. +/// +/// This will be serialized as "0x0" if false, and "0x1" if true. +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub struct Parity( + #[serde(serialize_with = "serialize_parity", deserialize_with = "deserialize_parity")] pub bool, +); + +fn serialize_parity(parity: &bool, serializer: S) -> Result +where + S: serde::Serializer, +{ + serializer.serialize_str(if *parity { "0x1" } else { "0x0" }) +} + +/// This implementation disallows serialization of the y parity bit that are not `"0x0"` or `"0x1"`. 
+fn deserialize_parity<'de, D>(deserializer: D) -> Result +where + D: serde::Deserializer<'de>, +{ + let s = String::deserialize(deserializer)?; + match s.as_str() { + "0x0" => Ok(false), + "0x1" => Ok(true), + _ => Err(serde::de::Error::custom(format!( + "invalid parity value, parity should be either \"0x0\" or \"0x1\": {}", + s + ))), + } +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use super::*; + + #[test] + fn deserialize_without_parity() { + let raw_signature_without_y_parity = r#"{ + "r":"0xc569c92f176a3be1a6352dd5005bfc751dcb32f57623dd2a23693e64bf4447b0", + "s":"0x1a891b566d369e79b7a66eecab1e008831e22daa15f91a0a0cf4f9f28f47ee05", + "v":"0x1" + }"#; + + let signature: Signature = serde_json::from_str(raw_signature_without_y_parity).unwrap(); + let expected = Signature { + r: U256::from_str("0xc569c92f176a3be1a6352dd5005bfc751dcb32f57623dd2a23693e64bf4447b0") + .unwrap(), + s: U256::from_str("0x1a891b566d369e79b7a66eecab1e008831e22daa15f91a0a0cf4f9f28f47ee05") + .unwrap(), + v: U256::from_str("1").unwrap(), + y_parity: None, + }; + + assert_eq!(signature, expected); + } + + #[test] + fn deserialize_with_parity() { + let raw_signature_with_y_parity = r#"{ + "r":"0xc569c92f176a3be1a6352dd5005bfc751dcb32f57623dd2a23693e64bf4447b0", + "s":"0x1a891b566d369e79b7a66eecab1e008831e22daa15f91a0a0cf4f9f28f47ee05", + "v":"0x1", + "yParity": "0x1" + }"#; + + let signature: Signature = serde_json::from_str(raw_signature_with_y_parity).unwrap(); + let expected = Signature { + r: U256::from_str("0xc569c92f176a3be1a6352dd5005bfc751dcb32f57623dd2a23693e64bf4447b0") + .unwrap(), + s: U256::from_str("0x1a891b566d369e79b7a66eecab1e008831e22daa15f91a0a0cf4f9f28f47ee05") + .unwrap(), + v: U256::from_str("1").unwrap(), + y_parity: Some(Parity(true)), + }; + + assert_eq!(signature, expected); + } + + #[test] + fn serialize_both_parity() { + // this test should be removed if the struct moves to an enum based on tx type + let signature = Signature { + r: U256::from_str("0xc569c92f176a3be1a6352dd5005bfc751dcb32f57623dd2a23693e64bf4447b0") + .unwrap(), + s: U256::from_str("0x1a891b566d369e79b7a66eecab1e008831e22daa15f91a0a0cf4f9f28f47ee05") + .unwrap(), + v: U256::from_str("1").unwrap(), + y_parity: Some(Parity(true)), + }; + + let serialized = serde_json::to_string(&signature).unwrap(); + assert_eq!( + serialized, + r#"{"r":"0xc569c92f176a3be1a6352dd5005bfc751dcb32f57623dd2a23693e64bf4447b0","s":"0x1a891b566d369e79b7a66eecab1e008831e22daa15f91a0a0cf4f9f28f47ee05","v":"0x1","yParity":"0x1"}"# + ); + } + + #[test] + fn serialize_v_only() { + // this test should be removed if the struct moves to an enum based on tx type + let signature = Signature { + r: U256::from_str("0xc569c92f176a3be1a6352dd5005bfc751dcb32f57623dd2a23693e64bf4447b0") + .unwrap(), + s: U256::from_str("0x1a891b566d369e79b7a66eecab1e008831e22daa15f91a0a0cf4f9f28f47ee05") + .unwrap(), + v: U256::from_str("1").unwrap(), + y_parity: None, + }; + + let expected = r#"{"r":"0xc569c92f176a3be1a6352dd5005bfc751dcb32f57623dd2a23693e64bf4447b0","s":"0x1a891b566d369e79b7a66eecab1e008831e22daa15f91a0a0cf4f9f28f47ee05","v":"0x1"}"#; + + let serialized = serde_json::to_string(&signature).unwrap(); + assert_eq!(serialized, expected); + } + + #[test] + fn serialize_parity() { + let parity = Parity(true); + let serialized = serde_json::to_string(&parity).unwrap(); + assert_eq!(serialized, r#""0x1""#); + + let parity = Parity(false); + let serialized = serde_json::to_string(&parity).unwrap(); + assert_eq!(serialized, r#""0x0""#); + } + + #[test] + 
+    fn deserialize_parity() {
+        let raw_parity = r#""0x1""#;
+        let parity: Parity = serde_json::from_str(raw_parity).unwrap();
+        assert_eq!(parity, Parity(true));
+
+        let raw_parity = r#""0x0""#;
+        let parity: Parity = serde_json::from_str(raw_parity).unwrap();
+        assert_eq!(parity, Parity(false));
+    }
+
+    #[test]
+    fn deserialize_parity_invalid() {
+        let raw_parity = r#""0x2""#;
+        let parity: Result<Parity, _> = serde_json::from_str(raw_parity);
+        assert!(parity.is_err());
+
+        let raw_parity = r#""0x""#;
+        let parity: Result<Parity, _> = serde_json::from_str(raw_parity);
+        assert!(parity.is_err());
+
+        // In the spec this is defined as a uint, which requires 0x
+        // yParity:
+        //
+        //
+        // uint:
+        //
+        let raw_parity = r#""1""#;
+        let parity: Result<Parity, _> = serde_json::from_str(raw_parity);
+        assert!(parity.is_err());
+
+        let raw_parity = r#""0""#;
+        let parity: Result<Parity, _> = serde_json::from_str(raw_parity);
+        assert!(parity.is_err());
+    }
+}
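As a capsule summary of the serde behaviour this patch adds, here is a round-trip sketch (written as if it lived in the test module above; the values are arbitrary):

```rust
#[test]
fn yparity_round_trip() {
    // A typed-transaction signature now carries both `v` and `yParity`.
    let sig = Signature {
        r: U256::from(14),
        s: U256::from(14),
        v: U256::from(1),
        y_parity: Some(Parity(true)),
    };
    let json = serde_json::to_string(&sig).unwrap();
    // `yParity` rides along with `v`; legacy signatures omit it entirely
    // thanks to `skip_serializing_if = "Option::is_none"`.
    assert!(json.contains(r#""yParity":"0x1""#));
    let back: Signature = serde_json::from_str(&json).unwrap();
    assert_eq!(back, sig);
}
```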
From eb32fd3c6d2feec5e845981ecbe91139656e6bff Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Mon, 17 Jul 2023 16:25:46 +0200
Subject: [PATCH 185/722] feat: add network txpool example (#3809)

---
 Cargo.lock                 |  1 +
 examples/Cargo.toml        |  5 +++
 examples/network-txpool.rs | 86 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 92 insertions(+)
 create mode 100644 examples/network-txpool.rs

diff --git a/Cargo.lock b/Cargo.lock
index a4a7b44b51ba..40fe1b49bd2b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2157,6 +2157,7 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0"
 name = "examples"
 version = "0.0.0"
 dependencies = [
+ "async-trait",
  "eyre",
  "futures",
  "reth-beacon-consensus",
diff --git a/examples/Cargo.toml b/examples/Cargo.toml
index 6114a5732777..fd244ad0fe61 100644
--- a/examples/Cargo.toml
+++ b/examples/Cargo.toml
@@ -25,6 +25,7 @@ reth-tasks.workspace = true
 
 eyre = "0.6.8"
 futures.workspace = true
+async-trait.workspace = true
 tokio.workspace = true
 
 [[example]]
@@ -38,3 +39,7 @@ path = "db-access.rs"
 [[example]]
 name = "network"
 path = "network.rs"
+
+[[example]]
+name = "network-txpool"
+path = "network-txpool.rs"
diff --git a/examples/network-txpool.rs b/examples/network-txpool.rs
new file mode 100644
index 000000000000..d85426d1fa6f
--- /dev/null
+++ b/examples/network-txpool.rs
@@ -0,0 +1,86 @@
+//! Example of how to use the network as a standalone component together with a transaction pool and
+//! a custom pool validator.
+//!
+//! Run with
+//!
+//! ```not_rust
+//! cargo run --example network-txpool
+//! ```
+
+use reth_network::{config::rng_secret_key, NetworkConfig, NetworkManager};
+use reth_provider::test_utils::NoopProvider;
+use reth_transaction_pool::{
+    GasCostOrdering, PoolTransaction, PooledTransaction, TransactionOrigin, TransactionPool,
+    TransactionValidationOutcome, TransactionValidator,
+};
+
+#[tokio::main]
+async fn main() -> eyre::Result<()> {
+    // This block provider implementation is used for testing purposes.
+    // NOTE: This also means that we don't have access to the blockchain and are not able to serve
+    // any requests for headers or bodies which can result in dropped connections initiated by
+    // remote or able to validate transaction against the latest state.
+    let client = NoopProvider::default();
+
+    let pool = reth_transaction_pool::Pool::new(
+        OkValidator::default(),
+        GasCostOrdering::default(),
+        Default::default(),
+    );
+
+    // The key that's used for encrypting sessions and to identify our node.
+    let local_key = rng_secret_key();
+
+    // Configure the network
+    let config =
+        NetworkConfig::<NoopProvider>::builder(local_key).mainnet_boot_nodes().build(client);
+
+    // create the network instance
+    let (_handle, network, txpool, _) =
+        NetworkManager::builder(config).await?.transactions(pool.clone()).split_with_handle();
+
+    // spawn the network task
+    tokio::task::spawn(network);
+    // spawn the pool task
+    tokio::task::spawn(txpool);
+
+    // listen for new transactions
+    let mut txs = pool.pending_transactions_listener();
+
+    while let Some(tx) = txs.recv().await {
+        println!("Received new transaction: {:?}", tx);
+    }
+
+    Ok(())
+}
+
+/// A transaction validator that determines all transactions to be valid.
+///
+/// An actual validator impl like
+/// [EthTransactionValidator](reth_transaction_pool::EthTransactionValidator) would require up to
+/// date db access.
+///
+/// CAUTION: This validator is not safe to use since it doesn't actually validate the transaction's
+/// properties such as chain id, balance, nonce, etc.
+#[derive(Default)]
+#[non_exhaustive]
+struct OkValidator;
+
+#[async_trait::async_trait]
+impl TransactionValidator for OkValidator {
+    type Transaction = PooledTransaction;
+
+    async fn validate_transaction(
+        &self,
+        _origin: TransactionOrigin,
+        transaction: Self::Transaction,
+    ) -> TransactionValidationOutcome<Self::Transaction> {
+        // Always return valid
+        TransactionValidationOutcome::Valid {
+            balance: transaction.cost(),
+            state_nonce: transaction.nonce(),
+            transaction,
+            propagate: false,
+        }
+    }
+}

From 01e1344cc1175b7c7110498db6e5319329b62472 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Mon, 17 Jul 2023 18:31:25 +0200
Subject: [PATCH 186/722] chore: use 50M default gas limit for calls in rpc
 (#3812)

---
 bin/reth/src/args/rpc_server_args.rs       |  5 +----
 crates/rpc/rpc-builder/src/constants.rs    |  7 +++++++
 crates/rpc/rpc-builder/src/eth.rs          |  8 +++++---
 crates/rpc/rpc/src/debug.rs                |  9 ++++++++-
 crates/rpc/rpc/src/eth/api/mod.rs          |  3 +--
 crates/rpc/rpc/src/eth/api/transactions.rs | 13 +++++++++++--
 crates/rpc/rpc/src/eth/revm_utils.rs       | 10 +++++-----
 crates/rpc/rpc/src/trace.rs                |  2 ++
 8 files changed, 40 insertions(+), 17 deletions(-)

diff --git a/bin/reth/src/args/rpc_server_args.rs b/bin/reth/src/args/rpc_server_args.rs
index 4a1f8d2fb89a..a1c954b4a8a5 100644
--- a/bin/reth/src/args/rpc_server_args.rs
+++ b/bin/reth/src/args/rpc_server_args.rs
@@ -7,7 +7,6 @@ use clap::{
 };
 use futures::TryFutureExt;
 use reth_network_api::{NetworkInfo, Peers};
-use reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT;
 use reth_provider::{
     BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, EvmEnvProvider, HeaderProvider,
     StateProviderFactory,
@@ -24,6 +23,7 @@ use reth_rpc::{
 use reth_rpc_builder::{
     auth::{AuthServerConfig, AuthServerHandle},
     constants,
     constants::RPC_DEFAULT_GAS_CAP,
     error::RpcError,
     EthConfig, IpcServerBuilder, RethRpcModule, RpcModuleBuilder, RpcModuleConfig,
     RpcModuleSelection, RpcServerConfig, RpcServerHandle, ServerBuilder, TransportRpcModuleConfig,
@@ -49,9 +49,6 @@
 pub(crate) const RPC_DEFAULT_MAX_CONNECTIONS: u32 = 100;
 /// Default number of incoming connections.
 pub(crate) const RPC_DEFAULT_MAX_TRACING_REQUESTS: u32 = 25;
-/// Default max gas limit for `eth_call` and call tracing RPC methods.
-pub(crate) const RPC_DEFAULT_GAS_CAP: u64 = ETHEREUM_BLOCK_GAS_LIMIT; - /// Parameters for configuring the rpc more granularity via CLI #[derive(Debug, Args, PartialEq, Eq, Default)] #[command(next_help_heading = "RPC")] diff --git a/crates/rpc/rpc-builder/src/constants.rs b/crates/rpc/rpc-builder/src/constants.rs index a1b2bc36a82d..6768ca62a659 100644 --- a/crates/rpc/rpc-builder/src/constants.rs +++ b/crates/rpc/rpc-builder/src/constants.rs @@ -7,6 +7,13 @@ pub const DEFAULT_WS_RPC_PORT: u16 = 8546; /// The default port for the auth server. pub const DEFAULT_AUTH_PORT: u16 = 8551; +/// The default gas limit for eth_call and adjacent calls. +/// +/// This is different from the default to regular 30M block gas limit +/// [ETHEREUM_BLOCK_GAS_LIMIT](reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT) to allow for +/// more complex calls. +pub const RPC_DEFAULT_GAS_CAP: u64 = 50_000_000; + /// The default IPC endpoint #[cfg(windows)] pub const DEFAULT_IPC_ENDPOINT: &str = r"\\.\pipe\reth.ipc"; diff --git a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs index 767660249e27..5b2d6780a531 100644 --- a/crates/rpc/rpc-builder/src/eth.rs +++ b/crates/rpc/rpc-builder/src/eth.rs @@ -1,4 +1,4 @@ -use reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT; +use crate::constants::RPC_DEFAULT_GAS_CAP; use reth_rpc::{ eth::{ cache::{EthStateCache, EthStateCacheConfig}, @@ -38,7 +38,9 @@ pub struct EthConfig { pub max_tracing_requests: u32, /// Maximum number of logs that can be returned in a single response in `eth_getLogs` calls. pub max_logs_per_response: usize, - /// Maximum gas limit for `eth_call` and call tracing RPC methods. + /// Gas limit for `eth_call` and call tracing RPC methods. + /// + /// Defaults to [RPC_DEFAULT_GAS_CAP] pub rpc_gas_cap: u64, } @@ -49,7 +51,7 @@ impl Default for EthConfig { gas_oracle: GasPriceOracleConfig::default(), max_tracing_requests: DEFAULT_MAX_TRACING_REQUESTS, max_logs_per_response: DEFAULT_MAX_LOGS_PER_RESPONSE, - rpc_gas_cap: ETHEREUM_BLOCK_GAS_LIMIT, + rpc_gas_cap: RPC_DEFAULT_GAS_CAP, } } } diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 3f8fd11a026d..d6abad21fe22 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -319,7 +319,14 @@ where let state = self.inner.eth_api.state_at(at)?; let mut db = SubState::new(State::new(state)); let has_state_overrides = overrides.has_state(); - let env = prepare_call_env(cfg, block_env, call, &mut db, overrides)?; + let env = prepare_call_env( + cfg, + block_env, + call, + self.inner.eth_api.call_gas_limit(), + &mut db, + overrides, + )?; // If the caller provided state overrides we need to clone the DB so the js // service has access these modifications diff --git a/crates/rpc/rpc/src/eth/api/mod.rs b/crates/rpc/rpc/src/eth/api/mod.rs index f291bf2566fb..50e634f424a2 100644 --- a/crates/rpc/rpc/src/eth/api/mod.rs +++ b/crates/rpc/rpc/src/eth/api/mod.rs @@ -160,8 +160,7 @@ where } /// Returns the configured gas limit cap for `eth_call` and tracing related calls - #[allow(unused)] - pub(crate) fn gas_cap(&self) -> u64 { + pub fn gas_cap(&self) -> u64 { self.inner.gas_cap } diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 6a5e21d5e162..80d83fdb8094 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -43,6 +43,9 @@ pub(crate) type StateCacheDB<'r> = CacheDB>>; /// Commonly used transaction related functions for the [EthApi] type in the 
`eth_` namespace #[async_trait::async_trait] pub trait EthTransactions: Send + Sync { + /// Returns default gas limit to use for `eth_call` and tracing RPC methods. + fn call_gas_limit(&self) -> u64; + /// Returns the state at the given [BlockId] fn state_at(&self, at: BlockId) -> EthResult>; @@ -226,6 +229,10 @@ where Provider: BlockReaderIdExt + StateProviderFactory + EvmEnvProvider + 'static, Network: NetworkInfo + Send + Sync + 'static, { + fn call_gas_limit(&self) -> u64 { + self.inner.gas_cap + } + fn state_at(&self, at: BlockId) -> EthResult> { self.state_at_block_id(at) } @@ -480,7 +487,8 @@ where let state = self.state_at(at)?; let mut db = SubState::new(State::new(state)); - let env = prepare_call_env(cfg, block_env, request, &mut db, overrides)?; + let env = + prepare_call_env(cfg, block_env, request, self.call_gas_limit(), &mut db, overrides)?; f(db, env) } @@ -520,7 +528,8 @@ where let state = self.state_at(at)?; let mut db = SubState::new(State::new(state)); - let env = prepare_call_env(cfg, block_env, request, &mut db, overrides)?; + let env = + prepare_call_env(cfg, block_env, request, self.call_gas_limit(), &mut db, overrides)?; inspect_and_return_db(db, env, inspector) } diff --git a/crates/rpc/rpc/src/eth/revm_utils.rs b/crates/rpc/rpc/src/eth/revm_utils.rs index 259c34589beb..d40689abe18e 100644 --- a/crates/rpc/rpc/src/eth/revm_utils.rs +++ b/crates/rpc/rpc/src/eth/revm_utils.rs @@ -2,8 +2,7 @@ use crate::eth::error::{EthApiError, EthResult, RpcInvalidTransactionError}; use reth_primitives::{ - constants::ETHEREUM_BLOCK_GAS_LIMIT, AccessList, Address, TransactionSigned, - TransactionSignedEcRecovered, TxHash, H256, U256, + AccessList, Address, TransactionSigned, TransactionSignedEcRecovered, TxHash, H256, U256, }; use reth_revm::env::{fill_tx_env, fill_tx_env_with_recovered}; use reth_rpc_types::{ @@ -203,6 +202,7 @@ pub(crate) fn prepare_call_env( mut cfg: CfgEnv, block: BlockEnv, request: CallRequest, + gas_limit: u64, db: &mut CacheDB, overrides: EvmOverrides, ) -> EthResult @@ -247,10 +247,10 @@ where // If no gas price is specified, use maximum allowed gas limit. 
The reason for this is // that both Erigon and Geth use pre-configured gas cap even if it's possible // to derive the gas limit from the block: - // https://github.com/ledgerwatch/erigon/blob/eae2d9a79cb70dbe30b3a6b79c436872e4605458/cmd/rpcdaemon/commands/trace_adhoc.go#L956 - // https://github.com/ledgerwatch/erigon/blob/eae2d9a79cb70dbe30b3a6b79c436872e4605458/eth/ethconfig/config.go#L94 + // trace!(target: "rpc::eth::call", ?env, "Applying gas limit cap as the maximum gas limit"); - env.tx.gas_limit = ETHEREUM_BLOCK_GAS_LIMIT; + env.tx.gas_limit = gas_limit; } } diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 9a19b9282b59..2fcf2d237b7d 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -191,6 +191,7 @@ where let (cfg, block_env, at) = self.inner.eth_api.evm_env_at(at).await?; self.on_blocking_task(|this| async move { + let gas_limit = this.inner.eth_api.call_gas_limit(); // execute all transactions on top of each other and record the traces this.inner.eth_api.with_state_at_block(at, move |state| { let mut results = Vec::with_capacity(calls.len()); @@ -203,6 +204,7 @@ where cfg.clone(), block_env.clone(), call, + gas_limit, &mut db, Default::default(), )?; From 8b66213e1555560ae8d89098182d9a979e032ea8 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 17 Jul 2023 23:18:20 +0200 Subject: [PATCH 187/722] chore(deps): bump some deps (#3820) --- Cargo.lock | 598 +++++++++++++++-------------- Cargo.toml | 9 +- bin/reth/Cargo.toml | 2 +- crates/primitives/Cargo.toml | 2 +- crates/primitives/src/chain/mod.rs | 8 +- crates/rpc/rpc-builder/Cargo.toml | 2 +- 6 files changed, 335 insertions(+), 286 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 40fe1b49bd2b..d013576e75c9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -163,10 +163,10 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "759d98a5db12e9c9d98ef2b92f794ae5c7ded6ec18d21c3fa485c9c65bec237d" dependencies = [ - "itertools", + "itertools 0.10.5", "proc-macro-error", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -222,9 +222,9 @@ version = "0.1.68" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", - "syn 2.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] @@ -275,8 +275,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fee3da8ef1276b0bee5dd1c7258010d8fffd31801447323115a25560e1327b89" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -388,8 +388,8 @@ dependencies = [ "lazy_static", "lazycell", "peeking_take_while", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "regex", "rustc-hash", "shlex", @@ -409,12 +409,12 @@ dependencies = [ "lazycell", "peeking_take_while", "prettyplease", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "regex", "rustc-hash", "shlex", - "syn 2.0.18", + "syn 2.0.26", ] [[package]] @@ -440,9 +440,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dbe3c979c178231552ecba20214a8272df4e09f232a87aef4320cf06539aded" 
+checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" [[package]] name = "bitvec" @@ -496,23 +496,23 @@ dependencies = [ [[package]] name = "boa_ast" -version = "0.16.0" -source = "git+https://github.com/boa-dev/boa#0e1b32a232109fc0e192c1297a7274091af2ac61" +version = "0.17.0" +source = "git+https://github.com/boa-dev/boa#b51e7cfb84fd6a4dc474d103e5654efedfe94a4e" dependencies = [ - "bitflags 2.3.2", + "bitflags 2.3.3", "boa_interner", "boa_macros", - "indexmap 1.9.3", + "indexmap 2.0.0", "num-bigint", "rustc-hash", ] [[package]] name = "boa_engine" -version = "0.16.0" -source = "git+https://github.com/boa-dev/boa#0e1b32a232109fc0e192c1297a7274091af2ac61" +version = "0.17.0" +source = "git+https://github.com/boa-dev/boa#b51e7cfb84fd6a4dc474d103e5654efedfe94a4e" dependencies = [ - "bitflags 2.3.2", + "bitflags 2.3.3", "boa_ast", "boa_gc", "boa_icu_provider", @@ -524,8 +524,8 @@ dependencies = [ "dashmap", "fast-float", "icu_normalizer", - "indexmap 1.9.3", - "itertools", + "indexmap 2.0.0", + "itertools 0.11.0", "num-bigint", "num-integer", "num-traits", @@ -547,8 +547,8 @@ dependencies = [ [[package]] name = "boa_gc" -version = "0.16.0" -source = "git+https://github.com/boa-dev/boa#0e1b32a232109fc0e192c1297a7274091af2ac61" +version = "0.17.0" +source = "git+https://github.com/boa-dev/boa#b51e7cfb84fd6a4dc474d103e5654efedfe94a4e" dependencies = [ "boa_macros", "boa_profiler", @@ -557,8 +557,8 @@ dependencies = [ [[package]] name = "boa_icu_provider" -version = "0.16.0" -source = "git+https://github.com/boa-dev/boa#0e1b32a232109fc0e192c1297a7274091af2ac61" +version = "0.17.0" +source = "git+https://github.com/boa-dev/boa#b51e7cfb84fd6a4dc474d103e5654efedfe94a4e" dependencies = [ "icu_collections", "icu_normalizer", @@ -570,13 +570,13 @@ dependencies = [ [[package]] name = "boa_interner" -version = "0.16.0" -source = "git+https://github.com/boa-dev/boa#0e1b32a232109fc0e192c1297a7274091af2ac61" +version = "0.17.0" +source = "git+https://github.com/boa-dev/boa#b51e7cfb84fd6a4dc474d103e5654efedfe94a4e" dependencies = [ "boa_gc", "boa_macros", "hashbrown 0.14.0", - "indexmap 1.9.3", + "indexmap 2.0.0", "once_cell", "phf", "rustc-hash", @@ -585,21 +585,21 @@ dependencies = [ [[package]] name = "boa_macros" -version = "0.16.0" -source = "git+https://github.com/boa-dev/boa#0e1b32a232109fc0e192c1297a7274091af2ac61" +version = "0.17.0" +source = "git+https://github.com/boa-dev/boa#b51e7cfb84fd6a4dc474d103e5654efedfe94a4e" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", - "syn 2.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", "synstructure 0.13.0", ] [[package]] name = "boa_parser" -version = "0.16.0" -source = "git+https://github.com/boa-dev/boa#0e1b32a232109fc0e192c1297a7274091af2ac61" +version = "0.17.0" +source = "git+https://github.com/boa-dev/boa#b51e7cfb84fd6a4dc474d103e5654efedfe94a4e" dependencies = [ - "bitflags 2.3.2", + "bitflags 2.3.3", "boa_ast", "boa_icu_provider", "boa_interner", @@ -616,8 +616,8 @@ dependencies = [ [[package]] name = "boa_profiler" -version = "0.16.0" -source = "git+https://github.com/boa-dev/boa#0e1b32a232109fc0e192c1297a7274091af2ac61" +version = "0.17.0" +source = "git+https://github.com/boa-dev/boa#b51e7cfb84fd6a4dc474d103e5654efedfe94a4e" [[package]] name = "brotli" @@ -864,8 +864,8 @@ checksum = "44bec8e5c9d09e439c4335b1af0abaab56dcf3b94999a936e1bb47b9134288f0" dependencies = [ "heck", "proc-macro-error", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ 
-890,10 +890,10 @@ version = "0.1.0-alpha.4" dependencies = [ "convert_case 0.6.0", "parity-scale-codec", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "serde", - "syn 2.0.18", + "syn 2.0.26", ] [[package]] @@ -964,13 +964,13 @@ dependencies = [ [[package]] name = "comfy-table" -version = "6.1.4" +version = "7.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e7b787b0dc42e8111badfdbe4c3059158ccb2db8780352fa1b01e8ccf45cc4d" +checksum = "9ab77dbd8adecaf3f0db40581631b995f312a8a5ae3aa9993188bb8f23d83a5b" dependencies = [ - "crossterm", - "strum", - "strum_macros", + "crossterm 0.26.1", + "strum 0.24.1", + "strum_macros 0.24.3", "unicode-width", ] @@ -1096,7 +1096,7 @@ dependencies = [ "criterion-plot", "futures", "is-terminal", - "itertools", + "itertools 0.10.5", "num-traits", "once_cell", "oorandom", @@ -1117,7 +1117,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" dependencies = [ "cast", - "itertools", + "itertools 0.10.5", ] [[package]] @@ -1185,11 +1185,27 @@ dependencies = [ "winapi", ] +[[package]] +name = "crossterm" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a84cda67535339806297f1b331d6dd6320470d2a0fe65381e79ee9e156dd3d13" +dependencies = [ + "bitflags 1.3.2", + "crossterm_winapi", + "libc", + "mio", + "parking_lot 0.12.1", + "signal-hook", + "signal-hook-mio", + "winapi", +] + [[package]] name = "crossterm_winapi" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ae1b35a484aa10e07fe0638d02301c5ad24de82d310ccbd2f3693da5f09bf1c" +checksum = "acdd7c62a3665c7f6830a51635d9ac9b23ed385797f70a83bb8bafe9c572ab2b" dependencies = [ "winapi", ] @@ -1228,7 +1244,7 @@ version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" dependencies = [ - "quote 1.0.28", + "quote 1.0.31", "syn 1.0.109", ] @@ -1286,8 +1302,8 @@ dependencies = [ "cc", "codespan-reporting", "once_cell", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "scratch", "syn 1.0.109", ] @@ -1304,8 +1320,8 @@ version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "086c685979a698443656e5cf7856c95c642295a38599f12fb1ff76fb28d19892" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -1347,8 +1363,8 @@ checksum = "f0c960ae2da4de88a91b2d920c2a7233b400bc33cb28453a2987822d8392519b" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "strsim 0.9.3", "syn 1.0.109", ] @@ -1361,8 +1377,8 @@ checksum = "001d80444f28e193f30c2f293455da62dcf9a6b29918a4253152ae2b1de592cb" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "strsim 0.10.0", "syn 1.0.109", ] @@ -1375,10 +1391,10 @@ checksum = "ab8bfa2e259f8ee1ce5e97824a3c55ec4404a0d772ca7fa96bf19f0752a046eb" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "strsim 0.10.0", - "syn 2.0.18", + "syn 2.0.26", ] [[package]] @@ -1388,7 +1404,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d9b5a2f4ac4969822c62224815d069952656cadc7084fdca9751e6d959189b72" dependencies = [ "darling_core 0.10.2", - "quote 1.0.28", + "quote 1.0.31", "syn 1.0.109", ] @@ -1399,7 +1415,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b36230598a2d5de7ec1c6f51f72d8a99a9208daff41de2084d06e3fd3ea56685" dependencies = [ "darling_core 0.14.3", - "quote 1.0.28", + "quote 1.0.31", "syn 1.0.109", ] @@ -1410,21 +1426,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core 0.20.1", - "quote 1.0.28", - "syn 2.0.18", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] name = "dashmap" -version = "5.4.0" +version = "5.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "907076dfda823b0b36d2a1bb5f90c96660a5bbcd7729e10727f07858f22c4edc" +checksum = "6943ae99c34386c84a470c499d3414f66502a41340aa895406e0d2e4a207b91d" dependencies = [ "cfg-if", - "hashbrown 0.12.3", + "hashbrown 0.14.0", "lock_api", "once_cell", - "parking_lot_core 0.9.7", + "parking_lot_core 0.9.8", ] [[package]] @@ -1468,8 +1484,8 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3cdeb9ec472d588e539a818b2dee436825730da08ad0017c4b1a17676bdc8b7" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -1481,8 +1497,8 @@ checksum = "a2658621297f2cf68762a6f7dc0bb7e1ff2cfd6583daef8ee0fed6f7ec468ec0" dependencies = [ "darling 0.10.2", "derive_builder_core", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -1493,8 +1509,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2791ea3e372c8495c0bc2033991d76b512cd799d07491fbd6890124db9458bef" dependencies = [ "darling 0.10.2", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -1505,8 +1521,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case 0.4.0", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "rustc_version", "syn 1.0.109", ] @@ -1597,7 +1613,7 @@ dependencies = [ [[package]] name = "discv5" version = "0.3.0" -source = "git+https://github.com/sigp/discv5#47844ca54e8d22f4fd3db4594645e65afb288bb6" +source = "git+https://github.com/sigp/discv5#f78d538ef8f3c3b3981cfbb8ce2ba3179295eeab" dependencies = [ "aes 0.7.5", "aes-gcm", @@ -1630,9 +1646,9 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", - "syn 2.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] @@ -1709,8 +1725,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0188e3c3ba8df5753894d54461f0e39bc91741dc5b22e1c46999ec2c71f4e4" dependencies = [ "enum-ordinalize", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -1806,8 +1822,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "570d109b813e904becc80d8d5da38376818a143348413f7149f1340fe04754d4" dependencies = [ "heck", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 
1.0.109", ] @@ -1818,8 +1834,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" dependencies = [ "heck", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -1831,8 +1847,8 @@ checksum = "a62bb1df8b45ecb7ffa78dca1c17a438fb193eb083db0b1b494d2a61bcb5096a" dependencies = [ "num-bigint", "num-traits", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "rustc_version", "syn 1.0.109", ] @@ -1843,9 +1859,9 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48016319042fb7c87b78d2993084a831793a897a5cd1a2a67cab9d1eeb4b7d76" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", - "syn 2.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] @@ -1958,14 +1974,15 @@ dependencies = [ [[package]] name = "ethers-contract" -version = "2.0.7" +version = "2.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e066a0d9cfc70c454672bf16bb433b0243427420076dc5b2f49c448fb5a10628" +checksum = "0d4719a44c3d37ab07c6dea99ab174068d8c35e441b60b6c20ce4e48357273e8" dependencies = [ "ethers-contract-abigen", "ethers-contract-derive", "ethers-core", "ethers-providers", + "ethers-signers", "futures-util", "hex", "once_cell", @@ -1977,9 +1994,9 @@ dependencies = [ [[package]] name = "ethers-contract-abigen" -version = "2.0.7" +version = "2.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c113e3e86b6bc16d98484b2c3bb2d01d6fed9f489fe2e592e5cc87c3024d616b" +checksum = "155ea1b84d169d231317ed86e307af6f2bed6b40dd17e5e94bc84da21cadb21c" dependencies = [ "Inflector", "dunce", @@ -1987,37 +2004,37 @@ dependencies = [ "eyre", "hex", "prettyplease", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "regex", "serde", "serde_json", - "syn 2.0.18", + "syn 2.0.26", "toml 0.7.5", "walkdir", ] [[package]] name = "ethers-contract-derive" -version = "2.0.7" +version = "2.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c3fb5adee25701c79ec58fcf2c63594cd8829bc9ad6037ff862d5a111101ed2" +checksum = "8567ff196c4a37c1a8c90ec73bda0ad2062e191e4f0a6dc4d943e2ec4830fc88" dependencies = [ "Inflector", "ethers-contract-abigen", "ethers-core", "hex", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "serde_json", - "syn 2.0.18", + "syn 2.0.26", ] [[package]] name = "ethers-core" -version = "2.0.7" +version = "2.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6da5fa198af0d3be20c19192df2bd9590b92ce09a8421e793bec8851270f1b05" +checksum = "60ca2514feb98918a0a31de7e1983c29f2267ebf61b2dc5d4294f91e5b866623" dependencies = [ "arrayvec", "bytes", @@ -2035,8 +2052,8 @@ dependencies = [ "rlp", "serde", "serde_json", - "strum", - "syn 2.0.18", + "strum 0.25.0", + "syn 2.0.26", "tempfile", "thiserror", "tiny-keccak", @@ -2045,9 +2062,9 @@ dependencies = [ [[package]] name = "ethers-etherscan" -version = "2.0.7" +version = "2.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84ebb401ba97c6f5af278c2c9936c4546cad75dec464b439ae6df249906f4caa" +checksum = "22b3a8269d3df0ed6364bc05b4735b95f4bf830ce3aef87d5e760fb0e93e5b91" dependencies = [ "ethers-core", "reqwest", @@ -2060,9 +2077,9 @@ dependencies = [ [[package]] name = "ethers-middleware" -version = "2.0.7" +version = "2.0.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "740f4a773c19dd6d6a68c8c2e0996c096488d38997d524e21dc612c55da3bd24" +checksum = "e0c339aad74ae5c451d27e0e49c7a3c7d22620b119b4f9291d7aa21f72d7f366" dependencies = [ "async-trait", "auto_impl", @@ -2087,9 +2104,9 @@ dependencies = [ [[package]] name = "ethers-providers" -version = "2.0.7" +version = "2.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56b498fd2a6c019d023e43e83488cd1fb0721f299055975aa6bac8dbf1e95f2c" +checksum = "b411b119f1cf0efb69e2190883dee731251882bb21270f893ee9513b3a697c48" dependencies = [ "async-trait", "auto_impl", @@ -2124,9 +2141,9 @@ dependencies = [ [[package]] name = "ethers-signers" -version = "2.0.7" +version = "2.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02c4b7e15f212fa7cc2e1251868320221d4ff77a3d48068e69f47ce1c491df2d" +checksum = "4864d387456a9c09a1157fa10e1528b29d90f1d859443acf06a1b23365fb518c" dependencies = [ "async-trait", "coins-bip32", @@ -2359,9 +2376,9 @@ version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", - "syn 2.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] @@ -2957,8 +2974,8 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd8b728b9421e93eff1d9f8681101b78fa745e0748c95c655c83f337044a7e10" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -3045,8 +3062,8 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -3174,6 +3191,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.6" @@ -3323,8 +3349,8 @@ checksum = "c6027ac0b197ce9543097d02a290f550ce1d9432bf301524b013053c0b75cc94" dependencies = [ "heck", "proc-macro-crate", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -3439,9 +3465,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.141" +version = "0.2.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3304a64d199bb964be99741b7a14d26972741915b3649639149b2479bb46f4b5" +checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" [[package]] name = "libloading" @@ -3529,9 +3555,9 @@ checksum = "3a04a5b2b6f54acba899926491d0a6c59d98012938ca2ab5befb281c034e8f94" [[package]] name = "lock_api" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" +checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" dependencies = [ "autocfg", "scopeguard", @@ -3539,12 +3565,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.17" +version = "0.4.19" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if", -] +checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" [[package]] name = "lru" @@ -3672,8 +3695,8 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "731f8ecebd9f3a4aa847dfe75455e4757a45da40a7793d2f0b1f9b6ed18b23f3" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -3746,14 +3769,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.6" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" +checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -3778,8 +3801,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "832663583d5fa284ca8810bf7015e46c9fff9622d3cf34bd1eea5003fec06dd0" dependencies = [ "cfg-if", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -3799,8 +3822,8 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a7d5f7076603ebc68de2dc6a650ec331a062a13abaa346975be747bbfa4b789" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -3971,9 +3994,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.63", - "quote 1.0.28", - "syn 2.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] @@ -4027,8 +4050,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "003b2be5c6c53c1cfeb0a238b8a1c3915cd410feb684457a36c10038f764bb1c" dependencies = [ "bytes", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -4110,8 +4133,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86b26a931f824dd4eca30b3e43bb4f31cd5f0d3a403c5f5ff27106b805bfde7b" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -4147,7 +4170,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.7", + "parking_lot_core 0.9.8", ] [[package]] @@ -4166,15 +4189,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.7" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" +checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.2.16", + "redox_syscall 0.3.5", "smallvec", - "windows-sys 0.45.0", + "windows-targets 0.48.1", ] [[package]] @@ -4235,9 +4258,9 @@ dependencies = [ [[package]] name = "phf" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"928c6535de93548188ef63bb7c4036bd415cd8f36ad25af44b9789b2ee72a48c" +checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" dependencies = [ "phf_macros", "phf_shared", @@ -4255,22 +4278,22 @@ dependencies = [ [[package]] name = "phf_macros" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92aacdc5f16768709a569e913f7451034034178b05bdc8acda226659a3dccc66" +checksum = "3444646e286606587e49f3bcf1679b8cef1dc2c5ecc29ddacaffc305180d464b" dependencies = [ "phf_generator", "phf_shared", - "proc-macro2 1.0.63", - "quote 1.0.28", - "syn 1.0.109", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] name = "phf_shared" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1fb5f6f826b772a8d4c0394209441e7d37cbbb967ae9c7e0e8134365c9ee676" +checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" dependencies = [ "siphasher", ] @@ -4290,9 +4313,9 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", - "syn 2.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] @@ -4437,7 +4460,7 @@ checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd" dependencies = [ "difflib", "float-cmp", - "itertools", + "itertools 0.10.5", "normalize-line-endings", "predicates-core", "regex", @@ -4477,8 +4500,8 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ - "proc-macro2 1.0.63", - "syn 2.0.18", + "proc-macro2 1.0.66", + "syn 2.0.26", ] [[package]] @@ -4512,8 +4535,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", "version_check", ] @@ -4524,8 +4547,8 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "version_check", ] @@ -4540,9 +4563,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.63" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b368fba921b0dce7e60f5e04ec15e565b3303972b42bcfde1d0713b881959eb" +checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" dependencies = [ "unicode-ident", ] @@ -4661,11 +4684,11 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.28" +version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" +checksum = "5fe8a65d69dd0808184ebb5f836ab526bb259db23c657efa38711b1072ee47f0" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.66", ] [[package]] @@ -4925,7 +4948,7 @@ dependencies = [ "comfy-table", "confy", "const-str", - "crossterm", + "crossterm 0.25.0", "dirs-next", "eyre", "fdlimit", @@ -5201,7 +5224,7 @@ dependencies = [ "assert_matches", "futures", "futures-util", - "itertools", + "itertools 0.10.5", 
"pin-project", "rayon", "reth-db", @@ -5334,7 +5357,7 @@ dependencies = [ name = "reth-libmdbx" version = "0.1.0-alpha.4" dependencies = [ - "bitflags 2.3.2", + "bitflags 2.3.3", "byteorder", "criterion", "derive_more", @@ -5375,11 +5398,11 @@ version = "0.1.0-alpha.4" dependencies = [ "metrics", "once_cell", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "regex", "serial_test 0.10.0", - "syn 2.0.18", + "syn 2.0.26", "trybuild", ] @@ -5522,7 +5545,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "strum", + "strum 0.25.0", "sucds", "test-fuzz", "thiserror", @@ -5542,7 +5565,7 @@ version = "0.1.0-alpha.4" dependencies = [ "auto_impl", "derive_more", - "itertools", + "itertools 0.10.5", "parking_lot 0.12.1", "pin-project", "reth-db", @@ -5628,9 +5651,9 @@ dependencies = [ name = "reth-rlp-derive" version = "0.1.0-alpha.4" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", - "syn 2.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] @@ -5725,7 +5748,7 @@ dependencies = [ "reth-transaction-pool", "serde", "serde_json", - "strum", + "strum 0.25.0", "thiserror", "tokio", "tower", @@ -5779,7 +5802,7 @@ dependencies = [ "async-trait", "criterion", "futures-util", - "itertools", + "itertools 0.10.5", "num-traits", "paste", "pin-project", @@ -6005,8 +6028,8 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -6131,9 +6154,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.12" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" [[package]] name = "rusty-fork" @@ -6196,8 +6219,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61fa974aea2d63dd18a4ec3a49d59af9f34178c73a4f56d2f18205628d00681e" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -6335,9 +6358,9 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "serde" -version = "1.0.164" +version = "1.0.171" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d" +checksum = "30e27d1e4fd7659406c492fd6cfaf2066ba8773de45ca75e855590f856dc34a9" dependencies = [ "serde_derive", ] @@ -6353,20 +6376,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.164" +version = "1.0.171" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" +checksum = "389894603bd18c46fa56231694f8d827779c0951a667087194cf9de94ed24682" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", - "syn 2.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] name = "serde_json" -version = "1.0.96" +version = "1.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1" +checksum = "d03b412469450d4404fe8499a268edd7f8b79fecb074b0d812ad64ca21f4031b" dependencies = [ "itoa", "ryu", @@ -6417,8 +6440,8 @@ 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1966009f3c05f095697c537312f5415d1e3ed31ce0a56942bac4c771c5c335e" dependencies = [ "darling 0.14.3", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -6456,8 +6479,8 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b64f9e531ce97c88b4778aad0ceee079216071cffec6ac9b904277f8f92e7fe3" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -6467,9 +6490,9 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", - "syn 2.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] @@ -6567,9 +6590,9 @@ checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" [[package]] name = "signal-hook" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "732768f1176d21d09e076c23a93123d40bba92d50c4058da34d45c8de8e682b9" +checksum = "b824b6e687aff278cdbf3b36f07aa52d4bd4099699324d5da86a2ebce3aa00b3" dependencies = [ "libc", "signal-hook-registry", @@ -6660,9 +6683,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" +checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" [[package]] name = "smol_str" @@ -6771,8 +6794,14 @@ name = "strum" version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" + +[[package]] +name = "strum" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" dependencies = [ - "strum_macros", + "strum_macros 0.25.1", ] [[package]] @@ -6782,12 +6811,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "rustversion", "syn 1.0.109", ] +[[package]] +name = "strum_macros" +version = "0.25.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6069ca09d878a33f883cc06aaa9718ede171841d3832450354410b718b097232" +dependencies = [ + "heck", + "proc-macro2 1.0.66", + "quote 1.0.31", + "rustversion", + "syn 2.0.26", +] + [[package]] name = "subprocess" version = "0.2.9" @@ -6866,19 +6908,19 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.18" +version = "2.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32d41677bcbe24c20c52e7c70b0d8db04134c5d1066bf98662e2871ad200ea3e" +checksum = "45c3457aacde3c65315de5031ec191ce46604304d2446e803d71ade03308d970" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", 
"unicode-ident", ] @@ -6888,8 +6930,8 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", "unicode-xid 0.2.4", ] @@ -6900,9 +6942,9 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "285ba80e733fac80aa4270fbcdf83772a79b80aa35c97075320abfee4a915b06" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", - "syn 2.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", "unicode-xid 0.2.4", ] @@ -6959,10 +7001,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "385624eb0031d550fe1bf99c08af79b838605fc4fcec2c4d55e229a2c342fdd0" dependencies = [ "cargo_metadata", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "serde", - "strum_macros", + "strum_macros 0.24.3", ] [[package]] @@ -6973,12 +7015,12 @@ checksum = "69247423e2d89bd51160e42200f6f45f921a23e5b44a0e5b57b888a378334037" dependencies = [ "darling 0.20.1", "if_chain", - "itertools", + "itertools 0.10.5", "lazy_static", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "subprocess", - "syn 2.0.18", + "syn 2.0.26", "test-fuzz-internal", "toolchain_find", ] @@ -7005,22 +7047,22 @@ checksum = "aac81b6fd6beb5884b0cf3321b8117e6e5d47ecb6fc89f414cfdcca8b2fe2dd8" [[package]] name = "thiserror" -version = "1.0.40" +version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" +checksum = "a35fc5b8971143ca348fa6df4f024d4d55264f3468c71ad1c2f365b0a4d58c42" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.40" +version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" +checksum = "463fe12d7993d3b327787537ce8dd4dfa058de32fc2b195ef3cde03dc4771e8f" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", - "syn 2.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] @@ -7129,9 +7171,9 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", - "syn 2.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] @@ -7333,8 +7375,8 @@ version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -7529,7 +7571,7 @@ checksum = "ccdd26cbd674007e649a272da4475fb666d3aa0ad0531da7136db6fab0e5bad1" dependencies = [ "bitflags 1.3.2", "cassowary", - "crossterm", + "crossterm 0.25.0", "unicode-segmentation", "unicode-width", ] @@ -7594,9 +7636,9 @@ checksum = "d54675592c1dbefd78cbd98db9bacd89886e1ca50692a0692baefffdeb92dd58" [[package]] name = "unicode-ident" -version = "1.0.9" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0" +checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" [[package]] 
name = "unicode-normalization" @@ -7784,8 +7826,8 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", "wasm-bindgen-shared", ] @@ -7808,7 +7850,7 @@ version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" dependencies = [ - "quote 1.0.28", + "quote 1.0.31", "wasm-bindgen-macro-support", ] @@ -7818,8 +7860,8 @@ version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", "wasm-bindgen-backend", "wasm-bindgen-shared", @@ -7899,7 +7941,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows-targets 0.48.0", + "windows-targets 0.48.1", ] [[package]] @@ -7932,7 +7974,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.48.0", + "windows-targets 0.48.1", ] [[package]] @@ -7952,9 +7994,9 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.48.0" +version = "0.48.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" dependencies = [ "windows_aarch64_gnullvm 0.48.0", "windows_aarch64_msvc 0.48.0", @@ -8146,8 +8188,8 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af46c169923ed7516eef0aa32b56d2651b229f57458ebe46b49ddd6efef5b7a2" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", "synstructure 0.12.6", ] @@ -8167,8 +8209,8 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4eae7c1f7d4b8eafce526bc0771449ddc2f250881ae31c50d22c032b5a1c499" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", "synstructure 0.12.6", ] @@ -8188,9 +8230,9 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", - "syn 2.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] @@ -8210,8 +8252,8 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "486558732d5dde10d0f8cb2936507c1bb21bc539d924c949baf5f36a58e51bac" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.28", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", "synstructure 0.12.6", ] diff --git a/Cargo.toml b/Cargo.toml index 523be9caa9cf..92ed754a35aa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -105,10 +105,10 @@ reth-network = { path = "./crates/net/network" } reth-network-api = { path = "./crates/net/network-api" } ## eth -ethers-core = { version = "2.0.7", default-features = false } -ethers-providers = { version = "2.0.7", default-features = false } -ethers-signers = { version = "2.0.7", default-features = false } 
-ethers-middleware = { version = "2.0.7", default-features = false } +ethers-core = { version = "2.0.8", default-features = false } +ethers-providers = { version = "2.0.8", default-features = false } +ethers-signers = { version = "2.0.8", default-features = false } +ethers-middleware = { version = "2.0.8", default-features = false } ## misc bytes = "1.4" @@ -117,6 +117,7 @@ thiserror = "1.0.37" serde_json = "1.0.94" serde = { version = "1.0", default-features = false } rand = "0.8.5" +strum = "0.25" ### proc-macros proc-macro2 = "1.0" diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index ac7aca802737..2c0965dfaa32 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -62,7 +62,7 @@ metrics-process = "1.0.9" proptest = "1.0" # tui -comfy-table = "6.1.4" +comfy-table = "7.0" crossterm = "0.25.0" tui = "0.19.0" human_bytes = "0.4.1" diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 34039aa85448..aca12eb5468a 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -68,7 +68,7 @@ hash-db = "0.15" arbitrary = { version = "1.1.7", features = ["derive"], optional = true } proptest = { version = "1.0", optional = true } proptest-derive = { version = "0.3", optional = true } -strum = { version = "0.24", features = ["derive"] } +strum = { workspace = true, features = ["derive"] } [dev-dependencies] serde_json = { workspace = true } diff --git a/crates/primitives/src/chain/mod.rs b/crates/primitives/src/chain/mod.rs index 43f4647a8c99..425939ce44b6 100644 --- a/crates/primitives/src/chain/mod.rs +++ b/crates/primitives/src/chain/mod.rs @@ -248,8 +248,14 @@ mod tests { } #[test] - fn test_legacy_named_chain() { + fn test_optimism_chain() { let chain = Chain::Named(ethers_core::types::Chain::Optimism); + assert!(!chain.is_legacy()); + } + + #[test] + fn test_legacy_named_chain() { + let chain = Chain::Named(ethers_core::types::Chain::BinanceSmartChain); assert!(chain.is_legacy()); } diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index b4159bfb682b..8d51e178022e 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -29,7 +29,7 @@ tower = { version = "0.4", features = ["full"] } hyper = "0.14" # misc -strum = { version = "0.24", features = ["derive"] } +strum = { workspace = true, features = ["derive"] } serde = { workspace = true, features = ["derive"] } thiserror = { workspace = true } tracing = { workspace = true } From 9c69f04380f3b3bf3e18f97c1d93f103ae447e63 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 17 Jul 2023 20:40:43 -0400 Subject: [PATCH 188/722] chore: fix SessionManagerMetrics typo (#3823) --- crates/net/network/src/metrics.rs | 2 +- crates/net/network/src/session/mod.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/net/network/src/metrics.rs b/crates/net/network/src/metrics.rs index 085b1f093b2d..43c3b03f481d 100644 --- a/crates/net/network/src/metrics.rs +++ b/crates/net/network/src/metrics.rs @@ -48,7 +48,7 @@ pub struct NetworkMetrics { /// Metrics for SessionManager #[derive(Metrics)] #[metrics(scope = "network")] -pub struct SesssionManagerMetrics { +pub struct SessionManagerMetrics { /// Number of dials that resulted in a peer being added to the peerset pub(crate) total_dial_successes: Counter, } diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index d31d2c635bc1..df19d144e433 100644 --- 
a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -1,7 +1,7 @@ //! Support for handling peer sessions. use crate::{ message::PeerMessage, - metrics::SesssionManagerMetrics, + metrics::SessionManagerMetrics, session::{active::ActiveSession, config::SessionCounter}, }; pub use crate::{message::PeerRequestSender, session::handle::PeerInfo}; @@ -100,7 +100,7 @@ pub struct SessionManager { /// Used to measure inbound & outbound bandwidth across all managed streams bandwidth_meter: BandwidthMeter, /// Metrics for the session manager. - metrics: SesssionManagerMetrics, + metrics: SessionManagerMetrics, } // === impl SessionManager === From ec672238ca6addf62fac89de027bd2320b49182e Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 18 Jul 2023 12:18:35 +0300 Subject: [PATCH 189/722] chore(ci): enable docker push on workflow dispatch (#3825) --- .github/workflows/docker.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 80f02f0cf8c9..06dbff26f2d0 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -1,6 +1,7 @@ name: docker on: + workflow_dispatch: {} push: tags: - v* From 26b1ffa92aa2aca6712284b03c3323458cfbe9df Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 18 Jul 2023 14:06:27 +0200 Subject: [PATCH 190/722] chore: make clippy happy (#3827) --- bin/reth/src/init.rs | 1 + bin/reth/src/stage/dump/execution.rs | 2 +- bin/reth/src/stage/dump/mod.rs | 2 +- crates/consensus/beacon/src/engine/sync.rs | 2 +- crates/primitives/src/hex_bytes.rs | 2 +- crates/revm/revm-inspectors/src/tracing/mod.rs | 6 +++--- crates/rlp/benches/bench.rs | 2 +- crates/rpc/rpc-builder/tests/it/utils.rs | 2 +- crates/rpc/rpc-types/src/eth/fee.rs | 5 +---- crates/trie/src/hashed_cursor/post_state.rs | 3 +-- 10 files changed, 12 insertions(+), 15 deletions(-) diff --git a/bin/reth/src/init.rs b/bin/reth/src/init.rs index 10b4fa14d883..a70949dbbe9d 100644 --- a/bin/reth/src/init.rs +++ b/bin/reth/src/init.rs @@ -190,6 +190,7 @@ mod tests { }; use std::collections::HashMap; + #[allow(clippy::type_complexity)] fn collect_table_entries( tx: &>::TX, ) -> Result, InitDatabaseError> diff --git a/bin/reth/src/stage/dump/execution.rs b/bin/reth/src/stage/dump/execution.rs index 9200065d5517..b2d7bbed32fc 100644 --- a/bin/reth/src/stage/dump/execution.rs +++ b/bin/reth/src/stage/dump/execution.rs @@ -35,7 +35,7 @@ pub(crate) async fn dump_execution_stage( /// Imports all the tables that can be copied over a range. 
fn import_tables_with_range( output_db: &DatabaseEnv, - db_tool: &mut DbTool<'_, DB>, + db_tool: &DbTool<'_, DB>, from: u64, to: u64, ) -> eyre::Result<()> { diff --git a/bin/reth/src/stage/dump/mod.rs b/bin/reth/src/stage/dump/mod.rs index 14c5fd259002..5e7206cf1a59 100644 --- a/bin/reth/src/stage/dump/mod.rs +++ b/bin/reth/src/stage/dump/mod.rs @@ -131,7 +131,7 @@ pub(crate) fn setup( from: u64, to: u64, output_db: &PathBuf, - db_tool: &mut DbTool<'_, DB>, + db_tool: &DbTool<'_, DB>, ) -> eyre::Result<(DatabaseEnv, u64)> { assert!(from < to, "FROM block must be lower than TO block."); diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index 02da63d5aec1..ba988f1be87f 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -325,7 +325,7 @@ struct OrderedSealedBlock(SealedBlock); impl PartialOrd for OrderedSealedBlock { fn partial_cmp(&self, other: &Self) -> Option { - self.0.number.partial_cmp(&other.0.number) + Some(self.cmp(other)) } } diff --git a/crates/primitives/src/hex_bytes.rs b/crates/primitives/src/hex_bytes.rs index 322b9ff04e61..0e2db39e5545 100644 --- a/crates/primitives/src/hex_bytes.rs +++ b/crates/primitives/src/hex_bytes.rs @@ -328,7 +328,7 @@ mod tests { assert_eq!(b, vec[..]); assert_eq!(vec[..], b); - let wrong_vec = vec![1, 3, 52, 137]; + let wrong_vec = [1, 3, 52, 137]; assert_ne!(b, wrong_vec[..]); assert_ne!(wrong_vec[..], b); } diff --git a/crates/revm/revm-inspectors/src/tracing/mod.rs b/crates/revm/revm-inspectors/src/tracing/mod.rs index 53a26f765a3c..fe6c5b8609d4 100644 --- a/crates/revm/revm-inspectors/src/tracing/mod.rs +++ b/crates/revm/revm-inspectors/src/tracing/mod.rs @@ -236,7 +236,7 @@ impl TracingInspector { /// /// This expects an existing [CallTrace], in other words, this panics if not within the context /// of a call. - fn start_step(&mut self, interp: &mut Interpreter, data: &mut EVMData<'_, DB>) { + fn start_step(&mut self, interp: &Interpreter, data: &EVMData<'_, DB>) { let trace_idx = self.last_trace_idx(); let trace = &mut self.traces.arena[trace_idx]; @@ -283,8 +283,8 @@ impl TracingInspector { /// Invoked on [Inspector::step_end].
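// Note on the signature changes above and below: step bookkeeping only reads
// from the interpreter and EVM data, while all mutation happens on
// `self.traces`, so shared `&Interpreter` / `&EVMData` references suffice —
// which is what clippy was pointing out here.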
fn fill_step_on_step_end( &mut self, - interp: &mut Interpreter, - data: &mut EVMData<'_, DB>, + interp: &Interpreter, + data: &EVMData<'_, DB>, status: InstructionResult, ) { let StackStep { trace_idx, step_idx } = diff --git a/crates/rlp/benches/bench.rs b/crates/rlp/benches/bench.rs index abb70c7eb094..e69695183147 100644 --- a/crates/rlp/benches/bench.rs +++ b/crates/rlp/benches/bench.rs @@ -48,7 +48,7 @@ fn bench_decode(c: &mut Criterion) { }); c.bench_function("decode_u256", |b| { b.iter(|| { - let data = vec![ + let data = [ 0xa0, 0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, 0x09, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0xf0, diff --git a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index 0b111b33cc8d..7869f61574ed 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -30,7 +30,7 @@ pub async fn launch_auth(secret: JwtSecret) -> AuthServerHandle { MAINNET.clone(), beacon_engine_handle, spawn_test_payload_service().into(), - Box::new(TokioTaskExecutor::default()), + Box::::default(), ); let module = AuthRpcModule::new(engine_api); module.start_server(config).await.unwrap() diff --git a/crates/rpc/rpc-types/src/eth/fee.rs b/crates/rpc/rpc-types/src/eth/fee.rs index 6463e7b433c1..c7f93f6eaf6b 100644 --- a/crates/rpc/rpc-types/src/eth/fee.rs +++ b/crates/rpc/rpc-types/src/eth/fee.rs @@ -12,10 +12,7 @@ pub struct TxGasAndReward { impl PartialOrd for TxGasAndReward { fn partial_cmp(&self, other: &Self) -> Option { - // compare only the reward - // see: - // - self.reward.partial_cmp(&other.reward) + Some(self.cmp(other)) } } diff --git a/crates/trie/src/hashed_cursor/post_state.rs b/crates/trie/src/hashed_cursor/post_state.rs index 662a71163aec..c5418bc69245 100644 --- a/crates/trie/src/hashed_cursor/post_state.rs +++ b/crates/trie/src/hashed_cursor/post_state.rs @@ -885,8 +885,7 @@ mod tests { let tx = db.tx().unwrap(); let factory = HashedPostStateCursorFactory::new(&tx, &hashed_post_state); let expected = - [(address, db_storage.into_iter().chain(post_state_storage.into_iter()).collect())] - .into_iter(); + [(address, db_storage.into_iter().chain(post_state_storage).collect())].into_iter(); assert_storage_cursor_order(&factory, expected); } From 20a06e840a0ae973ad377ae81413b2a777397982 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 18 Jul 2023 09:55:08 -0400 Subject: [PATCH 191/722] chore: add traces for _new_ invalid blocks (#3821) Co-authored-by: Matthias Seitz --- crates/consensus/beacon/src/engine/mod.rs | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index e42bc3c71709..f9fb030bf69b 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -870,7 +870,7 @@ where // check if the new head was previously invalidated, if so then we deem this FCU // as invalid if let Some(invalid_ancestor) = self.check_invalid_ancestor(state.head_block_hash) { - debug!(target: "consensus::engine", head=?state.head_block_hash, "Head was previously marked as invalid"); + debug!(target: "consensus::engine", head=?state.head_block_hash, current_error=?error, "Head was previously marked as invalid"); return invalid_ancestor } @@ -1170,6 +1170,8 @@ where let (block, error) = err.split(); if error.is_invalid_block() { + 
warn!(target: "consensus::engine", invalid_hash=?block.hash, invalid_number=?block.number, ?error, "Invalid block error on new payload"); + // all of these occurred if the payload is invalid let parent_hash = block.parent_hash; @@ -1271,7 +1273,10 @@ where Err(err) => { warn!(target: "consensus::engine", ?err, "Failed to insert downloaded block"); if err.kind().is_invalid_block() { - self.invalid_headers.insert(err.into_block().header); + let (block, err) = err.split(); + warn!(target: "consensus::engine", invalid_number=?block.number, invalid_hash=?block.hash, ?err, "Marking block as invalid"); + + self.invalid_headers.insert(block.header); } } } @@ -1429,7 +1434,7 @@ where } if let ControlFlow::Unwind { bad_block, .. } = ctrl { - trace!(target: "consensus::engine", hash=?bad_block.hash, "Bad block detected in unwind"); + warn!(target: "consensus::engine", invalid_hash=?bad_block.hash, invalid_number=?bad_block.number, "Bad block detected in unwind"); // update the `invalid_headers` cache with the new invalid headers self.invalid_headers.insert(bad_block); From 7686371448e49ca272476dc245fffa3e47ceedba Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 18 Jul 2023 16:48:24 +0200 Subject: [PATCH 192/722] feat: build local pending block (#3811) --- crates/rpc/rpc/src/eth/api/block.rs | 3 +- crates/rpc/rpc/src/eth/api/mod.rs | 49 ++++-- crates/rpc/rpc/src/eth/api/pending_block.rs | 158 ++++++++++++++++++-- crates/rpc/rpc/src/eth/api/transactions.rs | 2 +- 4 files changed, 185 insertions(+), 27 deletions(-) diff --git a/crates/rpc/rpc/src/eth/api/block.rs b/crates/rpc/rpc/src/eth/api/block.rs index 5220f907a5ab..1cb547e31535 100644 --- a/crates/rpc/rpc/src/eth/api/block.rs +++ b/crates/rpc/rpc/src/eth/api/block.rs @@ -7,6 +7,7 @@ use crate::{ }, EthApi, }; +use reth_network_api::NetworkInfo; use reth_primitives::{BlockId, BlockNumberOrTag, TransactionMeta}; use reth_provider::{BlockReaderIdExt, EvmEnvProvider, StateProviderFactory}; use reth_rpc_types::{Block, Index, RichBlock, TransactionReceipt}; @@ -16,7 +17,7 @@ impl EthApi where Provider: BlockReaderIdExt + StateProviderFactory + EvmEnvProvider + 'static, Pool: TransactionPool + Clone + 'static, - Network: Send + Sync + 'static, + Network: NetworkInfo + Send + Sync + 'static, { /// Returns the uncle headers of the given block /// diff --git a/crates/rpc/rpc/src/eth/api/mod.rs b/crates/rpc/rpc/src/eth/api/mod.rs index 50e634f424a2..e68dd3686ef7 100644 --- a/crates/rpc/rpc/src/eth/api/mod.rs +++ b/crates/rpc/rpc/src/eth/api/mod.rs @@ -4,6 +4,7 @@ //! files. use crate::eth::{ + api::pending_block::{PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}, cache::EthStateCache, error::{EthApiError, EthResult}, gas_oracle::GasPriceOracle, @@ -20,7 +21,11 @@ use reth_rpc_types::{SyncInfo, SyncStatus}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use reth_transaction_pool::TransactionPool; use revm_primitives::{BlockEnv, CfgEnv}; -use std::{future::Future, sync::Arc, time::Instant}; +use std::{ + future::Future, + sync::Arc, + time::{Duration, Instant}, +}; use tokio::sync::{oneshot, Mutex}; mod block; @@ -32,7 +37,6 @@ mod sign; mod state; mod transactions; -use crate::eth::api::pending_block::{PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}; pub use transactions::{EthTransactions, TransactionSource}; /// `Eth` API trait. 
@@ -220,7 +224,7 @@ impl EthApi where Provider: BlockReaderIdExt + StateProviderFactory + EvmEnvProvider + 'static, Pool: TransactionPool + Clone + 'static, - Network: Send + Sync + 'static, + Network: NetworkInfo + Send + Sync + 'static, { /// Configures the [CfgEnv] and [BlockEnv] for the pending block /// @@ -261,24 +265,41 @@ where // no pending block from the CL yet, so we need to build it ourselves via txpool self.on_blocking_task(|this| async move { - let PendingBlockEnv { cfg: _, block_env, origin } = pending; - let lock = this.inner.pending_block.lock().await; + let mut lock = this.inner.pending_block.lock().await; let now = Instant::now(); - // this is guaranteed to be the `latest` header - let parent_header = origin.into_header(); // check if the block is still good - if let Some(pending) = lock.as_ref() { - if block_env.number.to::() == pending.block.number && - pending.block.parent_hash == parent_header.parent_hash && - now <= pending.expires_at + if let Some(pending_block) = lock.as_ref() { + // this is guaranteed to be the `latest` header + if pending.block_env.number.to::() == pending_block.block.number && + pending.origin.header().hash == pending_block.block.parent_hash && + now <= pending_block.expires_at { - return Ok(Some(pending.block.clone())) + return Ok(Some(pending_block.block.clone())) } } - // TODO(mattsse): actually build the pending block - Ok(None) + // if we're currently syncing, we're unable to build a pending block + if this.network().is_syncing() { + return Ok(None) + } + + // we rebuild the block + let pending_block = match pending.build_block(this.provider(), this.pool()) { + Ok(block) => block, + Err(err) => { + tracing::debug!(target: "rpc", "Failed to build pending block: {:?}", err); + return Ok(None) + } + }; + + let now = Instant::now(); + *lock = Some(PendingBlock { + block: pending_block.clone(), + expires_at: now + Duration::from_secs(3), + }); + + Ok(Some(pending_block)) }) .await } diff --git a/crates/rpc/rpc/src/eth/api/pending_block.rs b/crates/rpc/rpc/src/eth/api/pending_block.rs index 8e57f893dca0..a427df3279a1 100644 --- a/crates/rpc/rpc/src/eth/api/pending_block.rs +++ b/crates/rpc/rpc/src/eth/api/pending_block.rs @@ -1,7 +1,18 @@ //! Support for building a pending block via local txpool. -use reth_primitives::{SealedBlock, SealedHeader}; -use revm_primitives::{BlockEnv, CfgEnv}; +use crate::eth::error::EthResult; +use reth_primitives::{ + constants::{BEACON_NONCE, EMPTY_WITHDRAWALS}, + proofs, Block, Header, IntoRecoveredTransaction, Receipt, SealedBlock, SealedHeader, + EMPTY_OMMER_ROOT, H256, U256, +}; +use reth_provider::{PostState, StateProviderFactory}; +use reth_revm::{ + database::State, env::tx_env_with_recovered, executor::commit_state_changes, into_reth_log, +}; +use reth_transaction_pool::TransactionPool; +use revm::db::CacheDB; +use revm_primitives::{BlockEnv, CfgEnv, EVMError, Env, InvalidTransaction, ResultAndState}; use std::time::Instant; /// Configured [BlockEnv] and [CfgEnv] for a pending block @@ -15,6 +26,133 @@ pub(crate) struct PendingBlockEnv { pub(crate) origin: PendingBlockEnvOrigin, } +impl PendingBlockEnv { + /// Builds a pending block from the given client and pool.
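// A minimal sketch of the intended call-site (illustrative only; `cfg`,
// `block_env`, `origin`, `client` and `pool` are assumed to be in scope,
// as in the `mod.rs` hunk above):
//
// let pending = PendingBlockEnv { cfg, block_env, origin };
// let sealed_block = pending.build_block(&client, &pool)?;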
+ pub(crate) fn build_block( + self, + client: &Client, + pool: &Pool, + ) -> EthResult + where + Client: StateProviderFactory, + Pool: TransactionPool, + { + let Self { cfg, block_env, origin } = self; + + let parent_hash = origin.build_target_hash(); + let state = State::new(client.history_by_block_hash(parent_hash)?); + let mut db = CacheDB::new(state); + let mut post_state = PostState::default(); + + let mut cumulative_gas_used = 0; + let block_gas_limit: u64 = block_env.gas_limit.try_into().unwrap_or(u64::MAX); + let base_fee = block_env.basefee.to::(); + let block_number = block_env.number.to::(); + + let mut executed_txs = Vec::new(); + let mut best_txs = pool.best_transactions_with_base_fee(base_fee as u128); + + while let Some(pool_tx) = best_txs.next() { + // ensure we still have capacity for this transaction + if cumulative_gas_used + pool_tx.gas_limit() > block_gas_limit { + // we can't fit this transaction into the block, so we need to mark it as invalid + // which also removes all dependent transactions from the iterator before we can + // continue + best_txs.mark_invalid(&pool_tx); + continue + } + + // convert tx to a signed transaction + let tx = pool_tx.to_recovered_transaction(); + + // Configure the environment for the block. + let env = + Env { cfg: cfg.clone(), block: block_env.clone(), tx: tx_env_with_recovered(&tx) }; + + let mut evm = revm::EVM::with_env(env); + evm.database(&mut db); + + let ResultAndState { result, state } = match evm.transact() { + Ok(res) => res, + Err(err) => { + match err { + EVMError::Transaction(err) => { + if matches!(err, InvalidTransaction::NonceTooLow { .. }) { + // if the nonce is too low, we can skip this transaction + } else { + // if the transaction is invalid, we can skip it and all of its + // descendants + best_txs.mark_invalid(&pool_tx); + } + continue + } + err => { + // this is an error that we should treat as fatal for this attempt + return Err(err.into()) + } + } + } + }; + + let gas_used = result.gas_used(); + + // commit changes + commit_state_changes(&mut db, &mut post_state, block_number, state, true); + + // add gas used by the transaction to cumulative gas used, before creating the receipt + cumulative_gas_used += gas_used; + + // Push transaction changeset and calculate header bloom filter for receipt.
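// (ordering matters here: the receipt below records the running
// `cumulative_gas_used`, so the gas accounting above must happen first)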
+ post_state.add_receipt( + block_number, + Receipt { + tx_type: tx.tx_type(), + success: result.is_success(), + cumulative_gas_used, + logs: result.logs().into_iter().map(into_reth_log).collect(), + }, + ); + // append transaction to the list of executed transactions + executed_txs.push(tx.into_signed()); + } + + let receipts_root = post_state.receipts_root(block_number); + let logs_bloom = post_state.logs_bloom(block_number); + + // calculate the state root + let state_root = db.db.state().state_root(post_state)?; + + // create the block header + let transactions_root = proofs::calculate_transaction_root(&executed_txs); + + let header = Header { + parent_hash, + ommers_hash: EMPTY_OMMER_ROOT, + beneficiary: block_env.coinbase, + state_root, + transactions_root, + receipts_root, + withdrawals_root: Some(EMPTY_WITHDRAWALS), + logs_bloom, + timestamp: block_env.timestamp.to::(), + mix_hash: block_env.prevrandao.unwrap_or_default(), + nonce: BEACON_NONCE, + base_fee_per_gas: Some(base_fee), + number: block_number, + gas_limit: block_gas_limit, + difficulty: U256::ZERO, + gas_used: cumulative_gas_used, + extra_data: Default::default(), + }; + + // seal the block + let block = Block { header, body: executed_txs, ommers: vec![], withdrawals: Some(vec![]) }; + let sealed_block = block.seal_slow(); + + Ok(sealed_block) + } +} + /// The origin for a configured [PendingBlockEnv] #[derive(Clone, Debug)] pub(crate) enum PendingBlockEnvOrigin { @@ -38,18 +176,18 @@ impl PendingBlockEnvOrigin { } } - /// Returns the header this pending block is based on. - pub(crate) fn header(&self) -> &SealedHeader { + /// Returns the hash of the block that the pending block should be built on + fn build_target_hash(&self) -> H256 { match self { - PendingBlockEnvOrigin::ActualPending(block) => &block.header, - PendingBlockEnvOrigin::DerivedFromLatest(header) => header, + PendingBlockEnvOrigin::ActualPending(block) => block.parent_hash, + PendingBlockEnvOrigin::DerivedFromLatest(header) => header.hash, } } - /// Consumes the type and returns the header this pending block is based on. - pub(crate) fn into_header(self) -> SealedHeader { + /// Returns the header this pending block is based on.
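// Note the asymmetry in `build_target_hash` above: a pending block the CL
// actually gave us is re-built on its parent hash, while a block derived
// from `latest` is built directly on top of that header's own hash.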
+ pub(crate) fn header(&self) -> &SealedHeader { match self { - PendingBlockEnvOrigin::ActualPending(block) => block.header, + PendingBlockEnvOrigin::ActualPending(block) => &block.header, PendingBlockEnvOrigin::DerivedFromLatest(header) => header, } } @@ -63,5 +201,3 @@ pub(crate) struct PendingBlock { /// Timestamp when the pending block is considered outdated pub(crate) expires_at: Instant, } - -impl PendingBlock {} diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 80d83fdb8094..e8dd2ed7ccb1 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -663,7 +663,7 @@ impl EthApi where Pool: TransactionPool + 'static, Provider: BlockReaderIdExt + StateProviderFactory + EvmEnvProvider + 'static, - Network: Send + Sync + 'static, + Network: NetworkInfo + Send + Sync + 'static, { pub(crate) fn sign_request( &self, From e66464b14f9bafe78b659ce76bd5148324fe36db Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 18 Jul 2023 10:52:09 -0400 Subject: [PATCH 193/722] chore: rename missing_parent to missing_ancestor (#3822) Co-authored-by: Matthias Seitz --- crates/blockchain-tree/src/blockchain_tree.rs | 10 +++++----- crates/consensus/beacon/src/engine/mod.rs | 4 +++- crates/interfaces/src/blockchain_tree/mod.rs | 4 ++-- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 43fe1be6fa52..819d60d4c38c 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -201,7 +201,7 @@ impl BlockchainTree // check if block is disconnected if let Some(block) = self.buffered_blocks.block(block) { - return Ok(Some(BlockStatus::Disconnected { missing_parent: block.parent_num_hash() })) + return Ok(Some(BlockStatus::Disconnected { missing_ancestor: block.parent_num_hash() })) } Ok(None) @@ -360,7 +360,7 @@ impl BlockchainTree ) })?; - Ok(BlockStatus::Disconnected { missing_parent: lowest_ancestor.parent_num_hash() }) + Ok(BlockStatus::Disconnected { missing_ancestor: lowest_ancestor.parent_num_hash() }) } /// This tries to append the given block to the canonical chain. 
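// How a caller consumes the renamed field (sketch only, mirroring the
// engine change later in this patch; `tree` and `block` are assumed to be
// in scope):
//
// if let InsertPayloadOk::Inserted(BlockStatus::Disconnected { missing_ancestor }) =
//     tree.insert_block(block)?
// {
//     // download the range ending at `missing_ancestor`, then retry
// }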
@@ -1274,7 +1274,7 @@ mod tests { assert_eq!( tree.insert_block(block2.clone()).unwrap(), InsertPayloadOk::Inserted(BlockStatus::Disconnected { - missing_parent: block2.parent_num_hash() + missing_ancestor: block2.parent_num_hash() }) ); @@ -1293,7 +1293,7 @@ mod tests { assert_eq!( tree.is_block_known(block2.num_hash()).unwrap(), - Some(BlockStatus::Disconnected { missing_parent: block2.parent_num_hash() }) + Some(BlockStatus::Disconnected { missing_ancestor: block2.parent_num_hash() }) ); // check if random block is known @@ -1575,7 +1575,7 @@ mod tests { assert_eq!( tree.insert_block(block2b.clone()).unwrap(), InsertPayloadOk::Inserted(BlockStatus::Disconnected { - missing_parent: block2b.parent_num_hash() + missing_ancestor: block2b.parent_num_hash() }) ); diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index f9fb030bf69b..e4f46ad9ea1f 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1262,7 +1262,9 @@ where // block is connected to the canonical chain, but not the current head self.try_make_sync_target_canonical(downloaded_num_hash); } - InsertPayloadOk::Inserted(BlockStatus::Disconnected { missing_parent }) => { + InsertPayloadOk::Inserted(BlockStatus::Disconnected { + missing_ancestor: missing_parent, + }) => { // block is not connected to the canonical head, we need to download its // missing branch first self.on_disconnected_block(downloaded_num_hash, missing_parent); diff --git a/crates/interfaces/src/blockchain_tree/mod.rs b/crates/interfaces/src/blockchain_tree/mod.rs index 8025c7901129..d8334df7decb 100644 --- a/crates/interfaces/src/blockchain_tree/mod.rs +++ b/crates/interfaces/src/blockchain_tree/mod.rs @@ -144,8 +144,8 @@ pub enum BlockStatus { Accepted, /// If blocks is not connected to canonical chain. Disconnected { - /// The lowest parent block that is not connected to the canonical chain. - missing_parent: BlockNumHash, + /// The lowest ancestor block that is not connected to the canonical chain. 
+ missing_ancestor: BlockNumHash, }, } From 9313eda6ccbaef2a5f46d3ce8a9f993de7c1c632 Mon Sep 17 00:00:00 2001 From: Panagiotis Ganelis <50522617+PanGan21@users.noreply.github.com> Date: Tue, 18 Jul 2023 18:02:13 +0300 Subject: [PATCH 194/722] feat: add timestamp to pool update (#3833) --- crates/transaction-pool/src/maintain.rs | 2 ++ crates/transaction-pool/src/pool/mod.rs | 1 + crates/transaction-pool/src/traits.rs | 2 ++ 3 files changed, 5 insertions(+) diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index e8b8a8f1ed87..1b4084c0f304 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -266,6 +266,7 @@ pub async fn maintain_transaction_pool( changed_accounts, // all transactions mined in the new chain need to be removed from the pool mined_transactions: new_mined_transactions.into_iter().collect(), + timestamp: new_tip.timestamp, }; pool.on_canonical_state_change(update); @@ -331,6 +332,7 @@ pub async fn maintain_transaction_pool( pending_block_base_fee, changed_accounts, mined_transactions, + timestamp: tip.timestamp, }; pool.on_canonical_state_change(update); } diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 2c055bf9f758..d3d3c5ca9aa3 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -257,6 +257,7 @@ where pending_block_base_fee, changed_accounts, mined_transactions, + timestamp: _, } = update; let changed_senders = self.changed_senders(changed_accounts.into_iter()); let block_info = BlockInfo { diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 8b2110353d64..d44c25f1b521 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -402,6 +402,8 @@ pub struct CanonicalStateUpdate { pub changed_accounts: Vec, /// All mined transactions in the block range. pub mined_transactions: Vec, + /// Timestamp of the latest chain update + pub timestamp: u64, } impl fmt::Display for CanonicalStateUpdate { From 0ef68b7d63fee5337281848efe2289fe8f829e3d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 18 Jul 2023 17:34:38 +0200 Subject: [PATCH 195/722] fix: map more errors to messages (#3837) --- .../revm-inspectors/src/tracing/config.rs | 19 +++++++++++++ .../revm/revm-inspectors/src/tracing/types.rs | 28 +++++++++++++++---- 2 files changed, 42 insertions(+), 5 deletions(-) diff --git a/crates/revm/revm-inspectors/src/tracing/config.rs b/crates/revm/revm-inspectors/src/tracing/config.rs index f968993062d3..5096d8307561 100644 --- a/crates/revm/revm-inspectors/src/tracing/config.rs +++ b/crates/revm/revm-inspectors/src/tracing/config.rs @@ -1,5 +1,24 @@ use reth_rpc_types::trace::geth::GethDefaultTracingOptions; +/// What kind of tracing style this is. +/// +/// This affects things like error messages. +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub(crate) enum TraceStyle { + /// Parity style tracer + Parity, + /// Geth style tracer + #[allow(unused)] + Geth, +} + +impl TraceStyle { + /// Returns true if this is a parity style tracer. + pub(crate) const fn is_parity(self) -> bool { + matches!(self, Self::Parity) + } +} + /// Gives guidance to the [TracingInspector](crate::tracing::TracingInspector). 
/// /// Use [TracingInspectorConfig::default_parity] or [TracingInspectorConfig::default_geth] to get diff --git a/crates/revm/revm-inspectors/src/tracing/types.rs b/crates/revm/revm-inspectors/src/tracing/types.rs index 548ae72b8789..76ff0f510089 100644 --- a/crates/revm/revm-inspectors/src/tracing/types.rs +++ b/crates/revm/revm-inspectors/src/tracing/types.rs @@ -1,6 +1,6 @@ //! Types for representing call trace items. -use crate::tracing::utils::convert_memory; +use crate::tracing::{config::TraceStyle, utils::convert_memory}; use reth_primitives::{abi::decode_revert_reason, bytes::Bytes, Address, H256, U256}; use reth_rpc_types::trace::{ geth::{CallFrame, CallLogFrame, GethDefaultTracingOptions, StructLog}, @@ -160,9 +160,26 @@ impl CallTrace { } /// Returns the error message if it is an erroneous result. - pub(crate) fn as_error(&self) -> Option { + pub(crate) fn as_error(&self, kind: TraceStyle) -> Option { + // See also self.is_error().then(|| match self.status { - InstructionResult::Revert => "Reverted".to_string(), + InstructionResult::Revert => { + if kind.is_parity() { "Reverted" } else { "execution reverted" }.to_string() + } + InstructionResult::OutOfGas | InstructionResult::MemoryOOG => { + if kind.is_parity() { "Out of gas" } else { "out of gas" }.to_string() + } + InstructionResult::OpcodeNotFound => { + if kind.is_parity() { "Bad instruction" } else { "invalid opcode" }.to_string() + } + InstructionResult::StackOverflow => "Out of stack".to_string(), + InstructionResult::InvalidJump => { + if kind.is_parity() { "Bad jump destination" } else { "invalid jump destination" } + .to_string() + } + InstructionResult::PrecompileError => { + if kind.is_parity() { "Built-in failed" } else { "precompiled failed" }.to_string() + } status => format!("{:?}", status), }) } @@ -324,7 +341,7 @@ impl CallTraceNode { pub(crate) fn parity_transaction_trace(&self, trace_address: Vec) -> TransactionTrace { let action = self.parity_action(); let output = self.parity_trace_output(); - let error = self.trace.as_error(); + let error = self.trace.as_error(TraceStyle::Parity); TransactionTrace { action, error, @@ -402,7 +419,8 @@ impl CallTraceNode { // we need to populate error and revert reason if !self.trace.success { call_frame.revert_reason = decode_revert_reason(self.trace.output.clone()); - call_frame.error = self.trace.as_error(); + // Note: the call tracer mimics parity's trace transaction and geth maps errors to parity style error messages, + call_frame.error = self.trace.as_error(TraceStyle::Parity); } if include_logs && !self.logs.is_empty() { From 0f810222e43fb4fedbd30b178e1737584bc56c12 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 18 Jul 2023 18:55:59 +0200 Subject: [PATCH 196/722] feat: add builder type for eth tx validator (#3828) --- crates/transaction-pool/src/validate/eth.rs | 185 ++++++++++++++++---- crates/transaction-pool/src/validate/mod.rs | 2 +- 2 files changed, 154 insertions(+), 33 deletions(-) diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index b5ca93709dda..2a4bbbf8ec2e 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -28,10 +28,18 @@ pub struct EthTransactionValidator { // === impl EthTransactionValidator === +impl EthTransactionValidator<(), ()> { + /// Convenience method to create a [EthTransactionValidatorBuilder] + pub fn builder(chain_spec: Arc) -> EthTransactionValidatorBuilder { + EthTransactionValidatorBuilder::new(chain_spec) + } +} + 
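// With this entry point, validator setup can read as one chain (sketch
// only; assumes a `client`, a `TokioTaskExecutor` and the `MAINNET` spec
// are in scope):
//
// let validator = EthTransactionValidator::builder(MAINNET.clone())
//     .no_shanghai()
//     .with_additional_tasks(2)
//     .build(client, TokioTaskExecutor::default());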
impl EthTransactionValidator { /// Creates a new instance for the given [ChainSpec] /// - /// This will spawn a single validation tasks that performs the actual validation. + /// This will spawn a single validation tasks that performs the actual validation. + /// See [EthTransactionValidator::with_additional_tasks] pub fn new(client: Client, chain_spec: Arc, tasks: T) -> Self where T: TaskSpawner, @@ -41,6 +49,11 @@ impl EthTransactionValidator { /// Creates a new instance for the given [ChainSpec] /// + /// By default this will enable support for: + /// - shanghai + /// - eip1559 + /// - eip2930 + /// /// This will always spawn a validation task that performs the actual validation. It will spawn /// `num_additional_tasks` additional tasks. pub fn with_additional_tasks( @@ -52,37 +65,9 @@ impl EthTransactionValidator { where T: TaskSpawner, { - let inner = EthTransactionValidatorInner { - chain_spec, - client, - shanghai: true, - eip2718: true, - eip1559: true, - block_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT, - minimum_priority_fee: None, - _marker: Default::default(), - }; - - let (tx, task) = ValidationTask::new(); - - // Spawn validation tasks, they are blocking because they perform db lookups - for _ in 0..num_additional_tasks { - let task = task.clone(); - tasks.spawn_blocking(Box::pin(async move { - task.run().await; - })); - } - - tasks.spawn_critical_blocking( - "transaction-validation-service", - Box::pin(async move { - task.run().await; - }), - ); - - let to_validation_task = Arc::new(Mutex::new(tx)); - - Self { inner: Arc::new(inner), to_validation_task } + EthTransactionValidatorBuilder::new(chain_spec) + .with_additional_tasks(num_additional_tasks) + .build(client, tasks) } /// Returns the configured chain id @@ -134,6 +119,142 @@ where } } +/// A builder for [EthTransactionValidator] +#[derive(Debug, Clone)] +pub struct EthTransactionValidatorBuilder { + chain_spec: Arc, + /// Fork indicator whether we are in the Shanghai stage. + shanghai: bool, + /// Fork indicator whether we are using EIP-2718 type transactions. + eip2718: bool, + /// Fork indicator whether we are using EIP-1559 type transactions. + eip1559: bool, + /// The current max gas limit + block_gas_limit: u64, + /// Minimum priority fee to enforce for acceptance into the pool. + minimum_priority_fee: Option, + /// Determines how many additional tasks to spawn + /// + /// Default is 1 + additional_tasks: usize, +} + +impl EthTransactionValidatorBuilder { + /// Creates a new builder for the given [ChainSpec] + pub fn new(chain_spec: Arc) -> Self { + Self { + chain_spec, + shanghai: true, + eip2718: true, + eip1559: true, + block_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT, + minimum_priority_fee: None, + additional_tasks: 1, + } + } + + /// Disables the Shanghai fork. + pub fn no_shanghai(self) -> Self { + self.set_shanghai(false) + } + + /// Set the Shanghai fork. + pub fn set_shanghai(mut self, shanghai: bool) -> Self { + self.shanghai = shanghai; + self + } + + /// Disables the eip2718 support. + pub fn no_eip2718(self) -> Self { + self.set_eip2718(false) + } + + /// Set eip2718 support. + pub fn set_eip2718(mut self, eip2718: bool) -> Self { + self.eip2718 = eip2718; + self + } + + /// Disables the eip1559 support. + pub fn no_eip1559(self) -> Self { + self.set_eip1559(false) + } + + /// Set the eip1559 support. + pub fn set_eip1559(mut self, eip1559: bool) -> Self { + self.eip1559 = eip1559; + self + } + + /// Sets a minimum priority fee that's enforced for acceptance into the pool. 
+ pub fn with_minimum_priority_fee(mut self, minimum_priority_fee: u128) -> Self { + self.minimum_priority_fee = Some(minimum_priority_fee); + self + } + + /// Sets the number of additional tasks to spawn. + pub fn with_additional_tasks(mut self, additional_tasks: usize) -> Self { + self.additional_tasks = additional_tasks; + self + } + + /// Builds a [EthTransactionValidator] + /// + /// The validator will spawn `additional_tasks` additional tasks for validation. + /// + /// By default this will spawn 1 additional task. + pub fn build( + self, + client: Client, + tasks: T, + ) -> EthTransactionValidator + where + T: TaskSpawner, + { + let Self { + chain_spec, + shanghai, + eip2718, + eip1559, + block_gas_limit, + minimum_priority_fee, + additional_tasks, + } = self; + + let inner = EthTransactionValidatorInner { + chain_spec, + client, + shanghai, + eip2718, + eip1559, + block_gas_limit, + minimum_priority_fee, + _marker: Default::default(), + }; + + let (tx, task) = ValidationTask::new(); + + // Spawn validation tasks, they are blocking because they perform db lookups + for _ in 0..additional_tasks { + let task = task.clone(); + tasks.spawn_blocking(Box::pin(async move { + task.run().await; + })); + } + + tasks.spawn_critical_blocking( + "transaction-validation-service", + Box::pin(async move { + task.run().await; + }), + ); + + let to_validation_task = Arc::new(Mutex::new(tx)); + + EthTransactionValidator { inner: Arc::new(inner), to_validation_task } + } +} + /// A [TransactionValidator] implementation that validates ethereum transaction. #[derive(Debug, Clone)] struct EthTransactionValidatorInner { diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index a3892f52ffe6..f63c08c334c2 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -14,7 +14,7 @@ mod eth; mod task; /// A [TransactionValidator] implementation that validates ethereum transaction. -pub use eth::EthTransactionValidator; +pub use eth::{EthTransactionValidator, EthTransactionValidatorBuilder}; /// A spawnable task that performs transaction validation. 
pub use task::ValidationTask; From 34c9abe2498d980802684018dd37728cecb5a37b Mon Sep 17 00:00:00 2001 From: Ryan Schneider Date: Tue, 18 Jul 2023 10:49:23 -0700 Subject: [PATCH 197/722] feat: implement reth_getBalanceChangesInBlock (#3768) Co-authored-by: Matthias Seitz --- bin/reth/src/args/rpc_server_args.rs | 6 +- crates/rpc/rpc-api/src/lib.rs | 2 + crates/rpc/rpc-api/src/reth.rs | 15 +++ crates/rpc/rpc-builder/src/lib.rs | 35 ++++-- crates/rpc/rpc/src/lib.rs | 2 + crates/rpc/rpc/src/reth.rs | 118 ++++++++++++++++++ crates/storage/provider/src/lib.rs | 4 +- .../src/providers/database/provider.rs | 18 ++- crates/storage/provider/src/providers/mod.rs | 13 +- .../storage/provider/src/test_utils/noop.rs | 14 ++- crates/storage/provider/src/traits/account.rs | 8 ++ crates/storage/provider/src/traits/mod.rs | 2 +- 12 files changed, 219 insertions(+), 18 deletions(-) create mode 100644 crates/rpc/rpc-api/src/reth.rs create mode 100644 crates/rpc/rpc/src/reth.rs diff --git a/bin/reth/src/args/rpc_server_args.rs b/bin/reth/src/args/rpc_server_args.rs index a1c954b4a8a5..4dbde75cbd41 100644 --- a/bin/reth/src/args/rpc_server_args.rs +++ b/bin/reth/src/args/rpc_server_args.rs @@ -8,8 +8,8 @@ use clap::{ use futures::TryFutureExt; use reth_network_api::{NetworkInfo, Peers}; use reth_provider::{ - BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, EvmEnvProvider, HeaderProvider, - StateProviderFactory, + BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, + HeaderProvider, StateProviderFactory, }; use reth_rpc::{ eth::{ @@ -249,6 +249,7 @@ impl RpcServerArgs { + StateProviderFactory + EvmEnvProvider + ChainSpecProvider + + ChangeSetReader + Clone + Unpin + 'static, @@ -310,6 +311,7 @@ impl RpcServerArgs { + StateProviderFactory + EvmEnvProvider + ChainSpecProvider + + ChangeSetReader + Clone + Unpin + 'static, diff --git a/crates/rpc/rpc-api/src/lib.rs b/crates/rpc/rpc-api/src/lib.rs index acca756711c1..7fba5adba9e0 100644 --- a/crates/rpc/rpc-api/src/lib.rs +++ b/crates/rpc/rpc-api/src/lib.rs @@ -26,6 +26,7 @@ mod eth; mod eth_filter; mod eth_pubsub; mod net; +mod reth; mod rpc; mod trace; mod txpool; @@ -44,6 +45,7 @@ pub mod servers { eth_filter::EthFilterApiServer, eth_pubsub::EthPubSubApiServer, net::NetApiServer, + reth::RethApiServer, rpc::RpcApiServer, trace::TraceApiServer, txpool::TxPoolApiServer, diff --git a/crates/rpc/rpc-api/src/reth.rs b/crates/rpc/rpc-api/src/reth.rs new file mode 100644 index 000000000000..1e9c4314ab14 --- /dev/null +++ b/crates/rpc/rpc-api/src/reth.rs @@ -0,0 +1,15 @@ +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +use reth_primitives::{Address, BlockId, U256}; +use std::collections::HashMap; + +/// Reth API namespace for reth-specific methods +#[cfg_attr(not(feature = "client"), rpc(server, namespace = "reth"))] +#[cfg_attr(feature = "client", rpc(server, client, namespace = "reth"))] +pub trait RethApi { + /// Returns all ETH balance changes in a block + #[method(name = "getBalanceChangesInBlock")] + async fn reth_get_balance_changes_in_block( + &self, + block_id: BlockId, + ) -> RpcResult>; +} diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index d01036f8d4b7..368960517b7c 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -31,13 +31,13 @@ //! //! ``` //! use reth_network_api::{NetworkInfo, Peers}; -//! use reth_provider::{BlockReaderIdExt, ChainSpecProvider, CanonStateSubscriptions, StateProviderFactory, EvmEnvProvider}; +//! 
use reth_provider::{BlockReaderIdExt, ChainSpecProvider, CanonStateSubscriptions, StateProviderFactory, EvmEnvProvider, ChangeSetReader};
//! use reth_rpc_builder::{RethRpcModule, RpcModuleBuilder, RpcServerConfig, ServerBuilder, TransportRpcModuleConfig};
//! use reth_tasks::TokioTaskExecutor;
//! use reth_transaction_pool::TransactionPool;
//! pub async fn launch<Provider, Pool, Network, Events>(provider: Provider, pool: Pool, network: Network, events: Events)
//! where
-//! Provider: BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + Clone + Unpin + 'static,
+//! Provider: BlockReaderIdExt + ChainSpecProvider + ChangeSetReader + StateProviderFactory + EvmEnvProvider + Clone + Unpin + 'static,
//! Pool: TransactionPool + Clone + 'static,
//! Network: NetworkInfo + Peers + Clone + 'static,
//! Events: CanonStateSubscriptions + Clone + 'static,
@@ -64,7 +64,7 @@
//! ```
//! use tokio::try_join;
//! use reth_network_api::{NetworkInfo, Peers};
-//! use reth_provider::{BlockReaderIdExt, ChainSpecProvider, CanonStateSubscriptions, StateProviderFactory, EvmEnvProvider};
+//! use reth_provider::{BlockReaderIdExt, ChainSpecProvider, CanonStateSubscriptions, StateProviderFactory, EvmEnvProvider, ChangeSetReader};
//! use reth_rpc::JwtSecret;
//! use reth_rpc_builder::{RethRpcModule, RpcModuleBuilder, RpcServerConfig, TransportRpcModuleConfig};
//! use reth_tasks::TokioTaskExecutor;
//! use reth_transaction_pool::TransactionPool;
//! use reth_rpc_builder::auth::AuthServerConfig;
//! pub async fn launch<Provider, Pool, Network, Events, EngineApi>(provider: Provider, pool: Pool, network: Network, events: Events, engine_api: EngineApi)
//! where
-//! Provider: BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + Clone + Unpin + 'static,
+//! Provider: BlockReaderIdExt + ChainSpecProvider + ChangeSetReader + StateProviderFactory + EvmEnvProvider + Clone + Unpin + 'static,
//! Pool: TransactionPool + Clone + 'static,
//! Network: NetworkInfo + Peers + Clone + 'static,
//!
Events: CanonStateSubscriptions + Clone + 'static, @@ -113,8 +113,8 @@ use jsonrpsee::{ use reth_ipc::server::IpcServer; use reth_network_api::{NetworkInfo, Peers}; use reth_provider::{ - BlockReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, EvmEnvProvider, - StateProviderFactory, + BlockReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, + EvmEnvProvider, StateProviderFactory, }; use reth_rpc::{ eth::{ @@ -122,7 +122,7 @@ use reth_rpc::{ gas_oracle::GasPriceOracle, }, AdminApi, DebugApi, EngineEthApi, EthApi, EthFilter, EthPubSub, EthSubscriptionIdProvider, - NetApi, RPCApi, TraceApi, TracingCallGuard, TxPoolApi, Web3Api, + NetApi, RPCApi, RethApi, TraceApi, TracingCallGuard, TxPoolApi, Web3Api, }; use reth_rpc_api::{servers::*, EngineApiServer}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; @@ -176,6 +176,7 @@ where + StateProviderFactory + EvmEnvProvider + ChainSpecProvider + + ChangeSetReader + Clone + Unpin + 'static, @@ -320,6 +321,7 @@ where + StateProviderFactory + EvmEnvProvider + ChainSpecProvider + + ChangeSetReader + Clone + Unpin + 'static, @@ -543,6 +545,7 @@ impl RpcModuleSelection { + StateProviderFactory + EvmEnvProvider + ChainSpecProvider + + ChangeSetReader + Clone + Unpin + 'static, @@ -646,6 +649,8 @@ pub enum RethRpcModule { Web3, /// `rpc_` module Rpc, + /// `reth_` module + Reth, } // === impl RethRpcModule === @@ -758,6 +763,7 @@ where + StateProviderFactory + EvmEnvProvider + ChainSpecProvider + + ChangeSetReader + Clone + Unpin + 'static, @@ -839,6 +845,15 @@ where self } + /// Register Reth namespace + pub fn register_reth(&mut self) -> &mut Self { + self.modules.insert( + RethRpcModule::Reth, + RethApi::new(self.provider.clone(), Box::new(self.executor.clone())).into_rpc().into(), + ); + self + } + /// Helper function to create a [RpcModule] if it's not `None` fn maybe_module(&mut self, config: Option<&RpcModuleSelection>) -> Option> { let config = config?; @@ -921,6 +936,11 @@ where ) .into_rpc() .into(), + RethRpcModule::Reth => { + RethApi::new(self.provider.clone(), Box::new(self.executor.clone())) + .into_rpc() + .into() + } }) .clone() }) @@ -1754,6 +1774,7 @@ mod tests { "trace" => RethRpcModule::Trace, "web3" => RethRpcModule::Web3, "rpc" => RethRpcModule::Rpc, + "reth" => RethRpcModule::Reth, ); } diff --git a/crates/rpc/rpc/src/lib.rs b/crates/rpc/rpc/src/lib.rs index 8ec0893a0282..64083f746b2b 100644 --- a/crates/rpc/rpc/src/lib.rs +++ b/crates/rpc/rpc/src/lib.rs @@ -37,6 +37,7 @@ mod engine; pub mod eth; mod layers; mod net; +mod reth; mod rpc; mod trace; mod txpool; @@ -49,6 +50,7 @@ pub use engine::{EngineApi, EngineEthApi}; pub use eth::{EthApi, EthApiSpec, EthFilter, EthPubSub, EthSubscriptionIdProvider}; pub use layers::{AuthLayer, AuthValidator, Claims, JwtAuthValidator, JwtError, JwtSecret}; pub use net::NetApi; +pub use reth::RethApi; pub use rpc::RPCApi; pub use trace::TraceApi; pub use txpool::TxPoolApi; diff --git a/crates/rpc/rpc/src/reth.rs b/crates/rpc/rpc/src/reth.rs new file mode 100644 index 000000000000..7fbdf25ab460 --- /dev/null +++ b/crates/rpc/rpc/src/reth.rs @@ -0,0 +1,118 @@ +use crate::eth::error::{EthApiError, EthResult}; +use async_trait::async_trait; +use jsonrpsee::core::RpcResult; +use reth_interfaces::Result; +use reth_primitives::{Address, BlockId, U256}; +use reth_provider::{BlockReaderIdExt, ChangeSetReader, StateProviderFactory}; +use reth_rpc_api::RethApiServer; +use reth_tasks::TaskSpawner; +use std::{collections::HashMap, future::Future, sync::Arc}; 
+use tokio::sync::oneshot; + +/// `reth` API implementation. +/// +/// This type provides the functionality for handling `reth` prototype RPC requests. +pub struct RethApi { + inner: Arc>, +} + +// === impl RethApi === + +impl RethApi { + /// The provider that can interact with the chain. + pub fn provider(&self) -> &Provider { + &self.inner.provider + } + + /// Create a new instance of the [RethApi] + pub fn new(provider: Provider, task_spawner: Box) -> Self { + let inner = Arc::new(RethApiInner { provider, task_spawner }); + Self { inner } + } +} + +impl RethApi +where + Provider: BlockReaderIdExt + ChangeSetReader + StateProviderFactory + 'static, +{ + /// Executes the future on a new blocking task. + async fn on_blocking_task(&self, c: C) -> EthResult + where + C: FnOnce(Self) -> F, + F: Future> + Send + 'static, + R: Send + 'static, + { + let (tx, rx) = oneshot::channel(); + let this = self.clone(); + let f = c(this); + self.inner.task_spawner.spawn_blocking(Box::pin(async move { + let res = f.await; + let _ = tx.send(res); + })); + rx.await.map_err(|_| EthApiError::InternalEthError)? + } + + /// Returns a map of addresses to changed account balanced for a particular block. + pub async fn balance_changes_in_block( + &self, + block_id: BlockId, + ) -> EthResult> { + self.on_blocking_task(|this| async move { this.try_balance_changes_in_block(block_id) }) + .await + } + + fn try_balance_changes_in_block(&self, block_id: BlockId) -> EthResult> { + let block_id = block_id; + let Some(block_number) = self.provider().block_number_for_id(block_id)? else { + return Err(EthApiError::UnknownBlockNumber) + }; + + let state = self.provider().state_by_block_id(block_id)?; + let accounts_before = self.provider().account_block_changeset(block_number)?; + let hash_map = accounts_before.iter().try_fold( + HashMap::new(), + |mut hash_map, account_before| -> Result<_> { + let current_balance = state.account_balance(account_before.address)?; + let prev_balance = account_before.info.map(|info| info.balance); + if current_balance != prev_balance { + hash_map.insert(account_before.address, current_balance.unwrap_or_default()); + } + Ok(hash_map) + }, + )?; + Ok(hash_map) + } +} + +#[async_trait] +impl RethApiServer for RethApi +where + Provider: BlockReaderIdExt + ChangeSetReader + StateProviderFactory + 'static, +{ + /// Handler for `reth_getBalanceChangesInBlock` + async fn reth_get_balance_changes_in_block( + &self, + block_id: BlockId, + ) -> RpcResult> { + Ok(RethApi::balance_changes_in_block(self, block_id).await?) + } +} + +impl std::fmt::Debug for RethApi { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("RethApi").finish_non_exhaustive() + } +} + +impl Clone for RethApi { + fn clone(&self) -> Self { + Self { inner: Arc::clone(&self.inner) } + } +} + +struct RethApiInner { + /// The provider that can interact with the chain. + provider: Provider, + /// The type that can spawn tasks which would otherwise block. 
+ task_spawner: Box, +} diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index a6a1f6bf2980..64c9932711e8 100644 --- a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -25,8 +25,8 @@ pub use traits::{ BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, BlockWriter, BlockchainTreePendingStateProvider, CanonChainTracker, CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, CanonStateSubscriptions, - ChainSpecProvider, EvmEnvProvider, ExecutorFactory, HashingWriter, HeaderProvider, - HistoryWriter, PostStateDataProvider, ReceiptProvider, ReceiptProviderIdExt, + ChainSpecProvider, ChangeSetReader, EvmEnvProvider, ExecutorFactory, HashingWriter, + HeaderProvider, HistoryWriter, PostStateDataProvider, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StageCheckpointWriter, StateProvider, StateProviderBox, StateProviderFactory, StateRootProvider, StorageReader, TransactionsProvider, WithdrawalsProvider, diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 16f25d9792f7..7205710ab1a3 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1,6 +1,8 @@ use crate::{ post_state::StorageChangeset, - traits::{AccountExtReader, BlockSource, ReceiptProvider, StageCheckpointWriter}, + traits::{ + AccountExtReader, BlockSource, ChangeSetReader, ReceiptProvider, StageCheckpointWriter, + }, AccountReader, BlockExecutionWriter, BlockHashReader, BlockNumReader, BlockReader, BlockWriter, EvmEnvProvider, HashingWriter, HeaderProvider, HistoryWriter, PostState, ProviderError, StageCheckpointReader, StorageReader, TransactionsProvider, WithdrawalsProvider, @@ -726,6 +728,20 @@ impl<'this, TX: DbTx<'this>> AccountExtReader for DatabaseProvider<'this, TX> { } } +impl<'this, TX: DbTx<'this>> ChangeSetReader for DatabaseProvider<'this, TX> { + fn account_block_changeset(&self, block_number: BlockNumber) -> Result> { + let range = block_number..=block_number; + self.tx + .cursor_read::()? + .walk_range(range)? + .map(|result| -> Result<_> { + let (_, account_before) = result?; + Ok(account_before) + }) + .collect() + } +} + impl<'this, TX: DbTx<'this>> HeaderProvider for DatabaseProvider<'this, TX> { fn header(&self, block_hash: &BlockHash) -> Result> { if let Some(num) = self.block_number(*block_hash)? 
{ diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 083f15f98874..27e4903bb1de 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -1,7 +1,7 @@ use crate::{ BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockchainTreePendingStateProvider, CanonChainTracker, CanonStateNotifications, - CanonStateSubscriptions, ChainSpecProvider, EvmEnvProvider, HeaderProvider, + CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, HeaderProvider, PostStateDataProvider, ProviderError, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProviderBox, StateProviderFactory, TransactionsProvider, WithdrawalsProvider, @@ -39,6 +39,7 @@ mod state; use crate::{providers::chain_info::ChainInfoTracker, traits::BlockSource}; pub use database::*; pub use post_state_provider::PostStateProvider; +use reth_db::models::AccountBeforeTx; use reth_interfaces::blockchain_tree::{ error::InsertBlockError, CanonicalOutcome, InsertPayloadOk, }; @@ -813,3 +814,13 @@ where self.tree.subscribe_to_canonical_state() } } + +impl ChangeSetReader for BlockchainProvider +where + DB: Database, + Tree: Sync + Send, +{ + fn account_block_changeset(&self, block_number: BlockNumber) -> Result> { + self.database.provider()?.account_block_changeset(block_number) + } +} diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 8fa349b11b5b..def01741e68a 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -1,11 +1,11 @@ use crate::{ traits::{BlockSource, ReceiptProvider}, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, - ChainSpecProvider, EvmEnvProvider, HeaderProvider, PostState, ReceiptProviderIdExt, - StageCheckpointReader, StateProvider, StateProviderBox, StateProviderFactory, - StateRootProvider, TransactionsProvider, WithdrawalsProvider, + ChainSpecProvider, ChangeSetReader, EvmEnvProvider, HeaderProvider, PostState, + ReceiptProviderIdExt, StageCheckpointReader, StateProvider, StateProviderBox, + StateProviderFactory, StateRootProvider, TransactionsProvider, WithdrawalsProvider, }; -use reth_db::models::StoredBlockBodyIndices; +use reth_db::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_interfaces::Result; use reth_primitives::{ stage::{StageCheckpoint, StageId}, @@ -235,6 +235,12 @@ impl AccountReader for NoopProvider { } } +impl ChangeSetReader for NoopProvider { + fn account_block_changeset(&self, _block_number: BlockNumber) -> Result> { + Ok(Vec::default()) + } +} + impl StateRootProvider for NoopProvider { fn state_root(&self, _post_state: PostState) -> Result { todo!() diff --git a/crates/storage/provider/src/traits/account.rs b/crates/storage/provider/src/traits/account.rs index d08d15a1218e..5ae4fe60a778 100644 --- a/crates/storage/provider/src/traits/account.rs +++ b/crates/storage/provider/src/traits/account.rs @@ -1,4 +1,5 @@ use auto_impl::auto_impl; +use reth_db::models::AccountBeforeTx; use reth_interfaces::Result; use reth_primitives::{Account, Address, BlockNumber}; use std::{ @@ -42,3 +43,10 @@ pub trait AccountExtReader: Send + Sync { range: RangeInclusive, ) -> Result>>; } + +/// AccountChange reader +#[auto_impl(&, Arc, Box)] +pub trait ChangeSetReader: Send + Sync { + /// Iterate over account changesets and return the account state from before this block. 
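With the `ChangeSetReader` plumbing in place, the new endpoint can be exercised through the generated client trait — a minimal sketch, assuming the crate's `client` feature exposes a `RethApiClient` alongside the other namespace clients; the URL and block number are illustrative:

```rust
use jsonrpsee::http_client::HttpClientBuilder;
use reth_primitives::{BlockId, BlockNumberOrTag};
use reth_rpc_api::clients::RethApiClient;

// Hypothetical call against a node exposing the `reth` namespace.
async fn print_balance_changes() -> Result<(), Box<dyn std::error::Error>> {
    let client = HttpClientBuilder::default().build("http://localhost:8545")?;
    let block = BlockId::Number(BlockNumberOrTag::Number(17_000_000));
    // The node answers with the map assembled by `try_balance_changes_in_block`.
    let changes = client.reth_get_balance_changes_in_block(block).await?;
    for (address, balance) in changes {
        println!("{address:?} -> post-block balance {balance}");
    }
    Ok(())
}
```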
+ fn account_block_changeset(&self, block_number: BlockNumber) -> Result>; +} diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index 0411c995fcea..5343185bdf19 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -1,7 +1,7 @@ //! Collection of common provider traits. mod account; -pub use account::{AccountExtReader, AccountReader}; +pub use account::{AccountExtReader, AccountReader, ChangeSetReader}; mod storage; pub use storage::StorageReader; From 5ad9b32cbcbb9ed1e08a645674731cc78a0f7ed3 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 18 Jul 2023 14:16:17 -0400 Subject: [PATCH 198/722] feat: let consensus tests configure pipeline, executor, and client (#3839) --- Cargo.lock | 2 + crates/consensus/beacon/Cargo.toml | 2 + crates/consensus/beacon/src/engine/mod.rs | 398 ++++++++++++++++------ 3 files changed, 289 insertions(+), 113 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d013576e75c9..b5cfbf016df9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5050,12 +5050,14 @@ dependencies = [ "reth-blockchain-tree", "reth-consensus-common", "reth-db", + "reth-downloaders", "reth-interfaces", "reth-metrics", "reth-payload-builder", "reth-primitives", "reth-provider", "reth-prune", + "reth-revm", "reth-rpc-types", "reth-stages", "reth-tasks", diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 5f7ba35dd225..bb676f71d101 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -40,5 +40,7 @@ reth-blockchain-tree = { path = "../../blockchain-tree", features = ["test-utils reth-db = { path = "../../storage/db", features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-tracing = { path = "../../tracing" } +reth-revm = { path = "../../revm" } +reth-downloaders = { path = "../../net/downloaders" } assert_matches = "1.5" diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index e4f46ad9ea1f..4e1e6668261b 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1716,20 +1716,30 @@ mod tests { BlockchainTree, ShareableBlockchainTree, }; use reth_db::{test_utils::create_test_rw_db, DatabaseEnv}; + use reth_downloaders::{ + bodies::bodies::BodiesDownloaderBuilder, + headers::reverse_headers::ReverseHeadersDownloaderBuilder, + }; use reth_interfaces::{ + consensus::Consensus, + p2p::either::EitherDownloader, sync::NoopSyncStateUpdater, test_utils::{NoopFullBlockClient, TestConsensus}, }; use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::{stage::StageCheckpoint, ChainSpec, ChainSpecBuilder, H256, MAINNET}; use reth_provider::{ - providers::BlockchainProvider, test_utils::TestExecutorFactory, BlockWriter, - ProviderFactory, + providers::BlockchainProvider, test_utils::TestExecutorFactory, BlockExecutor, BlockWriter, + ExecutorFactory, ProviderFactory, StateProvider, }; + use reth_revm::Factory; use reth_rpc_types::engine::{ ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, }; - use reth_stages::{test_utils::TestStages, ExecOutput, PipelineError, StageError}; + use reth_stages::{ + sets::DefaultStages, stages::HeaderSyncMode, test_utils::TestStages, ExecOutput, + PipelineError, StageError, + }; use reth_tasks::TokioTaskExecutor; use std::{collections::VecDeque, sync::Arc, time::Duration}; use 
tokio::sync::{ @@ -1737,13 +1747,17 @@ mod tests { watch, }; - type TestBeaconConsensusEngine = BeaconConsensusEngine< + type TestBeaconConsensusEngine = BeaconConsensusEngine< Arc, BlockchainProvider< Arc, - ShareableBlockchainTree, TestConsensus, TestExecutorFactory>, + ShareableBlockchainTree< + Arc, + Arc, + EitherExecutorFactory, + >, >, - NoopFullBlockClient, + Arc>, >; struct TestEnv { @@ -1806,22 +1820,124 @@ mod tests { } } - struct TestConsensusEngineBuilder { + /// Represents either test pipeline outputs, or real pipeline configuration. + #[derive(Default)] + enum TestPipelineConfig { + /// Test pipeline outputs. + Test(VecDeque>), + /// Real pipeline configuration. + #[default] + Real, + } + + /// Represents either test executor results, or real executor configuration. + #[derive(Default)] + enum TestExecutorConfig { + /// Test executor results. + Test(Vec), + /// Real executor configuration. + #[default] + Real, + } + + /// A type that represents one of two possible executor factories. + #[derive(Debug, Clone)] + enum EitherExecutorFactory { + /// The first factory variant + Left(A), + /// The second factory variant + Right(B), + } + + // A type that represents one of two possible BlockExecutor types. + #[derive(Debug)] + enum EitherBlockExecutor { + /// The first executor variant + Left(A), + /// The second executor variant + Right(B), + } + + impl BlockExecutor for EitherBlockExecutor + where + A: BlockExecutor, + B: BlockExecutor, + SP: StateProvider, + { + fn execute( + &mut self, + block: &reth_primitives::Block, + total_difficulty: U256, + senders: Option>, + ) -> Result { + match self { + EitherBlockExecutor::Left(a) => a.execute(block, total_difficulty, senders), + EitherBlockExecutor::Right(b) => b.execute(block, total_difficulty, senders), + } + } + + fn execute_and_verify_receipt( + &mut self, + block: &reth_primitives::Block, + total_difficulty: U256, + senders: Option>, + ) -> Result { + match self { + EitherBlockExecutor::Left(a) => { + a.execute_and_verify_receipt(block, total_difficulty, senders) + } + EitherBlockExecutor::Right(b) => { + b.execute_and_verify_receipt(block, total_difficulty, senders) + } + } + } + } + + impl ExecutorFactory for EitherExecutorFactory + where + A: ExecutorFactory, + B: ExecutorFactory, + { + type Executor = EitherBlockExecutor, B::Executor>; + + fn chain_spec(&self) -> &ChainSpec { + match self { + EitherExecutorFactory::Left(a) => a.chain_spec(), + EitherExecutorFactory::Right(b) => b.chain_spec(), + } + } + + fn with_sp(&self, sp: SP) -> Self::Executor { + match self { + EitherExecutorFactory::Left(a) => EitherBlockExecutor::Left(a.with_sp(sp)), + EitherExecutorFactory::Right(b) => EitherBlockExecutor::Right(b.with_sp(sp)), + } + } + } + + /// A builder for `TestConsensusEngine`, allows configuration of mocked pipeline outputs and + /// mocked executor results. + struct TestConsensusEngineBuilder { chain_spec: Arc, - pipeline_exec_outputs: VecDeque>, - executor_results: Vec, + pipeline_config: TestPipelineConfig, + executor_config: TestExecutorConfig, pipeline_run_threshold: Option, max_block: Option, + client: Option, } - impl TestConsensusEngineBuilder { + impl TestConsensusEngineBuilder + where + Client: HeadersClient + BodiesClient + 'static, + { /// Create a new `TestConsensusEngineBuilder` with the given `ChainSpec`. 
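The `EitherBlockExecutor`/`EitherExecutorFactory` pair defined above follows the same "either" delegation shape as the `EitherDownloader` imported earlier: a two-variant enum implements a trait by forwarding every call to whichever side it holds, letting the tests swap mock and real components without boxing trait objects. Stripped of reth's traits, the pattern in miniature looks like this (illustrative code, not from the patch):

```rust
// A minimal "either" type that forwards a trait impl to its populated variant.
enum Either<A, B> {
    Left(A),
    Right(B),
}

impl<A, B, T> Iterator for Either<A, B>
where
    A: Iterator<Item = T>,
    B: Iterator<Item = T>,
{
    type Item = T;

    fn next(&mut self) -> Option<T> {
        match self {
            Either::Left(a) => a.next(),
            Either::Right(b) => b.next(),
        }
    }
}
```

The cost is writing the forwarding impl once per trait, which is exactly what the patch does for `BlockExecutor` and `ExecutorFactory`.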
fn new(chain_spec: Arc) -> Self { Self { chain_spec, - pipeline_exec_outputs: VecDeque::new(), - executor_results: Vec::new(), + pipeline_config: Default::default(), + executor_config: Default::default(), pipeline_run_threshold: None, + client: None, max_block: None, } } @@ -1831,13 +1947,13 @@ mod tests { mut self, pipeline_exec_outputs: VecDeque>, ) -> Self { - self.pipeline_exec_outputs = pipeline_exec_outputs; + self.pipeline_config = TestPipelineConfig::Test(pipeline_exec_outputs); self } /// Set the executor results to use for the test consensus engine. fn with_executor_results(mut self, executor_results: Vec) -> Self { - self.executor_results = executor_results; + self.executor_config = TestExecutorConfig::Test(executor_results); self } @@ -1847,6 +1963,13 @@ mod tests { self } + /// Sets the client to use for network operations. + #[allow(dead_code)] + fn with_client(mut self, client: Client) -> Self { + self.client = Some(client); + self + } + /// Disables blockchain tree driven sync. This is the same as setting the pipeline run /// threshold to 0. fn disable_blockchain_tree_sync(mut self) -> Self { @@ -1855,20 +1978,55 @@ mod tests { } /// Builds the test consensus engine into a `TestConsensusEngine` and `TestEnv`. - fn build(self) -> (TestBeaconConsensusEngine, TestEnv>) { + fn build(self) -> (TestBeaconConsensusEngine, TestEnv>) { reth_tracing::init_test_tracing(); let db = create_test_rw_db(); - let consensus = TestConsensus::default(); + let consensus = Arc::new(TestConsensus::default()); let payload_builder = spawn_test_payload_service(); - let executor_factory = TestExecutorFactory::new(self.chain_spec.clone()); - executor_factory.extend(self.executor_results); + // use either noop client or a user provided client (for example TestFullBlockClient) + let client = Arc::new( + self.client + .map(EitherDownloader::Left) + .unwrap_or_else(|| EitherDownloader::Right(NoopFullBlockClient::default())), + ); + + // use either test executor or real executor + let executor_factory = match self.executor_config { + TestExecutorConfig::Test(results) => { + let executor_factory = TestExecutorFactory::new(self.chain_spec.clone()); + executor_factory.extend(results); + EitherExecutorFactory::Left(executor_factory) + } + TestExecutorConfig::Real => { + EitherExecutorFactory::Right(Factory::new(self.chain_spec.clone())) + } + }; // Setup pipeline let (tip_tx, tip_rx) = watch::channel(H256::default()); - let mut pipeline = Pipeline::builder() - .add_stages(TestStages::new(self.pipeline_exec_outputs, Default::default())) - .with_tip_sender(tip_tx); + let mut pipeline = match self.pipeline_config { + TestPipelineConfig::Test(outputs) => Pipeline::builder() + .add_stages(TestStages::new(outputs, Default::default())) + .with_tip_sender(tip_tx), + TestPipelineConfig::Real => { + let header_downloader = ReverseHeadersDownloaderBuilder::default() + .build(client.clone(), consensus.clone()) + .into_task(); + + let body_downloader = BodiesDownloaderBuilder::default() + .build(client.clone(), consensus.clone(), db.clone()) + .into_task(); + + Pipeline::builder().add_stages(DefaultStages::new( + HeaderSyncMode::Tip(tip_rx.clone()), + Arc::clone(&consensus) as Arc, + header_downloader, + body_downloader, + executor_factory.clone(), + )) + } + }; if let Some(max_block) = self.max_block { pipeline = pipeline.with_max_block(max_block); @@ -1896,7 +2054,7 @@ mod tests { let pruner = Pruner::new(5, 0); let (mut engine, handle) = BeaconConsensusEngine::new( - NoopFullBlockClient::default(), + client, pipeline, 
blockchain_provider, Box::::default(), @@ -1918,8 +2076,8 @@ mod tests { } } - fn spawn_consensus_engine( - engine: TestBeaconConsensusEngine, + fn spawn_consensus_engine( + engine: TestBeaconConsensusEngine, ) -> oneshot::Receiver> { let (tx, rx) = oneshot::channel(); tokio::spawn(async move { @@ -1940,11 +2098,12 @@ mod tests { .build(), ); - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Err(StageError::ChannelClosed)])) - .disable_blockchain_tree_sync() - .with_max_block(1) - .build(); + let (consensus_engine, env) = + TestConsensusEngineBuilder::::new(chain_spec.clone()) + .with_pipeline_exec_outputs(VecDeque::from([Err(StageError::ChannelClosed)])) + .disable_blockchain_tree_sync() + .with_max_block(1) + .build(); let res = spawn_consensus_engine(consensus_engine); @@ -1971,11 +2130,12 @@ mod tests { .build(), ); - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Err(StageError::ChannelClosed)])) - .disable_blockchain_tree_sync() - .with_max_block(1) - .build(); + let (consensus_engine, env) = + TestConsensusEngineBuilder::::new(chain_spec.clone()) + .with_pipeline_exec_outputs(VecDeque::from([Err(StageError::ChannelClosed)])) + .disable_blockchain_tree_sync() + .with_max_block(1) + .build(); let mut rx = spawn_consensus_engine(consensus_engine); @@ -2033,14 +2193,15 @@ mod tests { .build(), ); - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([ - Ok(ExecOutput { checkpoint: StageCheckpoint::new(1), done: true }), - Err(StageError::ChannelClosed), - ])) - .disable_blockchain_tree_sync() - .with_max_block(2) - .build(); + let (consensus_engine, env) = + TestConsensusEngineBuilder::::new(chain_spec.clone()) + .with_pipeline_exec_outputs(VecDeque::from([ + Ok(ExecOutput { checkpoint: StageCheckpoint::new(1), done: true }), + Err(StageError::ChannelClosed), + ])) + .disable_blockchain_tree_sync() + .with_max_block(2) + .build(); let rx = spawn_consensus_engine(consensus_engine); @@ -2068,14 +2229,15 @@ mod tests { .build(), ); - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(max_block), - done: true, - })])) - .with_max_block(max_block) - .disable_blockchain_tree_sync() - .build(); + let (consensus_engine, env) = + TestConsensusEngineBuilder::::new(chain_spec.clone()) + .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { + checkpoint: StageCheckpoint::new(max_block), + done: true, + })])) + .with_max_block(max_block) + .disable_blockchain_tree_sync() + .build(); let rx = spawn_consensus_engine(consensus_engine); @@ -2117,12 +2279,13 @@ mod tests { .build(), ); - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .build(); + let (consensus_engine, env) = + TestConsensusEngineBuilder::::new(chain_spec.clone()) + .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { + checkpoint: StageCheckpoint::new(0), + done: true, + })])) + .build(); let mut engine_rx = spawn_consensus_engine(consensus_engine); @@ -2148,12 +2311,13 @@ mod tests { .build(), ); - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - 
.with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .build(); + let (consensus_engine, env) = + TestConsensusEngineBuilder::::new(chain_spec.clone()) + .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { + checkpoint: StageCheckpoint::new(0), + done: true, + })])) + .build(); let genesis = random_block(&mut rng, 0, None, None, Some(0)); let block1 = random_block(&mut rng, 1, Some(genesis.hash), None, Some(0)); @@ -2197,13 +2361,14 @@ mod tests { .build(), ); - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([ - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - ])) - .disable_blockchain_tree_sync() - .build(); + let (consensus_engine, env) = + TestConsensusEngineBuilder::::new(chain_spec.clone()) + .with_pipeline_exec_outputs(VecDeque::from([ + Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), + Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), + ])) + .disable_blockchain_tree_sync() + .build(); let genesis = random_block(&mut rng, 0, None, None, Some(0)); let block1 = random_block(&mut rng, 1, Some(genesis.hash), None, Some(0)); @@ -2247,13 +2412,14 @@ mod tests { .build(), ); - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .disable_blockchain_tree_sync() - .build(); + let (consensus_engine, env) = + TestConsensusEngineBuilder::::new(chain_spec.clone()) + .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { + checkpoint: StageCheckpoint::new(0), + done: true, + })])) + .disable_blockchain_tree_sync() + .build(); let genesis = random_block(&mut rng, 0, None, None, Some(0)); let block1 = random_block(&mut rng, 1, Some(genesis.hash), None, Some(0)); @@ -2285,12 +2451,13 @@ mod tests { .build(), ); - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([ - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - ])) - .build(); + let (consensus_engine, env) = + TestConsensusEngineBuilder::::new(chain_spec.clone()) + .with_pipeline_exec_outputs(VecDeque::from([ + Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), + Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), + ])) + .build(); let genesis = random_block(&mut rng, 0, None, None, Some(0)); let mut block1 = random_block(&mut rng, 1, Some(genesis.hash), None, Some(0)); @@ -2338,12 +2505,13 @@ mod tests { .build(), ); - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([ - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - ])) - .build(); + let (consensus_engine, env) = + TestConsensusEngineBuilder::::new(chain_spec.clone()) + .with_pipeline_exec_outputs(VecDeque::from([ + Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), + Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), + ])) + .build(); let genesis = random_block(&mut rng, 0, None, None, Some(0)); let block1 = random_block(&mut rng, 1, Some(genesis.hash), None, 
Some(0)); @@ -2385,12 +2553,13 @@ mod tests { .build(), ); - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .build(); + let (consensus_engine, env) = + TestConsensusEngineBuilder::::new(chain_spec.clone()) + .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { + checkpoint: StageCheckpoint::new(0), + done: true, + })])) + .build(); let mut engine_rx = spawn_consensus_engine(consensus_engine); @@ -2420,12 +2589,13 @@ mod tests { .build(), ); - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .build(); + let (consensus_engine, env) = + TestConsensusEngineBuilder::::new(chain_spec.clone()) + .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { + checkpoint: StageCheckpoint::new(0), + done: true, + })])) + .build(); let genesis = random_block(&mut rng, 0, None, None, Some(0)); let block1 = random_block(&mut rng, 1, Some(genesis.hash), None, Some(0)); @@ -2470,12 +2640,13 @@ mod tests { .build(), ); - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .build(); + let (consensus_engine, env) = + TestConsensusEngineBuilder::::new(chain_spec.clone()) + .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { + checkpoint: StageCheckpoint::new(0), + done: true, + })])) + .build(); let genesis = random_block(&mut rng, 0, None, None, Some(0)); @@ -2526,13 +2697,14 @@ mod tests { .build(), ); - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .with_executor_results(Vec::from([exec_result2])) - .build(); + let (consensus_engine, env) = + TestConsensusEngineBuilder::::new(chain_spec.clone()) + .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { + checkpoint: StageCheckpoint::new(0), + done: true, + })])) + .with_executor_results(Vec::from([exec_result2])) + .build(); insert_blocks( env.db.as_ref(), From 0e05085e553474540ef71160e7bd9bd550cd766a Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 18 Jul 2023 21:28:29 +0200 Subject: [PATCH 199/722] feat(book): more precise disk requirements (#3838) --- book/installation/installation.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/book/installation/installation.md b/book/installation/installation.md index d3ad78fc9134..a7fd324bf133 100644 --- a/book/installation/installation.md +++ b/book/installation/installation.md @@ -14,12 +14,12 @@ The hardware requirements for running Reth depend on the node configuration and The most important requirement is by far the disk, whereas CPU and RAM requirements are relatively flexible. 
-| | Archive Node | Full Node |
-|-----------|----------------------------------------|-------------------------------------|
-| Disk | At least 2TB (TLC NVMe recommended) | TBD |
-| Memory | 8GB+ | 8GB+ |
-| CPU | Higher clock speed over core count | Higher clock speeds over core count |
-| Bandwidth | Stable 24Mbps+ | Stable 24Mbps+ |
+| | Archive Node | Full Node |
+|-----------|---------------------------------------|-------------------------------------|
+| Disk | At least 2.1TB (TLC NVMe recommended) | TBD |
+| Memory | 8GB+ | 8GB+ |
+| CPU | Higher clock speed over core count | Higher clock speeds over core count |
+| Bandwidth | Stable 24Mbps+ | Stable 24Mbps+ |

#### QLC and TLC

Prior to purchasing an NVMe drive, it is advisable to research and determine whether the drive is QLC or TLC.

There are multiple types of disks to sync Reth, with varying size requirements, depending on the syncing mode:

-* Archive Node: At least 2TB is required to store
+* Archive Node: At least 2.1TB is required (as of July 2023, at block number 17.7M)
* Full Node: TBD

NVMe drives are recommended for the best performance, with SSDs being a cheaper alternative. HDDs are the cheapest option, but they will take the longest to sync, and are not recommended.

-At the time of writing, syncing an Ethereum mainnet node to block 17.4M on NVMe drives takes about 50 hours, while on a GCP "Persistent SSD" it takes around 5 days.
+As of July 2023, syncing an Ethereum mainnet node to block 17.7M on NVMe drives takes about 50 hours, while on a GCP "Persistent SSD" it takes around 5 days.

> **Note**
>

From b8587a2ca680e86acd152abc333093dae824e698 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Tue, 18 Jul 2023 21:28:47 +0200
Subject: [PATCH 200/722] chore: set trace result to null if non revert or selfdestruct (#3840)

---
 .../revm/revm-inspectors/src/tracing/types.rs | 22 ++++++++++++-------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/crates/revm/revm-inspectors/src/tracing/types.rs b/crates/revm/revm-inspectors/src/tracing/types.rs
index 76ff0f510089..22bebdbb2478 100644
--- a/crates/revm/revm-inspectors/src/tracing/types.rs
+++ b/crates/revm/revm-inspectors/src/tracing/types.rs
@@ -159,6 +159,11 @@ impl CallTrace {
         self.status as u8 >= InstructionResult::Revert as u8
     }

+    // Returns true if the status code is a revert
+    pub(crate) fn is_revert(&self) -> bool {
+        self.status == InstructionResult::Revert
+    }
+
     /// Returns the error message if it is an erroneous result.
pub(crate) fn as_error(&self, kind: TraceStyle) -> Option { // See also @@ -340,15 +345,16 @@ impl CallTraceNode { /// Converts this node into a parity `TransactionTrace` pub(crate) fn parity_transaction_trace(&self, trace_address: Vec) -> TransactionTrace { let action = self.parity_action(); - let output = self.parity_trace_output(); + let result = if action.is_selfdestruct() || + (self.trace.is_error() && !self.trace.is_revert()) + { + // if the trace is a selfdestruct or an error that is not a revert, the result is None + None + } else { + Some(self.parity_trace_output()) + }; let error = self.trace.as_error(TraceStyle::Parity); - TransactionTrace { - action, - error, - result: Some(output), - trace_address, - subtraces: self.children.len(), - } + TransactionTrace { action, error, result, trace_address, subtraces: self.children.len() } } /// Returns the `Output` for a parity trace From 314e56119391931fde2fc0a4f6a0e51ce2f564c0 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 18 Jul 2023 16:52:25 -0400 Subject: [PATCH 201/722] feat: add key gen util and simple fcu validation test with custom alloc (#3842) --- crates/consensus/beacon/src/engine/mod.rs | 57 ++++++++++++++++++- .../interfaces/src/test_utils/generators.rs | 14 +++-- crates/primitives/src/lib.rs | 2 +- crates/primitives/src/transaction/util.rs | 17 ++++-- 4 files changed, 77 insertions(+), 13 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 4e1e6668261b..8be81f2bbdce 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -2538,8 +2538,11 @@ mod tests { mod new_payload { use super::*; - use reth_interfaces::test_utils::{generators, generators::random_block}; - use reth_primitives::{Hardfork, U256}; + use reth_interfaces::test_utils::{ + generators, + generators::{generate_keys, random_block}, + }; + use reth_primitives::{public_key_to_address, Genesis, GenesisAccount, Hardfork, U256}; use reth_provider::test_utils::blocks::BlockChainTestData; #[tokio::test] @@ -2629,6 +2632,56 @@ mod tests { assert_matches!(engine_rx.try_recv(), Err(TryRecvError::Empty)); } + #[tokio::test] + async fn simple_validate_block() { + let mut rng = generators::rng(); + let genesis_keys = generate_keys(&mut rng, 16); + let amount = 1000000000000000000u64; + let alloc = genesis_keys.iter().map(|pair| { + ( + public_key_to_address(pair.public_key()), + GenesisAccount::default().with_balance(U256::from(amount)), + ) + }); + + let genesis = Genesis::default().extend_accounts(alloc); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(genesis) + .shanghai_activated() + .build(), + ); + + let (consensus_engine, env) = + TestConsensusEngineBuilder::::new(chain_spec.clone()).build(); + + let genesis = + SealedBlock { header: chain_spec.sealed_genesis_header(), ..Default::default() }; + let block1 = random_block(&mut rng, 1, Some(chain_spec.genesis_hash()), None, Some(0)); + + // TODO: add transactions that transfer from the alloc accounts, generating the new + // block tx and state root + + insert_blocks(env.db.as_ref(), chain_spec.clone(), [&genesis, &block1].into_iter()); + + let mut engine_rx = spawn_consensus_engine(consensus_engine); + + // Send forkchoice + let res = env + .send_forkchoice_updated(ForkchoiceState { + head_block_hash: block1.hash, + finalized_block_hash: block1.hash, + ..Default::default() + }) + .await; + let expected_result = 
PayloadStatus::from_status(PayloadStatusEnum::Valid) + .with_latest_valid_hash(block1.hash); + assert_matches!(res, Ok(ForkchoiceUpdated { payload_status, .. }) => assert_eq!(payload_status, expected_result)); + assert_matches!(engine_rx.try_recv(), Err(TryRecvError::Empty)); + } + #[tokio::test] async fn payload_parent_unknown() { let mut rng = generators::rng(); diff --git a/crates/interfaces/src/test_utils/generators.rs b/crates/interfaces/src/test_utils/generators.rs index dfcdf39a0c51..fbbdf42a2129 100644 --- a/crates/interfaces/src/test_utils/generators.rs +++ b/crates/interfaces/src/test_utils/generators.rs @@ -104,6 +104,12 @@ pub fn sign_tx_with_key_pair(key_pair: KeyPair, tx: Transaction) -> TransactionS TransactionSigned::from_transaction_and_signature(tx, signature) } +/// Generates a set of [KeyPair]s based on the desired count. +pub fn generate_keys(rng: &mut R, count: usize) -> Vec { + let secp = Secp256k1::new(); + (0..count).map(|_| KeyPair::new(&secp, rng)).collect() +} + /// Generate a random block filled with signed transactions (generated using /// [random_signed_tx]). If no transaction count is provided, the number of transactions /// will be random, otherwise the provided count will be used. @@ -363,7 +369,9 @@ mod test { use super::*; use hex_literal::hex; - use reth_primitives::{keccak256, AccessList, Address, TransactionKind, TxEip1559}; + use reth_primitives::{ + keccak256, public_key_to_address, AccessList, Address, TransactionKind, TxEip1559, + }; use secp256k1::KeyPair; #[test] @@ -393,9 +401,7 @@ mod test { let signed = TransactionSigned::from_transaction_and_signature(tx.clone(), signature); let recovered = signed.recover_signer().unwrap(); - let public_key_hash = keccak256(&key_pair.public_key().serialize_uncompressed()[1..]); - let expected = Address::from_slice(&public_key_hash[12..]); - + let expected = public_key_to_address(key_pair.public_key()); assert_eq!(recovered, expected); } } diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 753c1435fe73..0248f008a24c 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -84,7 +84,7 @@ pub use revm_primitives::JumpMap; pub use serde_helper::JsonU256; pub use storage::StorageEntry; pub use transaction::{ - util::secp256k1::{recover_signer, sign_message}, + util::secp256k1::{public_key_to_address, recover_signer, sign_message}, AccessList, AccessListItem, AccessListWithGasUsed, FromRecoveredTransaction, IntoRecoveredTransaction, InvalidTransactionError, Signature, Transaction, TransactionKind, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, diff --git a/crates/primitives/src/transaction/util.rs b/crates/primitives/src/transaction/util.rs index f67e152b5826..da3fb63494c5 100644 --- a/crates/primitives/src/transaction/util.rs +++ b/crates/primitives/src/transaction/util.rs @@ -6,7 +6,7 @@ pub(crate) mod secp256k1 { pub(crate) use ::secp256k1::Error; use ::secp256k1::{ ecdsa::{RecoverableSignature, RecoveryId}, - Message, SecretKey, SECP256K1, + Message, PublicKey, SecretKey, SECP256K1, }; use revm_primitives::{B256, U256}; @@ -18,11 +18,7 @@ pub(crate) mod secp256k1 { RecoverableSignature::from_compact(&sig[0..64], RecoveryId::from_i32(sig[64] as i32)?)?; let public = SECP256K1.recover_ecdsa(&Message::from_slice(&msg[..32])?, &sig)?; - - // strip out the first byte because that should be the SECP256K1_TAG_PUBKEY_UNCOMPRESSED - // tag returned by libsecp's uncompressed pubkey serialization - let hash = 
keccak256(&public.serialize_uncompressed()[1..]); - Ok(Address::from_slice(&hash[12..])) + Ok(public_key_to_address(public)) } /// Signs message with the given secret key. @@ -39,6 +35,15 @@ pub(crate) mod secp256k1 { }; Ok(signature) } + + /// Converts a public key into an ethereum address by hashing the encoded public key with + /// keccak256. + pub fn public_key_to_address(public: PublicKey) -> Address { + // strip out the first byte because that should be the SECP256K1_TAG_PUBKEY_UNCOMPRESSED + // tag returned by libsecp's uncompressed pubkey serialization + let hash = keccak256(&public.serialize_uncompressed()[1..]); + Address::from_slice(&hash[12..]) + } } #[cfg(test)] mod tests { From 170e6f24d2d600b1c9db52462a2d53908bd4f8d6 Mon Sep 17 00:00:00 2001 From: Resende <17102689+ZePedroResende@users.noreply.github.com> Date: Wed, 19 Jul 2023 00:53:34 +0100 Subject: [PATCH 202/722] feat(rpc): Add ots_ namespace and trait bindings for Otterscan Endpoints (#3778) Co-authored-by: Miguel Palhas Co-authored-by: Matthias Seitz --- crates/rpc/rpc-api/src/lib.rs | 3 + crates/rpc/rpc-api/src/otterscan.rs | 85 ++++++++++++++++++++ crates/rpc/rpc-builder/src/lib.rs | 13 ++- crates/rpc/rpc-types/src/lib.rs | 2 + crates/rpc/rpc-types/src/otterscan.rs | 97 ++++++++++++++++++++++ crates/rpc/rpc/src/lib.rs | 2 + crates/rpc/rpc/src/otterscan.rs | 111 ++++++++++++++++++++++++++ 7 files changed, 312 insertions(+), 1 deletion(-) create mode 100644 crates/rpc/rpc-api/src/otterscan.rs create mode 100644 crates/rpc/rpc-types/src/otterscan.rs create mode 100644 crates/rpc/rpc/src/otterscan.rs diff --git a/crates/rpc/rpc-api/src/lib.rs b/crates/rpc/rpc-api/src/lib.rs index 7fba5adba9e0..073631c986e7 100644 --- a/crates/rpc/rpc-api/src/lib.rs +++ b/crates/rpc/rpc-api/src/lib.rs @@ -26,6 +26,7 @@ mod eth; mod eth_filter; mod eth_pubsub; mod net; +mod otterscan; mod reth; mod rpc; mod trace; @@ -45,6 +46,7 @@ pub mod servers { eth_filter::EthFilterApiServer, eth_pubsub::EthPubSubApiServer, net::NetApiServer, + otterscan::OtterscanServer, reth::RethApiServer, rpc::RpcApiServer, trace::TraceApiServer, @@ -66,6 +68,7 @@ pub mod clients { engine::{EngineApiClient, EngineEthApiClient}, eth::EthApiClient, net::NetApiClient, + otterscan::OtterscanClient, rpc::RpcApiServer, trace::TraceApiClient, txpool::TxPoolApiClient, diff --git a/crates/rpc/rpc-api/src/otterscan.rs b/crates/rpc/rpc-api/src/otterscan.rs new file mode 100644 index 000000000000..9ac585991fed --- /dev/null +++ b/crates/rpc/rpc-api/src/otterscan.rs @@ -0,0 +1,85 @@ +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +use reth_primitives::{Address, BlockId, BlockNumberOrTag, TxHash, H256}; +use reth_rpc_types::{ + BlockDetails, ContractCreator, InternalOperation, OtsBlockTransactions, TraceEntry, + Transaction, TransactionsWithReceipts, +}; + +/// Otterscan rpc interface. +#[cfg_attr(not(feature = "client"), rpc(server, namespace = "ots"))] +#[cfg_attr(feature = "client", rpc(server, client, namespace = "ots"))] +pub trait Otterscan { + /// Check if a certain address contains a deployed code. + #[method(name = "hasCode")] + async fn has_code(&self, address: Address, block_number: Option) -> RpcResult; + + /// Very simple API versioning scheme. Every time we add a new capability, the number is + /// incremented. This allows for Otterscan to check if the node contains all API it + /// needs. + #[method(name = "getApiLevel")] + async fn get_api_level(&self) -> RpcResult; + + /// Return the internal ETH transfers inside a transaction. 
+ #[method(name = "getInternalOperations")] + async fn get_internal_operations(&self, tx_hash: TxHash) -> RpcResult>; + + /// Given a transaction hash, returns its raw revert reason. + #[method(name = "getTransactionError")] + async fn get_transaction_error(&self, tx_hash: TxHash) -> RpcResult; + + /// Extract all variations of calls, contract creation and self-destructs and returns a call + /// tree. + #[method(name = "traceTransaction")] + async fn trace_transaction(&self, tx_hash: TxHash) -> RpcResult; + + /// Tailor-made and expanded version of eth_getBlockByNumber for block details page in + /// Otterscan. + #[method(name = "getBlockDetails")] + async fn get_block_details( + &self, + block_number: BlockNumberOrTag, + ) -> RpcResult>; + + /// Tailor-made and expanded version of eth_getBlockByHash for block details page in Otterscan. + #[method(name = "getBlockDetailsByHash")] + async fn get_block_details_by_hash(&self, block_hash: H256) -> RpcResult>; + + /// Get paginated transactions for a certain block. Also remove some verbose fields like logs. + #[method(name = "getBlockTransactions")] + async fn get_block_transactions( + &self, + block_number: BlockNumberOrTag, + page_number: usize, + page_size: usize, + ) -> RpcResult; + + /// Gets paginated inbound/outbound transaction calls for a certain address. + #[method(name = "searchTransactionsBefore")] + async fn search_transactions_before( + &self, + address: Address, + block_number: BlockNumberOrTag, + page_size: usize, + ) -> RpcResult; + + /// Gets paginated inbound/outbound transaction calls for a certain address. + #[method(name = "searchTransactionsAfter")] + async fn search_transactions_after( + &self, + address: Address, + block_number: BlockNumberOrTag, + page_size: usize, + ) -> RpcResult; + + /// Gets the transaction hash for a certain sender address, given its nonce. + #[method(name = "getTransactionBySenderAndNonce")] + async fn get_transaction_by_sender_and_nonce( + &self, + sender: Address, + nonce: u64, + ) -> RpcResult>; + + /// Gets the transaction hash and the address who created a contract. 
+ #[method(name = "getContractCreator")] + async fn get_contract_creator(&self, address: Address) -> RpcResult>; +} diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 368960517b7c..8737519fa81b 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -122,7 +122,7 @@ use reth_rpc::{ gas_oracle::GasPriceOracle, }, AdminApi, DebugApi, EngineEthApi, EthApi, EthFilter, EthPubSub, EthSubscriptionIdProvider, - NetApi, RPCApi, RethApi, TraceApi, TracingCallGuard, TxPoolApi, Web3Api, + NetApi, OtterscanApi, RPCApi, RethApi, TraceApi, TracingCallGuard, TxPoolApi, Web3Api, }; use reth_rpc_api::{servers::*, EngineApiServer}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; @@ -651,6 +651,8 @@ pub enum RethRpcModule { Rpc, /// `reth_` module Reth, + /// `ots_` module + Ots, } // === impl RethRpcModule === @@ -779,6 +781,13 @@ where self } + /// Register Otterscan Namespace + pub fn register_ots(&mut self) -> &mut Self { + let eth_api = self.eth_api(); + self.modules.insert(RethRpcModule::Ots, OtterscanApi::new(eth_api).into_rpc().into()); + self + } + /// Register Debug Namespace pub fn register_debug(&mut self) -> &mut Self { let eth_api = self.eth_api(); @@ -936,6 +945,7 @@ where ) .into_rpc() .into(), + RethRpcModule::Ots => OtterscanApi::new(eth_api.clone()).into_rpc().into(), RethRpcModule::Reth => { RethApi::new(self.provider.clone(), Box::new(self.executor.clone())) .into_rpc() @@ -1774,6 +1784,7 @@ mod tests { "trace" => RethRpcModule::Trace, "web3" => RethRpcModule::Web3, "rpc" => RethRpcModule::Rpc, + "ots" => RethRpcModule::Ots, "reth" => RethRpcModule::Reth, ); } diff --git a/crates/rpc/rpc-types/src/lib.rs b/crates/rpc/rpc-types/src/lib.rs index 64d6537291e3..8b3542e6d35f 100644 --- a/crates/rpc/rpc-types/src/lib.rs +++ b/crates/rpc/rpc-types/src/lib.rs @@ -17,8 +17,10 @@ mod admin; mod eth; +mod otterscan; mod rpc; pub use admin::*; pub use eth::*; +pub use otterscan::*; pub use rpc::*; diff --git a/crates/rpc/rpc-types/src/otterscan.rs b/crates/rpc/rpc-types/src/otterscan.rs new file mode 100644 index 000000000000..57e12e8bf123 --- /dev/null +++ b/crates/rpc/rpc-types/src/otterscan.rs @@ -0,0 +1,97 @@ +use crate::{Block, Transaction, TransactionReceipt}; +use reth_primitives::{Address, Bytes, U256}; +use serde::{Deserialize, Serialize}; + +/// Operation type enum for `InternalOperation` struct +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +pub enum OperationType { + /// Operation Transfer + OpTransfer = 0, + /// Operation Contract self destruct + OpSelfDestruct = 1, + /// Operation Create + OpCreate = 2, + /// Operation Create2 + OpCreate2 = 3, +} + +/// Custom struct for otterscan `getInternalOperations` RPC response +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +pub struct InternalOperation { + r#type: OperationType, + from: Address, + to: Address, + value: u128, +} + +/// Custom struct for otterscan `traceTransaction` RPC response +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +pub struct TraceEntry { + r#type: String, + depth: u32, + from: Address, + to: Address, + value: u128, + input: Bytes, +} + +/// Internal issuance struct for `BlockDetails` struct +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct InternalIssuance { + block_reward: U256, + uncle_reward: U256, + issuance: U256, +} + +/// Custom `Block` struct that includes transaction count for Otterscan responses +#[derive(Clone, Debug, Eq, 
+#[serde(rename_all = "camelCase")]
+pub struct OtsBlock {
+    #[serde(flatten)]
+    block: Block,
+    transaction_count: usize,
+}
+
+/// Custom struct for otterscan `getBlockDetails` RPC response
+#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct BlockDetails {
+    block: OtsBlock,
+    issuance: InternalIssuance,
+    total_fees: U256,
+}
+
+/// Custom transaction receipt struct for otterscan `OtsBlockTransactions` struct
+#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct OtsTransactionReceipt {
+    #[serde(flatten)]
+    receipt: TransactionReceipt,
+    timestamp: u64,
+}
+
+/// Custom struct for otterscan `getBlockTransactions` RPC response
+#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
+pub struct OtsBlockTransactions {
+    fullblock: OtsBlock,
+    receipts: Vec<OtsTransactionReceipt>,
+}
+
+/// Custom struct for otterscan `searchTransactionsAfter` and `searchTransactionsBefore` RPC
+/// responses
+#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct TransactionsWithReceipts {
+    txs: Vec<Transaction>,
+    receipts: Vec<TransactionReceipt>,
+    first_page: bool,
+    last_page: bool,
+}
+
+/// Custom struct for otterscan `getContractCreator` RPC responses
+#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
+pub struct ContractCreator {
+    tx: Transaction,
+    creator: Address,
+}
diff --git a/crates/rpc/rpc/src/lib.rs b/crates/rpc/rpc/src/lib.rs
index 64083f746b2b..b885608139a7 100644
--- a/crates/rpc/rpc/src/lib.rs
+++ b/crates/rpc/rpc/src/lib.rs
@@ -37,6 +37,7 @@ mod engine;
 pub mod eth;
 mod layers;
 mod net;
+mod otterscan;
 mod reth;
 mod rpc;
 mod trace;
@@ -50,6 +51,7 @@ pub use engine::{EngineApi, EngineEthApi};
 pub use eth::{EthApi, EthApiSpec, EthFilter, EthPubSub, EthSubscriptionIdProvider};
 pub use layers::{AuthLayer, AuthValidator, Claims, JwtAuthValidator, JwtError, JwtSecret};
 pub use net::NetApi;
+pub use otterscan::OtterscanApi;
 pub use reth::RethApi;
 pub use rpc::RPCApi;
 pub use trace::TraceApi;
diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs
new file mode 100644
index 000000000000..93d0ba167e24
--- /dev/null
+++ b/crates/rpc/rpc/src/otterscan.rs
@@ -0,0 +1,111 @@
+#![allow(dead_code, unused_variables)]
+use crate::result::internal_rpc_err;
+use async_trait::async_trait;
+use jsonrpsee::core::RpcResult;
+use reth_primitives::{Address, BlockId, BlockNumberOrTag, TxHash, H256};
+use reth_rpc_api::{EthApiServer, OtterscanServer};
+use reth_rpc_types::{
+    BlockDetails, ContractCreator, InternalOperation, OtsBlockTransactions, TraceEntry,
+    Transaction, TransactionsWithReceipts,
+};
+
+/// Otterscan Api
+#[derive(Debug)]
+pub struct OtterscanApi<Eth> {
+    eth: Eth,
+}
+
+impl<Eth> OtterscanApi<Eth> {
+    /// Creates a new instance of `Otterscan`.
+    pub fn new(eth: Eth) -> Self {
+        Self { eth }
+    }
+}
+
+#[async_trait]
+impl<Eth> OtterscanServer for OtterscanApi<Eth>
+where
+    Eth: EthApiServer,
+{
+    /// Handler for `ots_hasCode`
+    async fn has_code(&self, address: Address, block_number: Option<BlockId>) -> RpcResult<bool> {
+        Err(internal_rpc_err("unimplemented"))
+    }
+
+    /// Handler for `ots_getApiLevel`
+    async fn get_api_level(&self) -> RpcResult<u64> {
+        Err(internal_rpc_err("unimplemented"))
+    }
+
+    /// Handler for `ots_getInternalOperations`
+    async fn get_internal_operations(&self, tx_hash: TxHash) -> RpcResult<Vec<InternalOperation>> {
+        Err(internal_rpc_err("unimplemented"))
+    }
+
+    /// Handler for `ots_getTransactionError`
+    async fn get_transaction_error(&self, tx_hash: TxHash) -> RpcResult<String> {
+        Err(internal_rpc_err("unimplemented"))
+    }
+
+    /// Handler for `ots_traceTransaction`
+    async fn trace_transaction(&self, tx_hash: TxHash) -> RpcResult<Vec<TraceEntry>> {
+        Err(internal_rpc_err("unimplemented"))
+    }
+
+    /// Handler for `ots_getBlockDetails`
+    async fn get_block_details(
+        &self,
+        block_number: BlockNumberOrTag,
+    ) -> RpcResult<Option<BlockDetails>> {
+        Err(internal_rpc_err("unimplemented"))
+    }
+
+    /// Handler for `getBlockDetailsByHash`
+    async fn get_block_details_by_hash(&self, block_hash: H256) -> RpcResult<Option<BlockDetails>> {
+        Err(internal_rpc_err("unimplemented"))
+    }
+
+    /// Handler for `getBlockTransactions`
+    async fn get_block_transactions(
+        &self,
+        block_number: BlockNumberOrTag,
+        page_number: usize,
+        page_size: usize,
+    ) -> RpcResult<OtsBlockTransactions> {
+        Err(internal_rpc_err("unimplemented"))
+    }
+
+    /// Handler for `searchTransactionsBefore`
+    async fn search_transactions_before(
+        &self,
+        address: Address,
+        block_number: BlockNumberOrTag,
+        page_size: usize,
+    ) -> RpcResult<TransactionsWithReceipts> {
+        Err(internal_rpc_err("unimplemented"))
+    }
+
+    /// Handler for `searchTransactionsAfter`
+    async fn search_transactions_after(
+        &self,
+        address: Address,
+        block_number: BlockNumberOrTag,
+        page_size: usize,
+    ) -> RpcResult<TransactionsWithReceipts> {
+        Err(internal_rpc_err("unimplemented"))
+    }
+
+    /// Handler for `getTransactionBySenderAndNonce`
+    async fn get_transaction_by_sender_and_nonce(
+        &self,
+        sender: Address,
+        nonce: u64,
+    ) -> RpcResult<Option<Transaction>> {
+        Err(internal_rpc_err("unimplemented"))
+    }
+
+    /// Handler for `getContractCreator`
+    async fn get_contract_creator(&self, address: Address) -> RpcResult<Option<ContractCreator>> {
+        Err(internal_rpc_err("unimplemented"))
+    }
+}
From bdb23b3703bf495cb308defa433fc9b0ef283408 Mon Sep 17 00:00:00 2001
From: Matthias Seitz 
Date: Wed, 19 Jul 2023 02:11:26 +0200
Subject: [PATCH 203/722] docs: rm link to make cargo docs happy (#3843)

---
 crates/transaction-pool/src/validate/mod.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs
index f63c08c334c2..d2f95de1da13 100644
--- a/crates/transaction-pool/src/validate/mod.rs
+++ b/crates/transaction-pool/src/validate/mod.rs
@@ -13,7 +13,7 @@ use std::{fmt, time::Instant};
 mod eth;
 mod task;

-/// A [TransactionValidator] implementation that validates ethereum transaction.
+/// A `TransactionValidator` implementation that validates ethereum transaction.
 pub use eth::{EthTransactionValidator, EthTransactionValidatorBuilder};

 /// A spawnable task that performs transaction validation.
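For context, once the `ots_` namespace above is enabled on a node (e.g. `--http --http.api eth,ots`, per the `"ots"` module mapping in the builder patch), the endpoints can be exercised over plain JSON-RPC. A minimal sketch using the `jsonrpsee` HTTP client — the port and flags are assumptions about a local setup, and at this point in the series every handler is still a stub, so the call answers with an "unimplemented" error:

```rust
use jsonrpsee::{core::client::ClientT, http_client::HttpClientBuilder, rpc_params};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Assumes a local reth node started with `--http --http.api eth,ots`
    // on the default HTTP port (assumption for this sketch).
    let client = HttpClientBuilder::default().build("http://localhost:8545")?;

    // `ots_getApiLevel` reports which Otterscan API version the node speaks;
    // with the stub handlers above this currently returns an internal error.
    let level: Result<u64, _> = client.request("ots_getApiLevel", rpc_params![]).await;
    println!("ots_getApiLevel -> {level:?}");
    Ok(())
}
```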
From a38b578218ca41fdc1679c6ee6abc100cad9cc2e Mon Sep 17 00:00:00 2001 From: Bjerg Date: Wed, 19 Jul 2023 16:24:00 +0200 Subject: [PATCH 204/722] ci: increase number of unit test partitions (#3848) --- .github/workflows/unit.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml index 041eadbe800a..fc4b5123b231 100644 --- a/.github/workflows/unit.yml +++ b/.github/workflows/unit.yml @@ -18,7 +18,7 @@ jobs: runs-on: ubuntu-20.04 strategy: matrix: - partition: [1, 2, 3, 4] + partition: [1, 2, 3, 4, 5] steps: - name: Checkout sources uses: actions/checkout@v3 @@ -101,4 +101,4 @@ jobs: - name: Decide whether the needed jobs succeeded or failed uses: re-actors/alls-green@release/v1 with: - jobs: ${{ toJSON(needs) }} \ No newline at end of file + jobs: ${{ toJSON(needs) }} From 4a86aae213b84e22b315899f4c9eed557673576c Mon Sep 17 00:00:00 2001 From: Bjerg Date: Wed, 19 Jul 2023 16:24:16 +0200 Subject: [PATCH 205/722] ci: re-enable workflows on main (#3849) --- .github/workflows/bench.yml | 4 +++- .github/workflows/ci.yml | 4 +++- .github/workflows/fuzz.yml | 4 +++- .github/workflows/integration.yml | 2 ++ .github/workflows/unit.yml | 2 ++ 5 files changed, 13 insertions(+), 3 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 5fec9c0790da..8dec739e7023 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -1,6 +1,8 @@ on: pull_request: merge_group: + push: + branches: [main] env: RUSTFLAGS: -D warnings @@ -79,4 +81,4 @@ jobs: - name: Decide whether the needed jobs succeeded or failed uses: re-actors/alls-green@release/v1 with: - jobs: ${{ toJSON(needs) }} \ No newline at end of file + jobs: ${{ toJSON(needs) }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0164ead1a15f..f97709127425 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,6 +1,8 @@ on: pull_request: merge_group: + push: + branches: [main] env: RUSTFLAGS: -D warnings @@ -68,4 +70,4 @@ jobs: - name: Decide whether the needed jobs succeeded or failed uses: re-actors/alls-green@release/v1 with: - jobs: ${{ toJSON(needs) }} \ No newline at end of file + jobs: ${{ toJSON(needs) }} diff --git a/.github/workflows/fuzz.yml b/.github/workflows/fuzz.yml index 61459252cbd9..8f2760024bdd 100644 --- a/.github/workflows/fuzz.yml +++ b/.github/workflows/fuzz.yml @@ -1,6 +1,8 @@ on: pull_request: merge_group: + push: + branches: [main] env: RUSTFLAGS: -D warnings @@ -70,4 +72,4 @@ jobs: #- name: Decide whether the needed jobs succeeded or failed # uses: re-actors/alls-green@release/v1 # with: - # jobs: ${{ toJSON(needs) }} \ No newline at end of file + # jobs: ${{ toJSON(needs) }} diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 07c58b8675df..7bb68ccac4e6 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -1,6 +1,8 @@ on: pull_request: merge_group: + push: + branches: [main] env: RUSTFLAGS: -D warnings diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml index fc4b5123b231..d0aa7243eed7 100644 --- a/.github/workflows/unit.yml +++ b/.github/workflows/unit.yml @@ -1,6 +1,8 @@ on: pull_request: merge_group: + push: + branches: [main] env: RUSTFLAGS: -D warnings From 4ed7abd1a197d57aa92e9c7ddaa6ad416a1b6443 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 19 Jul 2023 10:44:35 -0400 Subject: [PATCH 206/722] fix: use empty pipeline and 
executor by default (#3845)

---
 crates/consensus/beacon/src/engine/mod.rs | 33 +++++++++++++++++++----
 1 file changed, 28 insertions(+), 5 deletions(-)

diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs
index 8be81f2bbdce..aa894d180f84 100644
--- a/crates/consensus/beacon/src/engine/mod.rs
+++ b/crates/consensus/beacon/src/engine/mod.rs
@@ -1821,25 +1821,33 @@ mod tests {
     }

     /// Represents either test pipeline outputs, or real pipeline configuration.
-    #[derive(Default)]
     enum TestPipelineConfig {
         /// Test pipeline outputs.
         Test(VecDeque<Result<ExecOutput, StageError>>),
         /// Real pipeline configuration.
-        #[default]
         Real,
     }

+    impl Default for TestPipelineConfig {
+        fn default() -> Self {
+            Self::Test(VecDeque::new())
+        }
+    }
+
     /// Represents either test executor results, or real executor configuration.
-    #[derive(Default)]
     enum TestExecutorConfig {
         /// Test executor results.
         Test(Vec<PostState>),
         /// Real executor configuration.
-        #[default]
         Real,
     }

+    impl Default for TestExecutorConfig {
+        fn default() -> Self {
+            Self::Test(Vec::new())
+        }
+    }
+
     /// A type that represents one of two possible executor factories.
     #[derive(Debug, Clone)]
     enum EitherExecutorFactory {
@@ -1963,6 +1971,18 @@ mod tests {
             self
         }

+        /// Uses the real pipeline instead of a pipeline with empty exec outputs.
+        fn with_real_pipeline(mut self) -> Self {
+            self.pipeline_config = TestPipelineConfig::Real;
+            self
+        }
+
+        /// Uses the real executor instead of an executor with empty results.
+        fn with_real_executor(mut self) -> Self {
+            self.executor_config = TestExecutorConfig::Real;
+            self
+        }
+
         /// Sets the client to use for network operations.
         #[allow(dead_code)]
         fn with_client(mut self, client: Client) -> Self {
@@ -2655,7 +2675,10 @@ mod tests {
         );

         let (consensus_engine, env) =
-            TestConsensusEngineBuilder::::new(chain_spec.clone()).build();
+            TestConsensusEngineBuilder::::new(chain_spec.clone())
+                .with_real_pipeline()
+                .with_real_executor()
+                .build();

         let genesis =
             SealedBlock { header: chain_spec.sealed_genesis_header(), ..Default::default() };

From 873607502cbbe5abeed111154d722a1682df701a Mon Sep 17 00:00:00 2001
From: int88 <106391185+int88@users.noreply.github.com>
Date: Thu, 20 Jul 2023 01:27:08 +0800
Subject: [PATCH 207/722] feat: add reorg metrics (#3824)

---
 crates/blockchain-tree/src/blockchain_tree.rs | 1 +
 crates/blockchain-tree/src/metrics.rs         | 4 +++-
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs
index 819d60d4c38c..6c5f2fe3d626 100644
--- a/crates/blockchain-tree/src/blockchain_tree.rs
+++ b/crates/blockchain-tree/src/blockchain_tree.rs
@@ -1003,6 +1003,7 @@ impl BlockchainTree
             };
             // insert old canon chain
             self.insert_chain(AppendableChain::new(old_canon_chain));
+            self.metrics.reorgs.increment(1);
         } else {
             // error here to confirm that we are reverting nothing from db.
error!(target: "blockchain_tree", "Reverting nothing from db on block: #{:?}", block_hash); diff --git a/crates/blockchain-tree/src/metrics.rs b/crates/blockchain-tree/src/metrics.rs index 38610c5a69bd..864fac9474c5 100644 --- a/crates/blockchain-tree/src/metrics.rs +++ b/crates/blockchain-tree/src/metrics.rs @@ -1,5 +1,5 @@ use reth_metrics::{ - metrics::{self, Gauge}, + metrics::{self, Counter, Gauge}, Metrics, }; @@ -11,6 +11,8 @@ pub struct TreeMetrics { pub sidechains: Gauge, /// The highest block number in the canonical chain pub canonical_chain_height: Gauge, + /// The number of reorgs + pub reorgs: Counter, } /// Metrics for the blockchain tree block buffer From 0fabd8317705b7eae35d1e532a9ae76e493321cc Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 19 Jul 2023 14:15:38 -0400 Subject: [PATCH 208/722] chore: refactor autoseal execution into method (#3844) --- crates/consensus/auto-seal/src/lib.rs | 146 ++++++++++++++++++++++++- crates/consensus/auto-seal/src/task.rs | 119 ++++---------------- 2 files changed, 159 insertions(+), 106 deletions(-) diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index 2cff15528c1f..c9f41a010c6b 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -21,16 +21,26 @@ //! be mined. use reth_beacon_consensus::BeaconEngineMessage; -use reth_interfaces::consensus::{Consensus, ConsensusError}; +use reth_interfaces::{ + consensus::{Consensus, ConsensusError}, + executor::{BlockExecutionError, BlockValidationError}, +}; use reth_primitives::{ - BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, ChainSpec, Header, SealedBlock, - SealedHeader, H256, U256, + constants::{EMPTY_RECEIPTS, EMPTY_TRANSACTIONS, ETHEREUM_BLOCK_GAS_LIMIT}, + proofs, Address, Block, BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, ChainSpec, + Header, ReceiptWithBloom, SealedBlock, SealedHeader, TransactionSigned, EMPTY_OMMER_ROOT, H256, + U256, }; -use reth_provider::{BlockReaderIdExt, CanonStateNotificationSender}; +use reth_provider::{BlockReaderIdExt, CanonStateNotificationSender, PostState, StateProvider}; +use reth_revm::executor::Executor; use reth_transaction_pool::TransactionPool; -use std::{collections::HashMap, sync::Arc}; +use std::{ + collections::HashMap, + sync::Arc, + time::{SystemTime, UNIX_EPOCH}, +}; use tokio::sync::{mpsc::UnboundedSender, RwLock, RwLockReadGuard, RwLockWriteGuard}; -use tracing::trace; +use tracing::{trace, warn}; mod client; mod mode; @@ -182,6 +192,7 @@ impl Storage { } } +/// In-memory storage for the chain the auto seal engine is building. #[derive(Default, Debug)] pub(crate) struct StorageInner { /// Headers buffered for download. @@ -232,4 +243,127 @@ impl StorageInner { self.bodies.insert(self.best_hash, body); self.hash_to_number.insert(self.best_hash, self.best_block); } + + /// Fills in pre-execution header fields based on the current best block and given + /// transactions. 
+    pub(crate) fn build_header_template(&self, transactions: &Vec<TransactionSigned>) -> Header {
+        // check previous block for base fee
+        let base_fee_per_gas =
+            self.headers.get(&self.best_block).and_then(|parent| parent.next_block_base_fee());
+
+        let mut header = Header {
+            parent_hash: self.best_hash,
+            ommers_hash: EMPTY_OMMER_ROOT,
+            beneficiary: Default::default(),
+            state_root: Default::default(),
+            transactions_root: Default::default(),
+            receipts_root: Default::default(),
+            withdrawals_root: None,
+            logs_bloom: Default::default(),
+            difficulty: U256::from(2),
+            number: self.best_block + 1,
+            gas_limit: ETHEREUM_BLOCK_GAS_LIMIT,
+            gas_used: 0,
+            timestamp: SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(),
+            mix_hash: Default::default(),
+            nonce: 0,
+            base_fee_per_gas,
+            extra_data: Default::default(),
+        };
+
+        header.transactions_root = if transactions.is_empty() {
+            EMPTY_TRANSACTIONS
+        } else {
+            proofs::calculate_transaction_root(transactions)
+        };
+
+        header
+    }
+
+    /// Executes the block with the given block and senders, on the provided [Executor].
+    ///
+    /// This returns the poststate from execution and post-block changes, as well as the gas used.
+    pub(crate) fn execute<DB: StateProvider>(
+        &mut self,
+        block: &Block,
+        executor: &mut Executor<DB>,
+        senders: Vec<Address>,
+    ) -> Result<(PostState, u64), BlockExecutionError> {
+        trace!(target: "consensus::auto", transactions=?&block.body, "executing transactions");
+
+        let (post_state, gas_used) =
+            executor.execute_transactions(block, U256::ZERO, Some(senders.clone()))?;
+
+        // apply post block changes
+        let post_state = executor.apply_post_block_changes(block, U256::ZERO, post_state)?;
+
+        Ok((post_state, gas_used))
+    }
+
+    /// Fills in the post-execution header fields based on the given PostState and gas used.
+    /// In doing this, the state root is calculated and the final header is returned.
+    pub(crate) fn complete_header<S: StateProvider>(
+        &self,
+        mut header: Header,
+        post_state: &PostState,
+        executor: &mut Executor<S>,
+        gas_used: u64,
+    ) -> Header {
+        let receipts = post_state.receipts(header.number);
+        header.receipts_root = if receipts.is_empty() {
+            EMPTY_RECEIPTS
+        } else {
+            let receipts_with_bloom =
+                receipts.iter().map(|r| r.clone().into()).collect::<Vec<ReceiptWithBloom>>();
+            proofs::calculate_receipt_root(&receipts_with_bloom)
+        };
+
+        header.gas_used = gas_used;
+
+        // calculate the state root
+        let state_root = executor.db().db.0.state_root(post_state.clone()).unwrap();
+        header.state_root = state_root;
+        header
+    }
+
+    /// Builds and executes a new block with the given transactions, on the provided [Executor].
+    ///
+    /// This returns the header of the executed block, as well as the poststate from execution.
+    pub(crate) fn build_and_execute<DB: StateProvider>(
+        &mut self,
+        transactions: Vec<TransactionSigned>,
+        executor: &mut Executor<DB>,
+    ) -> Result<(SealedHeader, PostState), BlockExecutionError> {
+        let header = self.build_header_template(&transactions);
+
+        let block = Block { header, body: transactions, ommers: vec![], withdrawals: None };
+
+        let senders =
+            block.body.iter().map(|tx| tx.recover_signer()).collect::<Option<Vec<_>>>().ok_or(
+                BlockExecutionError::Validation(BlockValidationError::SenderRecoveryError),
+            )?;
+
+        trace!(target: "consensus::auto", transactions=?&block.body, "executing transactions");
+
+        // now execute the block
+        let (post_state, gas_used) = self.execute(&block, executor, senders)?;
+
+        let Block { header, body, .. } = block;
+        let body = BlockBody { transactions: body, ommers: vec![], withdrawals: None };
+
+        trace!(target: "consensus::auto", ?post_state, ?header, ?body, "executed block, calculating state root and completing header");
+
+        // fill in the rest of the fields
+        let header = self.complete_header(header, &post_state, executor, gas_used);
+
+        trace!(target: "consensus::auto", root=?header.state_root, ?body, "calculated root");
+
+        // finally insert into storage
+        self.insert_new_block(header.clone(), body);
+
+        // set new header with hash that should have been updated by insert_new_block
+        let new_header = header.seal(self.best_hash);
+
+        Ok((new_header, post_state))
+    }
 }
diff --git a/crates/consensus/auto-seal/src/task.rs b/crates/consensus/auto-seal/src/task.rs
index 5217661c4cd6..6d77784b1add 100644
--- a/crates/consensus/auto-seal/src/task.rs
+++ b/crates/consensus/auto-seal/src/task.rs
@@ -2,11 +2,7 @@ use crate::{mode::MiningMode, Storage};
 use futures_util::{future::BoxFuture, FutureExt};
 use reth_beacon_consensus::{BeaconEngineMessage, ForkchoiceStatus};
 use reth_interfaces::consensus::ForkchoiceState;
-use reth_primitives::{
-    constants::{EMPTY_RECEIPTS, EMPTY_TRANSACTIONS, ETHEREUM_BLOCK_GAS_LIMIT},
-    proofs, Block, BlockBody, ChainSpec, Header, IntoRecoveredTransaction, ReceiptWithBloom,
-    SealedBlockWithSenders, EMPTY_OMMER_ROOT, U256,
-};
+use reth_primitives::{Block, ChainSpec, IntoRecoveredTransaction, SealedBlockWithSenders};
 use reth_provider::{CanonChainTracker, CanonStateNotificationSender, Chain, StateProviderFactory};
 use reth_revm::{
     database::{State, SubState},
@@ -20,11 +16,10 @@ use std::{
     pin::Pin,
     sync::Arc,
     task::{Context, Poll},
-    time::{SystemTime, UNIX_EPOCH},
 };
 use tokio::sync::{mpsc::UnboundedSender, oneshot};
 use tokio_stream::wrappers::UnboundedReceiverStream;
-use tracing::{debug, error, trace, warn};
+use tracing::{debug, error, warn};

 /// A Future that listens for new ready transactions and puts new blocks into storage
 pub struct MiningTask {
@@ -123,104 +118,28 @@ where
                 this.insert_task = Some(Box::pin(async move {
                     let mut storage = storage.write().await;

-                    // check previous block for base fee
-                    let base_fee_per_gas = storage
-                        .headers
-                        .get(&storage.best_block)
-                        .and_then(|parent| parent.next_block_base_fee());
-
-                    let mut header = Header {
-                        parent_hash: storage.best_hash,
-                        ommers_hash: EMPTY_OMMER_ROOT,
-                        beneficiary: Default::default(),
-                        state_root: Default::default(),
-                        transactions_root: Default::default(),
-                        receipts_root: Default::default(),
-                        withdrawals_root: None,
-                        logs_bloom: Default::default(),
-                        difficulty: U256::from(2),
-                        number: storage.best_block + 1,
-                        gas_limit: ETHEREUM_BLOCK_GAS_LIMIT,
-                        gas_used: 0,
-                        timestamp: SystemTime::now()
-                            .duration_since(UNIX_EPOCH)
-                            .unwrap_or_default()
-                            .as_secs(),
-                        mix_hash: Default::default(),
-                        nonce: 0,
-                        base_fee_per_gas,
-                        extra_data: Default::default(),
-                    };
-
-                    let transactions = transactions
+                    let (transactions, senders): (Vec<_>, Vec<_>) = transactions
                         .into_iter()
-                        .map(|tx| tx.to_recovered_transaction().into_signed())
-                        .collect::<Vec<_>>();
-
-                    header.transactions_root = if transactions.is_empty() {
-                        EMPTY_TRANSACTIONS
-                    } else {
-                        proofs::calculate_transaction_root(&transactions)
-                    };
-
-                    let block =
-                        Block { header, body: transactions, ommers: vec![], withdrawals: None };
+                        .map(|tx| {
+                            let recovered = tx.to_recovered_transaction();
+                            let signer = recovered.signer();
+                            (recovered.into_signed(), signer)
+                        })
+                        .unzip();

                     // execute the new block
                     let substate = SubState::new(State::new(client.latest().unwrap()));
                     let mut executor = Executor::new(chain_spec, substate);

-                    trace!(target: "consensus::auto", transactions=?&block.body, "executing transactions");
-
-                    let senders = block
-                        .body
-                        .iter()
-                        .map(|tx| tx.recover_signer())
-                        .collect::<Option<Vec<_>>>()?;
-
-                    match executor.execute_transactions(&block, U256::ZERO, Some(senders.clone())) {
-                        Ok((post_state, gas_used)) => {
-                            // apply post block changes
-                            let post_state = executor
-                                .apply_post_block_changes(&block, U256::ZERO, post_state)
-                                .unwrap();
-
-                            let Block { mut header, body, .. } = block;
-
+                    match storage.build_and_execute(transactions.clone(), &mut executor) {
+                        Ok((new_header, post_state)) => {
                             // clear all transactions from pool
-                            pool.remove_transactions(body.iter().map(|tx| tx.hash()));
-
-                            let receipts = post_state.receipts(header.number);
-                            header.receipts_root = if receipts.is_empty() {
-                                EMPTY_RECEIPTS
-                            } else {
-                                let receipts_with_bloom = receipts
-                                    .iter()
-                                    .map(|r| r.clone().into())
-                                    .collect::<Vec<ReceiptWithBloom>>();
-                                proofs::calculate_receipt_root(&receipts_with_bloom)
-                            };
-                            let transactions = body.clone();
-                            let body =
-                                BlockBody { transactions: body, ommers: vec![], withdrawals: None };
-                            header.gas_used = gas_used;
-
-                            trace!(target: "consensus::auto", ?post_state, ?header, ?body, "executed block, calculating root");
-
-                            // calculate the state root
-                            let state_root =
-                                executor.db().db.0.state_root(post_state.clone()).unwrap();
-                            header.state_root = state_root;
-
-                            trace!(target: "consensus::auto", root=?header.state_root, ?body, "calculated root");
-
-                            storage.insert_new_block(header.clone(), body);
+                            pool.remove_transactions(transactions.iter().map(|tx| tx.hash()));

-                            let new_hash = storage.best_hash;
                             let state = ForkchoiceState {
-                                head_block_hash: new_hash,
-                                finalized_block_hash: new_hash,
-                                safe_block_hash: new_hash,
+                                head_block_hash: new_header.hash,
+                                finalized_block_hash: new_header.hash,
+                                safe_block_hash: new_header.hash,
                             };
                             drop(storage);
@@ -261,7 +180,7 @@ where

                             // seal the block
                             let block = Block {
-                                header: header.clone(),
+                                header: new_header.clone().unseal(),
                                 body: transactions,
                                 ommers: vec![],
                                 withdrawals: None,
@@ -273,9 +192,9 @@ where
                                 .expect("senders are valid");

                             // update canon chain for rpc
-                            client.set_canonical_head(header.clone().seal(new_hash));
-                            client.set_safe(header.clone().seal(new_hash));
-                            client.set_finalized(header.clone().seal(new_hash));
+                            client.set_canonical_head(new_header.clone());
+                            client.set_safe(new_header.clone());
+                            client.set_finalized(new_header.clone());

                             debug!(target: "consensus::auto", header=?sealed_block_with_senders.hash(), "sending block notification");

From 0fcb338e256a6f139b10fbbf58e9720de9e413b2 Mon Sep 17 00:00:00 2001
From: Matthias Seitz 
Date: Wed, 19 Jul 2023 21:17:29 +0200
Subject: [PATCH 209/722] fix(rpc): trace get is index not trace address (#3852)

---
 crates/rpc/rpc-api/src/trace.rs |  5 +++++
 crates/rpc/rpc/src/trace.rs     | 27 +++++++++++++++++++++++----
 2 files changed, 28 insertions(+), 4 deletions(-)

diff --git a/crates/rpc/rpc-api/src/trace.rs b/crates/rpc/rpc-api/src/trace.rs
index ab4f36758dee..d38af056d79e 100644
--- a/crates/rpc/rpc-api/src/trace.rs
+++ b/crates/rpc/rpc-api/src/trace.rs
@@ -71,6 +71,11 @@ pub trait TraceApi {
     async fn trace_filter(&self, filter: TraceFilter) -> RpcResult<Vec<LocalizedTransactionTrace>>;

     /// Returns transaction trace at given index.
+    ///
+    /// `indices` represent the index positions of the traces.
+    ///
+    /// Note: This expects a list of indices but only one is supported since this function returns a
+    /// single [LocalizedTransactionTrace].
     #[method(name = "get")]
     async fn trace_get(
         &self,
diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs
index 2fcf2d237b7d..ff2d09e50c73 100644
--- a/crates/rpc/rpc/src/trace.rs
+++ b/crates/rpc/rpc/src/trace.rs
@@ -268,17 +268,36 @@ where
             .await
     }

-    /// Returns transaction trace with the given address.
+    /// Returns transaction trace objects at the given index
+    ///
+    /// Note: For compatibility reasons this only supports 1 single index, since this method is
+    /// supposed to return a single trace. See also:
+    ///
+    /// This returns `None` if `indices` is empty
     pub async fn trace_get(
         &self,
         hash: H256,
-        trace_address: Vec<usize>,
+        indices: Vec<usize>,
+    ) -> EthResult<Option<LocalizedTransactionTrace>> {
+        if indices.len() != 1 {
+            // The OG impl failed if it gets more than a single index
+            return Ok(None)
+        }
+        self.trace_get_index(hash, indices[0]).await
+    }
+
+    /// Returns transaction trace object at the given index.
+    ///
+    /// Returns `None` if the trace object at that index does not exist
+    pub async fn trace_get_index(
+        &self,
+        hash: H256,
+        index: usize,
     ) -> EthResult<Option<LocalizedTransactionTrace>> {
         match self.trace_transaction(hash).await? {
             None => Ok(None),
             Some(traces) => {
-                let trace =
-                    traces.into_iter().find(|trace| trace.trace.trace_address == trace_address);
+                let trace = traces.into_iter().nth(index);
                 Ok(trace)
             }
         }
From 7476120d8170d7269ec8525f173edac995e12385 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Altu=C4=9F=20Bakan?= 
Date: Wed, 19 Jul 2023 21:26:16 +0200
Subject: [PATCH 210/722] feat: wrap gas limit into a new type (#3841)

Co-authored-by: Matthias Seitz 
---
 bin/reth/src/args/rpc_server_args.rs    |  6 ++---
 crates/rpc/rpc-builder/src/constants.rs |  7 ------
 crates/rpc/rpc-builder/src/eth.rs       |  4 +--
 crates/rpc/rpc/src/eth/api/mod.rs       | 33 +++++++++++++++++++++++--
 crates/rpc/rpc/src/eth/mod.rs           |  2 +-
 5 files changed, 37 insertions(+), 15 deletions(-)

diff --git a/bin/reth/src/args/rpc_server_args.rs b/bin/reth/src/args/rpc_server_args.rs
index 4dbde75cbd41..452f6ba33e2e 100644
--- a/bin/reth/src/args/rpc_server_args.rs
+++ b/bin/reth/src/args/rpc_server_args.rs
@@ -17,13 +17,13 @@ use reth_rpc::{
         DEFAULT_BLOCK_CACHE_MAX_LEN, DEFAULT_ENV_CACHE_MAX_LEN, DEFAULT_RECEIPT_CACHE_MAX_LEN,
         },
         gas_oracle::GasPriceOracleConfig,
+        RPC_DEFAULT_GAS_CAP,
     },
     JwtError, JwtSecret,
 };
 use reth_rpc_builder::{
     auth::{AuthServerConfig, AuthServerHandle},
     constants,
-    constants::RPC_DEFAULT_GAS_CAP,
     error::RpcError,
     EthConfig, IpcServerBuilder, RethRpcModule, RpcModuleBuilder, RpcModuleConfig,
     RpcModuleSelection, RpcServerConfig, RpcServerHandle, ServerBuilder, TransportRpcModuleConfig,
@@ -139,7 +139,7 @@ pub struct RpcServerArgs {
         alias = "rpc.gascap",
         value_name = "GAS_CAP",
         value_parser = RangedU64ValueParser::<u64>::new().range(1..),
-        default_value_t = RPC_DEFAULT_GAS_CAP
+        default_value_t = RPC_DEFAULT_GAS_CAP.into()
     )]
     pub rpc_gas_cap: u64,

@@ -513,7 +513,7 @@ mod tests {
     fn test_rpc_gas_cap() {
         let args = CommandParser::<RpcServerArgs>::parse_from(["reth"]).args;
         let config = args.eth_config();
-        assert_eq!(config.rpc_gas_cap, RPC_DEFAULT_GAS_CAP);
+        assert_eq!(config.rpc_gas_cap, Into::<u64>::into(RPC_DEFAULT_GAS_CAP));

         let args =
             CommandParser::<RpcServerArgs>::parse_from(["reth", "--rpc.gascap", "1000"]).args;
diff --git a/crates/rpc/rpc-builder/src/constants.rs b/crates/rpc/rpc-builder/src/constants.rs
index 6768ca62a659..a1b2bc36a82d 100644
--- a/crates/rpc/rpc-builder/src/constants.rs
+++ 
b/crates/rpc/rpc-builder/src/constants.rs
@@ -7,13 +7,6 @@ pub const DEFAULT_WS_RPC_PORT: u16 = 8546;
 /// The default port for the auth server.
 pub const DEFAULT_AUTH_PORT: u16 = 8551;

-/// The default gas limit for eth_call and adjacent calls.
-///
-/// This is different from the default to regular 30M block gas limit
-/// [ETHEREUM_BLOCK_GAS_LIMIT](reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT) to allow for
-/// more complex calls.
-pub const RPC_DEFAULT_GAS_CAP: u64 = 50_000_000;
-
 /// The default IPC endpoint
 #[cfg(windows)]
 pub const DEFAULT_IPC_ENDPOINT: &str = r"\\.\pipe\reth.ipc";
diff --git a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs
index 5b2d6780a531..34ec4989c62b 100644
--- a/crates/rpc/rpc-builder/src/eth.rs
+++ b/crates/rpc/rpc-builder/src/eth.rs
@@ -1,8 +1,8 @@
-use crate::constants::RPC_DEFAULT_GAS_CAP;
 use reth_rpc::{
     eth::{
         cache::{EthStateCache, EthStateCacheConfig},
         gas_oracle::GasPriceOracleConfig,
+        RPC_DEFAULT_GAS_CAP,
     },
     EthApi, EthFilter, EthPubSub,
 };
@@ -51,7 +51,7 @@ impl Default for EthConfig {
             gas_oracle: GasPriceOracleConfig::default(),
             max_tracing_requests: DEFAULT_MAX_TRACING_REQUESTS,
             max_logs_per_response: DEFAULT_MAX_LOGS_PER_RESPONSE,
-            rpc_gas_cap: RPC_DEFAULT_GAS_CAP,
+            rpc_gas_cap: RPC_DEFAULT_GAS_CAP.into(),
         }
     }
 }
diff --git a/crates/rpc/rpc/src/eth/api/mod.rs b/crates/rpc/rpc/src/eth/api/mod.rs
index e68dd3686ef7..de5d12f8d105 100644
--- a/crates/rpc/rpc/src/eth/api/mod.rs
+++ b/crates/rpc/rpc/src/eth/api/mod.rs
@@ -87,7 +87,7 @@ where
         network: Network,
         eth_cache: EthStateCache,
         gas_oracle: GasPriceOracle,
-        gas_cap: u64,
+        gas_cap: impl Into<GasCap>,
     ) -> Self {
         Self::with_spawner(
             provider,
@@ -95,7 +95,7 @@ where
             network,
             eth_cache,
             gas_oracle,
-            gas_cap,
+            gas_cap.into().into(),
             Box::<TokioTaskExecutor>::default(),
         )
     }
@@ -370,6 +370,35 @@ where
     }
 }

+/// The default gas limit for eth_call and adjacent calls.
+///
+/// This is different from the default to regular 30M block gas limit
+/// [ETHEREUM_BLOCK_GAS_LIMIT](reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT) to allow for
+/// more complex calls.
+pub const RPC_DEFAULT_GAS_CAP: GasCap = GasCap(50_000_000);
+
+/// The wrapper type for gas limit
+#[derive(Debug, Clone, Copy)]
+pub struct GasCap(u64);
+
+impl Default for GasCap {
+    fn default() -> Self {
+        RPC_DEFAULT_GAS_CAP
+    }
+}
+
+impl From<u64> for GasCap {
+    fn from(gas_cap: u64) -> Self {
+        Self(gas_cap)
+    }
+}
+
+impl From<GasCap> for u64 {
+    fn from(gas_cap: GasCap) -> Self {
+        gas_cap.0
+    }
+}
+
 /// Container type `EthApi`
 struct EthApiInner {
     /// The transaction pool.
diff --git a/crates/rpc/rpc/src/eth/mod.rs b/crates/rpc/rpc/src/eth/mod.rs index bff4361e4868..0a9efc560257 100644 --- a/crates/rpc/rpc/src/eth/mod.rs +++ b/crates/rpc/rpc/src/eth/mod.rs @@ -12,7 +12,7 @@ pub mod revm_utils; mod signer; pub(crate) mod utils; -pub use api::{EthApi, EthApiSpec, EthTransactions, TransactionSource}; +pub use api::{EthApi, EthApiSpec, EthTransactions, TransactionSource, RPC_DEFAULT_GAS_CAP}; pub use filter::EthFilter; pub use id_provider::EthSubscriptionIdProvider; pub use pubsub::EthPubSub; From 6c151d349943a0c96687b364a18d77bde3873ec6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 19 Jul 2023 22:25:58 +0200 Subject: [PATCH 211/722] chore: bump default max response size (#3850) --- bin/reth/src/args/rpc_server_args.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/bin/reth/src/args/rpc_server_args.rs b/bin/reth/src/args/rpc_server_args.rs index 452f6ba33e2e..a38cd6994450 100644 --- a/bin/reth/src/args/rpc_server_args.rs +++ b/bin/reth/src/args/rpc_server_args.rs @@ -43,7 +43,9 @@ pub(crate) const RPC_DEFAULT_MAX_SUBS_PER_CONN: u32 = 1024; /// Default max request size in MB. pub(crate) const RPC_DEFAULT_MAX_REQUEST_SIZE_MB: u32 = 15; /// Default max response size in MB. -pub(crate) const RPC_DEFAULT_MAX_RESPONSE_SIZE_MB: u32 = 100; +/// +/// This is only relevant for very large trace responses. +pub(crate) const RPC_DEFAULT_MAX_RESPONSE_SIZE_MB: u32 = 115; /// Default number of incoming connections. pub(crate) const RPC_DEFAULT_MAX_CONNECTIONS: u32 = 100; /// Default number of incoming connections. From 815cac7dd88f2cdb1f7fe4f9d69dd6b1ad3d0297 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 20 Jul 2023 01:54:01 +0200 Subject: [PATCH 212/722] chore: reorder fields (#3832) --- crates/rpc/rpc-types/src/eth/trace/parity.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/crates/rpc/rpc-types/src/eth/trace/parity.rs b/crates/rpc/rpc-types/src/eth/trace/parity.rs index fcc5e54bbbb8..f3fa4eb9b1f6 100644 --- a/crates/rpc/rpc-types/src/eth/trace/parity.rs +++ b/crates/rpc/rpc-types/src/eth/trace/parity.rs @@ -264,18 +264,18 @@ pub struct TransactionTrace { pub struct LocalizedTransactionTrace { #[serde(flatten)] pub trace: TransactionTrace, - /// Transaction index within the block, None if pending. - pub transaction_position: Option, - /// Hash of the transaction - pub transaction_hash: Option, - /// Block number the transaction is included in, None if pending. - /// - /// Note: this deviates from which always returns a block number - pub block_number: Option, /// Hash of the block, if not pending /// /// Note: this deviates from which always returns a block number pub block_hash: Option, + /// Block number the transaction is included in, None if pending. + /// + /// Note: this deviates from which always returns a block number + pub block_number: Option, + /// Hash of the transaction + pub transaction_hash: Option, + /// Transaction index within the block, None if pending. + pub transaction_position: Option, } /// A record of a full VM trace for a CALL/CREATE. 
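A note on the field reorder in the last patch: it is not only cosmetic. `serde` emits struct fields in declaration order, and the keys of a `#[serde(flatten)]` field are emitted at the position of that field, so moving `block_hash`/`block_number` ahead of the transaction fields changes the key order of the serialized trace. A self-contained toy sketch (the struct and field names here are illustrative stand-ins, not the real types):

```rust
use serde::Serialize;

#[derive(Serialize)]
struct TraceParts {
    action: &'static str,
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct Localized {
    // Flattened keys are emitted here, ahead of the remaining fields.
    #[serde(flatten)]
    trace: TraceParts,
    block_hash: Option<&'static str>,
    block_number: Option<u64>,
    transaction_hash: Option<&'static str>,
    transaction_position: Option<u64>,
}

fn main() {
    let t = Localized {
        trace: TraceParts { action: "call" },
        block_hash: Some("0xaa"),
        block_number: Some(1),
        transaction_hash: Some("0xbb"),
        transaction_position: Some(0),
    };
    // Prints keys in declaration order:
    // {"action":"call","blockHash":"0xaa","blockNumber":1,"transactionHash":"0xbb","transactionPosition":0}
    println!("{}", serde_json::to_string(&t).unwrap());
}
```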
From 39cd0e071ef1220aef2c54d2d880c4a123e8a638 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 20 Jul 2023 03:10:41 +0200 Subject: [PATCH 213/722] chore(grafana): make instance selector single-value (#3805) --- etc/grafana/dashboards/overview.json | 36 ++++++++++++++-------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 4e11ec203aa3..0ca8bd3d9d13 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -27,7 +27,7 @@ "type": "grafana", "id": "grafana", "name": "Grafana", - "version": "9.5.3" + "version": "10.0.2" }, { "type": "panel", @@ -100,7 +100,7 @@ "panels": [], "repeat": "instance", "repeatDirection": "h", - "title": "Overview ($instance)", + "title": "Overview", "type": "row" }, { @@ -159,7 +159,7 @@ "showThresholdLabels": false, "showThresholdMarkers": true }, - "pluginVersion": "9.5.3", + "pluginVersion": "10.0.2", "targets": [ { "datasource": { @@ -226,7 +226,7 @@ "showUnfilled": true, "valueMode": "color" }, - "pluginVersion": "9.5.3", + "pluginVersion": "10.0.2", "targets": [ { "datasource": { @@ -441,7 +441,7 @@ "panels": [], "repeat": "instance", "repeatDirection": "h", - "title": "Database ($instance)", + "title": "Database", "type": "row" }, { @@ -609,7 +609,7 @@ "unit": "percentunit" } }, - "pluginVersion": "9.5.3", + "pluginVersion": "10.0.2", "targets": [ { "datasource": { @@ -1006,7 +1006,7 @@ }, "showHeader": true }, - "pluginVersion": "9.5.3", + "pluginVersion": "10.0.2", "targets": [ { "datasource": { @@ -1038,7 +1038,7 @@ "panels": [], "repeat": "instance", "repeatDirection": "h", - "title": "Stage: Execution ($instance)", + "title": "Stage: Execution", "type": "row" }, { @@ -1179,7 +1179,7 @@ "panels": [], "repeat": "instance", "repeatDirection": "h", - "title": "Networking ($instance)", + "title": "Networking", "type": "row" }, { @@ -1705,7 +1705,7 @@ "panels": [], "repeat": "instance", "repeatDirection": "h", - "title": "Downloader: Headers ($instance)", + "title": "Downloader: Headers", "type": "row" }, { @@ -2097,7 +2097,7 @@ "panels": [], "repeat": "instance", "repeatDirection": "h", - "title": "Downloader: Bodies ($instance)", + "title": "Downloader: Bodies", "type": "row" }, { @@ -2792,7 +2792,7 @@ "panels": [], "repeat": "instance", "repeatDirection": "h", - "title": "Transaction Pool ($instance)", + "title": "Transaction Pool", "type": "row" }, { @@ -3428,7 +3428,7 @@ "panels": [], "repeat": "instance", "repeatDirection": "h", - "title": "Blockchain Tree ($instance)", + "title": "Blockchain Tree", "type": "row" }, { @@ -3725,7 +3725,7 @@ "panels": [], "repeat": "instance", "repeatDirection": "h", - "title": "Engine API ($instance)", + "title": "Engine API", "type": "row" }, { @@ -4031,7 +4031,7 @@ "panels": [], "repeat": "instance", "repeatDirection": "h", - "title": "Payload Builder ($instance)", + "title": "Payload Builder", "type": "row" }, { @@ -4786,8 +4786,8 @@ }, "definition": "query_result(up)", "hide": 0, - "includeAll": true, - "multi": true, + "includeAll": false, + "multi": false, "name": "instance", "options": [], "query": { @@ -4810,6 +4810,6 @@ "timezone": "", "title": "reth", "uid": "2k8BXz24x", - "version": 2, + "version": 3, "weekStart": "" } \ No newline at end of file From fb2d0a22f6c18d2ad48c69a04823bb303f7b7247 Mon Sep 17 00:00:00 2001 From: "Luca G.F" Date: Thu, 20 Jul 2023 22:19:49 +0200 Subject: [PATCH 214/722] docs: add subsection describing build process on arm devices (#3854) 
Signed-off-by: Luca Georges Francois 
---
 book/SUMMARY.md                            |  1 +
 book/installation/build-for-arm-devices.md | 49 ++++++++++++++++++++++
 2 files changed, 50 insertions(+)
 create mode 100644 book/installation/build-for-arm-devices.md

diff --git a/book/SUMMARY.md b/book/SUMMARY.md
index 8faf1b1ab3be..21a98cbaf6e6 100644
--- a/book/SUMMARY.md
+++ b/book/SUMMARY.md
@@ -5,6 +5,7 @@
     1. [Pre-Built Binaries](./installation/binaries.md)
     1. [Docker](./installation/docker.md)
     1. [Build from Source](./installation/source.md)
+    1. [Build for ARM devices](./installation/build-for-arm-devices.md)
     1. [Update Priorities](./installation/priorities.md)
 1. [Run a Node](./run/run-a-node.md)
     1. [Mainnet or official testnets](./run/mainnet.md)
diff --git a/book/installation/build-for-arm-devices.md b/book/installation/build-for-arm-devices.md
new file mode 100644
index 000000000000..ac95f16939f8
--- /dev/null
+++ b/book/installation/build-for-arm-devices.md
@@ -0,0 +1,49 @@
+# Building for ARM devices
+
+Reth can be built for and run on ARM devices, but there are a few things to take into consideration first.
+
+## CPU Architecture
+
+First, you must have a 64-bit CPU and Operating System, otherwise some of the project dependencies will not be able to compile or be executed.
+
+## Memory Layout on AArch64
+
+Then, you must set up the virtual memory layout in such a way that the user space is sufficiently large.
+From [the Linux Kernel documentation](https://www.kernel.org/doc/html/v5.3/arm64/memory.html#:~:text=AArch64%20Linux%20uses%20either%203,for%20both%20user%20and%20kernel.), you can see that the memory layout with 4KB pages and a level-3 translation table limits the user space to 512GB, which is too low for Reth to sync on Ethereum mainnet.
+
+## Build Reth
+
+If both your CPU architecture and the memory layout are valid, the instructions for building Reth will not differ from [the standard process](https://paradigmxyz.github.io/reth/installation/source.html).
+
+## Troubleshooting
+
+> If you ever need to recompile the Linux Kernel because the official OS images for your ARM board don't have the right memory layout configuration, you can use [the Armbian build framework](https://github.com/armbian/build).
+
+### Failed to open database
+
+> This error is documented [here](https://github.com/paradigmxyz/reth/issues/2211).
+
+This error is raised whenever MDBX cannot open a database due to the limitations imposed by the memory layout of your kernel. If the user space is limited to 512GB, the database will not be able to grow beyond this size.
+
+You will need to recompile the Linux Kernel to fix the issue.
+
+A simple and safe approach to achieve this is to use the Armbian build framework to create a new image of the OS that will be flashed to a storage device of your choice - an SD card for example - with the following kernel feature values:
+- **Page Size**: 64 KB
+- **Virtual Address Space Size**: 48 Bits
+
+To be able to build an Armbian image and set those values, you will need to:
+- Clone the Armbian build framework repository
+```shell
+git clone https://github.com/armbian/build
+cd build
+```
+- Run the compile script with the following parameters:
+```shell
+./compile.sh \
+BUILD_MINIMAL=yes \
+BUILD_DESKTOP=no \
+KERNEL_CONFIGURE=yes \
+CARD_DEVICE="/dev/sdX" # Replace sdX with your own storage device
+```
+- From there, you will be able to select the target board, the OS release and branch. Then, once you get to the **Kernel Configuration** screen, select the **Kernel Features options** and set the previous values accordingly.
+- Wait for the process to finish, plug your storage device into your board and start it. You can now download or install Reth and it should work properly.
From 8720a5189b0d3fe7b8d811893d5c15d690e20c0f Mon Sep 17 00:00:00 2001
From: N 
Date: Thu, 20 Jul 2023 16:24:43 -0400
Subject: [PATCH 215/722] docs: prometheus defaults to target itself at 9090
 (#3792)

Co-authored-by: N 
---
 book/run/observability.md | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/book/run/observability.md b/book/run/observability.md
index 9eb3e99a5e8d..4ab2951805e4 100644
--- a/book/run/observability.md
+++ b/book/run/observability.md
@@ -43,11 +43,19 @@ brew services start prometheus
 brew services start grafana
 ```

-This will start a Prometheus service which by default scrapes the metrics exposed at `localhost:9001`. If you launched reth with a different `--metrics` endpoint, you can change the Prometheus config file at `/usr/local/etc/prometheus/prometheus.yml` to point to the correct endpoint, and then restart the Prometheus service. You can also stop the service and launch Prometheus with a custom `prometheus.yml` like the one provided under [`etc/prometheus/prometheus.yml`](https://github.com/paradigmxyz/reth/blob/main/etc/prometheus/prometheus.yml) in this repo.
+This will start a Prometheus service which [by default scrapes only itself, the current Prometheus instance](https://prometheus.io/docs/introduction/first_steps/#:~:text=The%20job%20contains%20a%20single,%3A%2F%2Flocalhost%3A9090%2Fmetrics.). So you'll need to change its config to hit your Reth node's metrics endpoint at `localhost:9001`, which you set using the `--metrics` flag.
+
+You can find an example config for the Prometheus service in the repo here: [`etc/prometheus/prometheus.yml`](https://github.com/paradigmxyz/reth/blob/main/etc/prometheus/prometheus.yml)
+
+Depending on your installation you may find the config for your Prometheus service at:
+
+- OSX: `/opt/homebrew/etc/prometheus.yml`
+- Linuxbrew: `/home/linuxbrew/.linuxbrew/etc/prometheus.yml`
+- Others: `/usr/local/etc/prometheus/prometheus.yml`

 Next, open up "localhost:3000" in your browser, which is the default URL for Grafana. Here, "admin" is the default for both the username and password.

-Once you've logged in, click on the gear icon in the lower left, and select "Data Sources". Click on "Add data source", and select "Prometheus" as the type. In the HTTP URL field, enter `http://localhost:9090`, this is the default endpoint for the Prometheus scrape endpoint. Finally, click "Save & Test".
+Once you've logged in, click on the gear icon in the lower left, and select "Data Sources". Click on "Add data source", and select "Prometheus" as the type. In the HTTP URL field, enter `http://localhost:9090`. Finally, click "Save & Test".

+As this might be a point of confusion, `localhost:9001`, which we supplied to `--metrics`, is the endpoint that Reth exposes, from which Prometheus collects metrics. Prometheus then exposes `localhost:9090` (by default) for other services (such as Grafana) to consume Prometheus metrics.
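The config change this doc patch asks for boils down to pointing a scrape job at the node's metrics endpoint. A minimal `prometheus.yml` sketch along those lines — the job name and scrape interval are arbitrary choices for illustration, not taken from the repo's example file:

```yaml
global:
  scrape_interval: 5s

scrape_configs:
  # Scrape the Reth metrics endpoint set via `--metrics localhost:9001`.
  - job_name: reth
    static_configs:
      - targets: ["localhost:9001"]
```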
From 0b012ab6640f7627a0f9fe11ea9eec10d479de72 Mon Sep 17 00:00:00 2001
From: ControlCplusControlV <44706811+ControlCplusControlV@users.noreply.github.com>
Date: Thu, 20 Jul 2023 14:25:24 -0600
Subject: [PATCH 216/722] docs: describe windows dependencies for source builds
 (#3786)

Co-authored-by: Bjerg 
---
 book/installation/source.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/book/installation/source.md b/book/installation/source.md
index a4448482d421..a76326ba5f32 100644
--- a/book/installation/source.md
+++ b/book/installation/source.md
@@ -1,6 +1,6 @@
 # Build from Source

-You can build Reth on Linux, macOS, and Windows WSL2.
+You can build Reth on Linux, macOS, Windows, and Windows WSL2.

 > **Note**
 >
@@ -28,6 +28,7 @@ operating system:

 - **Ubuntu**: `apt-get install libclang-dev pkg-config build-essential`
 - **macOS**: `brew install llvm pkg-config`
+- **Windows**: `choco install llvm` or `winget install LLVM.LLVM`

 These are needed to build bindings for Reth's database.

From 95cbff31f950e29f7145bf5f41afe6971a79934a Mon Sep 17 00:00:00 2001
From: Matthias Seitz 
Date: Mon, 24 Jul 2023 11:30:11 +0200
Subject: [PATCH 217/722] ci: add another it partition (#3871)

---
 .github/workflows/integration.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml
index 7bb68ccac4e6..abc92e192491 100644
--- a/.github/workflows/integration.yml
+++ b/.github/workflows/integration.yml
@@ -21,7 +21,7 @@ jobs:
     runs-on: ubuntu-20.04
     strategy:
       matrix:
-        partition: [1, 2]
+        partition: [1, 2, 3]
     steps:
       - name: Checkout sources
        uses: actions/checkout@v3

From c878a9f489e5ec7a97e99691d9ef6e22826a97b8 Mon Sep 17 00:00:00 2001
From: Roman Krasiuk 
Date: Mon, 24 Jul 2023 13:08:34 +0300
Subject: [PATCH 218/722] chore(txpool): move basefee to u64 (#3872)

Co-authored-by: Matthias Seitz 
---
 crates/payload/basic/src/lib.rs               |  2 +-
 crates/primitives/src/constants.rs            |  4 ++--
 crates/primitives/src/transaction/mod.rs      |  2 +-
 crates/rpc/rpc/src/eth/api/pending_block.rs   |  2 +-
 crates/transaction-pool/src/lib.rs            |  2 +-
 crates/transaction-pool/src/maintain.rs       |  7 +++---
 crates/transaction-pool/src/noop.rs           |  2 +-
 crates/transaction-pool/src/pool/best.rs      |  4 ++--
 crates/transaction-pool/src/pool/mod.rs       |  2 +-
 crates/transaction-pool/src/pool/parked.rs    | 18 +++++++-------
 crates/transaction-pool/src/pool/pending.rs   | 12 +++++-----
 crates/transaction-pool/src/pool/txpool.rs    | 24 +++++++++----------
 .../transaction-pool/src/test_utils/mock.rs   |  4 ++--
 crates/transaction-pool/src/traits.rs         |  6 ++---
 crates/transaction-pool/src/validate/mod.rs   |  2 +-
 15 files changed, 46 insertions(+), 47 deletions(-)

diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs
index 2d598a270268..84da180060da 100644
--- a/crates/payload/basic/src/lib.rs
+++ b/crates/payload/basic/src/lib.rs
@@ -581,7 +581,7 @@ fn build_payload(
     let base_fee = initialized_block_env.basefee.to::<u64>();

     let mut executed_txs = Vec::new();
-    let mut best_txs = pool.best_transactions_with_base_fee(base_fee as u128);
+    let mut best_txs = pool.best_transactions_with_base_fee(base_fee);

     let mut total_fees = U256::ZERO;

diff --git a/crates/primitives/src/constants.rs b/crates/primitives/src/constants.rs
index 005d69380ab2..8530a2bc67c8 100644
--- a/crates/primitives/src/constants.rs
+++ b/crates/primitives/src/constants.rs
@@ -39,7 +39,7 @@ pub const ETHEREUM_BLOCK_GAS_LIMIT: u64 = 30_000_000;
 /// The `BASE_FEE_MAX_CHANGE_DENOMINATOR` is `8`, or 12.5%.
 /// Once the base fee has dropped to `7` WEI it cannot decrease further because 12.5% of 7 is less
 /// than 1.
-pub const MIN_PROTOCOL_BASE_FEE: u128 = 7;
+pub const MIN_PROTOCOL_BASE_FEE: u64 = 7;

 /// Same as [MIN_PROTOCOL_BASE_FEE] but as a U256.
 pub const MIN_PROTOCOL_BASE_FEE_U256: U256 = U256::from_limbs([7u64, 0, 0, 0]);
@@ -114,6 +114,6 @@ mod tests {

     #[test]
     fn min_protocol_sanity() {
-        assert_eq!(MIN_PROTOCOL_BASE_FEE_U256.to::<u128>(), MIN_PROTOCOL_BASE_FEE);
+        assert_eq!(MIN_PROTOCOL_BASE_FEE_U256.to::<u64>(), MIN_PROTOCOL_BASE_FEE);
     }
 }
diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs
index c0371aa2a849..a99ae609fe4a 100644
--- a/crates/primitives/src/transaction/mod.rs
+++ b/crates/primitives/src/transaction/mod.rs
@@ -462,7 +462,7 @@ impl Transaction {
     ///
     /// This is different than the `max_priority_fee_per_gas` method, which returns `None` for
     /// non-EIP-1559 transactions.
-    pub(crate) fn priority_fee_or_price(&self) -> u128 {
+    pub fn priority_fee_or_price(&self) -> u128 {
         match self {
             Transaction::Legacy(TxLegacy { gas_price, .. }) |
             Transaction::Eip2930(TxEip2930 { gas_price, .. }) => *gas_price,
diff --git a/crates/rpc/rpc/src/eth/api/pending_block.rs b/crates/rpc/rpc/src/eth/api/pending_block.rs
index a427df3279a1..b5b8bc89c410 100644
--- a/crates/rpc/rpc/src/eth/api/pending_block.rs
+++ b/crates/rpc/rpc/src/eth/api/pending_block.rs
@@ -50,7 +50,7 @@ impl PendingBlockEnv {
         let block_number = block_env.number.to::<u64>();

         let mut executed_txs = Vec::new();
-        let mut best_txs = pool.best_transactions_with_base_fee(base_fee as u128);
+        let mut best_txs = pool.best_transactions_with_base_fee(base_fee);

         while let Some(pool_tx) = best_txs.next() {
             // ensure we still have capacity for this transaction
diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs
index 93ebe6fd1a08..d4c25f047e75 100644
--- a/crates/transaction-pool/src/lib.rs
+++ b/crates/transaction-pool/src/lib.rs
@@ -399,7 +399,7 @@ where

     fn best_transactions_with_base_fee(
         &self,
-        base_fee: u128,
+        base_fee: u64,
     ) -> Box<dyn BestTransactions<Item = Arc<ValidPoolTransaction<Self::Transaction>>>> {
         self.pool.best_transactions_with_base_fee(base_fee)
     }
diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs
index 1b4084c0f304..22cc62ddaab3 100644
--- a/crates/transaction-pool/src/maintain.rs
+++ b/crates/transaction-pool/src/maintain.rs
@@ -83,7 +83,7 @@ pub async fn maintain_transaction_pool(
         let info = BlockInfo {
             last_seen_block_hash: latest.hash,
             last_seen_block_number: latest.number,
-            pending_basefee: latest.next_block_base_fee().unwrap_or_default() as u128,
+            pending_basefee: latest.next_block_base_fee().unwrap_or_default(),
         };
         pool.set_block_info(info);
     }
@@ -205,8 +205,7 @@ pub async fn maintain_transaction_pool(
             }

             // base fee for the next block: `new_tip+1`
-            let pending_block_base_fee =
-                new_tip.next_block_base_fee().unwrap_or_default() as u128;
+            let pending_block_base_fee = new_tip.next_block_base_fee().unwrap_or_default();

             // we know all changed account in the new chain
             let new_changed_accounts: HashSet<_> =
@@ -282,7 +281,7 @@ pub async fn maintain_transaction_pool(
             let tip = blocks.tip();

             // base fee for the next block: `tip+1`
-            let pending_block_base_fee = tip.next_block_base_fee().unwrap_or_default() as u128;
+            let pending_block_base_fee = tip.next_block_base_fee().unwrap_or_default();

             let first_block = blocks.first();
             trace!(
diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs
index c03a475e50a7..d935e62c1b2a 100644
--- a/crates/transaction-pool/src/noop.rs
+++ b/crates/transaction-pool/src/noop.rs
@@ -112,7 +112,7 @@ impl TransactionPool for NoopTransactionPool {

     fn best_transactions_with_base_fee(
         &self,
-        _: u128,
+        _: u64,
     ) -> Box<dyn BestTransactions<Item = Arc<ValidPoolTransaction<Self::Transaction>>>> {
         Box::new(std::iter::empty())
     }
diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs
index fc6ff5595666..071063aeda5d 100644
--- a/crates/transaction-pool/src/pool/best.rs
+++ b/crates/transaction-pool/src/pool/best.rs
@@ -18,7 +18,7 @@ use tracing::debug;
 /// This iterator guarantees that all transaction it returns satisfy the base fee.
 pub(crate) struct BestTransactionsWithBasefee<T: TransactionOrdering> {
     pub(crate) best: BestTransactions<T>,
-    pub(crate) base_fee: u128,
+    pub(crate) base_fee: u64,
 }

 impl<T: TransactionOrdering> crate::traits::BestTransactions for BestTransactionsWithBasefee<T> {
@@ -34,7 +34,7 @@ impl<T: TransactionOrdering> Iterator for BestTransactionsWithBasefee<T> {
         // find the next transaction that satisfies the base fee
         loop {
             let best = self.best.next()?;
-            if best.transaction.max_fee_per_gas() < self.base_fee {
+            if best.transaction.max_fee_per_gas() < self.base_fee as u128 {
                 // tx violates base fee, mark it as invalid and continue
                 crate::traits::BestTransactions::mark_invalid(self, &best);
             } else {
diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs
index d3d3c5ca9aa3..d15596baa2f9 100644
--- a/crates/transaction-pool/src/pool/mod.rs
+++ b/crates/transaction-pool/src/pool/mod.rs
@@ -467,7 +467,7 @@ where
     /// the given base fee.
     pub(crate) fn best_transactions_with_base_fee(
         &self,
-        base_fee: u128,
+        base_fee: u64,
     ) -> Box<dyn BestTransactions<Item = Arc<ValidPoolTransaction<T::Transaction>>>>
     {
         self.pool.read().best_transactions_with_base_fee(base_fee)
diff --git a/crates/transaction-pool/src/pool/parked.rs b/crates/transaction-pool/src/pool/parked.rs
index 0916886d130c..1965fe8c8c11 100644
--- a/crates/transaction-pool/src/pool/parked.rs
+++ b/crates/transaction-pool/src/pool/parked.rs
@@ -115,7 +115,7 @@ impl<T: PoolTransaction> ParkedPool<BasefeeOrd<T>> {
     /// Note: this does _not_ remove the transactions
     pub(crate) fn satisfy_base_fee_transactions(
         &self,
-        basefee: u128,
+        basefee: u64,
     ) -> Vec<Arc<ValidPoolTransaction<T>>> {
         let ids = self.satisfy_base_fee_ids(basefee);
         let mut txs = Vec::with_capacity(ids.len());
@@ -126,13 +126,13 @@ impl<T: PoolTransaction> ParkedPool<BasefeeOrd<T>> {
     }

     /// Returns all transactions that satisfy the given basefee.
-    fn satisfy_base_fee_ids(&self, basefee: u128) -> Vec<TransactionId> {
+    fn satisfy_base_fee_ids(&self, basefee: u64) -> Vec<TransactionId> {
         let mut transactions = Vec::new();
         {
             let mut iter = self.by_id.iter().peekable();

             while let Some((id, tx)) = iter.next() {
-                if tx.transaction.transaction.max_fee_per_gas() < basefee {
+                if tx.transaction.transaction.max_fee_per_gas() < basefee as u128 {
                     // still parked -> skip descendant transactions
                     'this: while let Some((peek, _)) = iter.peek() {
                         if peek.sender != id.sender {
@@ -152,7 +152,7 @@ impl<T: PoolTransaction> ParkedPool<BasefeeOrd<T>> {
     /// satisfy the given basefee.
     ///
     /// Note: the transactions are not returned in a particular order.
-    pub(crate) fn enforce_basefee(&mut self, basefee: u128) -> Vec<Arc<ValidPoolTransaction<T>>> {
+    pub(crate) fn enforce_basefee(&mut self, basefee: u64) -> Vec<Arc<ValidPoolTransaction<T>>> {
         let to_remove = self.satisfy_base_fee_ids(basefee);

         let mut removed = Vec::with_capacity(to_remove.len());
@@ -330,10 +330,10 @@ mod tests {
         assert!(pool.by_id.contains_key(tx.id()));
         assert_eq!(pool.len(), 1);

-        let removed = pool.enforce_basefee(u128::MAX);
+        let removed = pool.enforce_basefee(u64::MAX);
         assert!(removed.is_empty());

-        let removed = pool.enforce_basefee(tx.max_fee_per_gas() - 1);
+        let removed = pool.enforce_basefee((tx.max_fee_per_gas() - 1) as u64);
         assert_eq!(removed.len(), 1);
         assert!(pool.is_empty());
     }
@@ -353,14 +353,14 @@ mod tests {
         assert!(pool.by_id.contains_key(descendant_tx.id()));
         assert_eq!(pool.len(), 2);

-        let removed = pool.enforce_basefee(u128::MAX);
+        let removed = pool.enforce_basefee(u64::MAX);
         assert!(removed.is_empty());

         // two dependent tx in the pool with decreasing fee

         {
             let mut pool2 = pool.clone();

-            let removed = pool2.enforce_basefee(descendant_tx.max_fee_per_gas());
+            let removed = pool2.enforce_basefee(descendant_tx.max_fee_per_gas() as u64);
             assert_eq!(removed.len(), 1);
             assert_eq!(pool2.len(), 1);
             // descendant got popped
@@ -369,7 +369,7 @@ mod tests {
         }

         // remove root transaction via root tx fee
-        let removed = pool.enforce_basefee(root_tx.max_fee_per_gas());
+        let removed = pool.enforce_basefee(root_tx.max_fee_per_gas() as u64);
         assert_eq!(removed.len(), 2);
         assert!(pool.is_empty());
     }
diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs
index 71b873a3022f..dd77997dcc77 100644
--- a/crates/transaction-pool/src/pool/pending.rs
+++ b/crates/transaction-pool/src/pool/pending.rs
@@ -86,7 +86,7 @@ impl<T: TransactionOrdering> PendingPool<T> {
     }

     /// Same as `best` but only returns transactions that satisfy the given basefee.
-    pub(crate) fn best_with_basefee(&self, base_fee: u128) -> BestTransactionsWithBasefee<T> {
+    pub(crate) fn best_with_basefee(&self, base_fee: u64) -> BestTransactionsWithBasefee<T> {
         BestTransactionsWithBasefee { best: self.best(), base_fee }
     }

@@ -135,14 +135,14 @@ impl<T: TransactionOrdering> PendingPool<T> {
     /// Note: the transactions are not returned in a particular order.
pub(crate) fn enforce_basefee( &mut self, - basefee: u128, + basefee: u64, ) -> Vec>> { let mut to_remove = Vec::new(); { let mut iter = self.by_id.iter().peekable(); while let Some((id, tx)) = iter.next() { - if tx.transaction.transaction.max_fee_per_gas() < basefee { + if tx.transaction.transaction.max_fee_per_gas() < basefee as u128 { // this transaction no longer satisfies the basefee: remove it and all its // descendants to_remove.push(*id); @@ -346,7 +346,7 @@ mod tests { let removed = pool.enforce_basefee(0); assert!(removed.is_empty()); - let removed = pool.enforce_basefee(tx.max_fee_per_gas() + 1); + let removed = pool.enforce_basefee((tx.max_fee_per_gas() + 1) as u64); assert_eq!(removed.len(), 1); assert!(pool.is_empty()); } @@ -375,7 +375,7 @@ mod tests { { let mut pool2 = pool.clone(); - let removed = pool2.enforce_basefee(descendant_tx.max_fee_per_gas() + 1); + let removed = pool2.enforce_basefee((descendant_tx.max_fee_per_gas() + 1) as u64); assert_eq!(removed.len(), 1); assert_eq!(pool2.len(), 1); // descendant got popped @@ -384,7 +384,7 @@ mod tests { } // remove root transaction via fee - let removed = pool.enforce_basefee(root_tx.max_fee_per_gas() + 1); + let removed = pool.enforce_basefee((root_tx.max_fee_per_gas() + 1) as u64); assert_eq!(removed.len(), 2); assert!(pool.is_empty()); } diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index e3820ee65575..cd0d607387d9 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -142,7 +142,7 @@ impl TxPool { /// /// Depending on the change in direction of the basefee, this will promote or demote /// transactions from the basefee pool. - fn update_basefee(&mut self, pending_basefee: u128) { + fn update_basefee(&mut self, pending_basefee: u64) { match pending_basefee.cmp(&self.all_transactions.pending_basefee) { Ordering::Equal => { // fee unchanged, nothing to update @@ -195,7 +195,7 @@ impl TxPool { /// the given base fee. pub(crate) fn best_transactions_with_base_fee( &self, - basefee: u128, + basefee: u64, ) -> Box>>> { match basefee.cmp(&self.all_transactions.pending_basefee) { @@ -649,7 +649,7 @@ pub(crate) struct AllTransactions { /// Minimum base fee required by the protocol. /// /// Transactions with a lower base fee will never be included by the chain - minimal_protocol_basefee: u128, + minimal_protocol_basefee: u64, /// The max gas limit of the block block_gas_limit: u64, /// Max number of executable transaction slots guaranteed per account @@ -665,7 +665,7 @@ pub(crate) struct AllTransactions { /// The current block hash the pool keeps track of. last_seen_block_hash: H256, /// Expected base fee for the pending block. - pending_basefee: u128, + pending_basefee: u64, } impl AllTransactions { @@ -812,7 +812,7 @@ impl AllTransactions { tx.state.insert(TxState::NO_PARKED_ANCESTORS); // Update the first transaction of this sender. - Self::update_tx_base_fee(&self.pending_basefee, tx); + Self::update_tx_base_fee(self.pending_basefee, tx); // Track if the transaction's sub-pool changed. Self::record_subpool_update(&mut updates, tx); @@ -858,7 +858,7 @@ impl AllTransactions { has_parked_ancestor = !tx.state.is_pending(); // Update and record sub-pool changes. 
- Self::update_tx_base_fee(&self.pending_basefee, tx); + Self::update_tx_base_fee(self.pending_basefee, tx); Self::record_subpool_update(&mut updates, tx); // Advance iterator @@ -887,9 +887,9 @@ impl AllTransactions { } /// Rechecks the transaction's dynamic fee condition. - fn update_tx_base_fee(pending_block_base_fee: &u128, tx: &mut PoolInternalTransaction) { + fn update_tx_base_fee(pending_block_base_fee: u64, tx: &mut PoolInternalTransaction) { // Recheck dynamic fee condition. - match tx.transaction.max_fee_per_gas().cmp(pending_block_base_fee) { + match tx.transaction.max_fee_per_gas().cmp(&(pending_block_base_fee as u128)) { Ordering::Greater | Ordering::Equal => { tx.state.insert(TxState::ENOUGH_FEE_CAP_BLOCK); } @@ -1070,10 +1070,10 @@ impl AllTransactions { // Check dynamic fee let fee_cap = transaction.max_fee_per_gas(); - if fee_cap < self.minimal_protocol_basefee { + if fee_cap < self.minimal_protocol_basefee as u128 { return Err(InsertErr::FeeCapBelowMinimumProtocolFeeCap { transaction, fee_cap }) } - if fee_cap >= self.pending_basefee { + if fee_cap >= self.pending_basefee as u128 { state.insert(TxState::ENOUGH_FEE_CAP_BLOCK); } @@ -1570,7 +1570,7 @@ mod tests { let first_in_pool = pool.get(first.id()).unwrap(); - assert!(tx.get_gas_price() < pool.pending_basefee); + assert!(tx.get_gas_price() < pool.pending_basefee as u128); // has nonce gap assert!(!first_in_pool.state.contains(TxState::NO_NONCE_GAPS)); @@ -1671,7 +1671,7 @@ mod tests { assert_eq!(pool.pending_pool.len(), 1); - pool.update_basefee(tx.max_fee_per_gas() + 1); + pool.update_basefee((tx.max_fee_per_gas() + 1) as u64); assert!(pool.pending_pool.is_empty()); assert_eq!(pool.basefee_pool.len(), 1); diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 13ec9b5af010..62be5c66ef11 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -131,8 +131,8 @@ impl MockTransaction { hash: H256::random(), sender: Address::random(), nonce: 0, - max_fee_per_gas: MIN_PROTOCOL_BASE_FEE, - max_priority_fee_per_gas: MIN_PROTOCOL_BASE_FEE, + max_fee_per_gas: MIN_PROTOCOL_BASE_FEE as u128, + max_priority_fee_per_gas: MIN_PROTOCOL_BASE_FEE as u128, gas_limit: 0, to: TransactionKind::Call(Address::random()), value: Default::default(), diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index d44c25f1b521..9d3319832ab2 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -181,7 +181,7 @@ pub trait TransactionPool: Send + Sync + Clone { /// Consumer: Block production fn best_transactions_with_base_fee( &self, - base_fee: u128, + base_fee: u64, ) -> Box>>>; /// Returns all transactions that can be included in the next block. @@ -397,7 +397,7 @@ pub struct CanonicalStateUpdate { /// EIP-1559 Base fee of the _next_ (pending) block /// /// The base fee of a block depends on the utilization of the last block and its base fee. - pub pending_block_base_fee: u128, + pub pending_block_base_fee: u64, /// A set of changed accounts across a range of blocks. pub changed_accounts: Vec, /// All mined transactions in the block range. @@ -677,7 +677,7 @@ pub struct BlockInfo { /// /// Note: this is the derived base fee of the _next_ block that builds on the clock the pool is /// currently tracking. 
- pub pending_basefee: u128, + pub pending_basefee: u64, } /// A Stream that yields full transactions the subpool diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index d2f95de1da13..2e96099f9354 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -164,7 +164,7 @@ impl ValidPoolTransaction { self.transaction.cost() } - /// Returns the gas cost for this transaction. + /// Returns the effective tip for this transaction. /// /// For EIP-1559 transactions: `max_fee_per_gas * gas_limit`. /// For legacy transactions: `gas_price * gas_limit`. From 15781beda869c5db682bb408d8228b90045d3471 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 24 Jul 2023 12:09:05 +0200 Subject: [PATCH 219/722] fix: convert empty topic vec to vec None (#3856) --- crates/rpc/rpc-types/src/eth/filter.rs | 51 +++++++++++++++++++++++++- 1 file changed, 50 insertions(+), 1 deletion(-) diff --git a/crates/rpc/rpc-types/src/eth/filter.rs b/crates/rpc/rpc-types/src/eth/filter.rs index 13af9216b483..d4b6365ccc1c 100644 --- a/crates/rpc/rpc-types/src/eth/filter.rs +++ b/crates/rpc/rpc-types/src/eth/filter.rs @@ -379,7 +379,13 @@ impl Filter { ValueOrArray::Value(s) => { vec![*s] } - ValueOrArray::Array(s) => s.clone(), + ValueOrArray::Array(s) => { + if s.is_empty() { + vec![None] + } else { + s.clone() + } + } } } else { vec![None] @@ -1007,6 +1013,49 @@ mod tests { serde_json::to_value(t).expect("Failed to serialize value") } + #[test] + fn test_empty_filter_topics_list() { + let s = r#"{"fromBlock": "0xfc359e", "toBlock": "0xfc359e", "topics": [["0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925"], [], ["0x0000000000000000000000000c17e776cd218252adfca8d4e761d3fe757e9778"]]}"#; + let filter = serde_json::from_str::(s).unwrap(); + similar_asserts::assert_eq!( + filter.topics, + [ + Some(ValueOrArray::Array(vec![Some( + "0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925" + .parse() + .unwrap() + ),])), + Some(ValueOrArray::Array(vec![])), + Some(ValueOrArray::Array(vec![Some( + "0x0000000000000000000000000c17e776cd218252adfca8d4e761d3fe757e9778" + .parse() + .unwrap() + )])), + None + ] + ); + + let filtered_params = FilteredParams::new(Some(filter)); + let topics = filtered_params.flat_topics; + assert_eq!( + topics, + vec![ValueOrArray::Array(vec![ + Some( + "0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925" + .parse() + .unwrap() + ), + None, + Some( + "0x0000000000000000000000000c17e776cd218252adfca8d4e761d3fe757e9778" + .parse() + .unwrap() + ), + None + ])] + ) + } + #[test] fn can_serde_value_or_array() { #[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)] From 62f3af67434713420e185956a005482c8017f273 Mon Sep 17 00:00:00 2001 From: pistomat Date: Mon, 24 Jul 2023 12:47:40 +0200 Subject: [PATCH 220/722] fix(autoseal): calculate logs bloom for block headers (#3869) Co-authored-by: Matthias Seitz --- crates/consensus/auto-seal/src/lib.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index c9f41a010c6b..76bf16e28fd3 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -27,7 +27,7 @@ use reth_interfaces::{ }; use reth_primitives::{ constants::{EMPTY_RECEIPTS, EMPTY_TRANSACTIONS, ETHEREUM_BLOCK_GAS_LIMIT}, - proofs, Address, Block, BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, ChainSpec, + 
proofs, Address, Block, BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, Bloom, ChainSpec, Header, ReceiptWithBloom, SealedBlock, SealedHeader, TransactionSigned, EMPTY_OMMER_ROOT, H256, U256, }; @@ -292,7 +292,7 @@ impl StorageInner { trace!(target: "consensus::auto", transactions=?&block.body, "executing transactions"); let (post_state, gas_used) = - executor.execute_transactions(block, U256::ZERO, Some(senders.clone()))?; + executor.execute_transactions(block, U256::ZERO, Some(senders))?; // apply post block changes let post_state = executor.apply_post_block_changes(block, U256::ZERO, post_state)?; @@ -315,6 +315,8 @@ impl StorageInner { } else { let receipts_with_bloom = receipts.iter().map(|r| r.clone().into()).collect::>(); + header.logs_bloom = + receipts_with_bloom.iter().fold(Bloom::zero(), |bloom, r| bloom | r.bloom); proofs::calculate_receipt_root(&receipts_with_bloom) }; From 5e5bde88a9c26ea5ad9c0e470cc9f5012f198134 Mon Sep 17 00:00:00 2001 From: Paolo Facchinetti <51409747+paolofacchinetti@users.noreply.github.com> Date: Mon, 24 Jul 2023 13:35:32 +0200 Subject: [PATCH 221/722] docs: add 30303 port to plain docker example (#3877) --- book/installation/docker.md | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/book/installation/docker.md b/book/installation/docker.md index 95ddb86b450e..44cd711567d5 100644 --- a/book/installation/docker.md +++ b/book/installation/docker.md @@ -64,6 +64,8 @@ docker run \ -v rethdata:/root/.local/share/reth/db \ -d \ -p 9001:9001 \ + -p 30303:30303 \ + -p 30303:30303/udp \ --name reth \ reth:local \ node \ @@ -71,6 +73,7 @@ docker run \ ``` The above command will create a container named `reth` and a named volume called `rethdata` for data persistence. +It will also expose the `30303` port (TCP and UDP) for peering with other nodes and the `9001` port for metrics. It will use the local image `reth:local`. If you want to use the GitHub Container Registry remote image, use `ghcr.io/paradigmxyz/reth` with your preferred tag. @@ -121,14 +124,4 @@ docker exec -it reth bash **If Reth is running with Docker Compose, replace `reth` with `reth-reth-1` in the above command** -### Listing the tables - -```bash -reth db stats -``` - -### Viewing some records - -```bash -reth db list --start=1 --len=2 Headers -``` \ No newline at end of file +Refer to the [CLI docs](../cli/cli.md) to interact with Reth once inside the Reth container. 
\ No newline at end of file From 08403c80e210187f1f8e9c9c3d686a789dc73c4a Mon Sep 17 00:00:00 2001 From: Resende <17102689+ZePedroResende@users.noreply.github.com> Date: Mon, 24 Jul 2023 13:12:45 +0100 Subject: [PATCH 222/722] feat(rpc): ots_getApiLevel implementation (#3861) Co-authored-by: Matthias Seitz --- crates/rpc/rpc-builder/tests/it/http.rs | 92 ++++++++++++++++++++++++- crates/rpc/rpc/src/otterscan.rs | 4 +- 2 files changed, 94 insertions(+), 2 deletions(-) diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index 221b2913ddab..997bf0ef9e14 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -14,7 +14,7 @@ use reth_primitives::{ }; use reth_rpc_api::{ clients::{AdminApiClient, EthApiClient}, - DebugApiClient, NetApiClient, TraceApiClient, Web3ApiClient, + DebugApiClient, NetApiClient, OtterscanClient, TraceApiClient, Web3ApiClient, }; use reth_rpc_builder::RethRpcModule; use reth_rpc_types::{trace::filter::TraceFilter, CallRequest, Index, TransactionRequest}; @@ -180,6 +180,69 @@ where Web3ApiClient::sha3(client, Bytes::default()).await.unwrap(); } +async fn test_basic_otterscan_calls(client: &C) +where + C: ClientT + SubscriptionClientT + Sync, +{ + let address = Address::default(); + let sender = Address::default(); + let tx_hash = TxHash::default(); + let block_number = BlockNumberOrTag::default(); + let page_number = 1; + let page_size = 10; + let nonce = 1; + let block_hash = H256::default(); + + assert!(is_unimplemented( + OtterscanClient::has_code(client, address, None).await.err().unwrap() + )); + + OtterscanClient::get_api_level(client).await.unwrap(); + + assert!(is_unimplemented( + OtterscanClient::get_internal_operations(client, tx_hash).await.err().unwrap() + )); + assert!(is_unimplemented( + OtterscanClient::get_transaction_error(client, tx_hash).await.err().unwrap() + )); + assert!(is_unimplemented( + OtterscanClient::trace_transaction(client, tx_hash).await.err().unwrap() + )); + assert!(is_unimplemented( + OtterscanClient::get_block_details(client, block_number,).await.err().unwrap() + )); + assert!(is_unimplemented( + OtterscanClient::get_block_details_by_hash(client, block_hash).await.err().unwrap() + )); + assert!(is_unimplemented( + OtterscanClient::get_block_transactions(client, block_number, page_number, page_size,) + .await + .err() + .unwrap() + )); + assert!(is_unimplemented( + OtterscanClient::search_transactions_before(client, address, block_number, page_size,) + .await + .err() + .unwrap() + )); + assert!(is_unimplemented( + OtterscanClient::search_transactions_after(client, address, block_number, page_size,) + .await + .err() + .unwrap() + )); + assert!(is_unimplemented( + OtterscanClient::get_transaction_by_sender_and_nonce(client, sender, nonce,) + .await + .err() + .unwrap() + )); + assert!(is_unimplemented( + OtterscanClient::get_contract_creator(client, address).await.err().unwrap() + )); +} + #[tokio::test(flavor = "multi_thread")] async fn test_call_admin_functions_http() { reth_tracing::init_test_tracing(); @@ -341,3 +404,30 @@ async fn test_call_web3_functions_http_and_ws() { let client = handle.http_client().unwrap(); test_basic_web3_calls(&client).await; } + +#[tokio::test(flavor = "multi_thread")] +async fn test_call_otterscan_functions_http() { + reth_tracing::init_test_tracing(); + + let handle = launch_http(vec![RethRpcModule::Ots]).await; + let client = handle.http_client().unwrap(); + test_basic_otterscan_calls(&client).await; +} + 
+#[tokio::test(flavor = "multi_thread")] +async fn test_call_otterscan_functions_ws() { + reth_tracing::init_test_tracing(); + + let handle = launch_ws(vec![RethRpcModule::Ots]).await; + let client = handle.ws_client().await.unwrap(); + test_basic_otterscan_calls(&client).await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_call_otterscan_functions_http_and_ws() { + reth_tracing::init_test_tracing(); + + let handle = launch_http_ws(vec![RethRpcModule::Ots]).await; + let client = handle.http_client().unwrap(); + test_basic_otterscan_calls(&client).await; +} diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index 93d0ba167e24..da895c6936f2 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -9,6 +9,8 @@ use reth_rpc_types::{ Transaction, TransactionsWithReceipts, }; +const API_LEVEL: u64 = 8; + /// Otterscan Api #[derive(Debug)] pub struct OtterscanApi { @@ -34,7 +36,7 @@ where /// Handler for `ots_getApiLevel` async fn get_api_level(&self) -> RpcResult { - Err(internal_rpc_err("unimplemented")) + Ok(API_LEVEL) } /// Handler for `ots_getInternalOperations` From b3c8a98a6248dfd3b894610b5d6989eb8b707a88 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 24 Jul 2023 15:49:54 +0100 Subject: [PATCH 223/722] chore(ci): build tests in release and filter features (#3885) --- .github/workflows/integration.yml | 2 +- .github/workflows/unit.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index abc92e192491..094134617d4b 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -52,7 +52,7 @@ jobs: - name: Run tests run: | cargo llvm-cov nextest --lcov --output-path lcov.info \ - --locked --workspace --all-features \ + --release --locked --all-features --workspace --exclude examples --exclude ef-tests \ --partition hash:${{ matrix.partition }}/${{ strategy.job-total }} \ -E 'kind(test)' diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml index d0aa7243eed7..92998e7d99ac 100644 --- a/.github/workflows/unit.yml +++ b/.github/workflows/unit.yml @@ -40,7 +40,7 @@ jobs: - name: Run tests run: | cargo llvm-cov nextest --lcov --output-path lcov.info \ - --locked --workspace --all-features \ + --release --locked --all-features --workspace --exclude examples --exclude ef-tests \ --partition hash:${{ matrix.partition }}/${{ strategy.job-total }} \ -E 'kind(lib)' -E 'kind(bin)' -E 'kind(proc-macro)' From cb0947d7cbe57e4b8ea59aa040d2cda72bbfe997 Mon Sep 17 00:00:00 2001 From: Bjerg Date: Mon, 24 Jul 2023 18:11:41 +0200 Subject: [PATCH 224/722] ci: use large runners (#3888) --- .github/workflows/fuzz.yml | 3 ++- .github/workflows/hive.yml | 6 ++++-- .github/workflows/integration.yml | 11 +++++++---- .github/workflows/unit.yml | 11 +++++++---- 4 files changed, 20 insertions(+), 11 deletions(-) diff --git a/.github/workflows/fuzz.yml b/.github/workflows/fuzz.yml index 8f2760024bdd..ebe22fedc72d 100644 --- a/.github/workflows/fuzz.yml +++ b/.github/workflows/fuzz.yml @@ -18,7 +18,8 @@ jobs: # Skip the Fuzzing Jobs until we make them run fast and reliably. Currently they will # always recompile the codebase for each test and that takes way too long. 
if: false - runs-on: ubuntu-20.04 + runs-on: + group: Reth strategy: matrix: target: diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml index 20def9eed9c7..b564803e4e41 100644 --- a/.github/workflows/hive.yml +++ b/.github/workflows/hive.yml @@ -14,7 +14,8 @@ concurrency: name: hive jobs: prepare: - runs-on: ubuntu-20.04 + runs-on: + group: Reth steps: - name: Checkout sources uses: actions/checkout@v3 @@ -104,7 +105,8 @@ jobs: fail-fast: false needs: prepare name: run - runs-on: ubuntu-20.04 + runs-on: + group: Reth steps: - name: Download artifacts diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 094134617d4b..07fe9854892d 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -18,7 +18,8 @@ name: integration jobs: test: name: test (partition ${{ matrix.partition }}/${{ strategy.job-total }}) - runs-on: ubuntu-20.04 + runs-on: + group: Reth strategy: matrix: partition: [1, 2, 3] @@ -52,7 +53,7 @@ jobs: - name: Run tests run: | cargo llvm-cov nextest --lcov --output-path lcov.info \ - --release --locked --all-features --workspace --exclude examples --exclude ef-tests \ + --locked --all-features --workspace --exclude examples --exclude ef-tests \ --partition hash:${{ matrix.partition }}/${{ strategy.job-total }} \ -E 'kind(test)' @@ -67,7 +68,8 @@ jobs: name: sync / 100k blocks # Only run sync tests in merge groups if: github.event_name == 'merge_group' - runs-on: ubuntu-20.04 + runs-on: + group: Reth env: RUST_LOG: info,sync=error steps: @@ -95,7 +97,8 @@ jobs: integration-success: if: always() name: integration success - runs-on: ubuntu-20.04 + runs-on: + group: Reth needs: [test] steps: - name: Decide whether the needed jobs succeeded or failed diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml index 92998e7d99ac..6c6e55bf9c9a 100644 --- a/.github/workflows/unit.yml +++ b/.github/workflows/unit.yml @@ -17,7 +17,8 @@ name: unit jobs: test: name: test (partition ${{ matrix.partition }}/${{ strategy.job-total }}) - runs-on: ubuntu-20.04 + runs-on: + group: Reth strategy: matrix: partition: [1, 2, 3, 4, 5] @@ -40,7 +41,7 @@ jobs: - name: Run tests run: | cargo llvm-cov nextest --lcov --output-path lcov.info \ - --release --locked --all-features --workspace --exclude examples --exclude ef-tests \ + --locked --all-features --workspace --exclude examples --exclude ef-tests \ --partition hash:${{ matrix.partition }}/${{ strategy.job-total }} \ -E 'kind(lib)' -E 'kind(bin)' -E 'kind(proc-macro)' @@ -53,7 +54,8 @@ jobs: eth-blockchain: name: ethereum / state tests (stable) - runs-on: ubuntu-20.04 + runs-on: + group: Reth env: RUST_LOG: info,sync=error steps: @@ -85,7 +87,8 @@ jobs: doc-test: name: rustdoc - runs-on: ubuntu-20.04 + runs-on: + group: Reth steps: - uses: actions/checkout@v3 - name: Install toolchain From 3ff9be6d4fc9ac96a59f313d1e75a8c693b35b59 Mon Sep 17 00:00:00 2001 From: Resende <17102689+ZePedroResende@users.noreply.github.com> Date: Mon, 24 Jul 2023 16:44:07 +0100 Subject: [PATCH 225/722] feat(rpc): ots_hasCode implementation (#3886) Co-authored-by: Miguel Palhas Co-authored-by: Matthias Seitz --- crates/rpc/rpc-builder/tests/it/http.rs | 4 +--- crates/rpc/rpc/src/otterscan.rs | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index 997bf0ef9e14..09dbc52cb254 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -193,9 
+193,7 @@ where let nonce = 1; let block_hash = H256::default(); - assert!(is_unimplemented( - OtterscanClient::has_code(client, address, None).await.err().unwrap() - )); + OtterscanClient::has_code(client, address, None).await.unwrap(); OtterscanClient::get_api_level(client).await.unwrap(); diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index da895c6936f2..767b3657ad79 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -31,7 +31,7 @@ where { /// Handler for `ots_hasCode` async fn has_code(&self, address: Address, block_number: Option) -> RpcResult { - Err(internal_rpc_err("unimplemented")) + self.eth.get_code(address, block_number).await.map(|code| !code.is_empty()) } /// Handler for `ots_getApiLevel` From 88376e3bd12932fa3cbe6402e5eb4da5f18b53ee Mon Sep 17 00:00:00 2001 From: int88 <106391185+int88@users.noreply.github.com> Date: Mon, 24 Jul 2023 23:48:26 +0800 Subject: [PATCH 226/722] test: cover index account history stage with stage_test_suite_ext tests (#3383) --- .../src/stages/index_account_history.rs | 150 +++++++++++++++++- 1 file changed, 146 insertions(+), 4 deletions(-) diff --git a/crates/stages/src/stages/index_account_history.rs b/crates/stages/src/stages/index_account_history.rs index 2062b4eb02f1..fe0b6d3b404c 100644 --- a/crates/stages/src/stages/index_account_history.rs +++ b/crates/stages/src/stages/index_account_history.rs @@ -75,17 +75,26 @@ mod tests { use std::collections::BTreeMap; use super::*; - use crate::test_utils::TestTransaction; + use crate::test_utils::{ + stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, TestRunnerError, + TestTransaction, UnwindStageTestRunner, + }; + use itertools::Itertools; use reth_db::{ + cursor::DbCursorRO, models::{ - sharded_key::NUM_OF_INDICES_IN_SHARD, AccountBeforeTx, ShardedKey, + sharded_key, sharded_key::NUM_OF_INDICES_IN_SHARD, AccountBeforeTx, ShardedKey, StoredBlockBodyIndices, }, tables, - transaction::DbTxMut, + transaction::{DbTx, DbTxMut}, BlockNumberList, }; - use reth_primitives::{hex_literal::hex, H160, MAINNET}; + use reth_interfaces::test_utils::{ + generators, + generators::{random_block_range, random_contract_account_range, random_transition_range}, + }; + use reth_primitives::{hex_literal::hex, Address, BlockNumber, H160, H256, MAINNET}; const ADDRESS: H160 = H160(hex!("0000000000000000000000000000000000000001")); @@ -357,4 +366,137 @@ mod tests { ]) ); } + + stage_test_suite_ext!(IndexAccountHistoryTestRunner, index_account_history); + + struct IndexAccountHistoryTestRunner { + pub(crate) tx: TestTransaction, + commit_threshold: u64, + } + + impl Default for IndexAccountHistoryTestRunner { + fn default() -> Self { + Self { tx: TestTransaction::default(), commit_threshold: 1000 } + } + } + + impl StageTestRunner for IndexAccountHistoryTestRunner { + type S = IndexAccountHistoryStage; + + fn tx(&self) -> &TestTransaction { + &self.tx + } + + fn stage(&self) -> Self::S { + Self::S { commit_threshold: self.commit_threshold } + } + } + + impl ExecuteStageTestRunner for IndexAccountHistoryTestRunner { + type Seed = (); + + fn seed_execution(&mut self, input: ExecInput) -> Result { + let stage_process = input.checkpoint().block_number; + let start = stage_process + 1; + let end = input.target(); + let mut rng = generators::rng(); + + let num_of_accounts = 31; + let accounts = random_contract_account_range(&mut rng, &mut (0..num_of_accounts)) + .into_iter() + .collect::>(); + + let blocks = random_block_range(&mut rng, start..=end, 
H256::zero(), 0..3); + + let (transitions, _) = random_transition_range( + &mut rng, + blocks.iter(), + accounts.into_iter().map(|(addr, acc)| (addr, (acc, Vec::new()))), + 0..3, + 0..256, + ); + + // add block changeset from block 1. + self.tx.insert_transitions(transitions, Some(start))?; + + Ok(()) + } + + fn validate_execution( + &self, + input: ExecInput, + output: Option, + ) -> Result<(), TestRunnerError> { + if let Some(output) = output { + let start_block = input.next_block(); + let end_block = output.checkpoint.block_number; + if start_block > end_block { + return Ok(()) + } + + assert_eq!( + output, + ExecOutput { checkpoint: StageCheckpoint::new(input.target()), done: true } + ); + + let provider = self.tx.inner(); + let mut changeset_cursor = + provider.tx_ref().cursor_read::()?; + + let account_transitions = + changeset_cursor.walk_range(start_block..=end_block)?.try_fold( + BTreeMap::new(), + |mut accounts: BTreeMap>, + entry| + -> Result<_, TestRunnerError> { + let (index, account) = entry?; + accounts.entry(account.address).or_default().push(index); + Ok(accounts) + }, + )?; + + let mut result = BTreeMap::new(); + for (address, indices) in account_transitions { + // chunk indices and insert them in shards of N size. + let mut chunks = indices + .iter() + .chunks(sharded_key::NUM_OF_INDICES_IN_SHARD) + .into_iter() + .map(|chunks| chunks.map(|i| *i as usize).collect::>()) + .collect::>(); + let last_chunk = chunks.pop(); + + chunks.into_iter().for_each(|list| { + result.insert( + ShardedKey::new( + address, + *list.last().expect("Chuck does not return empty list") + as BlockNumber, + ) as ShardedKey, + list, + ); + }); + + if let Some(last_list) = last_chunk { + result.insert( + ShardedKey::new(address, u64::MAX) as ShardedKey, + last_list, + ); + }; + } + + let table = cast(self.tx.table::().unwrap()); + assert_eq!(table, result); + } + Ok(()) + } + } + + impl UnwindStageTestRunner for IndexAccountHistoryTestRunner { + fn validate_unwind(&self, _input: UnwindInput) -> Result<(), TestRunnerError> { + let table = self.tx.table::().unwrap(); + assert!(table.is_empty()); + Ok(()) + } + } } From 6e283cdbfd8ad41409ec5c503a75ecc2717d9df4 Mon Sep 17 00:00:00 2001 From: Bjerg Date: Mon, 24 Jul 2023 18:37:31 +0200 Subject: [PATCH 227/722] ci: reduce partitions (#3890) --- .github/workflows/integration.yml | 2 +- .github/workflows/unit.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 07fe9854892d..20886b9e4ba2 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -22,7 +22,7 @@ jobs: group: Reth strategy: matrix: - partition: [1, 2, 3] + partition: [1, 2] steps: - name: Checkout sources uses: actions/checkout@v3 diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml index 6c6e55bf9c9a..e46f5a112f2a 100644 --- a/.github/workflows/unit.yml +++ b/.github/workflows/unit.yml @@ -21,7 +21,7 @@ jobs: group: Reth strategy: matrix: - partition: [1, 2, 3, 4, 5] + partition: [1, 2] steps: - name: Checkout sources uses: actions/checkout@v3 From 2581c24a6f1bf96062c72a8ebb2815fd445d2343 Mon Sep 17 00:00:00 2001 From: Bjerg Date: Mon, 24 Jul 2023 18:37:40 +0200 Subject: [PATCH 228/722] ci: run bench on large runner (#3889) --- .github/workflows/bench.yml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 8dec739e7023..cd5b3f54d6ae 100644 --- 
a/.github/workflows/bench.yml
+++ b/.github/workflows/bench.yml
@@ -15,7 +15,8 @@ concurrency:
 name: bench
 jobs:
   iai:
-    runs-on: ubuntu-20.04
+    runs-on:
+      group: Reth
     # Only run benchmarks in merge groups
     if: github.event_name != 'pull_request'
     steps:
@@ -62,15 +63,15 @@ jobs:
   # Checks that benchmarks not run in CI compile
   bench-check:
     name: check
-    runs-on: ubuntu-20.04
+    runs-on:
+      group: Reth
     steps:
       - uses: actions/checkout@v3
       - name: Install toolchain
         uses: dtolnay/rust-toolchain@stable
       - uses: Swatinem/rust-cache@v2
       - name: Check if benchmarks build
-        run: cargo bench --all --all-features --all-targets --no-run
-
+        run: cargo check --workspace --benches --all-features
 
   bench-success:
     if: always()

From 0ff75b50117166dabad50c75ebe0e9c72794628b Mon Sep 17 00:00:00 2001
From: Dan Cline <6798349+Rjected@users.noreply.github.com>
Date: Mon, 24 Jul 2023 18:28:42 +0200
Subject: [PATCH 229/722] fix: do not perform future timestamp checks post-merge (#3884)

Co-authored-by: Matthias Seitz
---
 .../consensus/beacon/src/beacon_consensus.rs | 25 ++++++++++++++++---
 crates/consensus/common/src/validation.rs    | 15 +----------
 crates/primitives/src/constants.rs           |  9 +++++++
 3 files changed, 32 insertions(+), 17 deletions(-)

diff --git a/crates/consensus/beacon/src/beacon_consensus.rs b/crates/consensus/beacon/src/beacon_consensus.rs
index 449d46845c76..897bc25f4a1f 100644
--- a/crates/consensus/beacon/src/beacon_consensus.rs
+++ b/crates/consensus/beacon/src/beacon_consensus.rs
@@ -2,10 +2,10 @@
 use reth_consensus_common::validation;
 use reth_interfaces::consensus::{Consensus, ConsensusError};
 use reth_primitives::{
-    constants::MAXIMUM_EXTRA_DATA_SIZE, Chain, ChainSpec, Hardfork, Header, SealedBlock,
-    SealedHeader, EMPTY_OMMER_ROOT, U256,
+    constants::{ALLOWED_FUTURE_BLOCK_TIME_SECONDS, MAXIMUM_EXTRA_DATA_SIZE},
+    Chain, ChainSpec, Hardfork, Header, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT, U256,
 };
-use std::sync::Arc;
+use std::{sync::Arc, time::SystemTime};
 
 /// Ethereum beacon consensus
 ///
@@ -59,6 +59,14 @@ impl Consensus for BeaconConsensus {
             return Err(ConsensusError::TheMergeOmmerRootIsNotEmpty)
         }
 
+        // Post-merge, the consensus layer is expected to perform checks such that the block
+        // timestamp is a function of the slot. This is different from pre-merge, where blocks
+        // are only allowed to be in the future (compared to the system's clock) by a certain
+        // threshold.
+        //
+        // Block validation with respect to the parent should ensure that the block timestamp
+        // is greater than its parent timestamp.
+
         // validate header extradata for all networks post merge
         validate_header_extradata(header)?;
 
@@ -69,6 +77,17 @@ impl Consensus for BeaconConsensus {
         // * difficulty, mix_hash & nonce aka PoW stuff
         // low priority as syncing is done in reverse order
 
+        // Check if the timestamp is in the future. Clocks can drift, but this can be a consensus issue.
+        let present_timestamp =
+            SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs();
+
+        if header.timestamp > present_timestamp + ALLOWED_FUTURE_BLOCK_TIME_SECONDS {
+            return Err(ConsensusError::TimestampIsInFuture {
+                timestamp: header.timestamp,
+                present_timestamp,
+            })
+        }
+
         // Goerli exception:
         //  * If the network is goerli pre-merge, ignore the extradata check, since we do not
         //    support clique.
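
The relocated check above applies only to pre-merge headers; after the merge the consensus layer ties timestamps to slots, so no wall-clock comparison is needed there. Restated in isolation, the rule is a bounded comparison against the local clock. A minimal sketch, using a plain `String` error in place of reth's `ConsensusError`:

```rust
use std::time::{SystemTime, UNIX_EPOCH};

/// Pre-merge headers may run ahead of the local clock by at most this many seconds.
const ALLOWED_FUTURE_BLOCK_TIME_SECONDS: u64 = 15;

/// Rejects a pre-merge header whose timestamp is too far in the future.
fn check_future_timestamp(header_timestamp: u64) -> Result<(), String> {
    let present_timestamp =
        SystemTime::now().duration_since(UNIX_EPOCH).expect("clock before epoch").as_secs();
    // Clocks drift, so a small window ahead of `present_timestamp` is tolerated.
    if header_timestamp > present_timestamp + ALLOWED_FUTURE_BLOCK_TIME_SECONDS {
        return Err(format!(
            "timestamp {header_timestamp} is in the future (present: {present_timestamp})"
        ));
    }
    Ok(())
}

fn main() {
    // A genesis-era timestamp is trivially not in the future.
    assert!(check_future_timestamp(0).is_ok());
}
```
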
diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs
index f61e160f0390..485d6146737b 100644
--- a/crates/consensus/common/src/validation.rs
+++ b/crates/consensus/common/src/validation.rs
@@ -5,10 +5,7 @@ use reth_primitives::{
     SealedHeader, Transaction, TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxLegacy,
 };
 use reth_provider::{AccountReader, HeaderProvider, WithdrawalsProvider};
-use std::{
-    collections::{hash_map::Entry, HashMap},
-    time::SystemTime,
-};
+use std::collections::{hash_map::Entry, HashMap};
 
 /// Validate header standalone
 pub fn validate_header_standalone(
@@ -23,16 +20,6 @@ pub fn validate_header_standalone(
         })
     }
 
-    // Check if timestamp is in future. Clock can drift but this can be consensus issue.
-    let present_timestamp =
-        SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs();
-    if header.timestamp > present_timestamp {
-        return Err(ConsensusError::TimestampIsInFuture {
-            timestamp: header.timestamp,
-            present_timestamp,
-        })
-    }
-
     // Check if base fee is set.
     if chain_spec.fork(Hardfork::London).active_at_block(header.number) &&
         header.base_fee_per_gas.is_none()
diff --git a/crates/primitives/src/constants.rs b/crates/primitives/src/constants.rs
index 8530a2bc67c8..048a8801c4b3 100644
--- a/crates/primitives/src/constants.rs
+++ b/crates/primitives/src/constants.rs
@@ -108,6 +108,15 @@ pub const EMPTY_WITHDRAWALS: H256 = EMPTY_SET_HASH;
 /// the database.
 pub const BEACON_CONSENSUS_REORG_UNWIND_DEPTH: u64 = 3;
 
+/// Max seconds from current time allowed for blocks, before they're considered future blocks.
+///
+/// This is only used when checking whether or not the timestamp for pre-merge blocks is in the
+/// future.
+///
+/// See:
+/// 
+pub const ALLOWED_FUTURE_BLOCK_TIME_SECONDS: u64 = 15;
+
 #[cfg(test)]
 mod tests {
     use super::*;

From 1ca7f3ae40b1e8b6a91f5f4aaa3eef9c901fc539 Mon Sep 17 00:00:00 2001
From: Alexey Shekhirin
Date: Mon, 24 Jul 2023 17:39:02 +0100
Subject: [PATCH 230/722] feat(pruner, storage): prune receipts & save checkpoints to database (#3733)

Co-authored-by: joshieDo
---
 Cargo.lock                                    |   6 +
 bin/reth/src/debug_cmd/merkle.rs              |   4 +-
 bin/reth/src/node/mod.rs                      |  10 +-
 bin/reth/src/stage/dump/merkle.rs             |   4 +-
 crates/config/src/config.rs                   |   6 +-
 crates/consensus/beacon/src/engine/mod.rs     |  20 ++-
 crates/consensus/beacon/src/engine/prune.rs   |  17 +-
 crates/primitives/src/lib.rs                  |   2 +-
 crates/primitives/src/prune/checkpoint.rs     |   4 +-
 crates/primitives/src/prune/mod.rs            |   2 +-
 crates/primitives/src/prune/target.rs         |  67 +++++---
 crates/prune/Cargo.toml                       |  12 ++
 crates/prune/src/error.rs                     |  13 +-
 crates/prune/src/lib.rs                       |   2 +-
 crates/prune/src/pruner.rs                    | 162 ++++++++++++++++--
 crates/stages/Cargo.toml                      |   2 +-
 crates/stages/src/stages/execution.rs         |  16 +-
 crates/stages/src/test_utils/test_db.rs       |  17 +-
 crates/storage/db/src/tables/mod.rs           |   8 +-
 crates/storage/provider/src/lib.rs            |   8 +-
 crates/storage/provider/src/post_state/mod.rs |   6 +-
 .../provider/src/providers/database/mod.rs    |  15 +-
 .../src/providers/database/provider.rs        |  72 +++++++-
 crates/storage/provider/src/providers/mod.rs  |  20 ++-
 .../storage/provider/src/test_utils/noop.rs   |  17 +-
 crates/storage/provider/src/traits/mod.rs     |   3 +
 .../provider/src/traits/prune_checkpoint.rs   |  16 ++
 27 files changed, 433 insertions(+), 98 deletions(-)
 create mode 100644 crates/storage/provider/src/traits/prune_checkpoint.rs

diff --git a/Cargo.lock b/Cargo.lock
index b5cfbf016df9..1ad3c97b7f63 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5586,7 +5586,13 @@ dependencies = [
 name = "reth-prune"
 version = "0.1.0-alpha.4"
 dependencies = [
+ "assert_matches",
+ "itertools",
+ "reth-db",
+ "reth-interfaces",
  "reth-primitives",
+ "reth-provider",
+ "reth-stages",
  "thiserror",
  "tracing",
 ]
diff --git a/bin/reth/src/debug_cmd/merkle.rs b/bin/reth/src/debug_cmd/merkle.rs
index e672ec3e027e..894d0d3fb209 100644
--- a/bin/reth/src/debug_cmd/merkle.rs
+++ b/bin/reth/src/debug_cmd/merkle.rs
@@ -8,7 +8,7 @@ use reth_db::{cursor::DbCursorRO, init_db, tables, transaction::DbTx};
 use reth_primitives::{
     fs,
     stage::{StageCheckpoint, StageId},
-    ChainSpec, PruneTargets,
+    ChainSpec, PruneModes,
 };
 use reth_provider::{ProviderFactory, StageCheckpointReader};
 use reth_stages::{
@@ -96,7 +96,7 @@ impl Command {
         let mut execution_stage = ExecutionStage::new(
             factory,
             ExecutionStageThresholds { max_blocks: Some(1), max_changes: None },
-            PruneTargets::all(),
+            PruneModes::all(),
         );
 
         let mut account_hashing_stage = AccountHashingStage::default();
diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs
index 1c5cd3a7fce5..6a1da252f0b5 100644
--- a/bin/reth/src/node/mod.rs
+++ b/bin/reth/src/node/mod.rs
@@ -78,6 +78,7 @@ use reth_interfaces::p2p::headers::client::HeadersClient;
 use reth_payload_builder::PayloadBuilderService;
 use reth_primitives::DisplayHardforks;
 use reth_provider::providers::BlockchainProvider;
+use reth_prune::BatchSizes;
 use reth_stages::stages::{
     AccountHashingStage, IndexAccountHistoryStage, IndexStorageHistoryStage, MerkleStage,
     StorageHashingStage, TransactionLookupStage,
@@ -364,7 +365,14 @@ impl Command {
 
         let pruner = config.prune.map(|prune_config| {
             info!(target: "reth::cli", "Pruner initialized");
-            reth_prune::Pruner::new(prune_config.block_interval, tree_config.max_reorg_depth())
+            reth_prune::Pruner::new(
+                db.clone(),
+                self.chain.clone(),
+                prune_config.block_interval,
+                tree_config.max_reorg_depth(),
+                prune_config.parts,
+                BatchSizes::default(),
+            )
         });
 
         // Configure the consensus engine
diff --git a/bin/reth/src/stage/dump/merkle.rs b/bin/reth/src/stage/dump/merkle.rs
index dd5c8f75809f..69b39234bd10 100644
--- a/bin/reth/src/stage/dump/merkle.rs
+++ b/bin/reth/src/stage/dump/merkle.rs
@@ -2,7 +2,7 @@ use super::setup;
 use crate::utils::DbTool;
 use eyre::Result;
 use reth_db::{database::Database, table::TableImporter, tables, DatabaseEnv};
-use reth_primitives::{stage::StageCheckpoint, BlockNumber, ChainSpec, PruneTargets};
+use reth_primitives::{stage::StageCheckpoint, BlockNumber, ChainSpec, PruneModes};
 use reth_provider::ProviderFactory;
 use reth_stages::{
     stages::{
@@ -70,7 +70,7 @@ async fn unwind_and_copy(
     let mut exec_stage = ExecutionStage::new(
         reth_revm::Factory::new(db_tool.chain.clone()),
         ExecutionStageThresholds { max_blocks: Some(u64::MAX), max_changes: None },
-        PruneTargets::all(),
+        PruneModes::all(),
     );
 
     exec_stage
diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs
index d46e665430c6..3fb414e8ee4e 100644
--- a/crates/config/src/config.rs
+++ b/crates/config/src/config.rs
@@ -5,7 +5,7 @@ use reth_downloaders::{
     headers::reverse_headers::ReverseHeadersDownloaderBuilder,
 };
 use reth_network::{NetworkConfigBuilder, PeersConfig, SessionsConfig};
-use reth_primitives::PruneTargets;
+use reth_primitives::PruneModes;
 use secp256k1::SecretKey;
 use serde::{Deserialize, Serialize};
 use std::path::PathBuf;
@@ -285,12 +285,12 @@ pub struct PruneConfig {
     /// Minimum pruning interval measured in blocks.
     pub block_interval: u64,
     /// Pruning configuration for every part of the data that can be pruned.
- pub parts: PruneTargets, + pub parts: PruneModes, } impl Default for PruneConfig { fn default() -> Self { - Self { block_interval: 10, parts: PruneTargets::default() } + Self { block_interval: 10, parts: PruneModes::default() } } } diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index aa894d180f84..131b58545725 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -193,7 +193,7 @@ where /// be used to download and execute the missing blocks. pipeline_run_threshold: u64, /// Controls pruning triggered by engine updates. - prune: Option, + prune: Option>, } impl BeaconConsensusEngine @@ -220,7 +220,7 @@ where payload_builder: PayloadBuilderHandle, target: Option, pipeline_run_threshold: u64, - pruner: Option, + pruner: Option>, ) -> Result<(Self, BeaconConsensusEngineHandle), Error> { let (to_engine, rx) = mpsc::unbounded_channel(); Self::with_channel( @@ -266,7 +266,7 @@ where pipeline_run_threshold: u64, to_engine: UnboundedSender, rx: UnboundedReceiver, - pruner: Option, + pruner: Option>, ) -> Result<(Self, BeaconConsensusEngineHandle), Error> { let handle = BeaconConsensusEngineHandle { to_engine }; let sync = EngineSyncController::new( @@ -1727,11 +1727,14 @@ mod tests { test_utils::{NoopFullBlockClient, TestConsensus}, }; use reth_payload_builder::test_utils::spawn_test_payload_service; - use reth_primitives::{stage::StageCheckpoint, ChainSpec, ChainSpecBuilder, H256, MAINNET}; + use reth_primitives::{ + stage::StageCheckpoint, ChainSpec, ChainSpecBuilder, PruneModes, H256, MAINNET, + }; use reth_provider::{ providers::BlockchainProvider, test_utils::TestExecutorFactory, BlockExecutor, BlockWriter, ExecutorFactory, ProviderFactory, StateProvider, }; + use reth_prune::BatchSizes; use reth_revm::Factory; use reth_rpc_types::engine::{ ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, @@ -2071,7 +2074,14 @@ mod tests { let latest = self.chain_spec.genesis_header().seal_slow(); let blockchain_provider = BlockchainProvider::with_latest(shareable_db, tree, latest); - let pruner = Pruner::new(5, 0); + let pruner = Pruner::new( + db.clone(), + self.chain_spec.clone(), + 5, + 0, + PruneModes::default(), + BatchSizes::default(), + ); let (mut engine, handle) = BeaconConsensusEngine::new( client, diff --git a/crates/consensus/beacon/src/engine/prune.rs b/crates/consensus/beacon/src/engine/prune.rs index 855456f3543b..257170a376fe 100644 --- a/crates/consensus/beacon/src/engine/prune.rs +++ b/crates/consensus/beacon/src/engine/prune.rs @@ -1,6 +1,7 @@ //! Prune management for the engine implementation. use futures::FutureExt; +use reth_db::database::Database; use reth_primitives::BlockNumber; use reth_prune::{Pruner, PrunerError, PrunerWithResult}; use reth_tasks::TaskSpawner; @@ -10,16 +11,16 @@ use tokio::sync::oneshot; /// Manages pruning under the control of the engine. /// /// This type controls the [Pruner]. -pub(crate) struct EnginePruneController { +pub(crate) struct EnginePruneController { /// The current state of the pruner. - pruner_state: PrunerState, + pruner_state: PrunerState, /// The type that can spawn the pruner task. 
pruner_task_spawner: Box, } -impl EnginePruneController { +impl EnginePruneController { /// Create a new instance - pub(crate) fn new(pruner: Pruner, pruner_task_spawner: Box) -> Self { + pub(crate) fn new(pruner: Pruner, pruner_task_spawner: Box) -> Self { Self { pruner_state: PrunerState::Idle(Some(pruner)), pruner_task_spawner } } @@ -131,14 +132,14 @@ pub(crate) enum EnginePruneEvent { /// running, it acquires the write lock over the database. This means that we cannot forward to the /// blockchain tree any messages that would result in database writes, since it would result in a /// deadlock. -enum PrunerState { +enum PrunerState { /// Pruner is idle. - Idle(Option), + Idle(Option>), /// Pruner is running and waiting for a response - Running(oneshot::Receiver), + Running(oneshot::Receiver>), } -impl PrunerState { +impl PrunerState { /// Returns `true` if the state matches idle. fn is_idle(&self) -> bool { matches!(self, PrunerState::Idle(_)) diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 0248f008a24c..153e2972cd4b 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -78,7 +78,7 @@ pub use net::{ SEPOLIA_BOOTNODES, }; pub use peer::{PeerId, WithPeerId}; -pub use prune::{PruneCheckpoint, PruneMode, PrunePart, PruneTargets}; +pub use prune::{PruneCheckpoint, PruneMode, PruneModes, PrunePart}; pub use receipt::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef}; pub use revm_primitives::JumpMap; pub use serde_helper::JsonU256; diff --git a/crates/primitives/src/prune/checkpoint.rs b/crates/primitives/src/prune/checkpoint.rs index a0c445fdfb50..52e1cabd76cb 100644 --- a/crates/primitives/src/prune/checkpoint.rs +++ b/crates/primitives/src/prune/checkpoint.rs @@ -7,7 +7,7 @@ use reth_codecs::{main_codec, Compact}; #[cfg_attr(test, derive(Default))] pub struct PruneCheckpoint { /// Highest pruned block number. - block_number: BlockNumber, + pub block_number: BlockNumber, /// Prune mode. - prune_mode: PruneMode, + pub prune_mode: PruneMode, } diff --git a/crates/primitives/src/prune/mod.rs b/crates/primitives/src/prune/mod.rs index 510bc40b6e5d..4dfc591bfcc0 100644 --- a/crates/primitives/src/prune/mod.rs +++ b/crates/primitives/src/prune/mod.rs @@ -6,4 +6,4 @@ mod target; pub use checkpoint::PruneCheckpoint; pub use mode::PruneMode; pub use part::PrunePart; -pub use target::PruneTargets; +pub use target::PruneModes; diff --git a/crates/primitives/src/prune/target.rs b/crates/primitives/src/prune/target.rs index 987d0883a30a..8e16235ff8ae 100644 --- a/crates/primitives/src/prune/target.rs +++ b/crates/primitives/src/prune/target.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; /// Pruning configuration for every part of the data that can be pruned. #[derive(Debug, Clone, Default, Copy, Deserialize, Eq, PartialEq, Serialize)] #[serde(default)] -pub struct PruneTargets { +pub struct PruneModes { /// Sender Recovery pruning configuration. #[serde(skip_serializing_if = "Option::is_none")] pub sender_recovery: Option, @@ -26,25 +26,42 @@ pub struct PruneTargets { pub storage_history: Option, } -macro_rules! should_prune_method { - ($($config:ident),+) => { +macro_rules! impl_prune_parts { + ($(($part:ident, $human_part:expr)),+) => { $( paste! 
{ - #[allow(missing_docs)] - pub fn [](&self, block: BlockNumber, tip: BlockNumber) -> bool { - if let Some(config) = &self.$config { - return self.should_prune(config, block, tip) + #[doc = concat!( + "Check if ", + $human_part, + " should be pruned at the target block according to the provided tip." + )] + pub fn [](&self, block: BlockNumber, tip: BlockNumber) -> bool { + if let Some(mode) = &self.$part { + return self.should_prune(mode, block, tip) } false } } )+ + $( + paste! { + #[doc = concat!( + "Returns block up to which ", + $human_part, + " pruning needs to be done, inclusive, according to the provided tip." + )] + pub fn [](&self, tip: BlockNumber) -> Option<(BlockNumber, PruneMode)> { + self.$part.as_ref().map(|mode| (self.prune_to_block(mode, tip), *mode)) + } + } + )+ + /// Sets pruning to all targets. pub fn all() -> Self { - PruneTargets { + Self { $( - $config: Some(PruneMode::Full), + $part: Some(PruneMode::Full), )+ } } @@ -52,15 +69,15 @@ macro_rules! should_prune_method { }; } -impl PruneTargets { +impl PruneModes { /// Sets pruning to no target. pub fn none() -> Self { - PruneTargets::default() + PruneModes::default() } - /// Check if target block should be pruned - pub fn should_prune(&self, target: &PruneMode, block: BlockNumber, tip: BlockNumber) -> bool { - match target { + /// Check if target block should be pruned according to the provided prune mode and tip. + pub fn should_prune(&self, mode: &PruneMode, block: BlockNumber, tip: BlockNumber) -> bool { + match mode { PruneMode::Full => true, PruneMode::Distance(distance) => { if *distance > tip { @@ -72,11 +89,21 @@ impl PruneTargets { } } - should_prune_method!( - sender_recovery, - transaction_lookup, - receipts, - account_history, - storage_history + /// Returns block up to which pruning needs to be done, inclusive, according to the provided + /// prune mode and tip. 
+ pub fn prune_to_block(&self, mode: &PruneMode, tip: BlockNumber) -> BlockNumber { + match mode { + PruneMode::Full => tip, + PruneMode::Distance(distance) => tip.saturating_sub(*distance), + PruneMode::Before(n) => *n, + } + } + + impl_prune_parts!( + (sender_recovery, "Sender Recovery"), + (transaction_lookup, "Transaction Lookup"), + (receipts, "Receipts"), + (account_history, "Account History"), + (storage_history, "Storage History") ); } diff --git a/crates/prune/Cargo.toml b/crates/prune/Cargo.toml index 56b0c49a017a..432a0ecfdd9b 100644 --- a/crates/prune/Cargo.toml +++ b/crates/prune/Cargo.toml @@ -13,8 +13,20 @@ Pruning implementation [dependencies] # reth reth-primitives = { workspace = true } +reth-db = { workspace = true } +reth-provider = { workspace = true } +reth-interfaces = { workspace = true } # misc tracing = { workspace = true } thiserror = { workspace = true } +itertools = "0.10" +[dev-dependencies] +# reth +reth-db = { workspace = true, features = ["test-utils"] } +reth-stages = { path = "../stages", features = ["test-utils"] } + +# misc + +assert_matches = "1.5.0" diff --git a/crates/prune/src/error.rs b/crates/prune/src/error.rs index 96f0a25b7d72..a38e3d6e5d3a 100644 --- a/crates/prune/src/error.rs +++ b/crates/prune/src/error.rs @@ -1,4 +1,15 @@ +use reth_db::DatabaseError; +use reth_provider::ProviderError; use thiserror::Error; #[derive(Error, Debug)] -pub enum PrunerError {} +pub enum PrunerError { + #[error("An interface error occurred.")] + Interface(#[from] reth_interfaces::Error), + + #[error(transparent)] + Database(#[from] DatabaseError), + + #[error(transparent)] + Provider(#[from] ProviderError), +} diff --git a/crates/prune/src/lib.rs b/crates/prune/src/lib.rs index 6d133030fcd7..c7cb720cb696 100644 --- a/crates/prune/src/lib.rs +++ b/crates/prune/src/lib.rs @@ -2,4 +2,4 @@ mod error; mod pruner; pub use error::PrunerError; -pub use pruner::{Pruner, PrunerResult, PrunerWithResult}; +pub use pruner::{BatchSizes, Pruner, PrunerResult, PrunerWithResult}; diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs index 9f3a60b86cc3..4cdde139d7ef 100644 --- a/crates/prune/src/pruner.rs +++ b/crates/prune/src/pruner.rs @@ -1,17 +1,31 @@ //! Support for pruning. use crate::PrunerError; -use reth_primitives::BlockNumber; -use tracing::debug; +use reth_db::{database::Database, tables}; +use reth_primitives::{BlockNumber, ChainSpec, PruneCheckpoint, PruneMode, PruneModes, PrunePart}; +use reth_provider::{BlockReader, DatabaseProviderRW, ProviderFactory, PruneCheckpointWriter}; +use std::sync::Arc; +use tracing::{debug, instrument, trace}; /// Result of [Pruner::run] execution pub type PrunerResult = Result<(), PrunerError>; /// The pipeline type itself with the result of [Pruner::run] -pub type PrunerWithResult = (Pruner, PrunerResult); +pub type PrunerWithResult = (Pruner, PrunerResult); + +pub struct BatchSizes { + receipts: usize, +} + +impl Default for BatchSizes { + fn default() -> Self { + Self { receipts: 10000 } + } +} /// Pruning routine. Main pruning logic happens in [Pruner::run]. -pub struct Pruner { +pub struct Pruner { + provider_factory: ProviderFactory, /// Minimum pruning interval measured in blocks. All prune parts are checked and, if needed, /// pruned, when the chain advances by the specified number of blocks. min_block_interval: u64, @@ -22,17 +36,39 @@ pub struct Pruner { /// Last pruned block number. Used in conjunction with `min_block_interval` to determine /// when the pruning needs to be initiated. 
last_pruned_block_number: Option, + modes: PruneModes, + batch_sizes: BatchSizes, } -impl Pruner { +impl Pruner { /// Creates a new [Pruner]. - pub fn new(min_block_interval: u64, max_prune_depth: u64) -> Self { - Self { min_block_interval, max_prune_depth, last_pruned_block_number: None } + pub fn new( + db: DB, + chain_spec: Arc, + min_block_interval: u64, + max_prune_depth: u64, + modes: PruneModes, + batch_sizes: BatchSizes, + ) -> Self { + Self { + provider_factory: ProviderFactory::new(db, chain_spec), + min_block_interval, + max_prune_depth, + last_pruned_block_number: None, + modes, + batch_sizes, + } } /// Run the pruner pub fn run(&mut self, tip_block_number: BlockNumber) -> PrunerResult { - // Pruning logic + let provider = self.provider_factory.provider_rw()?; + + if let Some((to_block, prune_mode)) = self.modes.prune_to_block_receipts(tip_block_number) { + self.prune_receipts(&provider, to_block, prune_mode)?; + } + + provider.commit()?; self.last_pruned_block_number = Some(tip_block_number); Ok(()) @@ -58,15 +94,62 @@ impl Pruner { false } } + + /// Prune receipts up to the provided block, inclusive. + #[instrument(level = "trace", skip(self, provider), target = "pruner")] + fn prune_receipts( + &self, + provider: &DatabaseProviderRW<'_, DB>, + to_block: BlockNumber, + prune_mode: PruneMode, + ) -> PrunerResult { + let to_block_body = match provider.block_body_indices(to_block)? { + Some(body) => body, + None => { + trace!(target: "pruner", "No receipts to prune"); + return Ok(()) + } + }; + + provider.prune_table_in_batches::( + ..=to_block_body.last_tx_num(), + self.batch_sizes.receipts, + |receipts| { + trace!( + target: "pruner", + %receipts, + "Pruned receipts" + ); + }, + )?; + + provider.save_prune_checkpoint( + PrunePart::Receipts, + PruneCheckpoint { block_number: to_block, prune_mode }, + )?; + + Ok(()) + } } #[cfg(test)] mod tests { - use crate::Pruner; + use crate::{pruner::BatchSizes, Pruner}; + use assert_matches::assert_matches; + use reth_db::{tables, test_utils::create_test_rw_db}; + use reth_interfaces::test_utils::{ + generators, + generators::{random_block_range, random_receipt}, + }; + use reth_primitives::{PruneCheckpoint, PruneMode, PruneModes, PrunePart, H256, MAINNET}; + use reth_provider::PruneCheckpointReader; + use reth_stages::test_utils::TestTransaction; #[test] - fn pruner_is_pruning_needed() { - let pruner = Pruner::new(5, 0); + fn is_pruning_needed() { + let db = create_test_rw_db(); + let pruner = + Pruner::new(db, MAINNET.clone(), 5, 0, PruneModes::default(), BatchSizes::default()); // No last pruned block number was set before let first_block_number = 1; @@ -80,4 +163,61 @@ mod tests { let third_block_number = second_block_number; assert!(pruner.is_pruning_needed(third_block_number)); } + + #[test] + fn prune_receipts() { + let tx = TestTransaction::default(); + let mut rng = generators::rng(); + + let blocks = random_block_range(&mut rng, 0..=100, H256::zero(), 0..10); + tx.insert_blocks(blocks.iter(), None).expect("insert blocks"); + + let mut receipts = Vec::new(); + for block in &blocks { + for transaction in &block.body { + receipts + .push((receipts.len() as u64, random_receipt(&mut rng, transaction, Some(0)))); + } + } + tx.insert_receipts(receipts).expect("insert receipts"); + + assert_eq!( + tx.table::().unwrap().len(), + blocks.iter().map(|block| block.body.len()).sum::() + ); + assert_eq!( + tx.table::().unwrap().len(), + tx.table::().unwrap().len() + ); + + let prune_to_block = 10; + let prune_mode = 
PruneMode::Before(prune_to_block); + let pruner = Pruner::new( + tx.inner_raw(), + MAINNET.clone(), + 5, + 0, + PruneModes { receipts: Some(prune_mode), ..Default::default() }, + BatchSizes { + // Less than total amount of blocks to prune to test the batching logic + receipts: 10, + }, + ); + + let provider = tx.inner_rw(); + assert_matches!(pruner.prune_receipts(&provider, prune_to_block, prune_mode), Ok(())); + provider.commit().expect("commit"); + + assert_eq!( + tx.table::().unwrap().len(), + blocks[prune_to_block as usize + 1..] + .iter() + .map(|block| block.body.len()) + .sum::() + ); + assert_eq!( + tx.inner().get_prune_checkpoint(PrunePart::Receipts).unwrap(), + Some(PruneCheckpoint { block_number: prune_to_block, prune_mode }) + ); + } } diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index 3bfa592616da..f83dcab22c1b 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -70,7 +70,7 @@ criterion = { version = "0.5", features = ["async_futures"] } serde_json = { workspace = true } [features] -test-utils = [] +test-utils = ["reth-interfaces/test-utils"] [[bench]] name = "criterion" diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 70d07f8d736b..366e10e7c3c0 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -14,7 +14,7 @@ use reth_primitives::{ stage::{ CheckpointBlockRange, EntitiesCheckpoint, ExecutionCheckpoint, StageCheckpoint, StageId, }, - BlockNumber, Header, PruneTargets, U256, + BlockNumber, Header, PruneModes, U256, }; use reth_provider::{ post_state::PostState, BlockExecutor, BlockReader, DatabaseProviderRW, ExecutorFactory, @@ -60,7 +60,7 @@ pub struct ExecutionStage { /// The commit thresholds of the execution stage. thresholds: ExecutionStageThresholds, /// Pruning configuration. - prune_targets: PruneTargets, + prune_targets: PruneModes, } impl ExecutionStage { @@ -68,7 +68,7 @@ impl ExecutionStage { pub fn new( executor_factory: EF, thresholds: ExecutionStageThresholds, - prune_targets: PruneTargets, + prune_targets: PruneModes, ) -> Self { Self { metrics_tx: None, executor_factory, thresholds, prune_targets } } @@ -77,7 +77,7 @@ impl ExecutionStage { /// /// The commit threshold will be set to 10_000. pub fn new_with_factory(executor_factory: EF) -> Self { - Self::new(executor_factory, ExecutionStageThresholds::default(), PruneTargets::default()) + Self::new(executor_factory, ExecutionStageThresholds::default(), PruneModes::default()) } /// Set the metric events sender. 
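
Both the new `Pruner` and the `ExecutionStage` above consume the same `PruneModes` targets, and the highest-block-to-prune arithmetic they rely on is the `prune_to_block` mapping shown earlier in `target.rs`. A self-contained restatement with a local `PruneMode` copy (not reth's type):

```rust
/// Local stand-in for the three prune modes.
#[derive(Clone, Copy, Debug, PartialEq)]
enum PruneMode {
    /// Prune everything up to the tip.
    Full,
    /// Keep only the last `n` blocks behind the tip.
    Distance(u64),
    /// Prune everything before a fixed block number.
    Before(u64),
}

/// Highest block to prune, inclusive, for a given chain tip.
fn prune_to_block(mode: PruneMode, tip: u64) -> u64 {
    match mode {
        PruneMode::Full => tip,
        // Saturating subtraction: a distance larger than the tip prunes nothing.
        PruneMode::Distance(distance) => tip.saturating_sub(distance),
        PruneMode::Before(n) => n,
    }
}

fn main() {
    assert_eq!(prune_to_block(PruneMode::Distance(64), 1000), 936);
    assert_eq!(prune_to_block(PruneMode::Distance(2000), 1000), 0);
    assert_eq!(prune_to_block(PruneMode::Before(500), 1000), 500);
}
```
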
@@ -425,7 +425,7 @@ mod tests { use reth_db::{models::AccountBeforeTx, test_utils::create_test_rw_db}; use reth_primitives::{ hex_literal::hex, keccak256, stage::StageUnitCheckpoint, Account, Bytecode, - ChainSpecBuilder, PruneMode, PruneTargets, SealedBlock, StorageEntry, H160, H256, MAINNET, + ChainSpecBuilder, PruneMode, PruneModes, SealedBlock, StorageEntry, H160, H256, MAINNET, U256, }; use reth_provider::{AccountReader, BlockWriter, ProviderFactory, ReceiptProvider}; @@ -439,7 +439,7 @@ mod tests { ExecutionStage::new( factory, ExecutionStageThresholds { max_blocks: Some(100), max_changes: None }, - PruneTargets::none(), + PruneModes::none(), ) } @@ -943,7 +943,7 @@ mod tests { provider.commit().unwrap(); let check_pruning = |factory: Arc>, - prune_targets: PruneTargets, + prune_targets: PruneModes, expect_num_receipts: usize| async move { let provider = factory.provider_rw().unwrap(); @@ -960,7 +960,7 @@ mod tests { ); }; - let mut prune = PruneTargets::none(); + let mut prune = PruneModes::none(); check_pruning(factory.clone(), prune, 1).await; diff --git a/crates/stages/src/test_utils/test_db.rs b/crates/stages/src/test_utils/test_db.rs index ddab55040734..4efb8debffb5 100644 --- a/crates/stages/src/test_utils/test_db.rs +++ b/crates/stages/src/test_utils/test_db.rs @@ -10,8 +10,8 @@ use reth_db::{ DatabaseEnv, DatabaseError as DbError, }; use reth_primitives::{ - keccak256, Account, Address, BlockNumber, SealedBlock, SealedHeader, StorageEntry, H256, - MAINNET, U256, + keccak256, Account, Address, BlockNumber, Receipt, SealedBlock, SealedHeader, StorageEntry, + TxNumber, H256, MAINNET, U256, }; use reth_provider::{DatabaseProviderRO, DatabaseProviderRW, ProviderFactory}; use std::{ @@ -268,6 +268,19 @@ impl TestTransaction { }) } + /// Insert collection of ([TxNumber], [Receipt]) into the corresponding table. + pub fn insert_receipts(&self, receipts: I) -> Result<(), DbError> + where + I: IntoIterator, + { + self.commit(|tx| { + receipts.into_iter().try_for_each(|(tx_num, receipt)| { + // Insert into receipts table. + tx.put::(tx_num, receipt) + }) + }) + } + /// Insert collection of ([Address], [Account]) into corresponding tables. pub fn insert_accounts_and_storages(&self, accounts: I) -> Result<(), DbError> where diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index 924095ca71df..4c8c0c4909ed 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -51,7 +51,7 @@ pub enum TableType { } /// Number of tables that should be present inside database. -pub const NUM_TABLES: usize = 25; +pub const NUM_TABLES: usize = 26; /// The general purpose of this is to use with a combination of Tables enum, /// by implementing a `TableViewer` trait you can operate on db tables in an abstract way. @@ -183,7 +183,8 @@ tables!([ (StoragesTrie, TableType::DupSort), (TxSenders, TableType::Table), (SyncStage, TableType::Table), - (SyncStageProgress, TableType::Table) + (SyncStageProgress, TableType::Table), + (PruneCheckpoints, TableType::Table) ]); #[macro_export] @@ -417,7 +418,7 @@ table!( table!( /// Stores the highest pruned block number and prune mode of each prune part. 
- ( PruneParts ) PrunePart | PruneCheckpoint + ( PruneCheckpoints ) PrunePart | PruneCheckpoint ); /// Alias Types @@ -459,6 +460,7 @@ mod tests { (TableType::Table, TxSenders::const_name()), (TableType::Table, SyncStage::const_name()), (TableType::Table, SyncStageProgress::const_name()), + (TableType::Table, PruneCheckpoints::const_name()), ]; #[test] diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index 64c9932711e8..4e7048cd4621 100644 --- a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -26,10 +26,10 @@ pub use traits::{ BlockchainTreePendingStateProvider, CanonChainTracker, CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, ExecutorFactory, HashingWriter, - HeaderProvider, HistoryWriter, PostStateDataProvider, ReceiptProvider, ReceiptProviderIdExt, - StageCheckpointReader, StageCheckpointWriter, StateProvider, StateProviderBox, - StateProviderFactory, StateRootProvider, StorageReader, TransactionsProvider, - WithdrawalsProvider, + HeaderProvider, HistoryWriter, PostStateDataProvider, PruneCheckpointReader, + PruneCheckpointWriter, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, + StageCheckpointWriter, StateProvider, StateProviderBox, StateProviderFactory, + StateRootProvider, StorageReader, TransactionsProvider, WithdrawalsProvider, }; /// Provider trait implementations. diff --git a/crates/storage/provider/src/post_state/mod.rs b/crates/storage/provider/src/post_state/mod.rs index 9e8aa189ef1a..846012bffad5 100644 --- a/crates/storage/provider/src/post_state/mod.rs +++ b/crates/storage/provider/src/post_state/mod.rs @@ -8,7 +8,7 @@ use reth_db::{ }; use reth_primitives::{ bloom::logs_bloom, keccak256, proofs::calculate_receipt_root_ref, Account, Address, - BlockNumber, Bloom, Bytecode, Log, PruneMode, PruneTargets, Receipt, StorageEntry, H256, U256, + BlockNumber, Bloom, Bytecode, Log, PruneMode, PruneModes, Receipt, StorageEntry, H256, U256, }; use reth_trie::{ hashed_cursor::{HashedPostState, HashedPostStateCursorFactory, HashedStorage}, @@ -79,7 +79,7 @@ pub struct PostState { /// The receipt(s) of the executed transaction(s). receipts: BTreeMap>, /// Pruning configuration. - prune_targets: PruneTargets, + prune_targets: PruneModes, } impl PostState { @@ -94,7 +94,7 @@ impl PostState { } /// Add a pruning configuration. 
- pub fn add_prune_targets(&mut self, prune_targets: PruneTargets) { + pub fn add_prune_targets(&mut self, prune_targets: PruneModes) { self.prune_targets = prune_targets; } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index fde0ff2865c1..5fbb0fceeaed 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -2,16 +2,17 @@ use crate::{ providers::state::{historical::HistoricalStateProvider, latest::LatestStateProvider}, traits::{BlockSource, ReceiptProvider}, BlockHashReader, BlockNumReader, BlockReader, ChainSpecProvider, EvmEnvProvider, - HeaderProvider, ProviderError, StageCheckpointReader, StateProviderBox, TransactionsProvider, - WithdrawalsProvider, + HeaderProvider, ProviderError, PruneCheckpointReader, StageCheckpointReader, StateProviderBox, + TransactionsProvider, WithdrawalsProvider, }; use reth_db::{database::Database, init_db, models::StoredBlockBodyIndices, DatabaseEnv}; use reth_interfaces::Result; use reth_primitives::{ stage::{StageCheckpoint, StageId}, Address, Block, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, ChainInfo, - ChainSpec, Header, Receipt, SealedBlock, SealedHeader, TransactionMeta, TransactionSigned, - TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, H256, U256, + ChainSpec, Header, PruneCheckpoint, PrunePart, Receipt, SealedBlock, SealedHeader, + TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, + H256, U256, }; use reth_revm_primitives::primitives::{BlockEnv, CfgEnv}; use std::{ops::RangeBounds, sync::Arc}; @@ -360,6 +361,12 @@ where } } +impl PruneCheckpointReader for ProviderFactory { + fn get_prune_checkpoint(&self, part: PrunePart) -> Result> { + self.provider()?.get_prune_checkpoint(part) + } +} + #[cfg(test)] mod tests { use super::ProviderFactory; diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 7205710ab1a3..339807580edc 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -5,7 +5,8 @@ use crate::{ }, AccountReader, BlockExecutionWriter, BlockHashReader, BlockNumReader, BlockReader, BlockWriter, EvmEnvProvider, HashingWriter, HeaderProvider, HistoryWriter, PostState, ProviderError, - StageCheckpointReader, StorageReader, TransactionsProvider, WithdrawalsProvider, + PruneCheckpointReader, PruneCheckpointWriter, StageCheckpointReader, StorageReader, + TransactionsProvider, WithdrawalsProvider, }; use itertools::{izip, Itertools}; use reth_db::{ @@ -16,7 +17,7 @@ use reth_db::{ sharded_key, storage_sharded_key::StorageShardedKey, AccountBeforeTx, BlockNumberAddress, ShardedKey, StoredBlockBodyIndices, StoredBlockOmmers, StoredBlockWithdrawals, }, - table::Table, + table::{Key, Table}, tables, transaction::{DbTx, DbTxMut}, BlockNumberList, DatabaseError, @@ -26,9 +27,10 @@ use reth_primitives::{ keccak256, stage::{StageCheckpoint, StageId}, Account, Address, Block, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, - ChainInfo, ChainSpec, Hardfork, Head, Header, Receipt, SealedBlock, SealedBlockWithSenders, - SealedHeader, StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, - TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, H256, U256, + ChainInfo, ChainSpec, Hardfork, Head, Header, PruneCheckpoint, PrunePart, Receipt, 
SealedBlock,
+    SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionMeta, TransactionSigned,
+    TransactionSignedEcRecovered, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, H256,
+    U256,
 };
 use reth_revm_primitives::{
     config::revm_spec,
@@ -617,6 +619,54 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> DatabaseProvider<'this, TX> {
         Ok(())
     }
 
+    /// Prune the table for the specified key range.
+    /// Returns number of rows pruned.
+    pub fn prune_table<T, K>(
+        &self,
+        range: impl RangeBounds<K>,
+    ) -> std::result::Result<usize, DatabaseError>
+    where
+        T: Table<Key = K>,
+        K: Key,
+    {
+        self.prune_table_in_batches::<T, K, _>(range, usize::MAX, |_| {})
+    }
+
+    /// Prune the table for the specified key range, calling `batch_callback` after every
+    /// `batch_size` pruned rows.
+    ///
+    /// Returns number of rows pruned.
+    pub fn prune_table_in_batches<T, K, F>(
+        &self,
+        range: impl RangeBounds<K>,
+        batch_size: usize,
+        batch_callback: F,
+    ) -> std::result::Result<usize, DatabaseError>
+    where
+        T: Table<Key = K>,
+        K: Key,
+        F: Fn(usize),
+    {
+        let mut cursor = self.tx.cursor_write::<T>()?;
+        let mut walker = cursor.walk_range(range)?;
+        let mut deleted = 0;
+
+        while let Some(Ok(_)) = walker.next() {
+            walker.delete_current()?;
+            deleted += 1;
+
+            if deleted % batch_size == 0 {
+                batch_callback(batch_size);
+            }
+        }
+
+        if deleted % batch_size != 0 {
+            batch_callback(deleted % batch_size);
+        }
+
+        Ok(deleted)
+    }
+
     /// Load shard and remove it. If list is empty, last shard was full or
     /// there are no shards at all.
     fn take_shard<T>(&self, key: T::Key) -> Result<Vec<u64>>
@@ -1816,3 +1866,15 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> BlockWriter for DatabaseProvider<'this, TX>
         Ok(())
     }
 }
+
+impl<'this, TX: DbTx<'this>> PruneCheckpointReader for DatabaseProvider<'this, TX> {
+    fn get_prune_checkpoint(&self, part: PrunePart) -> Result<Option<PruneCheckpoint>> {
+        Ok(self.tx.get::<tables::PruneCheckpoints>(part)?)
+    }
+}
+
+impl<'this, TX: DbTxMut<'this>> PruneCheckpointWriter for DatabaseProvider<'this, TX> {
+    fn save_prune_checkpoint(&self, part: PrunePart, checkpoint: PruneCheckpoint) -> Result<()> {
+        Ok(self.tx.put::<tables::PruneCheckpoints>(part, checkpoint)?)
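+        // Note: `PruneCheckpoints` is keyed by `PrunePart`, so this `put` writes at
+        // most one row per prune part; saving again for the same part overwrites the
+        // previous checkpoint rather than appending history.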
+ } +} diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 27e4903bb1de..b1e0c3e009ca 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -2,9 +2,9 @@ use crate::{ BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockchainTreePendingStateProvider, CanonChainTracker, CanonStateNotifications, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, HeaderProvider, - PostStateDataProvider, ProviderError, ReceiptProvider, ReceiptProviderIdExt, - StageCheckpointReader, StateProviderBox, StateProviderFactory, TransactionsProvider, - WithdrawalsProvider, + PostStateDataProvider, ProviderError, PruneCheckpointReader, ReceiptProvider, + ReceiptProviderIdExt, StageCheckpointReader, StateProviderBox, StateProviderFactory, + TransactionsProvider, WithdrawalsProvider, }; use reth_db::{database::Database, models::StoredBlockBodyIndices}; use reth_interfaces::{ @@ -15,8 +15,8 @@ use reth_interfaces::{ use reth_primitives::{ stage::{StageCheckpoint, StageId}, Address, Block, BlockHash, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumber, - BlockNumberOrTag, BlockWithSenders, ChainInfo, ChainSpec, Header, Receipt, SealedBlock, - SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, + BlockNumberOrTag, BlockWithSenders, ChainInfo, ChainSpec, Header, PruneCheckpoint, PrunePart, + Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, H256, U256, }; use reth_revm_primitives::primitives::{BlockEnv, CfgEnv}; @@ -438,6 +438,16 @@ where } } +impl PruneCheckpointReader for BlockchainProvider +where + DB: Database, + Tree: Send + Sync, +{ + fn get_prune_checkpoint(&self, part: PrunePart) -> Result> { + self.database.provider()?.get_prune_checkpoint(part) + } +} + impl ChainSpecProvider for BlockchainProvider where DB: Send + Sync, diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index def01741e68a..593b338343f2 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -2,17 +2,18 @@ use crate::{ traits::{BlockSource, ReceiptProvider}, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, HeaderProvider, PostState, - ReceiptProviderIdExt, StageCheckpointReader, StateProvider, StateProviderBox, - StateProviderFactory, StateRootProvider, TransactionsProvider, WithdrawalsProvider, + PruneCheckpointReader, ReceiptProviderIdExt, StageCheckpointReader, StateProvider, + StateProviderBox, StateProviderFactory, StateRootProvider, TransactionsProvider, + WithdrawalsProvider, }; use reth_db::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_interfaces::Result; use reth_primitives::{ stage::{StageCheckpoint, StageId}, Account, Address, Block, BlockHash, BlockHashOrNumber, BlockId, BlockNumber, Bytecode, Bytes, - ChainInfo, ChainSpec, Header, Receipt, SealedBlock, SealedHeader, StorageKey, StorageValue, - TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, H256, - KECCAK_EMPTY, MAINNET, U256, + ChainInfo, ChainSpec, Header, PruneCheckpoint, PrunePart, Receipt, SealedBlock, SealedHeader, + StorageKey, StorageValue, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, + TxNumber, H256, 
KECCAK_EMPTY, MAINNET, U256, }; use reth_revm_primitives::primitives::{BlockEnv, CfgEnv}; use std::{ops::RangeBounds, sync::Arc}; @@ -360,3 +361,9 @@ impl WithdrawalsProvider for NoopProvider { Ok(None) } } + +impl PruneCheckpointReader for NoopProvider { + fn get_prune_checkpoint(&self, _part: PrunePart) -> Result> { + Ok(None) + } +} diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index 5343185bdf19..3c2e06d21142 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -59,3 +59,6 @@ pub use hashing::HashingWriter; mod history; pub use history::HistoryWriter; + +mod prune_checkpoint; +pub use prune_checkpoint::{PruneCheckpointReader, PruneCheckpointWriter}; diff --git a/crates/storage/provider/src/traits/prune_checkpoint.rs b/crates/storage/provider/src/traits/prune_checkpoint.rs new file mode 100644 index 000000000000..65ee17dc2f43 --- /dev/null +++ b/crates/storage/provider/src/traits/prune_checkpoint.rs @@ -0,0 +1,16 @@ +use reth_interfaces::Result; +use reth_primitives::{PruneCheckpoint, PrunePart}; + +/// The trait for fetching prune checkpoint related data. +#[auto_impl::auto_impl(&, Arc)] +pub trait PruneCheckpointReader: Send + Sync { + /// Fetch the checkpoint for the given prune part. + fn get_prune_checkpoint(&self, part: PrunePart) -> Result>; +} + +/// The trait for updating prune checkpoint related data. +#[auto_impl::auto_impl(&, Arc)] +pub trait PruneCheckpointWriter: Send + Sync { + /// Save prune checkpoint. + fn save_prune_checkpoint(&self, part: PrunePart, checkpoint: PruneCheckpoint) -> Result<()>; +} From b69a18dc47255304b3943ca859dc6eb9da579b19 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 24 Jul 2023 18:57:45 +0200 Subject: [PATCH 231/722] chore: refactor header stream (#3880) --- crates/rpc/rpc/src/eth/pubsub.rs | 32 +++++++++------------------- crates/storage/provider/src/chain.rs | 12 ++++++++++- 2 files changed, 21 insertions(+), 23 deletions(-) diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index 5b823ea40cbd..d5c0231ef849 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -1,25 +1,22 @@ //! `eth_` PubSub RPC handler implementation -use crate::eth::logs_utils; +use crate::{eth::logs_utils, result::invalid_params_rpc_err}; use futures::StreamExt; use jsonrpsee::{server::SubscriptionMessage, PendingSubscriptionSink, SubscriptionSink}; use reth_network_api::NetworkInfo; use reth_primitives::{IntoRecoveredTransaction, TxHash}; use reth_provider::{BlockReader, CanonStateSubscriptions, EvmEnvProvider}; use reth_rpc_api::EthPubSubApiServer; -use reth_rpc_types::FilteredParams; -use std::sync::Arc; - -use crate::result::invalid_params_rpc_err; use reth_rpc_types::{ pubsub::{ Params, PubSubSyncStatus, SubscriptionKind, SubscriptionResult as EthSubscriptionResult, SyncStatusMetadata, }, - Header, Log, Transaction, + FilteredParams, Header, Log, Transaction, }; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use reth_transaction_pool::{NewTransactionEvent, TransactionPool}; use serde::Serialize; +use std::sync::Arc; use tokio_stream::{ wrappers::{BroadcastStream, ReceiverStream}, Stream, @@ -292,22 +289,13 @@ where { /// Returns a stream that yields all new RPC blocks. 
fn new_headers_stream(&self) -> impl Stream { - BroadcastStream::new(self.chain_events.subscribe_to_canonical_state()) - .map(|new_block| { - let new_chain = new_block.expect("new block subscription never ends; qed"); - new_chain - .committed() - .map(|c| { - c.blocks() - .iter() - .map(|(_, block)| { - Header::from_primitive_with_hash(block.header.clone()) - }) - .collect::>() - }) - .unwrap_or_default() - }) - .flat_map(futures::stream::iter) + self.chain_events.canonical_state_stream().flat_map(|new_chain| { + let headers = new_chain + .committed() + .map(|chain| chain.headers().collect::>()) + .unwrap_or_default(); + futures::stream::iter(headers.into_iter().map(Header::from_primitive_with_hash)) + }) } /// Returns a stream that yields all logs that match the given filter. diff --git a/crates/storage/provider/src/chain.rs b/crates/storage/provider/src/chain.rs index 13ba859d05da..b5d596c0164b 100644 --- a/crates/storage/provider/src/chain.rs +++ b/crates/storage/provider/src/chain.rs @@ -4,7 +4,7 @@ use crate::PostState; use reth_interfaces::{executor::BlockExecutionError, Error}; use reth_primitives::{ BlockHash, BlockNumHash, BlockNumber, ForkBlock, Receipt, SealedBlock, SealedBlockWithSenders, - TransactionSigned, TxHash, + SealedHeader, TransactionSigned, TxHash, }; use std::{borrow::Cow, collections::BTreeMap, fmt}; @@ -31,6 +31,16 @@ impl Chain { &self.blocks } + /// Consumes the type and only returns the blocks in this chain. + pub fn into_blocks(self) -> BTreeMap { + self.blocks + } + + /// Returns an iterator over all headers in the block with increasing block numbers. + pub fn headers(&self) -> impl Iterator + '_ { + self.blocks.values().map(|block| block.header.clone()) + } + /// Get post state of this chain pub fn state(&self) -> &PostState { &self.state From 609cab872e8e1c12a32ed54f9eaed1d85a3f632e Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 24 Jul 2023 19:59:20 +0200 Subject: [PATCH 232/722] chore: improve TestConsensusEngineBuilder ergonomics (#3891) --- Cargo.lock | 2 +- crates/consensus/beacon/src/engine/mod.rs | 619 +++--------------- .../consensus/beacon/src/engine/test_utils.rs | 509 ++++++++++++++ 3 files changed, 616 insertions(+), 514 deletions(-) create mode 100644 crates/consensus/beacon/src/engine/test_utils.rs diff --git a/Cargo.lock b/Cargo.lock index 1ad3c97b7f63..7b674b4eec8b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5587,7 +5587,7 @@ name = "reth-prune" version = "0.1.0-alpha.4" dependencies = [ "assert_matches", - "itertools", + "itertools 0.10.5", "reth-db", "reth-interfaces", "reth-primitives", diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 131b58545725..35ecd356a208 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -72,6 +72,9 @@ mod metrics; pub(crate) mod prune; pub(crate) mod sync; +#[cfg(any(test, feature = "test-utils"))] +pub mod test_utils; + /// The maximum number of invalid headers that can be tracked by the engine. 
const MAX_INVALID_HEADERS: u32 = 512u32; @@ -1709,413 +1712,17 @@ where #[cfg(test)] mod tests { use super::*; - use crate::{BeaconForkChoiceUpdateError, BeaconOnNewPayloadError}; - use assert_matches::assert_matches; - use reth_blockchain_tree::{ - config::BlockchainTreeConfig, externals::TreeExternals, post_state::PostState, - BlockchainTree, ShareableBlockchainTree, - }; - use reth_db::{test_utils::create_test_rw_db, DatabaseEnv}; - use reth_downloaders::{ - bodies::bodies::BodiesDownloaderBuilder, - headers::reverse_headers::ReverseHeadersDownloaderBuilder, - }; - use reth_interfaces::{ - consensus::Consensus, - p2p::either::EitherDownloader, - sync::NoopSyncStateUpdater, - test_utils::{NoopFullBlockClient, TestConsensus}, - }; - use reth_payload_builder::test_utils::spawn_test_payload_service; - use reth_primitives::{ - stage::StageCheckpoint, ChainSpec, ChainSpecBuilder, PruneModes, H256, MAINNET, + use crate::{ + test_utils::{spawn_consensus_engine, TestConsensusEngineBuilder}, + BeaconForkChoiceUpdateError, }; - use reth_provider::{ - providers::BlockchainProvider, test_utils::TestExecutorFactory, BlockExecutor, BlockWriter, - ExecutorFactory, ProviderFactory, StateProvider, - }; - use reth_prune::BatchSizes; - use reth_revm::Factory; - use reth_rpc_types::engine::{ - ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, - }; - use reth_stages::{ - sets::DefaultStages, stages::HeaderSyncMode, test_utils::TestStages, ExecOutput, - PipelineError, StageError, - }; - use reth_tasks::TokioTaskExecutor; + use assert_matches::assert_matches; + use reth_primitives::{stage::StageCheckpoint, ChainSpec, ChainSpecBuilder, H256, MAINNET}; + use reth_provider::{BlockWriter, ProviderFactory}; + use reth_rpc_types::engine::{ForkchoiceState, ForkchoiceUpdated, PayloadStatus}; + use reth_stages::{ExecOutput, PipelineError, StageError}; use std::{collections::VecDeque, sync::Arc, time::Duration}; - use tokio::sync::{ - oneshot::{self, error::TryRecvError}, - watch, - }; - - type TestBeaconConsensusEngine = BeaconConsensusEngine< - Arc, - BlockchainProvider< - Arc, - ShareableBlockchainTree< - Arc, - Arc, - EitherExecutorFactory, - >, - >, - Arc>, - >; - - struct TestEnv { - db: DB, - // Keep the tip receiver around, so it's not dropped. - #[allow(dead_code)] - tip_rx: watch::Receiver, - engine_handle: BeaconConsensusEngineHandle, - } - - impl TestEnv { - fn new( - db: DB, - tip_rx: watch::Receiver, - engine_handle: BeaconConsensusEngineHandle, - ) -> Self { - Self { db, tip_rx, engine_handle } - } - - async fn send_new_payload( - &self, - payload: ExecutionPayload, - ) -> Result { - self.engine_handle.new_payload(payload).await - } - - /// Sends the `ExecutionPayload` message to the consensus engine and retries if the engine - /// is syncing. - async fn send_new_payload_retry_on_syncing( - &self, - payload: ExecutionPayload, - ) -> Result { - loop { - let result = self.send_new_payload(payload.clone()).await?; - if !result.is_syncing() { - return Ok(result) - } - } - } - - async fn send_forkchoice_updated( - &self, - state: ForkchoiceState, - ) -> Result { - self.engine_handle.fork_choice_updated(state, None).await - } - - /// Sends the `ForkchoiceUpdated` message to the consensus engine and retries if the engine - /// is syncing. 
- async fn send_forkchoice_retry_on_syncing( - &self, - state: ForkchoiceState, - ) -> Result { - loop { - let result = self.engine_handle.fork_choice_updated(state, None).await?; - if !result.is_syncing() { - return Ok(result) - } - } - } - } - - /// Represents either test pipeline outputs, or real pipeline configuration. - enum TestPipelineConfig { - /// Test pipeline outputs. - Test(VecDeque>), - /// Real pipeline configuration. - Real, - } - - impl Default for TestPipelineConfig { - fn default() -> Self { - Self::Test(VecDeque::new()) - } - } - - /// Represents either test executor results, or real executor configuration. - enum TestExecutorConfig { - /// Test executor results. - Test(Vec), - /// Real executor configuration. - Real, - } - - impl Default for TestExecutorConfig { - fn default() -> Self { - Self::Test(Vec::new()) - } - } - - /// A type that represents one of two possible executor factories. - #[derive(Debug, Clone)] - enum EitherExecutorFactory { - /// The first factory variant - Left(A), - /// The second factory variant - Right(B), - } - - // A type that represents one of two possible BlockExecutor types. - #[derive(Debug)] - enum EitherBlockExecutor { - /// The first executor variant - Left(A), - /// The second executor variant - Right(B), - } - - impl BlockExecutor for EitherBlockExecutor - where - A: BlockExecutor, - B: BlockExecutor, - SP: StateProvider, - { - fn execute( - &mut self, - block: &reth_primitives::Block, - total_difficulty: U256, - senders: Option>, - ) -> Result { - match self { - EitherBlockExecutor::Left(a) => a.execute(block, total_difficulty, senders), - EitherBlockExecutor::Right(b) => b.execute(block, total_difficulty, senders), - } - } - - fn execute_and_verify_receipt( - &mut self, - block: &reth_primitives::Block, - total_difficulty: U256, - senders: Option>, - ) -> Result { - match self { - EitherBlockExecutor::Left(a) => { - a.execute_and_verify_receipt(block, total_difficulty, senders) - } - EitherBlockExecutor::Right(b) => { - b.execute_and_verify_receipt(block, total_difficulty, senders) - } - } - } - } - - impl ExecutorFactory for EitherExecutorFactory - where - A: ExecutorFactory, - B: ExecutorFactory, - { - type Executor = EitherBlockExecutor, B::Executor>; - - fn chain_spec(&self) -> &ChainSpec { - match self { - EitherExecutorFactory::Left(a) => a.chain_spec(), - EitherExecutorFactory::Right(b) => b.chain_spec(), - } - } - - fn with_sp(&self, sp: SP) -> Self::Executor { - match self { - EitherExecutorFactory::Left(a) => EitherBlockExecutor::Left(a.with_sp(sp)), - EitherExecutorFactory::Right(b) => EitherBlockExecutor::Right(b.with_sp(sp)), - } - } - } - - /// A builder for `TestConsensusEngine`, allows configuration of mocked pipeline outputs and - /// mocked executor results. - struct TestConsensusEngineBuilder { - chain_spec: Arc, - pipeline_config: TestPipelineConfig, - executor_config: TestExecutorConfig, - pipeline_run_threshold: Option, - max_block: Option, - client: Option, - } - - impl TestConsensusEngineBuilder - where - Client: HeadersClient + BodiesClient + 'static, - { - /// Create a new `TestConsensusEngineBuilder` with the given `ChainSpec`. - fn new(chain_spec: Arc) -> Self { - Self { - chain_spec, - pipeline_config: Default::default(), - executor_config: Default::default(), - pipeline_run_threshold: None, - client: None, - max_block: None, - } - } - - /// Set the pipeline execution outputs to use for the test consensus engine. 
- fn with_pipeline_exec_outputs( - mut self, - pipeline_exec_outputs: VecDeque>, - ) -> Self { - self.pipeline_config = TestPipelineConfig::Test(pipeline_exec_outputs); - self - } - - /// Set the executor results to use for the test consensus engine. - fn with_executor_results(mut self, executor_results: Vec) -> Self { - self.executor_config = TestExecutorConfig::Test(executor_results); - self - } - - /// Sets the max block for the pipeline to run. - fn with_max_block(mut self, max_block: BlockNumber) -> Self { - self.max_block = Some(max_block); - self - } - - /// Uses the real pipeline instead of a pipeline with empty exec outputs. - fn with_real_pipeline(mut self) -> Self { - self.pipeline_config = TestPipelineConfig::Real; - self - } - - /// Uses the real executor instead of a executor with empty results. - fn with_real_executor(mut self) -> Self { - self.executor_config = TestExecutorConfig::Real; - self - } - - /// Sets the client to use for network operations. - #[allow(dead_code)] - fn with_client(mut self, client: Client) -> Self { - self.client = Some(client); - self - } - - /// Disables blockchain tree driven sync. This is the same as setting the pipeline run - /// threshold to 0. - fn disable_blockchain_tree_sync(mut self) -> Self { - self.pipeline_run_threshold = Some(0); - self - } - - /// Builds the test consensus engine into a `TestConsensusEngine` and `TestEnv`. - fn build(self) -> (TestBeaconConsensusEngine, TestEnv>) { - reth_tracing::init_test_tracing(); - let db = create_test_rw_db(); - let consensus = Arc::new(TestConsensus::default()); - let payload_builder = spawn_test_payload_service(); - - // use either noop client or a user provided client (for example TestFullBlockClient) - let client = Arc::new( - self.client - .map(EitherDownloader::Left) - .unwrap_or_else(|| EitherDownloader::Right(NoopFullBlockClient::default())), - ); - - // use either test executor or real executor - let executor_factory = match self.executor_config { - TestExecutorConfig::Test(results) => { - let executor_factory = TestExecutorFactory::new(self.chain_spec.clone()); - executor_factory.extend(results); - EitherExecutorFactory::Left(executor_factory) - } - TestExecutorConfig::Real => { - EitherExecutorFactory::Right(Factory::new(self.chain_spec.clone())) - } - }; - - // Setup pipeline - let (tip_tx, tip_rx) = watch::channel(H256::default()); - let mut pipeline = match self.pipeline_config { - TestPipelineConfig::Test(outputs) => Pipeline::builder() - .add_stages(TestStages::new(outputs, Default::default())) - .with_tip_sender(tip_tx), - TestPipelineConfig::Real => { - let header_downloader = ReverseHeadersDownloaderBuilder::default() - .build(client.clone(), consensus.clone()) - .into_task(); - - let body_downloader = BodiesDownloaderBuilder::default() - .build(client.clone(), consensus.clone(), db.clone()) - .into_task(); - - Pipeline::builder().add_stages(DefaultStages::new( - HeaderSyncMode::Tip(tip_rx.clone()), - Arc::clone(&consensus) as Arc, - header_downloader, - body_downloader, - executor_factory.clone(), - )) - } - }; - - if let Some(max_block) = self.max_block { - pipeline = pipeline.with_max_block(max_block); - } - - let pipeline = pipeline.build(db.clone(), self.chain_spec.clone()); - - // Setup blockchain tree - let externals = TreeExternals::new( - db.clone(), - consensus, - executor_factory, - self.chain_spec.clone(), - ); - let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let (canon_state_notification_sender, _) = tokio::sync::broadcast::channel(3); - let tree = 
ShareableBlockchainTree::new( - BlockchainTree::new(externals, canon_state_notification_sender, config) - .expect("failed to create tree"), - ); - let shareable_db = ProviderFactory::new(db.clone(), self.chain_spec.clone()); - let latest = self.chain_spec.genesis_header().seal_slow(); - let blockchain_provider = BlockchainProvider::with_latest(shareable_db, tree, latest); - - let pruner = Pruner::new( - db.clone(), - self.chain_spec.clone(), - 5, - 0, - PruneModes::default(), - BatchSizes::default(), - ); - - let (mut engine, handle) = BeaconConsensusEngine::new( - client, - pipeline, - blockchain_provider, - Box::::default(), - Box::::default(), - None, - false, - payload_builder, - None, - self.pipeline_run_threshold.unwrap_or(MIN_BLOCKS_FOR_PIPELINE_RUN), - Some(pruner), - ) - .expect("failed to create consensus engine"); - - if let Some(max_block) = self.max_block { - engine.sync.set_max_block(max_block) - } - - (engine, TestEnv::new(db, tip_rx, handle)) - } - } - - fn spawn_consensus_engine( - engine: TestBeaconConsensusEngine, - ) -> oneshot::Receiver> { - let (tx, rx) = oneshot::channel(); - tokio::spawn(async move { - let result = engine.await; - tx.send(result).expect("failed to forward consensus engine result"); - }); - rx - } + use tokio::sync::oneshot::error::TryRecvError; // Pipeline error is propagated. #[tokio::test] @@ -2128,12 +1735,11 @@ mod tests { .build(), ); - let (consensus_engine, env) = - TestConsensusEngineBuilder::::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Err(StageError::ChannelClosed)])) - .disable_blockchain_tree_sync() - .with_max_block(1) - .build(); + let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) + .with_pipeline_exec_outputs(VecDeque::from([Err(StageError::ChannelClosed)])) + .disable_blockchain_tree_sync() + .with_max_block(1) + .build(); let res = spawn_consensus_engine(consensus_engine); @@ -2160,12 +1766,11 @@ mod tests { .build(), ); - let (consensus_engine, env) = - TestConsensusEngineBuilder::::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Err(StageError::ChannelClosed)])) - .disable_blockchain_tree_sync() - .with_max_block(1) - .build(); + let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) + .with_pipeline_exec_outputs(VecDeque::from([Err(StageError::ChannelClosed)])) + .disable_blockchain_tree_sync() + .with_max_block(1) + .build(); let mut rx = spawn_consensus_engine(consensus_engine); @@ -2223,15 +1828,14 @@ mod tests { .build(), ); - let (consensus_engine, env) = - TestConsensusEngineBuilder::::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([ - Ok(ExecOutput { checkpoint: StageCheckpoint::new(1), done: true }), - Err(StageError::ChannelClosed), - ])) - .disable_blockchain_tree_sync() - .with_max_block(2) - .build(); + let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) + .with_pipeline_exec_outputs(VecDeque::from([ + Ok(ExecOutput { checkpoint: StageCheckpoint::new(1), done: true }), + Err(StageError::ChannelClosed), + ])) + .disable_blockchain_tree_sync() + .with_max_block(2) + .build(); let rx = spawn_consensus_engine(consensus_engine); @@ -2259,15 +1863,14 @@ mod tests { .build(), ); - let (consensus_engine, env) = - TestConsensusEngineBuilder::::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(max_block), - done: true, - })])) - .with_max_block(max_block) - .disable_blockchain_tree_sync() - .build(); 
+ let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) + .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { + checkpoint: StageCheckpoint::new(max_block), + done: true, + })])) + .with_max_block(max_block) + .disable_blockchain_tree_sync() + .build(); let rx = spawn_consensus_engine(consensus_engine); @@ -2309,13 +1912,12 @@ mod tests { .build(), ); - let (consensus_engine, env) = - TestConsensusEngineBuilder::::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .build(); + let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) + .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { + checkpoint: StageCheckpoint::new(0), + done: true, + })])) + .build(); let mut engine_rx = spawn_consensus_engine(consensus_engine); @@ -2341,13 +1943,12 @@ mod tests { .build(), ); - let (consensus_engine, env) = - TestConsensusEngineBuilder::::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .build(); + let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) + .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { + checkpoint: StageCheckpoint::new(0), + done: true, + })])) + .build(); let genesis = random_block(&mut rng, 0, None, None, Some(0)); let block1 = random_block(&mut rng, 1, Some(genesis.hash), None, Some(0)); @@ -2391,14 +1992,13 @@ mod tests { .build(), ); - let (consensus_engine, env) = - TestConsensusEngineBuilder::::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([ - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - ])) - .disable_blockchain_tree_sync() - .build(); + let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) + .with_pipeline_exec_outputs(VecDeque::from([ + Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), + Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), + ])) + .disable_blockchain_tree_sync() + .build(); let genesis = random_block(&mut rng, 0, None, None, Some(0)); let block1 = random_block(&mut rng, 1, Some(genesis.hash), None, Some(0)); @@ -2442,14 +2042,13 @@ mod tests { .build(), ); - let (consensus_engine, env) = - TestConsensusEngineBuilder::::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .disable_blockchain_tree_sync() - .build(); + let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) + .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { + checkpoint: StageCheckpoint::new(0), + done: true, + })])) + .disable_blockchain_tree_sync() + .build(); let genesis = random_block(&mut rng, 0, None, None, Some(0)); let block1 = random_block(&mut rng, 1, Some(genesis.hash), None, Some(0)); @@ -2481,13 +2080,12 @@ mod tests { .build(), ); - let (consensus_engine, env) = - TestConsensusEngineBuilder::::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([ - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - ])) - .build(); + let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) + .with_pipeline_exec_outputs(VecDeque::from([ + Ok(ExecOutput { 
checkpoint: StageCheckpoint::new(0), done: true }), + Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), + ])) + .build(); let genesis = random_block(&mut rng, 0, None, None, Some(0)); let mut block1 = random_block(&mut rng, 1, Some(genesis.hash), None, Some(0)); @@ -2535,13 +2133,12 @@ mod tests { .build(), ); - let (consensus_engine, env) = - TestConsensusEngineBuilder::::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([ - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - ])) - .build(); + let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) + .with_pipeline_exec_outputs(VecDeque::from([ + Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), + Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), + ])) + .build(); let genesis = random_block(&mut rng, 0, None, None, Some(0)); let block1 = random_block(&mut rng, 1, Some(genesis.hash), None, Some(0)); @@ -2586,13 +2183,12 @@ mod tests { .build(), ); - let (consensus_engine, env) = - TestConsensusEngineBuilder::::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .build(); + let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) + .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { + checkpoint: StageCheckpoint::new(0), + done: true, + })])) + .build(); let mut engine_rx = spawn_consensus_engine(consensus_engine); @@ -2622,13 +2218,12 @@ mod tests { .build(), ); - let (consensus_engine, env) = - TestConsensusEngineBuilder::::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .build(); + let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) + .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { + checkpoint: StageCheckpoint::new(0), + done: true, + })])) + .build(); let genesis = random_block(&mut rng, 0, None, None, Some(0)); let block1 = random_block(&mut rng, 1, Some(genesis.hash), None, Some(0)); @@ -2684,11 +2279,11 @@ mod tests { .build(), ); - let (consensus_engine, env) = - TestConsensusEngineBuilder::::new(chain_spec.clone()) - .with_real_pipeline() - .with_real_executor() - .build(); + let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) + .with_real_pipeline() + .with_real_executor() + .with_real_consensus() + .build(); let genesis = SealedBlock { header: chain_spec.sealed_genesis_header(), ..Default::default() }; @@ -2726,13 +2321,12 @@ mod tests { .build(), ); - let (consensus_engine, env) = - TestConsensusEngineBuilder::::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .build(); + let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) + .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { + checkpoint: StageCheckpoint::new(0), + done: true, + })])) + .build(); let genesis = random_block(&mut rng, 0, None, None, Some(0)); @@ -2783,14 +2377,13 @@ mod tests { .build(), ); - let (consensus_engine, env) = - TestConsensusEngineBuilder::::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - 
.with_executor_results(Vec::from([exec_result2])) - .build(); + let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) + .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { + checkpoint: StageCheckpoint::new(0), + done: true, + })])) + .with_executor_results(Vec::from([exec_result2])) + .build(); insert_blocks( env.db.as_ref(), diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs new file mode 100644 index 000000000000..46fb5eaeb245 --- /dev/null +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -0,0 +1,509 @@ +use crate::{ + BeaconConsensus, BeaconConsensusEngine, BeaconConsensusEngineError, + BeaconConsensusEngineHandle, BeaconForkChoiceUpdateError, BeaconOnNewPayloadError, + MIN_BLOCKS_FOR_PIPELINE_RUN, +}; +use reth_blockchain_tree::{ + config::BlockchainTreeConfig, externals::TreeExternals, post_state::PostState, BlockchainTree, + ShareableBlockchainTree, +}; +use reth_db::{test_utils::create_test_rw_db, DatabaseEnv}; +use reth_downloaders::{ + bodies::bodies::BodiesDownloaderBuilder, + headers::reverse_headers::ReverseHeadersDownloaderBuilder, +}; +use reth_interfaces::{ + consensus::Consensus, + executor::BlockExecutionError, + p2p::{bodies::client::BodiesClient, either::EitherDownloader, headers::client::HeadersClient}, + sync::NoopSyncStateUpdater, + test_utils::{NoopFullBlockClient, TestConsensus}, +}; +use reth_payload_builder::test_utils::spawn_test_payload_service; +use reth_primitives::{BlockNumber, ChainSpec, PruneModes, H256, U256}; +use reth_provider::{ + providers::BlockchainProvider, test_utils::TestExecutorFactory, BlockExecutor, ExecutorFactory, + ProviderFactory, StateProvider, +}; +use reth_prune::{BatchSizes, Pruner}; +use reth_revm::Factory; +use reth_rpc_types::engine::{ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus}; +use reth_stages::{ + sets::DefaultStages, stages::HeaderSyncMode, test_utils::TestStages, ExecOutput, Pipeline, + StageError, +}; +use reth_tasks::TokioTaskExecutor; +use std::{collections::VecDeque, sync::Arc}; +use tokio::sync::{oneshot, watch}; + +type TestBeaconConsensusEngine = BeaconConsensusEngine< + Arc, + BlockchainProvider< + Arc, + ShareableBlockchainTree< + Arc, + Arc, + EitherExecutorFactory, + >, + >, + Arc>, +>; + +pub struct TestEnv { + pub db: DB, + // Keep the tip receiver around, so it's not dropped. + #[allow(dead_code)] + tip_rx: watch::Receiver, + engine_handle: BeaconConsensusEngineHandle, +} + +impl TestEnv { + fn new( + db: DB, + tip_rx: watch::Receiver, + engine_handle: BeaconConsensusEngineHandle, + ) -> Self { + Self { db, tip_rx, engine_handle } + } + + pub async fn send_new_payload( + &self, + payload: ExecutionPayload, + ) -> Result { + self.engine_handle.new_payload(payload).await + } + + /// Sends the `ExecutionPayload` message to the consensus engine and retries if the engine + /// is syncing. + pub async fn send_new_payload_retry_on_syncing( + &self, + payload: ExecutionPayload, + ) -> Result { + loop { + let result = self.send_new_payload(payload.clone()).await?; + if !result.is_syncing() { + return Ok(result) + } + } + } + + pub async fn send_forkchoice_updated( + &self, + state: ForkchoiceState, + ) -> Result { + self.engine_handle.fork_choice_updated(state, None).await + } + + /// Sends the `ForkchoiceUpdated` message to the consensus engine and retries if the engine + /// is syncing. 
+ pub async fn send_forkchoice_retry_on_syncing( + &self, + state: ForkchoiceState, + ) -> Result { + loop { + let result = self.engine_handle.fork_choice_updated(state, None).await?; + if !result.is_syncing() { + return Ok(result) + } + } + } +} + +// TODO: add with_consensus in case we want to use the TestConsensus purposeful failure - this +// would require similar patterns to how we use with_client and the EitherDownloader +/// Represents either a real consensus engine, or a test consensus engine. +#[derive(Default)] +enum TestConsensusConfig { + /// Test consensus engine + #[default] + Test, + /// Real consensus engine + Real, +} + +/// Represents either test pipeline outputs, or real pipeline configuration. +enum TestPipelineConfig { + /// Test pipeline outputs. + Test(VecDeque>), + /// Real pipeline configuration. + Real, +} + +impl Default for TestPipelineConfig { + fn default() -> Self { + Self::Test(VecDeque::new()) + } +} + +/// Represents either test executor results, or real executor configuration. +enum TestExecutorConfig { + /// Test executor results. + Test(Vec), + /// Real executor configuration. + Real, +} + +impl Default for TestExecutorConfig { + fn default() -> Self { + Self::Test(Vec::new()) + } +} + +/// A type that represents one of two possible executor factories. +#[derive(Debug, Clone)] +pub enum EitherExecutorFactory { + /// The first factory variant + Left(A), + /// The second factory variant + Right(B), +} + +// A type that represents one of two possible BlockExecutor types. +#[derive(Debug)] +pub enum EitherBlockExecutor { + /// The first executor variant + Left(A), + /// The second executor variant + Right(B), +} + +impl BlockExecutor for EitherBlockExecutor +where + A: BlockExecutor, + B: BlockExecutor, + SP: StateProvider, +{ + fn execute( + &mut self, + block: &reth_primitives::Block, + total_difficulty: U256, + senders: Option>, + ) -> Result { + match self { + EitherBlockExecutor::Left(a) => a.execute(block, total_difficulty, senders), + EitherBlockExecutor::Right(b) => b.execute(block, total_difficulty, senders), + } + } + + fn execute_and_verify_receipt( + &mut self, + block: &reth_primitives::Block, + total_difficulty: U256, + senders: Option>, + ) -> Result { + match self { + EitherBlockExecutor::Left(a) => { + a.execute_and_verify_receipt(block, total_difficulty, senders) + } + EitherBlockExecutor::Right(b) => { + b.execute_and_verify_receipt(block, total_difficulty, senders) + } + } + } +} + +impl ExecutorFactory for EitherExecutorFactory +where + A: ExecutorFactory, + B: ExecutorFactory, +{ + type Executor = EitherBlockExecutor, B::Executor>; + + fn chain_spec(&self) -> &ChainSpec { + match self { + EitherExecutorFactory::Left(a) => a.chain_spec(), + EitherExecutorFactory::Right(b) => b.chain_spec(), + } + } + + fn with_sp(&self, sp: SP) -> Self::Executor { + match self { + EitherExecutorFactory::Left(a) => EitherBlockExecutor::Left(a.with_sp(sp)), + EitherExecutorFactory::Right(b) => EitherBlockExecutor::Right(b.with_sp(sp)), + } + } +} + +/// The basic configuration for a `TestConsensusEngine`, without generics for the client or +/// consensus engine. +pub struct TestConsensusEngineBuilder { + chain_spec: Arc, + pipeline_config: TestPipelineConfig, + executor_config: TestExecutorConfig, + pipeline_run_threshold: Option, + max_block: Option, + consensus: TestConsensusConfig, +} + +impl TestConsensusEngineBuilder { + /// Create a new `TestConsensusEngineBuilder` with the given `ChainSpec`. 
+ pub fn new(chain_spec: Arc) -> Self { + Self { + chain_spec, + pipeline_config: Default::default(), + executor_config: Default::default(), + pipeline_run_threshold: None, + max_block: None, + consensus: Default::default(), + } + } + + /// Set the pipeline execution outputs to use for the test consensus engine. + pub fn with_pipeline_exec_outputs( + mut self, + pipeline_exec_outputs: VecDeque>, + ) -> Self { + self.pipeline_config = TestPipelineConfig::Test(pipeline_exec_outputs); + self + } + + /// Set the executor results to use for the test consensus engine. + pub fn with_executor_results(mut self, executor_results: Vec) -> Self { + self.executor_config = TestExecutorConfig::Test(executor_results); + self + } + + /// Sets the max block for the pipeline to run. + pub fn with_max_block(mut self, max_block: BlockNumber) -> Self { + self.max_block = Some(max_block); + self + } + + /// Uses the real pipeline instead of a pipeline with empty exec outputs. + pub fn with_real_pipeline(mut self) -> Self { + self.pipeline_config = TestPipelineConfig::Real; + self + } + + /// Uses the real executor instead of a executor with empty results. + pub fn with_real_executor(mut self) -> Self { + self.executor_config = TestExecutorConfig::Real; + self + } + + /// Uses a real consensus engine instead of a test consensus engine. + pub fn with_real_consensus(mut self) -> Self { + self.consensus = TestConsensusConfig::Real; + self + } + + /// Disables blockchain tree driven sync. This is the same as setting the pipeline run + /// threshold to 0. + pub fn disable_blockchain_tree_sync(mut self) -> Self { + self.pipeline_run_threshold = Some(0); + self + } + + /// Sets the client to use for network operations. + #[allow(dead_code)] + pub fn with_client(self, client: Client) -> NetworkedTestConsensusEngineBuilder + where + Client: HeadersClient + BodiesClient + 'static, + { + NetworkedTestConsensusEngineBuilder { base_config: self, client: Some(client) } + } + + /// Builds the test consensus engine into a `TestConsensusEngine` and `TestEnv`. + pub fn build( + self, + ) -> (TestBeaconConsensusEngine, TestEnv>) { + let networked = NetworkedTestConsensusEngineBuilder { base_config: self, client: None }; + + networked.build() + } +} + +/// A builder for `TestConsensusEngine`, allows configuration of mocked pipeline outputs and +/// mocked executor results. +/// +/// This optionally includes a client for network operations. +pub struct NetworkedTestConsensusEngineBuilder { + base_config: TestConsensusEngineBuilder, + client: Option, +} + +impl NetworkedTestConsensusEngineBuilder +where + Client: HeadersClient + BodiesClient + 'static, +{ + /// Set the pipeline execution outputs to use for the test consensus engine. + #[allow(dead_code)] + pub fn with_pipeline_exec_outputs( + mut self, + pipeline_exec_outputs: VecDeque>, + ) -> Self { + self.base_config.pipeline_config = TestPipelineConfig::Test(pipeline_exec_outputs); + self + } + + /// Set the executor results to use for the test consensus engine. + #[allow(dead_code)] + pub fn with_executor_results(mut self, executor_results: Vec) -> Self { + self.base_config.executor_config = TestExecutorConfig::Test(executor_results); + self + } + + /// Sets the max block for the pipeline to run. + #[allow(dead_code)] + pub fn with_max_block(mut self, max_block: BlockNumber) -> Self { + self.base_config.max_block = Some(max_block); + self + } + + /// Uses the real pipeline instead of a pipeline with empty exec outputs. 
+ #[allow(dead_code)] + pub fn with_real_pipeline(mut self) -> Self { + self.base_config.pipeline_config = TestPipelineConfig::Real; + self + } + + /// Uses the real executor instead of a executor with empty results. + #[allow(dead_code)] + pub fn with_real_executor(mut self) -> Self { + self.base_config.executor_config = TestExecutorConfig::Real; + self + } + + /// Disables blockchain tree driven sync. This is the same as setting the pipeline run + /// threshold to 0. + #[allow(dead_code)] + pub fn disable_blockchain_tree_sync(mut self) -> Self { + self.base_config.pipeline_run_threshold = Some(0); + self + } + + /// Sets the client to use for network operations. + #[allow(dead_code)] + pub fn with_client( + self, + client: ClientType, + ) -> NetworkedTestConsensusEngineBuilder + where + ClientType: HeadersClient + BodiesClient + 'static, + { + NetworkedTestConsensusEngineBuilder { base_config: self.base_config, client: Some(client) } + } + + /// Builds the test consensus engine into a `TestConsensusEngine` and `TestEnv`. + pub fn build(self) -> (TestBeaconConsensusEngine, TestEnv>) { + reth_tracing::init_test_tracing(); + let db = create_test_rw_db(); + + let consensus: Arc = match self.base_config.consensus { + TestConsensusConfig::Real => { + Arc::new(BeaconConsensus::new(Arc::clone(&self.base_config.chain_spec))) + } + TestConsensusConfig::Test => Arc::new(TestConsensus::default()), + }; + let payload_builder = spawn_test_payload_service(); + + // use either noop client or a user provided client (for example TestFullBlockClient) + let client = Arc::new( + self.client + .map(EitherDownloader::Left) + .unwrap_or_else(|| EitherDownloader::Right(NoopFullBlockClient::default())), + ); + + // use either test executor or real executor + let executor_factory = match self.base_config.executor_config { + TestExecutorConfig::Test(results) => { + let executor_factory = + TestExecutorFactory::new(self.base_config.chain_spec.clone()); + executor_factory.extend(results); + EitherExecutorFactory::Left(executor_factory) + } + TestExecutorConfig::Real => { + EitherExecutorFactory::Right(Factory::new(self.base_config.chain_spec.clone())) + } + }; + + // Setup pipeline + let (tip_tx, tip_rx) = watch::channel(H256::default()); + let mut pipeline = match self.base_config.pipeline_config { + TestPipelineConfig::Test(outputs) => Pipeline::builder() + .add_stages(TestStages::new(outputs, Default::default())) + .with_tip_sender(tip_tx), + TestPipelineConfig::Real => { + let header_downloader = ReverseHeadersDownloaderBuilder::default() + .build(client.clone(), consensus.clone()) + .into_task(); + + let body_downloader = BodiesDownloaderBuilder::default() + .build(client.clone(), consensus.clone(), db.clone()) + .into_task(); + + Pipeline::builder().add_stages(DefaultStages::new( + HeaderSyncMode::Tip(tip_rx.clone()), + Arc::clone(&consensus) as Arc, + header_downloader, + body_downloader, + executor_factory.clone(), + )) + } + }; + + if let Some(max_block) = self.base_config.max_block { + pipeline = pipeline.with_max_block(max_block); + } + + let pipeline = pipeline.build(db.clone(), self.base_config.chain_spec.clone()); + + // Setup blockchain tree + let externals = TreeExternals::new( + db.clone(), + consensus, + executor_factory, + self.base_config.chain_spec.clone(), + ); + let config = BlockchainTreeConfig::new(1, 2, 3, 2); + let (canon_state_notification_sender, _) = tokio::sync::broadcast::channel(3); + let tree = ShareableBlockchainTree::new( + BlockchainTree::new(externals, 
canon_state_notification_sender, config) + .expect("failed to create tree"), + ); + let shareable_db = ProviderFactory::new(db.clone(), self.base_config.chain_spec.clone()); + let latest = self.base_config.chain_spec.genesis_header().seal_slow(); + let blockchain_provider = BlockchainProvider::with_latest(shareable_db, tree, latest); + + let pruner = Pruner::new( + db.clone(), + self.base_config.chain_spec.clone(), + 5, + 0, + PruneModes::default(), + BatchSizes::default(), + ); + + let (mut engine, handle) = BeaconConsensusEngine::new( + client, + pipeline, + blockchain_provider, + Box::::default(), + Box::::default(), + None, + false, + payload_builder, + None, + self.base_config.pipeline_run_threshold.unwrap_or(MIN_BLOCKS_FOR_PIPELINE_RUN), + Some(pruner), + ) + .expect("failed to create consensus engine"); + + if let Some(max_block) = self.base_config.max_block { + engine.sync.set_max_block(max_block) + } + + (engine, TestEnv::new(db, tip_rx, handle)) + } +} + +pub fn spawn_consensus_engine( + engine: TestBeaconConsensusEngine, +) -> oneshot::Receiver> { + let (tx, rx) = oneshot::channel(); + tokio::spawn(async move { + let result = engine.await; + tx.send(result).expect("failed to forward consensus engine result"); + }); + rx +} From dd6dbfdb776d14007ff27c38fb15f729b162a7c9 Mon Sep 17 00:00:00 2001 From: Aditya Pandey Date: Mon, 24 Jul 2023 23:43:24 +0530 Subject: [PATCH 233/722] feat: add eip 4844 blob tx type (#3807) Co-authored-by: Matthias Seitz --- crates/primitives/src/lib.rs | 2 +- crates/primitives/src/transaction/mod.rs | 81 ++++++++++++++++++++++++ 2 files changed, 82 insertions(+), 1 deletion(-) diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 153e2972cd4b..35df6932c1e7 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -88,7 +88,7 @@ pub use transaction::{ AccessList, AccessListItem, AccessListWithGasUsed, FromRecoveredTransaction, IntoRecoveredTransaction, InvalidTransactionError, Signature, Transaction, TransactionKind, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, - TxEip1559, TxEip2930, TxLegacy, TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, + TxEip1559, TxEip2930, TxEip4844, TxLegacy, TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, LEGACY_TX_TYPE_ID, }; pub use withdrawal::Withdrawal; diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index a99ae609fe4a..20cd6374d783 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -215,6 +215,87 @@ impl TxEip1559 { } } +/// A transaction with blob hashes and max blob fee +#[main_codec] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Default)] +pub struct TxEip4844 { + /// Added as EIP-pub 155: Simple replay attack protection + pub chain_id: u64, + /// A scalar value equal to the number of transactions sent by the sender; formally Tn. + pub nonce: u64, + /// A scalar value equal to the maximum + /// amount of gas that should be used in executing + /// this transaction. This is paid up-front, before any + /// computation is done and may not be increased + /// later; formally Tg. + pub gas_limit: u64, + /// A scalar value equal to the maximum + /// amount of gas that should be used in executing + /// this transaction. This is paid up-front, before any + /// computation is done and may not be increased + /// later; formally Tg. 
+    ///
+    /// As ethereum circulation is around 120mil eth as of 2022 that is around
+    /// 120000000000000000000000000 wei we are safe to use u128 as its max number is:
+    /// 340282366920938463463374607431768211455
+    pub max_fee_per_gas: u128,
+    /// Max Priority fee that transaction is paying
+    ///
+    /// As ethereum circulation is around 120mil eth as of 2022 that is around
+    /// 120000000000000000000000000 wei we are safe to use u128 as its max number is:
+    /// 340282366920938463463374607431768211455
+    pub max_priority_fee_per_gas: u128,
+    /// The 160-bit address of the message call’s recipient or, for a contract creation
+    /// transaction, ∅, used here to denote the only member of B0 ; formally Tt.
+    pub to: TransactionKind,
+    /// A scalar value equal to the number of Wei to
+    /// be transferred to the message call’s recipient or,
+    /// in the case of contract creation, as an endowment
+    /// to the newly created account; formally Tv.
+    ///
+    /// As ethereum circulation is around 120mil eth as of 2022 that is around
+    /// 120000000000000000000000000 wei we are safe to use u128 as its max number is:
+    /// 340282366920938463463374607431768211455
+    pub value: u128,
+    /// The accessList specifies a list of addresses and storage keys;
+    /// these addresses and storage keys are added into the `accessed_addresses`
+    /// and `accessed_storage_keys` global sets (introduced in EIP-2929).
+    /// A gas cost is charged, though at a discount relative to the cost of
+    /// accessing outside the list.
+    pub access_list: AccessList,
+
+    /// A vector of fixed-size (32-byte) hashes, one per blob.
+    pub blob_hashes: Vec<H256>,
+
+    /// Max fee per data gas
+    pub max_fee_per_blob: u128,
+
+    /// Input has two uses depending if transaction is Create or Call (if `to` field is None or
+    /// Some). pub init: An unlimited size byte array specifying the
+    /// EVM-code for the account initialisation procedure CREATE,
+    /// data: An unlimited size byte array specifying the
+    /// input data of the message call, formally Td.
+    pub input: Bytes,
+}
+
+impl TxEip4844 {
+    /// Calculates a heuristic for the in-memory size of the [TxEip4844] transaction.
+    #[inline]
+    pub fn size(&self) -> usize {
+        mem::size_of::<u64>() + // chain_id
+        mem::size_of::<u64>() + // nonce
+        mem::size_of::<u64>() + // gas_limit
+        mem::size_of::<u128>() + // max_fee_per_gas
+        mem::size_of::<u128>() + // max_priority_fee_per_gas
+        self.to.size() + // to
+        mem::size_of::<u128>() + // value
+        self.access_list.size() + // access_list
+        self.input.len() + // input
+        self.blob_hashes.capacity() * mem::size_of::<H256>() + // blob hashes size
+        mem::size_of::<u128>() // blob fee cap
+    }
+}
+
 /// A raw transaction.
 ///
 /// Transaction types were introduced in [EIP-2718](https://eips.ethereum.org/EIPS/eip-2718).
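For orientation, a minimal usage sketch of the new type. All field values are hypothetical; it assumes the `TxEip4844` and `H256` re-exports from `reth_primitives` introduced above and relies on the struct's derived `Default`:

```rust
use reth_primitives::{TxEip4844, H256};

fn main() {
    // Hypothetical blob transaction carrying two versioned blob hashes.
    let tx = TxEip4844 {
        chain_id: 1,
        nonce: 7,
        gas_limit: 21_000,
        max_fee_per_blob: 1_000_000_000, // 1 gwei per unit of data gas
        blob_hashes: vec![H256::zero(), H256::zero()],
        ..Default::default()
    };

    // The size heuristic counts one 32-byte hash per blob on top of the
    // fixed-width fields, so it grows with the number of blob hashes.
    assert!(tx.size() >= tx.blob_hashes.len() * 32);
}
```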
From 019ead37f2910377afc8f33387b2e234634eb9c3 Mon Sep 17 00:00:00 2001 From: Chris Evanko <106608356+cjeva10@users.noreply.github.com> Date: Mon, 24 Jul 2023 14:25:47 -0400 Subject: [PATCH 234/722] feat: sidechain length metric (#3864) Co-authored-by: Matthias Seitz --- crates/blockchain-tree/src/blockchain_tree.rs | 5 +++++ crates/blockchain-tree/src/metrics.rs | 2 ++ 2 files changed, 7 insertions(+) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 6c5f2fe3d626..ce92f48b2839 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1102,6 +1102,11 @@ impl BlockchainTree pub(crate) fn update_chains_metrics(&mut self) { let height = self.canonical_chain().tip().number; + let longest_sidechain_height = self.chains.values().map(|chain| chain.tip().number).max(); + if let Some(longest_sidechain_height) = longest_sidechain_height { + self.metrics.longest_sidechain_height.set(longest_sidechain_height as f64); + } + self.metrics.sidechains.set(self.chains.len() as f64); self.metrics.canonical_chain_height.set(height as f64); if let Some(metrics_tx) = self.sync_metrics_tx.as_mut() { diff --git a/crates/blockchain-tree/src/metrics.rs b/crates/blockchain-tree/src/metrics.rs index 864fac9474c5..fd48307d4be2 100644 --- a/crates/blockchain-tree/src/metrics.rs +++ b/crates/blockchain-tree/src/metrics.rs @@ -13,6 +13,8 @@ pub struct TreeMetrics { pub canonical_chain_height: Gauge, /// The number of reorgs pub reorgs: Counter, + /// Longest sidechain height + pub longest_sidechain_height: Gauge, } /// Metrics for the blockchain tree block buffer From 9b9ae82b2f0db02f2d66701080d6240834ce1b8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Altu=C4=9F=20Bakan?= Date: Mon, 24 Jul 2023 21:13:37 +0200 Subject: [PATCH 235/722] feat: report different request errors (#3857) --- crates/net/network/src/transactions.rs | 31 +++++++++++++++++++------- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/crates/net/network/src/transactions.rs b/crates/net/network/src/transactions.rs index 51d023d072cb..8c1db9a03bd5 100644 --- a/crates/net/network/src/transactions.rs +++ b/crates/net/network/src/transactions.rs @@ -12,7 +12,10 @@ use reth_eth_wire::{ EthVersion, GetPooledTransactions, NewPooledTransactionHashes, NewPooledTransactionHashes66, NewPooledTransactionHashes68, PooledTransactions, Transactions, }; -use reth_interfaces::{p2p::error::RequestResult, sync::SyncStateProvider}; +use reth_interfaces::{ + p2p::error::{RequestError, RequestResult}, + sync::SyncStateProvider, +}; use reth_metrics::common::mpsc::UnboundedMeteredReceiver; use reth_network_api::{Peers, ReputationChangeKind}; use reth_primitives::{ @@ -472,10 +475,22 @@ where } } - fn report_bad_message(&self, peer_id: PeerId) { - trace!(target: "net::tx", ?peer_id, "Penalizing peer for bad transaction"); + fn report_peer(&self, peer_id: PeerId, kind: ReputationChangeKind) { + trace!(target: "net::tx", ?peer_id, ?kind); + self.network.reputation_change(peer_id, kind); self.metrics.reported_bad_transactions.increment(1); - self.network.reputation_change(peer_id, ReputationChangeKind::BadTransactions); + } + + fn on_request_error(&self, peer_id: PeerId, req_err: RequestError) { + let kind = match req_err { + RequestError::UnsupportedCapability => ReputationChangeKind::BadProtocol, + RequestError::Timeout => ReputationChangeKind::Timeout, + RequestError::ChannelClosed | RequestError::ConnectionDropped => { + ReputationChangeKind::Dropped + 
} + RequestError::BadResponse => ReputationChangeKind::BadTransactions, + }; + self.report_peer(peer_id, kind); } fn report_already_seen(&self, peer_id: PeerId) { @@ -492,7 +507,7 @@ where fn on_bad_import(&mut self, hash: TxHash) { if let Some(peers) = self.transactions_by_peers.remove(&hash) { for peer_id in peers { - self.report_bad_message(peer_id); + self.report_peer(peer_id, ReputationChangeKind::BadTransactions); } } } @@ -537,11 +552,11 @@ where Poll::Ready(Ok(Ok(txs))) => { this.import_transactions(req.peer_id, txs.0, TransactionSource::Response); } - Poll::Ready(Ok(Err(_))) => { - this.report_bad_message(req.peer_id); + Poll::Ready(Ok(Err(req_err))) => { + this.on_request_error(req.peer_id, req_err); } Poll::Ready(Err(_)) => { - this.report_bad_message(req.peer_id); + this.on_request_error(req.peer_id, RequestError::ConnectionDropped) } } } From 993b84408bc2b271fd428c5a6d2677a969564b3d Mon Sep 17 00:00:00 2001 From: Chris Evanko <106608356+cjeva10@users.noreply.github.com> Date: Mon, 24 Jul 2023 17:42:59 -0400 Subject: [PATCH 236/722] feat: add canonicalization latency metric (#3865) --- crates/consensus/beacon/src/engine/metrics.rs | 10 ++++++- crates/consensus/beacon/src/engine/mod.rs | 30 +++++++++++++++++-- 2 files changed, 37 insertions(+), 3 deletions(-) diff --git a/crates/consensus/beacon/src/engine/metrics.rs b/crates/consensus/beacon/src/engine/metrics.rs index 04080e93be55..66a95dc578ad 100644 --- a/crates/consensus/beacon/src/engine/metrics.rs +++ b/crates/consensus/beacon/src/engine/metrics.rs @@ -1,5 +1,5 @@ use reth_metrics::{ - metrics::{self, Counter, Gauge}, + metrics::{self, Counter, Gauge, Histogram}, Metrics, }; @@ -15,6 +15,14 @@ pub(crate) struct EngineMetrics { pub(crate) new_payload_messages: Counter, /// The number of times the pruner was run. pub(crate) pruner_runs: Counter, + /// Latency for making canonical already canonical block + pub(crate) make_canonical_already_canonical_latency: Histogram, + /// Latency for making canonical committed block + pub(crate) make_canonical_committed_latency: Histogram, + /// Latency for making canonical returns error + pub(crate) make_canonical_error_latency: Histogram, + /// Latency for all making canonical results + pub(crate) make_canonical_latency: Histogram, } /// Metrics for the `EngineSyncController`. 
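For context, this is roughly how such a derived `Histogram` field is declared and recorded; a minimal sketch, assuming the `reth_metrics` derive macro and its `scope` attribute (the scope, struct, and field names here are made up):

```rust
use reth_metrics::{
    metrics::{self, Histogram},
    Metrics,
};
use std::time::Instant;

/// Hypothetical metrics container; `scope` prefixes the emitted metric names.
#[derive(Metrics)]
#[metrics(scope = "demo")]
pub(crate) struct DemoMetrics {
    /// Latency of a tracked operation.
    pub(crate) operation_latency: Histogram,
}

/// Times `op` and records the elapsed wall-clock time.
fn timed<T>(metrics: &DemoMetrics, op: impl FnOnce() -> T) -> T {
    let start = Instant::now();
    let out = op();
    // `Histogram::record` accepts any `IntoF64`; a `Duration` is recorded
    // as seconds, which is how the engine records latencies below.
    metrics.operation_latency.record(start.elapsed());
    out
}
```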
diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs
index 35ecd356a208..7c74bc395751 100644
--- a/crates/consensus/beacon/src/engine/mod.rs
+++ b/crates/consensus/beacon/src/engine/mod.rs
@@ -12,7 +12,7 @@ use reth_db::database::Database;
 use reth_interfaces::{
     blockchain_tree::{
         error::{InsertBlockError, InsertBlockErrorKind},
-        BlockStatus, BlockchainTreeEngine, InsertPayloadOk,
+        BlockStatus, BlockchainTreeEngine, CanonicalOutcome, InsertPayloadOk,
     },
     consensus::ForkchoiceState,
     executor::{BlockExecutionError, BlockValidationError},
@@ -39,6 +39,7 @@ use std::{
     pin::Pin,
     sync::Arc,
     task::{Context, Poll},
+    time::Instant,
 };
 use tokio::sync::{
     mpsc,
@@ -628,7 +629,10 @@ where
             return Ok(OnForkChoiceUpdated::syncing())
         }

-        let status = match self.blockchain.make_canonical(&state.head_block_hash) {
+        let start = Instant::now();
+        let make_canonical_result = self.blockchain.make_canonical(&state.head_block_hash);
+        self.record_make_canonical_latency(start, &make_canonical_result);
+        let status = match make_canonical_result {
             Ok(outcome) => {
                 if !outcome.is_already_canonical() {
                     debug!(target: "consensus::engine", hash=?state.head_block_hash, number=outcome.header().number, "canonicalized new head");
@@ -684,6 +688,28 @@ where
         Ok(OnForkChoiceUpdated::valid(status))
     }

+    /// Record latency metrics for one call to make a block canonical.
+    /// Takes the start time of the call and the result of the make-canonical call.
+    ///
+    /// Handles cases for error, already canonical, and committed blocks.
+    fn record_make_canonical_latency(
+        &self,
+        start: Instant,
+        outcome: &Result<CanonicalOutcome, Error>,
+    ) {
+        let elapsed = start.elapsed();
+        self.metrics.make_canonical_latency.record(elapsed);
+        match outcome {
+            Ok(CanonicalOutcome::AlreadyCanonical { .. }) => {
+                self.metrics.make_canonical_already_canonical_latency.record(elapsed)
+            }
+            Ok(CanonicalOutcome::Committed { .. }) => {
+                self.metrics.make_canonical_committed_latency.record(elapsed)
+            }
+            Err(_) => self.metrics.make_canonical_error_latency.record(elapsed),
+        }
+    }
+
     /// Ensures that the given forkchoice state is consistent, assuming the head block has been
     /// made canonical. This takes a status as input, and will only perform consistency checks if
     /// the input status is VALID.
From ea11787d7d58d90a48e62c43829eb2b7f9603caa Mon Sep 17 00:00:00 2001
From: Roman Krasiuk
Date: Tue, 25 Jul 2023 06:24:35 +0300
Subject: [PATCH 237/722] chore(trie): clean up in-mem root (#3894)

---
 crates/trie/src/hashed_cursor/post_state.rs | 53 ++++++---------------
 1 file changed, 14 insertions(+), 39 deletions(-)

diff --git a/crates/trie/src/hashed_cursor/post_state.rs b/crates/trie/src/hashed_cursor/post_state.rs
index c5418bc69245..1afff48c5927 100644
--- a/crates/trie/src/hashed_cursor/post_state.rs
+++ b/crates/trie/src/hashed_cursor/post_state.rs
@@ -261,12 +261,7 @@ where
         // Take the next account from the post state with the key greater than or equal to the
         // sought key.
         let mut post_state_entry = self.post_state.accounts.get(self.post_state_account_index);
-        while let Some((k, _)) = post_state_entry {
-            if k >= &key {
-                // Found the next entry that is equal or greater than the key.
- break - } - + while post_state_entry.map(|(k, _)| k < &key).unwrap_or_default() { self.post_state_account_index += 1; post_state_entry = self.post_state.accounts.get(self.post_state_account_index); } @@ -322,15 +317,9 @@ where db_entry = self.cursor.next()?; } - // Take the next account from the post state with the key greater than or equal to the - // sought key. + // Take the next account from the post state with the key greater than the last sought key. let mut post_state_entry = self.post_state.accounts.get(self.post_state_account_index); - while let Some((k, _)) = post_state_entry { - if k > last_account { - // Found the next entry in the post state. - break - } - + while post_state_entry.map(|(k, _)| k <= last_account).unwrap_or_default() { self.post_state_account_index += 1; post_state_entry = self.post_state.accounts.get(self.post_state_account_index); } @@ -389,7 +378,6 @@ impl<'b, C> HashedPostStateStorageCursor<'b, C> { /// Given the next post state and database entries, return the smallest of the two. /// If the storage keys are the same, the post state entry is given precedence. fn next_slot( - &self, post_state_item: Option<&(H256, U256)>, db_item: Option, ) -> Option { @@ -451,16 +439,11 @@ where // Attempt to find the account's storage in post state. let mut post_state_entry = None; if let Some(storage) = self.post_state.storages.get(&account) { - debug_assert!(storage.sorted, "`HashStorage` must be pre-sorted"); + debug_assert!(storage.sorted, "`HashedStorage` must be pre-sorted"); post_state_entry = storage.non_zero_valued_storage.get(self.post_state_storage_index); - while let Some((slot, _)) = post_state_entry { - if slot >= &subkey { - // Found the next entry that is equal or greater than the key. - break - } - + while post_state_entry.map(|(slot, _)| slot < &subkey).unwrap_or_default() { self.post_state_storage_index += 1; post_state_entry = storage.non_zero_valued_storage.get(self.post_state_storage_index); @@ -494,7 +477,7 @@ where }; // Compare two entries and return the lowest. - let result = self.next_slot(post_state_entry, db_entry); + let result = Self::next_slot(post_state_entry, db_entry); self.last_slot = result.as_ref().map(|entry| entry.key); Ok(result) } @@ -509,7 +492,7 @@ where let account = self.account.expect("`seek` must be called first"); let last_slot = match self.last_slot.as_ref() { - Some(account) => account, + Some(slot) => slot, None => return Ok(None), // no previous entry was found }; @@ -519,14 +502,12 @@ where // If post state was given precedence, move the cursor forward. let mut db_entry = self.cursor.seek_by_key_subkey(account, *last_slot)?; - // If the entry was already returned, move to the next. - if db_entry.as_ref().map(|entry| &entry.key == last_slot).unwrap_or_default() { - db_entry = self.cursor.next_dup_val()?; - } - + // If the entry was already returned or is zero-values, move to the next. while db_entry .as_ref() - .map(|entry| self.is_slot_zero_valued(&account, &entry.key)) + .map(|entry| { + &entry.key == last_slot || self.is_slot_zero_valued(&account, &entry.key) + }) .unwrap_or_default() { db_entry = self.cursor.next_dup_val()?; @@ -538,16 +519,10 @@ where // Attempt to find the account's storage in post state. 
let mut post_state_entry = None; if let Some(storage) = self.post_state.storages.get(&account) { - debug_assert!(storage.sorted, "`HashStorage` must be pre-sorted"); + debug_assert!(storage.sorted, "`HashedStorage` must be pre-sorted"); post_state_entry = storage.non_zero_valued_storage.get(self.post_state_storage_index); - - while let Some((k, _)) = post_state_entry { - if k > last_slot { - // Found the next entry. - break - } - + while post_state_entry.map(|(slot, _)| slot <= last_slot).unwrap_or_default() { self.post_state_storage_index += 1; post_state_entry = storage.non_zero_valued_storage.get(self.post_state_storage_index); @@ -555,7 +530,7 @@ where } // Compare two entries and return the lowest. - let result = self.next_slot(post_state_entry, db_entry); + let result = Self::next_slot(post_state_entry, db_entry); self.last_slot = result.as_ref().map(|entry| entry.key); Ok(result) } From 34fc89bd1fcbc28f5236dc751ef1bc1bb0ac011c Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Tue, 25 Jul 2023 12:21:13 +0200 Subject: [PATCH 238/722] feat(docs): add primer on all transaction types (#3897) --- book/SUMMARY.md | 1 + book/run/run-a-node.md | 3 +- book/run/transactions.md | 38 ++++++++++++++++++++++++ crates/primitives/src/transaction/mod.rs | 27 +++++++++++++++-- 4 files changed, 65 insertions(+), 4 deletions(-) create mode 100644 book/run/transactions.md diff --git a/book/SUMMARY.md b/book/SUMMARY.md index 21a98cbaf6e6..ffdb45c0cfdc 100644 --- a/book/SUMMARY.md +++ b/book/SUMMARY.md @@ -11,6 +11,7 @@ 1. [Mainnet or official testnets](./run/mainnet.md) 1. [Metrics](./run/observability.md) 1. [Configuring Reth](./run/config.md) + 1. [Transaction types](./run/transactions.md) 1. [Troubleshooting](./run/troubleshooting.md) 1. [Interacting with Reth over JSON-RPC](./jsonrpc/intro.md) 1. [eth](./jsonrpc/eth.md) diff --git a/book/run/run-a-node.md b/book/run/run-a-node.md index 164d76945e44..570b28d80236 100644 --- a/book/run/run-a-node.md +++ b/book/run/run-a-node.md @@ -6,6 +6,7 @@ In this chapter we'll go through a few different topics you'll encounter when ru 1. [Running on mainnet or official testnets](./mainnet.md) 1. [Logs and Observability](./observability.md) 1. [Configuring reth.toml](./config.md) +1. [Transaction types](./transactions.md) 1. [Troubleshooting](./troubleshooting.md) -In the future, we also intend to support the [OP Stack](https://stack.optimism.io/docs/understand/explainer/), which will allow you to run Reth as a Layer 2 client. More there soon! \ No newline at end of file +In the future, we also intend to support the [OP Stack](https://stack.optimism.io/docs/understand/explainer/), which will allow you to run Reth as a Layer 2 client. More there soon! diff --git a/book/run/transactions.md b/book/run/transactions.md new file mode 100644 index 000000000000..1499c064ffc7 --- /dev/null +++ b/book/run/transactions.md @@ -0,0 +1,38 @@ +# Transaction types + +Over time, the Ethereum network has undergone various upgrades and improvements to enhance transaction efficiency, security, and user experience. Three significant transaction types that have evolved are: + +- Legacy Transactions, +- EIP-2930 Transactions, +- EIP-1559 Transactions. + +Each of these transaction types brings unique features and improvements to the Ethereum network. 
+ +## Legacy Transactions + +Legacy Transactions (type `0x0`), the traditional Ethereum transactions in use since the network's inception, include the following parameters: +- `nonce`, +- `gasPrice`, +- `gasLimit`, +- `to`, +- `value`, +- `data`, +- `v`, +- `r`, +- `s`. + +These transactions do not utilize access lists, which specify the addresses and storage keys to be accessed, nor do they incorporate EIP-1559 fee market changes. + +## EIP-2930 Transactions + +Introduced in [EIP-2930](https://eips.ethereum.org/EIPS/eip-2930), transactions with type `0x1` incorporate an `accessList` parameter alongside legacy parameters. This `accessList` specifies an array of addresses and storage keys that the transaction plans to access, enabling gas savings on cross-contract calls by pre-declaring the accessed contract and storage slots. They do not include EIP-1559 fee market changes. + +## EIP-1559 Transactions + +[EIP-1559](https://eips.ethereum.org/EIPS/eip-1559) transactions (type `0x2`) were introduced in Ethereum's London fork to address network congestion and transaction fee overpricing caused by the historical fee market. Unlike traditional transactions, EIP-1559 transactions don't specify a gas price (`gasPrice`). Instead, they use an in-protocol, dynamically changing base fee per gas, adjusted at each block to manage network congestion. + +Alongside the `accessList` parameter and legacy parameters (except `gasPrice`), EIP-1559 transactions include: +- `maxPriorityFeePerGas`, specifying the maximum fee above the base fee the sender is willing to pay, +- `maxFeePerGas`, setting the maximum total fee the sender is willing to pay. + +The base fee is burned, while the priority fee is paid to the miner who includes the transaction, incentivizing miners to include transactions with higher priority fees per gas. diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 20cd6374d783..5a22b60f9bca 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -302,11 +302,32 @@ impl TxEip4844 { #[derive_arbitrary(compact)] #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub enum Transaction { - /// Legacy transaction. + /// Legacy transaction (type `0x0`). + /// + /// Traditional Ethereum transactions, containing parameters `nonce`, `gasPrice`, `gasLimit`, + /// `to`, `value`, `data`, `v`, `r`, and `s`. + /// + /// These transactions do not utilize access lists nor do they incorporate EIP-1559 fee market + /// changes. Legacy(TxLegacy), - /// Transaction with an [`AccessList`] ([EIP-2930](https://eips.ethereum.org/EIPS/eip-2930)). + /// Transaction with an [`AccessList`] ([EIP-2930](https://eips.ethereum.org/EIPS/eip-2930)), type `0x1`. + /// + /// The `accessList` specifies an array of addresses and storage keys that the transaction + /// plans to access, enabling gas savings on cross-contract calls by pre-declaring the accessed + /// contract and storage slots. Eip2930(TxEip2930), - /// A transaction with a priority fee ([EIP-1559](https://eips.ethereum.org/EIPS/eip-1559)). + /// A transaction with a priority fee ([EIP-1559](https://eips.ethereum.org/EIPS/eip-1559)), type `0x2`. + /// + /// Unlike traditional transactions, EIP-1559 transactions use an in-protocol, dynamically + /// changing base fee per gas, adjusted at each block to manage network congestion. 
+ /// + /// - `maxPriorityFeePerGas`, specifying the maximum fee above the base fee the sender is + /// willing to pay + /// - `maxFeePerGas`, setting the maximum total fee the sender is willing to pay. + /// + /// The base fee is burned, while the priority fee is paid to the miner who includes the + /// transaction, incentivizing miners to include transactions with higher priority fees per + /// gas. Eip1559(TxEip1559), } From 1b31a55d62ac9641e06d3ca4d57dd20a461201f1 Mon Sep 17 00:00:00 2001 From: pistomat Date: Tue, 25 Jul 2023 13:33:23 +0200 Subject: [PATCH 239/722] feat: add a `--dev` option (#3866) --- bin/reth/src/args/dev_args.rs | 99 ++++++++++++++++++++++++++ bin/reth/src/args/mod.rs | 4 ++ bin/reth/src/args/network_args.rs | 2 +- bin/reth/src/args/rpc_server_args.rs | 2 +- bin/reth/src/args/utils.rs | 4 +- bin/reth/src/node/mod.rs | 53 +++++++++++--- crates/consensus/auto-seal/src/lib.rs | 2 +- crates/primitives/res/genesis/dev.json | 75 +++++++++++++++++++ crates/primitives/src/chain/mod.rs | 7 +- crates/primitives/src/chain/spec.rs | 52 +++++++++++++- crates/primitives/src/constants.rs | 4 ++ crates/primitives/src/lib.rs | 4 +- 12 files changed, 291 insertions(+), 17 deletions(-) create mode 100644 bin/reth/src/args/dev_args.rs create mode 100644 crates/primitives/res/genesis/dev.json diff --git a/bin/reth/src/args/dev_args.rs b/bin/reth/src/args/dev_args.rs new file mode 100644 index 000000000000..5cc02522d1fa --- /dev/null +++ b/bin/reth/src/args/dev_args.rs @@ -0,0 +1,99 @@ +//! clap [Args](clap::Args) for Dev testnet configuration +use std::time::Duration; + +use clap::Args; +use humantime::parse_duration; + +/// Parameters for Dev testnet configuration +#[derive(Debug, Args, PartialEq, Default, Clone, Copy)] +#[command(next_help_heading = "Dev testnet")] +pub struct DevArgs { + /// Start the node in dev mode + /// + /// This mode uses a local proof-of-authority consensus engine with either fixed block times + /// or automatically mined blocks. + /// Disables network discovery and enables local http server. + /// Prefunds 20 accounts derived by mnemonic "test test test test test test test test test test + /// test junk" with 10 000 ETH each. + #[arg(long = "dev", alias = "auto-mine", help_heading = "Dev testnet", verbatim_doc_comment)] + pub dev: bool, + + /// How many transactions to mine per block. + #[arg( + long = "dev.block_max_transactions", + help_heading = "Dev testnet", + conflicts_with = "block_time" + )] + pub block_max_transactions: Option, + + /// Interval between blocks. 
+ /// + /// Parses strings using [humantime::parse_duration] + /// --dev.block_time 12s + #[arg( + long = "dev.block_time", + help_heading = "Dev testnet", + conflicts_with = "block_max_transactions", + value_parser = parse_duration, + verbatim_doc_comment + )] + pub block_time: Option, +} + +#[cfg(test)] +mod tests { + use super::*; + use clap::Parser; + + /// A helper type to parse Args more easily + #[derive(Parser)] + struct CommandParser { + #[clap(flatten)] + args: T, + } + + #[test] + fn test_parse_dev_args() { + let args = CommandParser::::parse_from(["reth"]).args; + assert_eq!(args, DevArgs { dev: false, block_max_transactions: None, block_time: None }); + + let args = CommandParser::::parse_from(["reth", "--dev"]).args; + assert_eq!(args, DevArgs { dev: true, block_max_transactions: None, block_time: None }); + + let args = CommandParser::::parse_from(["reth", "--auto-mine"]).args; + assert_eq!(args, DevArgs { dev: true, block_max_transactions: None, block_time: None }); + + let args = CommandParser::::parse_from([ + "reth", + "--dev", + "--dev.block_max_transactions", + "2", + ]) + .args; + assert_eq!(args, DevArgs { dev: true, block_max_transactions: Some(2), block_time: None }); + + let args = + CommandParser::::parse_from(["reth", "--dev", "--dev.block_time", "1s"]).args; + assert_eq!( + args, + DevArgs { + dev: true, + block_max_transactions: None, + block_time: Some(std::time::Duration::from_secs(1)) + } + ); + } + + #[test] + fn test_parse_dev_args_conflicts() { + let args = CommandParser::::try_parse_from([ + "reth", + "--dev", + "--dev.block_max_transactions", + "2", + "--dev.block_time", + "1s", + ]); + assert!(args.is_err()); + } +} diff --git a/bin/reth/src/args/mod.rs b/bin/reth/src/args/mod.rs index 05d691e8a371..dd4dd83d0b69 100644 --- a/bin/reth/src/args/mod.rs +++ b/bin/reth/src/args/mod.rs @@ -35,4 +35,8 @@ pub use gas_price_oracle_args::GasPriceOracleArgs; mod txpool_args; pub use txpool_args::TxPoolArgs; +/// DevArgs for configuring the dev testnet +mod dev_args; +pub use dev_args::DevArgs; + pub mod utils; diff --git a/bin/reth/src/args/network_args.rs b/bin/reth/src/args/network_args.rs index 8027c3022ce0..c945fdaf32f6 100644 --- a/bin/reth/src/args/network_args.rs +++ b/bin/reth/src/args/network_args.rs @@ -110,7 +110,7 @@ impl NetworkArgs { #[derive(Debug, Args)] pub struct DiscoveryArgs { /// Disable the discovery service. - #[arg(short, long)] + #[arg(short, long, default_value_if("dev", "true", "true"))] pub disable_discovery: bool, /// Disable the DNS discovery. diff --git a/bin/reth/src/args/rpc_server_args.rs b/bin/reth/src/args/rpc_server_args.rs index a38cd6994450..baf3f94f9f65 100644 --- a/bin/reth/src/args/rpc_server_args.rs +++ b/bin/reth/src/args/rpc_server_args.rs @@ -56,7 +56,7 @@ pub(crate) const RPC_DEFAULT_MAX_TRACING_REQUESTS: u32 = 25; #[command(next_help_heading = "RPC")] pub struct RpcServerArgs { /// Enable the HTTP-RPC server - #[arg(long)] + #[arg(long, default_value_if("dev", "true", "true"))] pub http: bool, /// Http server address to listen on diff --git a/bin/reth/src/args/utils.rs b/bin/reth/src/args/utils.rs index 4a11339450ed..c9fe80685d5d 100644 --- a/bin/reth/src/args/utils.rs +++ b/bin/reth/src/args/utils.rs @@ -1,7 +1,7 @@ //! 
Clap parser utilities use reth_primitives::{ - fs, AllGenesisFormats, BlockHashOrNumber, ChainSpec, GOERLI, MAINNET, SEPOLIA, + fs, AllGenesisFormats, BlockHashOrNumber, ChainSpec, DEV, GOERLI, MAINNET, SEPOLIA, }; use reth_revm::primitives::B256 as H256; use std::{ @@ -25,6 +25,7 @@ pub fn chain_spec_value_parser(s: &str) -> eyre::Result, eyre::Er "mainnet" => MAINNET.clone(), "goerli" => GOERLI.clone(), "sepolia" => SEPOLIA.clone(), + "dev" => DEV.clone(), _ => { let raw = fs::read_to_string(PathBuf::from(shellexpand::full(s)?.into_owned()))?; serde_json::from_str(&raw)? @@ -39,6 +40,7 @@ pub fn genesis_value_parser(s: &str) -> eyre::Result, eyre::Error "mainnet" => MAINNET.clone(), "goerli" => GOERLI.clone(), "sepolia" => SEPOLIA.clone(), + "dev" => DEV.clone(), _ => { let raw = fs::read_to_string(PathBuf::from(shellexpand::full(s)?.into_owned()))?; let genesis: AllGenesisFormats = serde_json::from_str(&raw)?; diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 6a1da252f0b5..98fa3b113064 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -2,7 +2,7 @@ //! //! Starts the client use crate::{ - args::{get_secret_key, DebugArgs, NetworkArgs, RpcServerArgs, TxPoolArgs}, + args::{get_secret_key, DebugArgs, DevArgs, NetworkArgs, RpcServerArgs, TxPoolArgs}, dirs::DataDirPath, init::init_genesis, prometheus_exporter, @@ -14,7 +14,7 @@ use clap::Parser; use eyre::Context; use fdlimit::raise_fd_limit; use futures::{future::Either, pin_mut, stream, stream_select, StreamExt}; -use reth_auto_seal_consensus::{AutoSealBuilder, AutoSealConsensus}; +use reth_auto_seal_consensus::{AutoSealBuilder, AutoSealConsensus, MiningMode}; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; use reth_beacon_consensus::{BeaconConsensus, BeaconConsensusEngine, MIN_BLOCKS_FOR_PIPELINE_RUN}; use reth_blockchain_tree::{ @@ -112,12 +112,15 @@ pub struct Command { /// - mainnet /// - goerli /// - sepolia + /// - dev #[arg( long, value_name = "CHAIN_OR_PATH", verbatim_doc_comment, default_value = "mainnet", - value_parser = genesis_value_parser + default_value_if("dev", "true", "dev"), + value_parser = genesis_value_parser, + required = false, )] chain: Arc, @@ -145,9 +148,8 @@ pub struct Command { #[clap(flatten)] db: DatabaseArgs, - /// Automatically mine blocks for new transactions - #[arg(long)] - auto_mine: bool, + #[clap(flatten)] + dev: DevArgs, } impl Command { @@ -181,7 +183,7 @@ impl Command { info!(target: "reth::cli", "{}", DisplayHardforks::from(self.chain.hardforks().clone())); - let consensus: Arc = if self.auto_mine { + let consensus: Arc = if self.dev.dev { debug!(target: "reth::cli", "Using auto seal"); Arc::new(AutoSealConsensus::new(Arc::clone(&self.chain))) } else { @@ -304,13 +306,28 @@ impl Command { }; // Configure the pipeline - let (mut pipeline, client) = if self.auto_mine { + let (mut pipeline, client) = if self.dev.dev { + info!(target: "reth::cli", "Starting Reth in dev mode"); + + let mining_mode = if let Some(interval) = self.dev.block_time { + MiningMode::interval(interval) + } else if let Some(max_transactions) = self.dev.block_max_transactions { + MiningMode::instant( + max_transactions, + transaction_pool.pending_transactions_listener(), + ) + } else { + info!(target: "reth::cli", "No mining mode specified, defaulting to ReadyTransaction"); + MiningMode::instant(1, transaction_pool.pending_transactions_listener()) + }; + let (_, client, mut task) = AutoSealBuilder::new( Arc::clone(&self.chain), 
blockchain_db.clone(), transaction_pool.clone(), consensus_engine_tx.clone(), canon_state_notification_sender, + mining_mode, ) .build(); @@ -798,6 +815,8 @@ async fn run_network_until_shutdown( #[cfg(test)] mod tests { + use reth_primitives::DEV; + use super::*; use std::{net::IpAddr, path::Path}; @@ -869,4 +888,22 @@ mod tests { let db_path = data_dir.db_path(); assert_eq!(db_path, Path::new("my/custom/path/db")); } + + #[test] + fn parse_dev() { + let cmd = Command::parse_from(["reth", "--dev"]); + let chain = DEV.clone(); + assert_eq!(cmd.chain.chain, chain.chain); + assert_eq!(cmd.chain.genesis_hash, chain.genesis_hash); + assert_eq!( + cmd.chain.paris_block_and_final_difficulty, + chain.paris_block_and_final_difficulty + ); + assert_eq!(cmd.chain.hardforks, chain.hardforks); + + assert!(cmd.rpc.http); + assert!(cmd.network.discovery.disable_discovery); + + assert!(cmd.dev.dev); + } } diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index 76bf16e28fd3..668a2117f6b9 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -115,13 +115,13 @@ where pool: Pool, to_engine: UnboundedSender, canon_state_notification: CanonStateNotificationSender, + mode: MiningMode, ) -> Self { let latest_header = client .latest_header() .ok() .flatten() .unwrap_or_else(|| chain_spec.sealed_genesis_header()); - let mode = MiningMode::interval(std::time::Duration::from_secs(1)); Self { storage: Storage::new(latest_header), diff --git a/crates/primitives/res/genesis/dev.json b/crates/primitives/res/genesis/dev.json new file mode 100644 index 000000000000..46018b128fb4 --- /dev/null +++ b/crates/primitives/res/genesis/dev.json @@ -0,0 +1,75 @@ +{ + "nonce": "0x0", + "timestamp": "0x6490fdd2", + "extraData": "0x", + "gasLimit": "0x1c9c380", + "difficulty": "0x0", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase": "0x0000000000000000000000000000000000000000", + "stateRoot": "0x5eb6e371a698b8d68f665192350ffcecbbbf322916f4b51bd79bb6887da3f494", + "alloc": { + "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x70997970C51812dc3A010C7d01b50e0d17dc79C8": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x90F79bf6EB2c4f870365E785982E1f101E93b906": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x15d34AAf54267DB7D7c367839AAf71A00a2C6A65": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x976EA74026E726554dB657fA54763abd0C3a0aa9": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x14dC79964da2C08b23698B3D3cc7Ca32193d9955": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x23618e81E3f5cdF7f54C3d65f7FBc0aBf5B21E8f": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0xa0Ee7A142d267C1f36714E4a8F75612F20a79720": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0xBcd4042DE499D14e55001CcbB24a551F3b954096": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x71bE63f3384f5fb98995898A86B02Fb2426c5788": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0xFABB0ac9d68B0B445fB7357272Ff202C5651694a": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x1CBd3b2770909D4e10f157cABC84C7264073C9Ec": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0xdF3e18d64BC6A983f673Ab319CCaE4f1a57C7097": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0xcd3B766CCDd6AE721141F452C550Ca635964ce71": { + "balance": 
"0xD3C21BCECCEDA1000000" + }, + "0x2546BcD3c84621e976D8185a91A922aE77ECEc30": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0xbDA5747bFD65F08deb54cb465eB87D40e51B197E": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0xdD2FD4581271e230360230F9337D5c0430Bf44C0": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x8626f6940E2eb28930eFb4CeF49B2d1F2C9C1199": { + "balance": "0xD3C21BCECCEDA1000000" + } + }, + "number": "0x0", + "gasUsed": "0x0", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000" +} \ No newline at end of file diff --git a/crates/primitives/src/chain/mod.rs b/crates/primitives/src/chain/mod.rs index 425939ce44b6..b46519f32566 100644 --- a/crates/primitives/src/chain/mod.rs +++ b/crates/primitives/src/chain/mod.rs @@ -11,7 +11,7 @@ use std::{fmt, str::FromStr}; mod spec; pub use spec::{ AllGenesisFormats, ChainSpec, ChainSpecBuilder, DisplayHardforks, ForkCondition, - ForkTimestamps, GOERLI, MAINNET, SEPOLIA, + ForkTimestamps, DEV, GOERLI, MAINNET, SEPOLIA, }; // The chain info module. @@ -44,6 +44,11 @@ impl Chain { Chain::Named(ethers_core::types::Chain::Sepolia) } + /// Returns the dev chain. + pub const fn dev() -> Self { + Chain::Named(ethers_core::types::Chain::Dev) + } + /// The id of the chain pub fn id(&self) -> u64 { match self { diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index 0c84dc275399..356c9409b7e0 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -130,6 +130,43 @@ pub static SEPOLIA: Lazy> = Lazy::new(|| { .into() }); +/// Dev testnet specification +/// +/// Includes 20 prefunded accounts with 10_000 ETH each derived from mnemonic "test test test test +/// test test test test test test test junk". +pub static DEV: Lazy> = Lazy::new(|| { + ChainSpec { + chain: Chain::dev(), + genesis: serde_json::from_str(include_str!("../../res/genesis/dev.json")) + .expect("Can't deserialize Dev testnet genesis json"), + genesis_hash: Some(H256(hex!( + "2f980576711e3617a5e4d83dd539548ec0f7792007d505a3d2e9674833af2d7c" + ))), + paris_block_and_final_difficulty: Some((0, U256::from(0))), + fork_timestamps: ForkTimestamps::default().shanghai(0), + hardforks: BTreeMap::from([ + (Hardfork::Frontier, ForkCondition::Block(0)), + (Hardfork::Homestead, ForkCondition::Block(0)), + (Hardfork::Dao, ForkCondition::Block(0)), + (Hardfork::Tangerine, ForkCondition::Block(0)), + (Hardfork::SpuriousDragon, ForkCondition::Block(0)), + (Hardfork::Byzantium, ForkCondition::Block(0)), + (Hardfork::Constantinople, ForkCondition::Block(0)), + (Hardfork::Petersburg, ForkCondition::Block(0)), + (Hardfork::Istanbul, ForkCondition::Block(0)), + (Hardfork::MuirGlacier, ForkCondition::Block(0)), + (Hardfork::Berlin, ForkCondition::Block(0)), + (Hardfork::London, ForkCondition::Block(0)), + ( + Hardfork::Paris, + ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::from(0) }, + ), + (Hardfork::Shanghai, ForkCondition::Timestamp(0)), + ]), + } + .into() +}); + /// An Ethereum chain specification. 
/// /// A chain specification describes: @@ -887,8 +924,8 @@ where mod tests { use crate::{ Address, AllGenesisFormats, Chain, ChainSpec, ChainSpecBuilder, DisplayHardforks, - ForkCondition, ForkHash, ForkId, Genesis, Hardfork, Head, GOERLI, H256, MAINNET, SEPOLIA, - U256, + ForkCondition, ForkHash, ForkId, Genesis, Hardfork, Head, DEV, GOERLI, H256, MAINNET, + SEPOLIA, U256, }; use bytes::BytesMut; use ethers_core::types as EtherType; @@ -1188,6 +1225,17 @@ Post-merge hard forks (timestamp based): ); } + #[test] + fn dev_forkids() { + test_fork_ids( + &DEV, + &[( + Head { number: 0, ..Default::default() }, + ForkId { hash: ForkHash([0x45, 0xb8, 0x36, 0x12]), next: 0 }, + )], + ) + } + /// Checks that time-based forks work /// /// This is based off of the test vectors here: https://github.com/ethereum/go-ethereum/blob/5c8cc10d1e05c23ff1108022f4150749e73c0ca1/core/forkid/forkid_test.go#L155-L188 diff --git a/crates/primitives/src/constants.rs b/crates/primitives/src/constants.rs index 048a8801c4b3..c5922048276d 100644 --- a/crates/primitives/src/constants.rs +++ b/crates/primitives/src/constants.rs @@ -77,6 +77,10 @@ pub const GOERLI_GENESIS: H256 = pub const SEPOLIA_GENESIS: H256 = H256(hex!("25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9")); +/// Testnet genesis hash. +pub const DEV_GENESIS: H256 = + H256(hex!("2f980576711e3617a5e4d83dd539548ec0f7792007d505a3d2e9674833af2d7c")); + /// Keccak256 over empty array. pub const KECCAK_EMPTY: H256 = H256(hex!("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")); diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 35df6932c1e7..64faa1e03b11 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -60,11 +60,11 @@ pub use block::{ pub use bloom::Bloom; pub use chain::{ AllGenesisFormats, Chain, ChainInfo, ChainSpec, ChainSpecBuilder, DisplayHardforks, - ForkCondition, ForkTimestamps, GOERLI, MAINNET, SEPOLIA, + ForkCondition, ForkTimestamps, DEV, GOERLI, MAINNET, SEPOLIA, }; pub use compression::*; pub use constants::{ - EMPTY_OMMER_ROOT, GOERLI_GENESIS, KECCAK_EMPTY, MAINNET_GENESIS, SEPOLIA_GENESIS, + DEV_GENESIS, EMPTY_OMMER_ROOT, GOERLI_GENESIS, KECCAK_EMPTY, MAINNET_GENESIS, SEPOLIA_GENESIS, }; pub use forkid::{ForkFilter, ForkHash, ForkId, ForkTransition, ValidationError}; pub use genesis::{Genesis, GenesisAccount}; From ab9a2e6c2e6f0d8c0f2be50dfd4aa3b0d52089b8 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 25 Jul 2023 14:21:02 +0100 Subject: [PATCH 240/722] chore(storage): format MDBX flags doc comments (#3905) --- crates/storage/libmdbx-rs/src/flags.rs | 33 ++++++++++++++------------ 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/crates/storage/libmdbx-rs/src/flags.rs b/crates/storage/libmdbx-rs/src/flags.rs index ac11b03e8bc8..df9f817de109 100644 --- a/crates/storage/libmdbx-rs/src/flags.rs +++ b/crates/storage/libmdbx-rs/src/flags.rs @@ -41,14 +41,16 @@ pub enum SyncMode { /// keeps untouched pages within B-tree of the last transaction "steady" which was synced to /// disk completely. This has big implications for both data durability and (unfortunately) /// performance: + /// - A system crash can't corrupt the database, but you will lose the last transactions; + /// because MDBX will rollback to last steady commit since it kept explicitly. 
+    /// - The last steady transaction has an effect similar to a "long-lived" read transaction,
+    ///   since it prevents reuse of pages freed by newer write transactions; thus any data
+    ///   changes will be placed in newly allocated pages.
+    /// - To avoid rapid database growth, the system will sync data and issue a steady commit-point
+    ///   to resume reusing pages, each time there is insufficient space and before increasing the
+    ///   size of the file on disk.
     ///
-    /// A system crash can't corrupt the database, but you will lose the last transactions; because
-    /// MDBX will rollback to last steady commit since it kept explicitly. The last steady
-    /// transaction makes an effect similar to "long-lived" read transaction since prevents reuse
-    /// of pages freed by newer write transactions, thus the any data changes will be placed in
-    /// newly allocated pages. To avoid rapid database growth, the system will sync data and
-    /// issue a steady commit-point to resume reuse pages, each time there is insufficient space
-    /// and before increasing the size of the file on disk. In other words, with
+    /// In other words, with
     /// [SyncMode::SafeNoSync] flag MDBX protects you from whole-database corruption, at the
     /// cost of increased database size and/or number of disk IOPs. So, the [SyncMode::SafeNoSync]
     /// flag could be used with [Environment::sync()](crate::Environment::sync) as an alternative
@@ -81,15 +83,16 @@ pub enum SyncMode {
     /// expect the corrupted database after a system crash.
     ///
     /// So, the most important thing about [SyncMode::UtterlyNoSync]:
+    /// - A system crash immediately after committing a write transaction will very likely lead to
+    ///   database corruption.
+    /// - Successful completion of [Environment::sync(force=true)](crate::Environment::sync) after
+    ///   one or more committed transactions guarantees consistency and durability.
+    /// - BUT by committing two or more transactions you put the database back into a weak state,
+    ///   in which a system crash may lead to database corruption! If only a single transaction was
+    ///   committed after [Environment::sync()](crate::Environment::sync), you may lose that
+    ///   transaction itself, but not the whole database.
     ///
-    /// A system crash immediately after commit the write transaction high likely lead to database
-    /// corruption. Successful completion of
-    /// [Environment::sync(force=true)](crate::Environment::sync) after one or more committed
-    /// transactions guarantees consistency and durability. BUT by committing two or more
-    /// transactions you back database into a weak state, in which a system crash may lead to
-    /// database corruption! In case single transaction after
-    /// [Environment::sync()](crate::Environment::sync), you may lose transaction itself, but not a
-    /// whole database. Nevertheless, [SyncMode::UtterlyNoSync] provides "weak" durability in
+    /// Nevertheless, [SyncMode::UtterlyNoSync] provides "weak" durability in
     /// case of an application crash (but no durability on system failure), and therefore may
     /// be very useful in scenarios where data durability is not required over a system failure
     /// (e.g. for short-lived data), or if you can take such a risk.
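To make the durability trade-off concrete, here is a minimal sketch of opening an environment in `SafeNoSync` mode and then forcing a durable point explicitly. It assumes this crate is imported as `reth_libmdbx` and that the builder API (`Environment::new`, `set_flags`, `open`) and the `EnvironmentFlags`/`Mode` types behave as sketched; treat the exact calls as illustrative rather than authoritative:

```rust
use reth_libmdbx::{Environment, EnvironmentFlags, Error, Mode, SyncMode, WriteMap};
use std::path::Path;

fn open_weakly_durable(path: &Path) -> Result<Environment<WriteMap>, Error> {
    // SafeNoSync trades durability of the newest transactions for fewer
    // fsyncs: a crash rolls the database back to the last steady commit
    // instead of corrupting it.
    let env = Environment::<WriteMap>::new()
        .set_flags(EnvironmentFlags {
            mode: Mode::ReadWrite { sync_mode: SyncMode::SafeNoSync },
            ..Default::default()
        })
        .open(path)?;

    // Issue an explicit steady commit point: everything committed before
    // this call now survives a system crash.
    env.sync(true)?;
    Ok(env)
}
```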
From dc52650018a2b7f95d15058eefe380b403218663 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 25 Jul 2023 15:49:41 +0200 Subject: [PATCH 241/722] chore: add eip4844 constants (#3906) --- crates/primitives/src/constants/eip4844.rs | 25 +++++++++++++++++++ .../src/{constants.rs => constants/mod.rs} | 3 +++ 2 files changed, 28 insertions(+) create mode 100644 crates/primitives/src/constants/eip4844.rs rename crates/primitives/src/{constants.rs => constants/mod.rs} (98%) diff --git a/crates/primitives/src/constants/eip4844.rs b/crates/primitives/src/constants/eip4844.rs new file mode 100644 index 000000000000..edeb3eee5d1b --- /dev/null +++ b/crates/primitives/src/constants/eip4844.rs @@ -0,0 +1,25 @@ +//! [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#parameters) protocol constants for shard Blob Transactions. + +/// Size a single field element in bytes. +pub const FIELD_ELEMENT_BYTES: u64 = 32; + +/// How many field elements are stored in a single data blob. +pub const FIELD_ELEMENTS_PER_BLOB: u64 = 4096; + +/// Gas consumption of a single data blob. +pub const DATA_GAS_PER_BLOB: u64 = 131_072u64; // 32*4096 = 131072 == 2^17 == 0x20000 + +/// Maximum data gas for data blobs in a single block. +pub const MAX_DATA_GAS_PER_BLOCK: u64 = 786_432u64; // 0xC0000 + +/// Target data gas for data blobs in a single block. +pub const TARGET_DATA_GAS_PER_BLOCK: u64 = 393_216u64; // 0x60000 + +/// Maximum number of data blobs in a single block. +pub const MAX_BLOBS_PER_BLOCK: u64 = MAX_DATA_GAS_PER_BLOCK / DATA_GAS_PER_BLOB; // 786432 / 131072 = 6 + +/// Target number of data blobs in a single block. +pub const TARGET_BLOBS_PER_BLOCK: u64 = TARGET_DATA_GAS_PER_BLOCK / DATA_GAS_PER_BLOB; // 393216 / 131072 = 3 + +/// Used to determine the price for next data blob +pub const BLOB_GASPRICE_UPDATE_FRACTION: u64 = 3_338_477u64; // 3338477 diff --git a/crates/primitives/src/constants.rs b/crates/primitives/src/constants/mod.rs similarity index 98% rename from crates/primitives/src/constants.rs rename to crates/primitives/src/constants/mod.rs index c5922048276d..96c55cbbe11b 100644 --- a/crates/primitives/src/constants.rs +++ b/crates/primitives/src/constants/mod.rs @@ -4,6 +4,9 @@ use crate::{H256, U256}; use hex_literal::hex; use std::time::Duration; +/// [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#parameters) constants. +pub mod eip4844; + /// The client version: `reth/v{major}.{minor}.{patch}` pub const RETH_CLIENT_VERSION: &str = concat!("reth/v", env!("CARGO_PKG_VERSION")); From f7e887fb35e721c10f908988a476b1beb279836a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 25 Jul 2023 15:49:53 +0200 Subject: [PATCH 242/722] chore: add execution aborted error (#3901) --- crates/rpc/rpc/src/eth/error.rs | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index 023e76740610..f52bf7ca1144 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -1,12 +1,16 @@ //! Implementation specific Errors for the `eth_` namespace. 
use crate::result::{internal_rpc_err, invalid_params_rpc_err, rpc_err, rpc_error_with_code}; -use jsonrpsee::{core::Error as RpcError, types::ErrorObject}; +use jsonrpsee::{ + core::Error as RpcError, + types::{error::CALL_EXECUTION_FAILED_CODE, ErrorObject}, +}; use reth_primitives::{abi::decode_revert_reason, Address, Bytes, U256}; use reth_revm::tracing::js::JsInspectorError; use reth_rpc_types::{error::EthRpcErrorCode, BlockError}; use reth_transaction_pool::error::{InvalidPoolTransactionError, PoolError}; use revm::primitives::{EVMError, ExecutionResult, Halt, OutOfGasError}; +use std::time::Duration; /// Result alias pub type EthResult = Result; @@ -80,6 +84,9 @@ pub enum EthApiError { /// Error thrown when a spawned blocking task failed to deliver an anticipated response. #[error("internal eth error")] InternalEthError, + /// Error thrown when a (tracing) call exceeded the configured timeout. + #[error("execution aborted (timeout = {0:?})")] + ExecutionTimedOut(Duration), /// Internal Error thrown by the javascript tracer #[error("{0}")] InternalJsTracerError(String), @@ -112,6 +119,9 @@ impl From for ErrorObject<'static> { EthApiError::InternalJsTracerError(msg) => internal_rpc_err(msg), EthApiError::InvalidParams(msg) => invalid_params_rpc_err(msg), EthApiError::InvalidRewardPercentiles => internal_rpc_err(error.to_string()), + err @ EthApiError::ExecutionTimedOut(_) => { + rpc_error_with_code(CALL_EXECUTION_FAILED_CODE, err.to_string()) + } err @ EthApiError::InternalTracingError => internal_rpc_err(err.to_string()), err @ EthApiError::InternalEthError => internal_rpc_err(err.to_string()), } @@ -532,3 +542,14 @@ pub(crate) fn ensure_success(result: ExecutionResult) -> EthResult { } } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn timed_out_error() { + let err = EthApiError::ExecutionTimedOut(Duration::from_secs(10)); + assert_eq!(err.to_string(), "execution aborted (timeout = 10s)"); + } +} From 1c4e19bad72e2f31a6f9f661400eaef244aa83e6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 25 Jul 2023 16:35:58 +0200 Subject: [PATCH 243/722] feat: add alias for max response size (#3902) --- bin/reth/src/args/rpc_server_args.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/reth/src/args/rpc_server_args.rs b/bin/reth/src/args/rpc_server_args.rs index baf3f94f9f65..d05ce42b431e 100644 --- a/bin/reth/src/args/rpc_server_args.rs +++ b/bin/reth/src/args/rpc_server_args.rs @@ -120,7 +120,7 @@ pub struct RpcServerArgs { pub rpc_max_request_size: u32, /// Set the maximum RPC response payload size for both HTTP and WS in megabytes. - #[arg(long, default_value_t = RPC_DEFAULT_MAX_RESPONSE_SIZE_MB)] + #[arg(long, visible_alias = "--rpc.returndata.limit", default_value_t = RPC_DEFAULT_MAX_RESPONSE_SIZE_MB)] pub rpc_max_response_size: u32, /// Set the the maximum concurrent subscriptions per connection. 
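As an aside on the alias mechanics: clap treats the alias string as the long-option name itself, so aliases are conventionally written without the leading dashes. A minimal, self-contained sketch of the pattern (the field name and default value here are made up):

```rust
use clap::Parser;

#[derive(Parser, Debug)]
struct Args {
    /// Maximum RPC response payload size in megabytes.
    #[arg(long, visible_alias = "rpc.returndata.limit", default_value_t = 150)]
    rpc_max_response_size: u32,
}

fn main() {
    // Both spellings set the same field.
    let a = Args::parse_from(["demo", "--rpc-max-response-size", "200"]);
    let b = Args::parse_from(["demo", "--rpc.returndata.limit", "200"]);
    assert_eq!(a.rpc_max_response_size, b.rpc_max_response_size);
}
```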
From 076c91a916f1b584adb19569927163882de7a3fc Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 25 Jul 2023 16:51:30 +0200 Subject: [PATCH 244/722] feat: add Tracing call pool (#3908) --- Cargo.lock | 2 + Cargo.toml | 1 + crates/net/downloaders/Cargo.toml | 2 +- crates/rpc/rpc-builder/Cargo.toml | 3 + crates/rpc/rpc-builder/src/lib.rs | 3 + crates/rpc/rpc-builder/src/tracing_pool.rs | 138 +++++++++++++++++++++ crates/stages/Cargo.toml | 2 +- 7 files changed, 149 insertions(+), 2 deletions(-) create mode 100644 crates/rpc/rpc-builder/src/tracing_pool.rs diff --git a/Cargo.lock b/Cargo.lock index 7b674b4eec8b..5df882cc4fb3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5740,6 +5740,8 @@ version = "0.1.0-alpha.4" dependencies = [ "hyper", "jsonrpsee", + "pin-project", + "rayon", "reth-beacon-consensus", "reth-interfaces", "reth-ipc", diff --git a/Cargo.toml b/Cargo.toml index 92ed754a35aa..89efb758c8f8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -118,6 +118,7 @@ serde_json = "1.0.94" serde = { version = "1.0", default-features = false } rand = "0.8.5" strum = "0.25" +rayon = "1.7" ### proc-macros proc-macro2 = "1.0" diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index 07942b2ad0e2..36179fe07747 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -26,7 +26,7 @@ tokio-util = { workspace = true, features = ["codec"] } # misc tracing = { workspace = true } -rayon = "1.6.0" +rayon = { workspace = true } thiserror = { workspace = true } # optional deps for the test-utils feature diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index 8d51e178022e..c0078969d381 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -33,6 +33,9 @@ strum = { workspace = true, features = ["derive"] } serde = { workspace = true, features = ["derive"] } thiserror = { workspace = true } tracing = { workspace = true } +rayon = { workspace = true } +pin-project = { workspace = true } +tokio = { workspace = true, features = ["sync"] } [dev-dependencies] reth-tracing = { path = "../../tracing" } diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 8737519fa81b..afa7a37bf2bc 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -154,6 +154,9 @@ mod eth; /// Common RPC constants. pub mod constants; +/// Additional support for tracing related rpc calls +pub mod tracing_pool; + // re-export for convenience pub use crate::eth::{EthConfig, EthHandlers}; pub use jsonrpsee::server::ServerBuilder; diff --git a/crates/rpc/rpc-builder/src/tracing_pool.rs b/crates/rpc/rpc-builder/src/tracing_pool.rs new file mode 100644 index 000000000000..dd3056117801 --- /dev/null +++ b/crates/rpc/rpc-builder/src/tracing_pool.rs @@ -0,0 +1,138 @@ +//! Additional helpers for executing tracing calls + +use std::{ + future::Future, + panic::{catch_unwind, AssertUnwindSafe}, + pin::Pin, + sync::Arc, + task::{ready, Context, Poll}, + thread, +}; +use tokio::sync::oneshot; + +/// Used to execute tracing calls on a rayon threadpool from within a tokio runtime. +#[derive(Clone, Debug)] +pub struct TracingCallPool { + pool: Arc, +} + +impl TracingCallPool { + /// Create a new `TracingCallPool` with the given threadpool. + pub fn new(pool: rayon::ThreadPool) -> Self { + Self { pool: Arc::new(pool) } + } + + /// Convenience function to start building a new threadpool. 
+    pub fn builder() -> rayon::ThreadPoolBuilder {
+        rayon::ThreadPoolBuilder::new()
+    }
+
+    /// Convenience function to build a new threadpool with the default configuration.
+    ///
+    /// Uses [`rayon::ThreadPoolBuilder::build`](rayon::ThreadPoolBuilder::build) defaults but
+    /// increases the stack size to 8MB.
+    pub fn build() -> Result<Self, rayon::ThreadPoolBuildError> {
+        Self::builder()
+            // increase stack size, mostly for RPC calls that use the evm
+            .stack_size(8 * 1024 * 1024)
+            .build()
+            .map(Self::new)
+    }
+
+    /// Asynchronous wrapper around Rayon's
+    /// [`ThreadPool::spawn`](rayon::ThreadPool::spawn).
+    ///
+    /// Runs a function on the configured threadpool, returning a future that resolves with the
+    /// function's return value.
+    ///
+    /// If the function panics, the future will resolve to an error.
+    pub fn spawn<F, R>(&self, func: F) -> TracingCallHandle<R>
+    where
+        F: FnOnce() -> R + Send + 'static,
+        R: Send + 'static,
+    {
+        let (tx, rx) = oneshot::channel();
+
+        self.pool.spawn(move || {
+            let _result = tx.send(catch_unwind(AssertUnwindSafe(func)));
+        });
+
+        TracingCallHandle { rx }
+    }
+
+    /// Asynchronous wrapper around Rayon's
+    /// [`ThreadPool::spawn_fifo`](rayon::ThreadPool::spawn_fifo).
+    ///
+    /// Runs a function on the configured threadpool, returning a future that resolves with the
+    /// function's return value.
+    ///
+    /// If the function panics, the future will resolve to an error.
+    pub fn spawn_fifo<F, R>(&self, func: F) -> TracingCallHandle<R>
+    where
+        F: FnOnce() -> R + Send + 'static,
+        R: Send + 'static,
+    {
+        let (tx, rx) = oneshot::channel();
+
+        self.pool.spawn_fifo(move || {
+            let _result = tx.send(catch_unwind(AssertUnwindSafe(func)));
+        });
+
+        TracingCallHandle { rx }
+    }
+}
+
+/// Async handle for a blocking tracing task running in a Rayon thread pool.
+///
+/// ## Panics
+///
+/// If polled from outside a tokio runtime.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+#[pin_project::pin_project]
+pub struct TracingCallHandle<T> {
+    #[pin]
+    pub(crate) rx: oneshot::Receiver<thread::Result<T>>,
+}
+
+impl<T> Future for TracingCallHandle<T> {
+    type Output = thread::Result<T>;
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        match ready!(self.project().rx.poll(cx)) {
+            Ok(res) => Poll::Ready(res),
+            Err(_) => Poll::Ready(Err(Box::<TokioTracingCallError>::default())),
+        }
+    }
+}
+
+/// An error returned when the Tokio channel is dropped while awaiting a result.
+/// +/// This should only happen +#[derive(Debug, Default, thiserror::Error)] +#[error("Tokio channel dropped while awaiting result")] +#[non_exhaustive] +pub struct TokioTracingCallError; + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn tracing_pool() { + let pool = TracingCallPool::build().unwrap(); + let res = pool.spawn(move || 5); + let res = res.await.unwrap(); + assert_eq!(res, 5); + } + + #[tokio::test] + async fn tracing_pool_panic() { + let pool = TracingCallPool::build().unwrap(); + let res = pool.spawn(move || -> i32 { + panic!(); + }); + let res = res.await; + assert!(res.is_err()); + } +} diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index f83dcab22c1b..c8236c3a713c 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -41,7 +41,7 @@ serde = { workspace = true } thiserror = { workspace = true } aquamarine = "0.3.0" itertools = "0.10.5" -rayon = "1.6.0" +rayon = { workspace = true } num-traits = "0.2.15" [dev-dependencies] From af604289bb7f7f6da08926a071cc18d285d32247 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 25 Jul 2023 17:19:30 +0200 Subject: [PATCH 245/722] chore: use ruint 1.9 and remove patch (#3910) --- Cargo.lock | 12 ++++++------ Cargo.toml | 3 --- crates/primitives/Cargo.toml | 2 +- 3 files changed, 7 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5df882cc4fb3..b2c14b5d4715 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6045,24 +6045,24 @@ dependencies = [ [[package]] name = "ruint" -version = "1.8.0" -source = "git+https://github.com/paradigmxyz/uint#38f565ff907ccaf2a2c57d395ed7c2b8905ae1ab" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77e1574d439643c8962edf612a888e7cc5581bcdf36cb64e6bc88466b03b2daa" dependencies = [ "arbitrary", - "derive_more", "primitive-types", "proptest", "rlp", "ruint-macro", - "rustc_version", "serde", "thiserror", ] [[package]] name = "ruint-macro" -version = "1.0.2" -source = "git+https://github.com/paradigmxyz/uint#38f565ff907ccaf2a2c57d395ed7c2b8905ae1ab" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e666a5496a0b2186dbcd0ff6106e29e093c15591bde62c20d3842007c6978a09" [[package]] name = "rustc-demangle" diff --git a/Cargo.toml b/Cargo.toml index 89efb758c8f8..ff1fe7c89eeb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -77,9 +77,6 @@ codegen-units = 1 incremental = false [patch.crates-io] -# patched for quantity U256 responses -ruint = { git = "https://github.com/paradigmxyz/uint" } - revm = { git = "https://github.com/bluealloy/revm/", branch = "release/v25" } revm-primitives = { git = "https://github.com/bluealloy/revm/", branch = "release/v25" } diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index aca12eb5468a..10e47b7a3a3f 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -19,7 +19,7 @@ revm-primitives = { workspace = true, features = ["serde"] } ethers-core = { workspace = true, default-features = false } tiny-keccak = { version = "2.0", features = ["keccak"] } crunchy = { version = "0.2.2", default-features = false, features = ["limit_256"] } -ruint = { version = "1.7.0", features = ["primitive-types", "rlp"] } +ruint = { version = "1.9.0", features = ["primitive-types", "rlp"] } # Bloom fixed-hash = { version = "0.8", default-features = false, features = ["rustc-hex"] } From 4a24ae25555fc2e15d0e4ade4dd15c1fbd43aa09 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 25 Jul 
Subject: [PATCH 246/722] fix(rpc): support both input and data fields (#3911)

---
 crates/rpc/rpc-types/src/eth/call.rs       | 92 ++++++++++++++++++++--
 crates/rpc/rpc-types/src/eth/mod.rs        |  2 +-
 crates/rpc/rpc/src/eth/api/transactions.rs |  2 +-
 crates/rpc/rpc/src/eth/error.rs            |  5 +-
 crates/rpc/rpc/src/eth/revm_utils.rs       |  4 +-
 5 files changed, 95 insertions(+), 10 deletions(-)

diff --git a/crates/rpc/rpc-types/src/eth/call.rs b/crates/rpc/rpc-types/src/eth/call.rs
index 49b5b7613d2a..9aec714c1266 100644
--- a/crates/rpc/rpc-types/src/eth/call.rs
+++ b/crates/rpc/rpc-types/src/eth/call.rs
@@ -19,11 +19,9 @@ pub struct CallRequest {
     pub gas: Option<U256>,
     /// Value
     pub value: Option<U256>,
-    /// Transaction data
-    ///
-    /// This accepts both `input` and `data`
-    #[serde(alias = "input")]
-    pub data: Option<Bytes>,
+    /// Transaction input data
+    #[serde(default, flatten)]
+    pub input: CallInput,
     /// Nonce
     pub nonce: Option<U256>,
     /// chain id
@@ -44,6 +42,71 @@ impl CallRequest {
     }
 }
 
+/// Helper type that supports both `data` and `input` fields that map to transaction input data.
+///
+/// This is done for compatibility reasons where older implementations used `data` instead of the
+/// newer, recommended `input` field.
+///
+/// If both fields are set, it is expected that they contain the same value, otherwise an error is
+/// returned.
+#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize)]
+pub struct CallInput {
+    /// Transaction data
+    pub input: Option<Bytes>,
+    /// Transaction data
+    ///
+    /// This is the same as `input` but is used for backwards compatibility.
+    pub data: Option<Bytes>,
+}
+
+impl CallInput {
+    /// Consumes the type and returns the optional input data.
+    ///
+    /// Returns an error if both `data` and `input` fields are set and not equal.
+    pub fn try_into_unique_input(self) -> Result<Option<Bytes>, CallInputError> {
+        let Self { input, data } = self;
+        match (input, data) {
+            (Some(input), Some(data)) if input == data => Ok(Some(input)),
+            (Some(_), Some(_)) => Err(CallInputError::default()),
+            (Some(input), None) => Ok(Some(input)),
+            (None, Some(data)) => Ok(Some(data)),
+            (None, None) => Ok(None),
+        }
+    }
+
+    /// Returns the optional input data.
+    ///
+    /// Returns an error if both `data` and `input` fields are set and not equal.
+    pub fn unique_input(&self) -> Result<Option<&Bytes>, CallInputError> {
+        let Self { input, data } = self;
+        match (input, data) {
+            (Some(input), Some(data)) if input == data => Ok(Some(input)),
+            (Some(_), Some(_)) => Err(CallInputError::default()),
+            (Some(input), None) => Ok(Some(input)),
+            (None, Some(data)) => Ok(Some(data)),
+            (None, None) => Ok(None),
+        }
+    }
+}
+
+impl From<Bytes> for CallInput {
+    fn from(input: Bytes) -> Self {
+        Self { input: Some(input), data: None }
+    }
+}
+
+impl From<Option<Bytes>> for CallInput {
+    fn from(input: Option<Bytes>) -> Self {
+        Self { input, data: None }
+    }
+}
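For illustration (not part of this diff), the flattened `CallInput` makes the compatibility behavior concrete; the snippet assumes `serde_json` and the types above:

    // Both fields set and equal: deserializes and resolves to the shared value.
    let ok: CallInput =
        serde_json::from_str(r#"{"input":"0x0902f1ac","data":"0x0902f1ac"}"#).unwrap();
    assert!(ok.try_into_unique_input().unwrap().is_some());

    // Both fields set but different: deserialization still succeeds, the conflict
    // is only reported when the input is resolved.
    let conflicting: CallInput =
        serde_json::from_str(r#"{"input":"0x0902f1ac","data":"0x0902f1"}"#).unwrap();
    assert!(conflicting.try_into_unique_input().is_err());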
Please use \"input\" to pass transaction call data")] +#[non_exhaustive] +pub struct CallInputError; + #[cfg(test)] mod tests { use super::*; @@ -53,4 +116,23 @@ mod tests { let s = r#"{"accessList":[],"data":"0x0902f1ac","to":"0xa478c2975ab1ea89e8196811f51a7b7ade33eb11","type":"0x02"}"#; let _req = serde_json::from_str::(s).unwrap(); } + + #[test] + fn serde_unique_call_input() { + let s = r#"{"accessList":[],"data":"0x0902f1ac", "input":"0x0902f1ac","to":"0xa478c2975ab1ea89e8196811f51a7b7ade33eb11","type":"0x02"}"#; + let req = serde_json::from_str::(s).unwrap(); + assert!(req.input.try_into_unique_input().unwrap().is_some()); + + let s = r#"{"accessList":[],"data":"0x0902f1ac","to":"0xa478c2975ab1ea89e8196811f51a7b7ade33eb11","type":"0x02"}"#; + let req = serde_json::from_str::(s).unwrap(); + assert!(req.input.try_into_unique_input().unwrap().is_some()); + + let s = r#"{"accessList":[],"input":"0x0902f1ac","to":"0xa478c2975ab1ea89e8196811f51a7b7ade33eb11","type":"0x02"}"#; + let req = serde_json::from_str::(s).unwrap(); + assert!(req.input.try_into_unique_input().unwrap().is_some()); + + let s = r#"{"accessList":[],"data":"0x0902f1ac", "input":"0x0902f1","to":"0xa478c2975ab1ea89e8196811f51a7b7ade33eb11","type":"0x02"}"#; + let req = serde_json::from_str::(s).unwrap(); + assert!(req.input.try_into_unique_input().is_err()); + } } diff --git a/crates/rpc/rpc-types/src/eth/mod.rs b/crates/rpc/rpc-types/src/eth/mod.rs index 04b2da62143a..446e4192f615 100644 --- a/crates/rpc/rpc-types/src/eth/mod.rs +++ b/crates/rpc/rpc-types/src/eth/mod.rs @@ -19,7 +19,7 @@ mod work; pub use account::*; pub use block::*; -pub use call::CallRequest; +pub use call::{CallInput, CallInputError, CallRequest}; pub use fee::{FeeHistory, TxGasAndReward}; pub use filter::*; pub use index::Index; diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index e8dd2ed7ccb1..514d436b3ab7 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -423,7 +423,7 @@ where gas_price: Some(U256::from(gas_price)), max_fee_per_gas: Some(U256::from(max_fee_per_gas)), value: request.value, - data: request.data.clone(), + input: request.data.clone().into(), nonce: request.nonce, chain_id: Some(chain_id), access_list: request.access_list.clone(), diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index f52bf7ca1144..3b3d179b48ae 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -7,7 +7,7 @@ use jsonrpsee::{ }; use reth_primitives::{abi::decode_revert_reason, Address, Bytes, U256}; use reth_revm::tracing::js::JsInspectorError; -use reth_rpc_types::{error::EthRpcErrorCode, BlockError}; +use reth_rpc_types::{error::EthRpcErrorCode, BlockError, CallInputError}; use reth_transaction_pool::error::{InvalidPoolTransactionError, PoolError}; use revm::primitives::{EVMError, ExecutionResult, Halt, OutOfGasError}; use std::time::Duration; @@ -90,6 +90,8 @@ pub enum EthApiError { /// Internal Error thrown by the javascript tracer #[error("{0}")] InternalJsTracerError(String), + #[error(transparent)] + CallInputError(#[from] CallInputError), } impl From for ErrorObject<'static> { @@ -124,6 +126,7 @@ impl From for ErrorObject<'static> { } err @ EthApiError::InternalTracingError => internal_rpc_err(err.to_string()), err @ EthApiError::InternalEthError => internal_rpc_err(err.to_string()), + err @ EthApiError::CallInputError(_) => invalid_params_rpc_err(err.to_string()), } } } diff --git 
a/crates/rpc/rpc/src/eth/revm_utils.rs b/crates/rpc/rpc/src/eth/revm_utils.rs index d40689abe18e..d83b3d53e100 100644 --- a/crates/rpc/rpc/src/eth/revm_utils.rs +++ b/crates/rpc/rpc/src/eth/revm_utils.rs @@ -282,7 +282,7 @@ pub(crate) fn create_txn_env(block_env: &BlockEnv, request: CallRequest) -> EthR max_priority_fee_per_gas, gas, value, - data, + input, nonce, access_list, chain_id, @@ -308,7 +308,7 @@ pub(crate) fn create_txn_env(block_env: &BlockEnv, request: CallRequest) -> EthR gas_priority_fee: max_priority_fee_per_gas, transact_to: to.map(TransactTo::Call).unwrap_or_else(TransactTo::create), value: value.unwrap_or_default(), - data: data.map(|data| data.0).unwrap_or_default(), + data: input.try_into_unique_input()?.map(|data| data.0).unwrap_or_default(), chain_id: chain_id.map(|c| c.as_u64()), access_list: access_list.map(AccessList::flattened).unwrap_or_default(), }; From 19aa11834f9d2948b8c0b14fdb4d8665d38f61bd Mon Sep 17 00:00:00 2001 From: Aditya Pandey Date: Tue, 25 Jul 2023 22:24:25 +0530 Subject: [PATCH 247/722] feat: add dial metric to dashboard (#3802) Co-authored-by: Aditya Pandey --- etc/grafana/dashboards/overview.json | 93 ++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 0ca8bd3d9d13..8500bd19aaf8 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -1693,6 +1693,99 @@ "title": "Peer disconnect reasons", "type": "piechart" }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Number of dials that resulted in a peer being added to the peerset", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 14, + "x": 8, + "y": 60 + }, + "id": 103, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_network_total_dial_successes{instance=~\"$instance\"}", + "legendFormat": "Total Dial Successes", + "range": true, + "refId": "A" + } + ], + "title": "Total Dial Success", + "type": "timeseries" + }, { "collapsed": false, "gridPos": { From 9a07f577629b70f827fa1971eed02bbfc14aa0fa Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 25 Jul 2023 18:20:37 +0100 Subject: [PATCH 248/722] feat(prune): take `PruneMode::Full` into account when validating the config (#3810) --- crates/primitives/src/prune/target.rs | 44 ++++++++++++++------- crates/primitives/src/serde_helper/mod.rs | 2 +- crates/primitives/src/serde_helper/prune.rs | 40 
++++++++++++++-----
 crates/prune/src/pruner.rs                  |  4 +-
 4 files changed, 63 insertions(+), 27 deletions(-)

diff --git a/crates/primitives/src/prune/target.rs b/crates/primitives/src/prune/target.rs
index 8e16235ff8ae..b314ae9d0382 100644
--- a/crates/primitives/src/prune/target.rs
+++ b/crates/primitives/src/prune/target.rs
@@ -1,4 +1,4 @@
-use crate::{serde_helper::deserialize_opt_prune_mode_with_min_distance, BlockNumber, PruneMode};
+use crate::{serde_helper::deserialize_opt_prune_mode_with_min_blocks, BlockNumber, PruneMode};
 use paste::paste;
 use serde::{Deserialize, Serialize};
 
@@ -15,7 +15,7 @@ pub struct PruneModes {
     /// Receipts pruning configuration.
     #[serde(
         skip_serializing_if = "Option::is_none",
-        deserialize_with = "deserialize_opt_prune_mode_with_min_distance::<64, _>"
+        deserialize_with = "deserialize_opt_prune_mode_with_min_blocks::<64, _>"
     )]
     pub receipts: Option<PruneMode>,
     /// Account History pruning configuration.
@@ -27,7 +27,7 @@ pub struct PruneModes {
 }
 
 macro_rules! impl_prune_parts {
-    ($(($part:ident, $human_part:expr)),+) => {
+    ($(($part:ident, $human_part:expr, $min_blocks:expr)),+) => {
         $(
             paste! {
                 #[doc = concat!(
@@ -51,8 +51,12 @@ macro_rules! impl_prune_parts {
                     $human_part,
                     " pruning needs to be done, inclusive, according to the provided tip."
                 )]
-                pub fn [<prune_to_block_ $part>](&self, tip: BlockNumber) -> Option<(BlockNumber, PruneMode)> {
-                    self.$part.as_ref().map(|mode| (self.prune_to_block(mode, tip), *mode))
+                pub fn [<prune_target_block_ $part>](&self, tip: BlockNumber) -> Option<(BlockNumber, PruneMode)> {
+                    self.$part.as_ref().and_then(|mode| {
+                        self.prune_target_block(mode, tip, $min_blocks).map(|block| {
+                            (block, *mode)
+                        })
+                    })
                 }
             }
         )+
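For illustration (not part of this diff), the `paste!` block generates one accessor per tuple; for the `(receipts, "Receipts", Some(64))` entry added below, the expansion is roughly:

    /// Returns block up to which Receipts pruning needs to be done, inclusive,
    /// according to the provided tip.
    pub fn prune_target_block_receipts(&self, tip: BlockNumber) -> Option<(BlockNumber, PruneMode)> {
        self.receipts.as_ref().and_then(|mode| {
            self.prune_target_block(mode, tip, Some(64)).map(|block| (block, *mode))
        })
    }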
@@ -90,20 +94,30 @@ impl PruneModes {
     }
 
     /// Returns block up to which pruning needs to be done, inclusive, according to the provided
-    /// prune mode and tip.
-    pub fn prune_to_block(&self, mode: &PruneMode, tip: BlockNumber) -> BlockNumber {
+    /// prune mode, tip block number and minimum number of blocks allowed to be pruned.
+    pub fn prune_target_block(
+        &self,
+        mode: &PruneMode,
+        tip: BlockNumber,
+        min_blocks: Option<u64>,
+    ) -> Option<BlockNumber> {
         match mode {
-            PruneMode::Full => tip,
-            PruneMode::Distance(distance) => tip.saturating_sub(*distance),
-            PruneMode::Before(n) => *n,
+            PruneMode::Full if min_blocks.unwrap_or_default() == 0 => Some(tip),
+            PruneMode::Distance(distance) if *distance >= min_blocks.unwrap_or_default() => {
+                Some(tip.saturating_sub(*distance))
+            }
+            PruneMode::Before(n) if tip.saturating_sub(*n) >= min_blocks.unwrap_or_default() => {
+                Some(*n)
+            }
+            _ => None,
         }
     }
 
     impl_prune_parts!(
-        (sender_recovery, "Sender Recovery"),
-        (transaction_lookup, "Transaction Lookup"),
-        (receipts, "Receipts"),
-        (account_history, "Account History"),
-        (storage_history, "Storage History")
+        (sender_recovery, "Sender Recovery", None),
+        (transaction_lookup, "Transaction Lookup", None),
+        (receipts, "Receipts", Some(64)),
+        (account_history, "Account History", None),
+        (storage_history, "Storage History", None)
     );
 }
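Worked examples of the new semantics (illustrative, not part of this diff), using the receipts setting `min_blocks = Some(64)` at `tip = 100`:

    // PruneMode::Full         -> None     (Full keeps no blocks, but 64 must remain)
    // PruneMode::Distance(32) -> None     (32 < 64: too few blocks would remain)
    // PruneMode::Distance(70) -> Some(30) (tip - 70; 70 >= 64 blocks remain)
    // PruneMode::Before(20)   -> Some(20) (tip - 20 = 80 >= 64)
    // PruneMode::Before(90)   -> None     (tip - 90 = 10 < 64)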
diff --git a/crates/primitives/src/serde_helper/mod.rs b/crates/primitives/src/serde_helper/mod.rs
index 7cd85f892f98..23db0bde8374 100644
--- a/crates/primitives/src/serde_helper/mod.rs
+++ b/crates/primitives/src/serde_helper/mod.rs
@@ -11,7 +11,7 @@ pub use jsonu256::*;
 pub mod num;
 
 mod prune;
-pub use prune::deserialize_opt_prune_mode_with_min_distance;
+pub use prune::deserialize_opt_prune_mode_with_min_blocks;
 
 /// serde functions for handling primitive `u64` as [U64](crate::U64)
 pub mod u64_hex {
diff --git a/crates/primitives/src/serde_helper/prune.rs b/crates/primitives/src/serde_helper/prune.rs
index 7dffafdf8f7f..5c305ae59592 100644
--- a/crates/primitives/src/serde_helper/prune.rs
+++ b/crates/primitives/src/serde_helper/prune.rs
@@ -1,11 +1,17 @@
 use crate::PruneMode;
 use serde::{Deserialize, Deserializer};
 
-/// Deserializes [`Option<PruneMode>`] and validates that the value contained in
-/// [PruneMode::Distance] (if any) is not less than the const generic parameter `MIN_DISTANCE`.
-pub fn deserialize_opt_prune_mode_with_min_distance<
+/// Deserializes [`Option<PruneMode>`] and validates that the value is not less than the const
+/// generic parameter `MIN_BLOCKS`. This parameter represents the number of blocks that needs to be
+/// left in database after the pruning.
+///
+/// 1. For [PruneMode::Full], it fails if `MIN_BLOCKS > 0`.
+/// 2. For [PruneMode::Distance(distance)], it fails if `distance < MIN_BLOCKS + 1`. `+ 1` is needed
+///    because `PruneMode::Distance(0)` means that we leave zero blocks from the latest, meaning we
+///    have one block in the database.
+pub fn deserialize_opt_prune_mode_with_min_blocks<
     'de,
-    const MIN_DISTANCE: u64,
+    const MIN_BLOCKS: u64,
     D: Deserializer<'de>,
 >(
     deserializer: D,
@@ -13,11 +19,20 @@
     let prune_mode = Option::<PruneMode>::deserialize(deserializer)?;
 
     match prune_mode {
-        Some(PruneMode::Distance(distance)) if distance < MIN_DISTANCE => {
+        Some(PruneMode::Full) if MIN_BLOCKS > 0 => {
+            Err(serde::de::Error::invalid_value(
+                serde::de::Unexpected::Str("full"),
+                // This message should have "expected" wording
+                &format!("prune mode that leaves at least {MIN_BLOCKS} blocks in the database")
+                    .as_str(),
+            ))
+        }
+        Some(PruneMode::Distance(distance)) if distance < MIN_BLOCKS => {
             Err(serde::de::Error::invalid_value(
                 serde::de::Unexpected::Unsigned(distance),
-                // This message should have "expected" wording, so we say "not less than"
-                &format!("prune mode distance not less than {MIN_DISTANCE} blocks").as_str(),
+                // This message should have "expected" wording
+                &format!("prune mode that leaves at least {MIN_BLOCKS} blocks in the database")
+                    .as_str(),
             ))
         }
         _ => Ok(prune_mode),
@@ -31,11 +46,11 @@ mod test {
     use serde::Deserialize;
 
     #[test]
-    fn deserialize_opt_prune_mode_with_min_distance() {
+    fn deserialize_opt_prune_mode_with_min_blocks() {
         #[derive(Debug, Deserialize, PartialEq, Eq)]
         struct V(
             #[serde(
-                deserialize_with = "super::deserialize_opt_prune_mode_with_min_distance::<10, _>"
+                deserialize_with = "super::deserialize_opt_prune_mode_with_min_blocks::<10, _>"
             )]
             Option<PruneMode>,
         );
 
         assert!(serde_json::from_str::<V>(r#"{"distance": 10}"#).is_ok());
         assert_matches!(
             serde_json::from_str::<V>(r#"{"distance": 9}"#),
-            Err(err) if err.to_string() == "invalid value: integer `9`, expected prune mode distance not less than 10 blocks"
+            Err(err) if err.to_string() == "invalid value: integer `9`, expected prune mode that leaves at least 10 blocks in the database"
+        );
+
+        assert_matches!(
+            serde_json::from_str::<V>(r#""full""#),
+            Err(err) if err.to_string() == "invalid value: string \"full\", expected prune mode that leaves at least 10 blocks in the database"
         );
     }
 }
diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs
index 4cdde139d7ef..400d0bda2260 100644
--- a/crates/prune/src/pruner.rs
+++ b/crates/prune/src/pruner.rs
@@ -64,7 +64,9 @@ impl<DB: Database> Pruner<DB> {
     pub fn run(&mut self, tip_block_number: BlockNumber) -> PrunerResult {
         let provider = self.provider_factory.provider_rw()?;
 
-        if let Some((to_block, prune_mode)) = self.modes.prune_to_block_receipts(tip_block_number) {
+        if let Some((to_block, prune_mode)) =
+            self.modes.prune_target_block_receipts(tip_block_number)
+        {
             self.prune_receipts(&provider, to_block, prune_mode)?;
         }
 

From 736de2028cc7150314b31ac1faf39a0c95cc8c17 Mon Sep 17 00:00:00 2001
From: Alexey Shekhirin
Date: Tue, 25 Jul 2023 18:41:23 +0100
Subject: [PATCH 249/722] feat(pruner): transaction lookup (#3892)

---
 Cargo.lock                              |   1 +
 crates/prune/Cargo.toml                 |   1 +
 crates/prune/src/error.rs               |   3 +
 crates/prune/src/pruner.rs              | 248 +++++++++++++++---
 crates/stages/src/test_utils/test_db.rs |  14 +-
 .../src/providers/database/provider.rs  |  23 +-
 6 files changed, 245 insertions(+), 45 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index b2c14b5d4715..6d2141a46abf 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5588,6 +5588,7 @@ version = "0.1.0-alpha.4"
 dependencies = [
  "assert_matches",
  "itertools 0.10.5",
+ "rayon",
  "reth-db",
  "reth-interfaces",
  "reth-primitives",
diff --git a/crates/prune/Cargo.toml b/crates/prune/Cargo.toml
index 432a0ecfdd9b..8db44ce6b31a 100644
--- a/crates/prune/Cargo.toml
+++ b/crates/prune/Cargo.toml
@@ -21,6 +21,7 @@ reth-interfaces = { workspace = true }
 tracing = { workspace = true }
 thiserror = { workspace = true }
 itertools = "0.10"
+rayon = "1.6.0"
 
 [dev-dependencies]
 # reth
diff --git a/crates/prune/src/error.rs b/crates/prune/src/error.rs
index a38e3d6e5d3a..fdc0af4484a8 100644
--- a/crates/prune/src/error.rs
+++ b/crates/prune/src/error.rs
@@ -4,6 +4,9 @@ use thiserror::Error;
 
 #[derive(Error, Debug)]
 pub enum PrunerError {
+    #[error("Inconsistent data: {0}")]
+    InconsistentData(&'static str),
+
     #[error("An interface error occurred.")]
     Interface(#[from] reth_interfaces::Error),
 
diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs
index 400d0bda2260..de020dcd272a 100644
--- a/crates/prune/src/pruner.rs
+++ b/crates/prune/src/pruner.rs
@@ -1,10 +1,16 @@
 //! Support for pruning.
 
 use crate::PrunerError;
+use rayon::prelude::*;
 use reth_db::{database::Database, tables};
-use reth_primitives::{BlockNumber, ChainSpec, PruneCheckpoint, PruneMode, PruneModes, PrunePart};
-use reth_provider::{BlockReader, DatabaseProviderRW, ProviderFactory, PruneCheckpointWriter};
-use std::sync::Arc;
+use reth_primitives::{
+    BlockNumber, ChainSpec, PruneCheckpoint, PruneMode, PruneModes, PrunePart, TxNumber,
+};
+use reth_provider::{
+    BlockReader, DatabaseProviderRW, ProviderFactory, PruneCheckpointReader, PruneCheckpointWriter,
+    TransactionsProvider,
+};
+use std::{ops::RangeInclusive, sync::Arc};
 use tracing::{debug, instrument, trace};
 
 /// Result of [Pruner::run] execution
@@ -15,11 +21,12 @@ pub type PrunerWithResult<DB> = (Pruner<DB>, PrunerResult);
 
 pub struct BatchSizes {
     receipts: usize,
+    transaction_lookup: usize,
 }
 
 impl Default for BatchSizes {
     fn default() -> Self {
-        Self { receipts: 10000 }
+        Self { receipts: 10000, transaction_lookup: 10000 }
     }
 }
 
@@ -70,6 +77,12 @@ impl<DB: Database> Pruner<DB> {
             self.prune_receipts(&provider, to_block, prune_mode)?;
         }
 
+        if let Some((to_block, prune_mode)) =
+            self.modes.prune_target_block_transaction_lookup(tip_block_number)
+        {
+            self.prune_transaction_lookup(&provider, to_block, prune_mode)?;
+        }
+
         provider.commit()?;
 
         self.last_pruned_block_number = Some(tip_block_number);
@@ -97,6 +110,37 @@ impl<DB: Database> Pruner<DB> {
         }
     }
 
+    /// Get next inclusive tx number range to prune according to the checkpoint and `to_block` block
+    /// number.
+    ///
+    /// To get the range start:
+    /// 1. If checkpoint exists, get next block body and return its first tx number.
+    /// 2. If checkpoint doesn't exist, return 0.
+    ///
+    /// To get the range end: get last tx number for the provided `to_block`.
+    fn get_next_tx_num_range_from_checkpoint(
+        &self,
+        provider: &DatabaseProviderRW<'_, DB>,
+        prune_part: PrunePart,
+        to_block: BlockNumber,
+    ) -> reth_interfaces::Result<Option<RangeInclusive<TxNumber>>> {
+        let from_tx_num = provider
+            .get_prune_checkpoint(prune_part)?
+            .map(|checkpoint| provider.block_body_indices(checkpoint.block_number + 1))
+            .transpose()?
+            .flatten()
+            .map(|body| body.first_tx_num)
+            .unwrap_or_default();
+
+        let to_tx_num = match provider.block_body_indices(to_block)? {
+            Some(body) => body,
+            None => return Ok(None),
+        }
+        .last_tx_num();
+
+        Ok(Some(from_tx_num..=to_tx_num))
+    }
+
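A worked example of the range computation (illustrative, not part of this diff), assuming bodies of 2 transactions per block and a previous checkpoint at block 9:

    // block_body_indices(9 + 1).first_tx_num == 20           -> range start
    // block_body_indices(to_block = 14).last_tx_num == 29    -> range end
    // => the pruner continues with tx numbers 20..=29; with no checkpoint it
    //    starts from 0.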
     /// Prune receipts up to the provided block, inclusive.
     #[instrument(level = "trace", skip(self, provider), target = "pruner")]
     fn prune_receipts(
         &self,
         provider: &DatabaseProviderRW<'_, DB>,
         to_block: BlockNumber,
         prune_mode: PruneMode,
     ) -> PrunerResult {
-        let to_block_body = match provider.block_body_indices(to_block)? {
-            Some(body) => body,
+        let range = match self.get_next_tx_num_range_from_checkpoint(
+            provider,
+            PrunePart::Receipts,
+            to_block,
+        )? {
+            Some(range) => range,
             None => {
                 trace!(target: "pruner", "No receipts to prune");
                 return Ok(())
             }
         };
 
-        provider.prune_table_in_batches::<tables::Receipts, _, _>(
-            ..=to_block_body.last_tx_num(),
+        provider.prune_table_in_batches::<tables::Receipts, _>(
+            range,
             self.batch_sizes.receipts,
             |receipts| {
                 trace!(
                     target: "pruner",
                     %receipts,
                     "Pruned receipts"
                 );
             },
         )?;
 
         provider.save_prune_checkpoint(
             PrunePart::Receipts,
             PruneCheckpoint { block_number: to_block, prune_mode },
         )?;
 
         Ok(())
     }
+
+    /// Prune transaction lookup entries up to the provided block, inclusive.
+    #[instrument(level = "trace", skip(self, provider), target = "pruner")]
+    fn prune_transaction_lookup(
+        &self,
+        provider: &DatabaseProviderRW<'_, DB>,
+        to_block: BlockNumber,
+        prune_mode: PruneMode,
+    ) -> PrunerResult {
+        let range = match self.get_next_tx_num_range_from_checkpoint(
+            provider,
+            PrunePart::TransactionLookup,
+            to_block,
+        )? {
+            Some(range) => range,
+            None => {
+                trace!(target: "pruner", "No transaction lookup entries to prune");
+                return Ok(())
+            }
+        };
+        let last_tx_num = *range.end();
+
+        for i in range.step_by(self.batch_sizes.transaction_lookup) {
+            // The `min` ensures that the transaction range doesn't exceed the last transaction
+            // number. `last_tx_num + 1` is used to include the last transaction in the range.
+            let tx_range = i..(i + self.batch_sizes.transaction_lookup as u64).min(last_tx_num + 1);
+
+            // Retrieve transactions in the range and calculate their hashes in parallel
+            let mut hashes = provider
+                .transactions_by_tx_range(tx_range.clone())?
+                .into_par_iter()
+                .map(|transaction| transaction.hash())
+                .collect::<Vec<_>>();
+
+            // Number of transactions retrieved from the database should match the tx range count
+            let tx_count = tx_range.clone().count();
+            if hashes.len() != tx_count {
+                return Err(PrunerError::InconsistentData(
+                    "Unexpected number of transaction hashes retrieved by transaction number range",
+                ))
+            }
+
+            // Pre-sort hashes to prune them in order
+            hashes.sort();
+
+            provider.prune_table_in_batches::<tables::TxHashNumber, _>(
+                hashes,
+                self.batch_sizes.transaction_lookup,
+                |entries| {
+                    trace!(
+                        target: "pruner",
+                        %entries,
+                        "Pruned transaction lookup"
+                    );
+                },
+            )?;
+        }
+
+        provider.save_prune_checkpoint(
+            PrunePart::TransactionLookup,
+            PruneCheckpoint { block_number: to_block, prune_mode },
+        )?;
+
+        Ok(())
+    }
 }
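The shape of the hash recovery above, reduced to its essentials (illustrative, not part of this diff; `txs` stands for the transactions returned by `transactions_by_tx_range`):

    use rayon::prelude::*;
    let mut hashes: Vec<TxHash> = txs.into_par_iter().map(|tx| tx.hash()).collect();
    // Sorting lets the delete cursor walk `TxHashNumber` in key order instead of
    // seeking randomly across the table.
    hashes.sort();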
 
 #[cfg(test)]
 mod tests {
@@ -143,7 +256,9 @@ mod tests {
     use reth_interfaces::test_utils::{
         generators,
         generators::{random_block_range, random_receipt},
     };
-    use reth_primitives::{PruneCheckpoint, PruneMode, PruneModes, PrunePart, H256, MAINNET};
+    use reth_primitives::{
+        BlockNumber, PruneCheckpoint, PruneMode, PruneModes, PrunePart, H256, MAINNET,
+    };
     use reth_provider::PruneCheckpointReader;
     use reth_stages::test_utils::TestTransaction;
 
@@ -192,34 +307,103 @@ mod tests {
             tx.table::<tables::Receipts>().unwrap().len()
         );
 
-        let prune_to_block = 10;
-        let prune_mode = PruneMode::Before(prune_to_block);
-        let pruner = Pruner::new(
-            tx.inner_raw(),
-            MAINNET.clone(),
-            5,
-            0,
-            PruneModes { receipts: Some(prune_mode), ..Default::default() },
-            BatchSizes {
-                // Less than total amount of blocks to prune to test the batching logic
-                receipts: 10,
-            },
-        );
+        let test_prune = |to_block: BlockNumber| {
+            let prune_mode = PruneMode::Before(to_block);
+            let pruner = Pruner::new(
+                tx.inner_raw(),
+                MAINNET.clone(),
+                5,
+                0,
+                PruneModes { receipts: Some(prune_mode), ..Default::default() },
+                BatchSizes {
+                    // Less than total amount of blocks to prune to test the batching logic
+                    receipts: 10,
+                    ..Default::default()
+                },
+            );
 
-        let provider = tx.inner_rw();
-        assert_matches!(pruner.prune_receipts(&provider, prune_to_block, prune_mode), Ok(()));
-        provider.commit().expect("commit");
+            let provider = tx.inner_rw();
+            assert_matches!(pruner.prune_receipts(&provider, to_block, prune_mode), Ok(()));
+            provider.commit().expect("commit");
+
+            assert_eq!(
+                tx.table::<tables::Receipts>().unwrap().len(),
+                blocks[to_block as usize + 1..].iter().map(|block| block.body.len()).sum::<usize>()
+            );
+            assert_eq!(
+                tx.inner().get_prune_checkpoint(PrunePart::Receipts).unwrap(),
+                Some(PruneCheckpoint { block_number: to_block, prune_mode })
+            );
+        };
+
+        // Pruning first time ever, no previous checkpoint is present
+        test_prune(10);
+        // Prune second time, previous checkpoint is present, should continue pruning from where
+        // ended last time
+        test_prune(20);
+    }
+
+    #[test]
+    fn prune_transaction_lookup() {
+        let tx = TestTransaction::default();
+        let mut rng = generators::rng();
+
+        let blocks = random_block_range(&mut rng, 0..=100, H256::zero(), 0..10);
+        tx.insert_blocks(blocks.iter(), None).expect("insert blocks");
+
+        let mut tx_hash_numbers = Vec::new();
+        for block in &blocks {
+            for transaction in &block.body {
+                tx_hash_numbers.push((transaction.hash, tx_hash_numbers.len() as u64));
+            }
+        }
+        tx.insert_tx_hash_numbers(tx_hash_numbers).expect("insert tx hash numbers");
 
         assert_eq!(
-            tx.table::<tables::Receipts>().unwrap().len(),
-            blocks[prune_to_block as usize + 1..]
-                .iter()
-                .map(|block| block.body.len())
-                .sum::<usize>()
+            tx.table::<tables::TxHashNumber>().unwrap().len(),
+            blocks.iter().map(|block| block.body.len()).sum::<usize>()
         );
         assert_eq!(
-            tx.inner().get_prune_checkpoint(PrunePart::Receipts).unwrap(),
-            Some(PruneCheckpoint { block_number: prune_to_block, prune_mode })
+            tx.table::<tables::TxHashNumber>().unwrap().len(),
+            tx.table::<tables::Transactions>().unwrap().len()
         );
+
+        let test_prune = |to_block: BlockNumber| {
+            let prune_mode = PruneMode::Before(to_block);
+            let pruner = Pruner::new(
+                tx.inner_raw(),
+                MAINNET.clone(),
+                5,
+                0,
+                PruneModes { transaction_lookup: Some(prune_mode), ..Default::default() },
+                BatchSizes {
+                    // Less than total amount of blocks to prune to test the batching logic
+                    transaction_lookup: 10,
+                    ..Default::default()
+                },
+            );
+
+            let provider = tx.inner_rw();
+            assert_matches!(
+                pruner.prune_transaction_lookup(&provider, to_block, prune_mode),
+                Ok(())
+            );
+            provider.commit().expect("commit");
+
+            assert_eq!(
+                tx.table::<tables::TxHashNumber>().unwrap().len(),
+                blocks[to_block as usize + 1..].iter().map(|block| block.body.len()).sum::<usize>()
+            );
+            assert_eq!(
+                tx.inner().get_prune_checkpoint(PrunePart::TransactionLookup).unwrap(),
+                Some(PruneCheckpoint { block_number: to_block, prune_mode })
+            );
+        };
+
+        // Pruning first time ever, no previous checkpoint is present
+        test_prune(10);
+        // Prune second time, previous checkpoint is present, should continue pruning from where
+        // ended last time
+        test_prune(20);
+    }
 }
diff --git a/crates/stages/src/test_utils/test_db.rs b/crates/stages/src/test_utils/test_db.rs
index 4efb8debffb5..425cf1d7120c 100644
--- a/crates/stages/src/test_utils/test_db.rs
+++ b/crates/stages/src/test_utils/test_db.rs
@@ -11,7 +11,7 @@ use reth_db::{
 };
 use reth_primitives::{
     keccak256, Account, Address, BlockNumber, Receipt, SealedBlock, SealedHeader, StorageEntry,
-    TxNumber, H256, MAINNET, U256,
+    TxHash, TxNumber, H256, MAINNET, U256,
 };
 use reth_provider::{DatabaseProviderRO, DatabaseProviderRW, ProviderFactory};
 use std::{
@@ -268,6 +268,18 @@ impl TestTransaction {
         })
     }
 
+    pub fn insert_tx_hash_numbers<I>(&self, tx_hash_numbers: I) -> Result<(), DbError>
+    where
+        I: IntoIterator<Item = (TxHash, TxNumber)>,
+    {
+        self.commit(|tx| {
+            tx_hash_numbers.into_iter().try_for_each(|(tx_hash, tx_num)| {
+                // Insert into tx hash numbers table.
+                tx.put::<tables::TxHashNumber>(tx_hash, tx_num)
+            })
+        })
+    }
+
     /// Insert collection of ([TxNumber], [Receipt]) into the corresponding table.
     pub fn insert_receipts<I>(&self, receipts: I) -> Result<(), DbError>
     where
diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs
index 339807580edc..1f871cfa29aa 100644
--- a/crates/storage/provider/src/providers/database/provider.rs
+++ b/crates/storage/provider/src/providers/database/provider.rs
@@ -619,40 +619,39 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> DatabaseProvider<'this, TX> {
         Ok(())
     }
 
-    /// Prune the table for the specified key range.
+    /// Prune the table for the specified pre-sorted key iterator.
     /// Returns number of rows pruned.
     pub fn prune_table<T, K>(
         &self,
-        range: impl RangeBounds<K>,
+        keys: impl IntoIterator<Item = K>,
     ) -> std::result::Result<usize, DatabaseError>
     where
         T: Table<Key = K>,
         K: Key,
     {
-        self.prune_table_in_batches::<T, K, _>(range, usize::MAX, |_| {})
+        self.prune_table_in_batches::<T, K>(keys, usize::MAX, |_| {})
     }
 
-    /// Prune the table for the specified key range calling `chunk_callback` after every
-    /// `batch_size` pruned rows.
+    /// Prune the table for the specified pre-sorted key iterator, calling `chunk_callback` after
+    /// every `batch_size` pruned rows.
     ///
     /// Returns number of rows pruned.
-    pub fn prune_table_in_batches<T, K, F>(
+    pub fn prune_table_in_batches<T, K>(
         &self,
-        range: impl RangeBounds<K>,
+        keys: impl IntoIterator<Item = K>,
         batch_size: usize,
-        batch_callback: F,
+        batch_callback: impl Fn(usize),
     ) -> std::result::Result<usize, DatabaseError>
     where
         T: Table<Key = K>,
         K: Key,
-        F: Fn(usize),
     {
         let mut cursor = self.tx.cursor_write::<T>()?;
-        let mut walker = cursor.walk_range(range)?;
         let mut deleted = 0;
 
-        while let Some(Ok(_)) = walker.next() {
-            walker.delete_current()?;
+        for key in keys {
+            cursor.seek_exact(key)?;
+            cursor.delete_current()?;
             deleted += 1;
 
             if deleted % batch_size == 0 {
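Call shapes the new key-iterator signature admits (illustrative, not part of this diff; assumes `tables::Receipts` is keyed by `TxNumber` and `tables::TxHashNumber` by hash):

    // a contiguous tx-number range still works, since ranges are iterators:
    provider.prune_table::<tables::Receipts, _>(20..=29u64)?;
    // and so does an arbitrary, pre-sorted list of hash keys:
    provider.prune_table::<tables::TxHashNumber, _>(sorted_hashes)?;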
"reth-ipc", + "reth-metrics", "reth-network-api", "reth-payload-builder", "reth-primitives", diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index c0078969d381..07753d13317a 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -21,6 +21,7 @@ reth-rpc-engine-api = { path = "../rpc-engine-api" } reth-rpc-types = { workspace = true } reth-tasks = { workspace = true } reth-transaction-pool = { workspace = true } +reth-metrics = { workspace = true, features = ["common"] } # rpc/net jsonrpsee = { version = "0.18", features = ["server"] } diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index afa7a37bf2bc..4e86181b940a 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -103,7 +103,7 @@ //! } //! ``` -use crate::{auth::AuthRpcModule, error::WsHttpSamePortError}; +use crate::{auth::AuthRpcModule, error::WsHttpSamePortError, metrics::RpcServerMetrics}; use constants::*; use error::{RpcError, ServerKind}; use jsonrpsee::{ @@ -157,6 +157,9 @@ pub mod constants; /// Additional support for tracing related rpc calls pub mod tracing_pool; +// Rpc server metrics +mod metrics; + // re-export for convenience pub use crate::eth::{EthConfig, EthHandlers}; pub use jsonrpsee::server::ServerBuilder; @@ -1232,7 +1235,7 @@ impl RpcServerConfig { let ws_socket_addr = self .ws_addr .unwrap_or(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, DEFAULT_WS_RPC_PORT))); - + let metrics = RpcServerMetrics::default(); // If both are configured on the same port, we combine them into one server. if self.http_addr == self.ws_addr && self.http_server_config.is_some() && @@ -1264,6 +1267,7 @@ impl RpcServerConfig { http_socket_addr, cors, ServerKind::WsHttp(http_socket_addr), + metrics.clone(), ) .await?; return Ok(WsHttpServer { @@ -1285,6 +1289,7 @@ impl RpcServerConfig { ws_socket_addr, self.ws_cors_domains.take(), ServerKind::WS(ws_socket_addr), + metrics.clone(), ) .await?; ws_local_addr = Some(addr); @@ -1298,6 +1303,7 @@ impl RpcServerConfig { http_socket_addr, self.http_cors_domains.take(), ServerKind::Http(http_socket_addr), + metrics.clone(), ) .await?; http_local_addr = Some(addr); @@ -1529,9 +1535,9 @@ impl Default for WsHttpServers { /// Http Servers Enum enum WsHttpServerKind { /// Http server - Plain(Server), + Plain(Server), /// Http server with cors - WithCors(Server>), + WithCors(Server, RpcServerMetrics>), } // === impl WsHttpServerKind === @@ -1551,12 +1557,14 @@ impl WsHttpServerKind { socket_addr: SocketAddr, cors_domains: Option, server_kind: ServerKind, + metrics: RpcServerMetrics, ) -> Result<(Self, SocketAddr), RpcError> { if let Some(cors) = cors_domains.as_deref().map(cors::create_cors_layer) { let cors = cors.map_err(|err| RpcError::Custom(err.to_string()))?; let middleware = tower::ServiceBuilder::new().layer(cors); let server = builder .set_middleware(middleware) + .set_logger(metrics) .build(socket_addr) .await .map_err(|err| RpcError::from_jsonrpsee_error(err, server_kind))?; @@ -1565,6 +1573,7 @@ impl WsHttpServerKind { Ok((server, local_addr)) } else { let server = builder + .set_logger(metrics) .build(socket_addr) .await .map_err(|err| RpcError::from_jsonrpsee_error(err, server_kind))?; diff --git a/crates/rpc/rpc-builder/src/metrics.rs b/crates/rpc/rpc-builder/src/metrics.rs new file mode 100644 index 000000000000..e40e2c232586 --- /dev/null +++ b/crates/rpc/rpc-builder/src/metrics.rs @@ -0,0 +1,84 @@ +use jsonrpsee::server::logger::{HttpRequest, 
Logger, MethodKind, Params, TransportProtocol}; +use reth_metrics::{ + metrics::{self, Counter, Histogram}, + Metrics, +}; +use std::{net::SocketAddr, time::Instant}; + +/// Metrics for the rpc server +#[derive(Metrics, Clone)] +#[metrics(scope = "rpc_server")] +pub(crate) struct RpcServerMetrics { + /// The number of calls started + calls_started: Counter, + /// The number of successful calls + successful_calls: Counter, + /// The number of failed calls + failed_calls: Counter, + /// The number of requests started + requests_started: Counter, + /// The number of requests finished + requests_finished: Counter, + /// The number of ws sessions opened + ws_session_opened: Counter, + /// The number of ws sessions closed + ws_session_closed: Counter, + /// Latency for a single request/response pair + request_latency: Histogram, + /// Latency for a single call + call_latency: Histogram, +} + +impl Logger for RpcServerMetrics { + type Instant = Instant; + fn on_connect( + &self, + _remote_addr: SocketAddr, + _request: &HttpRequest, + transport: TransportProtocol, + ) { + match transport { + TransportProtocol::Http => {} + TransportProtocol::WebSocket => self.ws_session_opened.increment(1), + } + } + fn on_request(&self, _transport: TransportProtocol) -> Self::Instant { + self.requests_started.increment(1); + Instant::now() + } + fn on_call( + &self, + _method_name: &str, + _params: Params<'_>, + _kind: MethodKind, + _transport: TransportProtocol, + ) { + self.calls_started.increment(1); + } + fn on_result( + &self, + _method_name: &str, + success: bool, + started_at: Self::Instant, + _transport: TransportProtocol, + ) { + // capture call duration + self.call_latency.record(started_at.elapsed().as_millis() as f64); + if !success { + self.failed_calls.increment(1); + } else { + self.successful_calls.increment(1); + } + } + fn on_response(&self, _result: &str, started_at: Self::Instant, _transport: TransportProtocol) { + // capture request latency for this request/response pair + self.request_latency.record(started_at.elapsed().as_millis() as f64); + self.requests_finished.increment(1); + } + fn on_disconnect(&self, _remote_addr: SocketAddr, transport: TransportProtocol) { + match transport { + TransportProtocol::Http => {} + TransportProtocol::WebSocket => self.ws_session_closed.increment(1), + } + } +} From 8cdb097829604515075b315ed1e163e963d3f043 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 26 Jul 2023 17:21:09 +0200 Subject: [PATCH 252/722] chore: simplify workspace = true usage (#3930) --- bin/reth/Cargo.toml | 24 ++++++++++----------- crates/blockchain-tree/Cargo.toml | 8 +++---- crates/config/Cargo.toml | 4 ++-- crates/consensus/auto-seal/Cargo.toml | 14 ++++++------ crates/consensus/beacon/Cargo.toml | 22 +++++++++---------- crates/consensus/common/Cargo.toml | 6 +++--- crates/interfaces/Cargo.toml | 20 ++++++++--------- crates/net/common/Cargo.toml | 4 ++-- crates/net/discv4/Cargo.toml | 12 +++++------ crates/net/dns/Cargo.toml | 12 +++++------ crates/net/downloaders/Cargo.toml | 24 ++++++++++----------- crates/net/ecies/Cargo.toml | 14 ++++++------ crates/net/eth-wire/Cargo.toml | 18 ++++++++-------- crates/net/nat/Cargo.toml | 4 ++-- crates/net/network-api/Cargo.toml | 8 +++---- crates/net/network/Cargo.toml | 30 +++++++++++++------------- crates/payload/basic/Cargo.toml | 20 ++++++++--------- crates/payload/builder/Cargo.toml | 20 ++++++++--------- crates/primitives/Cargo.toml | 16 +++++++------- crates/prune/Cargo.toml | 12 +++++------ crates/revm/Cargo.toml | 12 +++++------ 
crates/revm/revm-inspectors/Cargo.toml | 6 +++--- crates/revm/revm-primitives/Cargo.toml | 4 ++-- crates/rpc/ipc/Cargo.toml | 16 +++++++------- crates/rpc/rpc-api/Cargo.toml | 6 +++--- crates/rpc/rpc-builder/Cargo.toml | 26 +++++++++++----------- crates/rpc/rpc-engine-api/Cargo.toml | 18 ++++++++-------- crates/rpc/rpc-testing-util/Cargo.toml | 8 +++---- crates/rpc/rpc-types/Cargo.toml | 10 ++++----- crates/rpc/rpc/Cargo.toml | 28 ++++++++++++------------ crates/stages/Cargo.toml | 30 +++++++++++++------------- crates/storage/db/Cargo.toml | 22 +++++++++---------- crates/storage/libmdbx-rs/Cargo.toml | 4 ++-- crates/storage/provider/Cargo.toml | 10 ++++----- crates/tasks/Cargo.toml | 8 +++---- crates/tracing/Cargo.toml | 2 +- crates/transaction-pool/Cargo.toml | 20 ++++++++--------- crates/trie/Cargo.toml | 14 ++++++------ testing/ef-tests/Cargo.toml | 12 +++++------ 39 files changed, 274 insertions(+), 274 deletions(-) diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 2c0965dfaa32..5655605d9437 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -18,36 +18,36 @@ reth-revm = { path = "../../crates/revm" } reth-revm-inspectors = { path = "../../crates/revm/revm-inspectors" } reth-stages = { path = "../../crates/stages" } reth-interfaces = { workspace = true, features = ["test-utils", "clap"] } -reth-transaction-pool = { workspace = true } +reth-transaction-pool.workspace = true reth-beacon-consensus = { path = "../../crates/consensus/beacon" } reth-auto-seal-consensus = { path = "../../crates/consensus/auto-seal" } reth-blockchain-tree = { path = "../../crates/blockchain-tree" } reth-rpc-engine-api = { path = "../../crates/rpc/rpc-engine-api" } reth-rpc-builder = { path = "../../crates/rpc/rpc-builder" } reth-rpc = { path = "../../crates/rpc/rpc" } -reth-rlp = { workspace = true } +reth-rlp.workspace = true reth-network = { path = "../../crates/net/network", features = ["serde"] } -reth-network-api = { workspace = true } +reth-network-api.workspace = true reth-downloaders = { path = "../../crates/net/downloaders", features = ["test-utils"] } reth-tracing = { path = "../../crates/tracing" } -reth-tasks = { workspace = true } +reth-tasks.workspace = true reth-net-nat = { path = "../../crates/net/nat" } -reth-payload-builder = { workspace = true } +reth-payload-builder.workspace = true reth-basic-payload-builder = { path = "../../crates/payload/basic" } reth-discv4 = { path = "../../crates/net/discv4" } -reth-metrics = { workspace = true } +reth-metrics.workspace = true reth-prune = { path = "../../crates/prune" } # crypto secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } # tracing -tracing = { workspace = true } +tracing.workspace = true # io fdlimit = "0.2.1" -serde = { workspace = true } -serde_json = { workspace = true } +serde.workspace = true +serde_json.workspace = true shellexpand = "3.0.0" dirs-next = "2.0.0" confy = "0.5" @@ -69,8 +69,8 @@ human_bytes = "0.4.1" # async tokio = { workspace = true, features = ["sync", "macros", "time", "rt-multi-thread"] } -futures = { workspace = true } -pin-project = { workspace = true } +futures.workspace = true +pin-project.workspace = true # http/rpc hyper = "0.14.25" @@ -81,7 +81,7 @@ clap = { version = "4", features = ["derive"] } tempfile = { version = "3.3.0" } backon = "0.4" hex = "0.4" -thiserror = { workspace = true } +thiserror.workspace = true pretty_assertions = "1.3.0" humantime = "2.1.0" const-str = "0.5.6" diff --git a/crates/blockchain-tree/Cargo.toml 
b/crates/blockchain-tree/Cargo.toml index f7c0316480ed..ba025605557a 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -15,17 +15,17 @@ normal = [ [dependencies] # reth -reth-primitives = { workspace = true } -reth-interfaces = { workspace = true } +reth-primitives.workspace = true +reth-interfaces.workspace = true reth-db = { path = "../storage/db" } reth-metrics = { workspace = true, features = ["common"] } -reth-provider = { workspace = true } +reth-provider.workspace = true reth-stages = { path = "../stages" } # common parking_lot = { version = "0.12" } lru = "0.10" -tracing = { workspace = true } +tracing.workspace = true # mics aquamarine = "0.3.0" diff --git a/crates/config/Cargo.toml b/crates/config/Cargo.toml index c278440cff75..d50b42d0fb9d 100644 --- a/crates/config/Cargo.toml +++ b/crates/config/Cargo.toml @@ -17,8 +17,8 @@ reth-stages = { path = "../../crates/stages" } reth-primitives = { path = "../primitives" } # io -serde = { workspace = true } -serde_json = { workspace = true } +serde.workspace = true +serde_json.workspace = true # crypto secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } diff --git a/crates/consensus/auto-seal/Cargo.toml b/crates/consensus/auto-seal/Cargo.toml index 0805bba21715..49a37d9e664c 100644 --- a/crates/consensus/auto-seal/Cargo.toml +++ b/crates/consensus/auto-seal/Cargo.toml @@ -11,18 +11,18 @@ description = "A consensus impl for local testing purposes" [dependencies] # reth reth-beacon-consensus = { path = "../beacon" } -reth-primitives = { workspace = true } -reth-interfaces = { workspace = true } -reth-provider = { workspace = true } +reth-primitives.workspace = true +reth-interfaces.workspace = true +reth-provider.workspace = true reth-stages = { path = "../../stages" } reth-revm = { path = "../../revm" } -reth-transaction-pool = { workspace = true } +reth-transaction-pool.workspace = true # async -futures-util = { workspace = true } +futures-util.workspace = true tokio = { workspace = true, features = ["sync", "time"] } -tokio-stream = { workspace = true } -tracing = { workspace = true } +tokio-stream.workspace = true +tracing.workspace = true [dev-dependencies] reth-interfaces = { workspace = true, features = ["test-utils"] } diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index bb676f71d101..6b23c39642b1 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -10,25 +10,25 @@ repository.workspace = true [dependencies] # reth reth-consensus-common = { path = "../common" } -reth-primitives = { workspace = true } -reth-interfaces = { workspace = true } +reth-primitives.workspace = true +reth-interfaces.workspace = true reth-stages = { path = "../../stages" } reth-db = { path = "../../storage/db" } -reth-provider = { workspace = true } -reth-rpc-types = { workspace = true } -reth-tasks = { workspace = true } -reth-payload-builder = { workspace = true } -reth-metrics = { workspace = true } +reth-provider.workspace = true +reth-rpc-types.workspace = true +reth-tasks.workspace = true +reth-payload-builder.workspace = true +reth-metrics.workspace = true reth-prune = { path = "../../prune" } # async tokio = { workspace = true, features = ["sync"] } -tokio-stream = { workspace = true } -futures = { workspace = true } +tokio-stream.workspace = true +futures.workspace = true # misc -tracing = { workspace = true } -thiserror = { workspace = true } +tracing.workspace = true +thiserror.workspace = true 
schnellru = "0.2" [dev-dependencies] diff --git a/crates/consensus/common/Cargo.toml b/crates/consensus/common/Cargo.toml index 55253ae5ece6..8611aa2317fd 100644 --- a/crates/consensus/common/Cargo.toml +++ b/crates/consensus/common/Cargo.toml @@ -9,9 +9,9 @@ repository.workspace = true [dependencies] # reth -reth-primitives = { workspace = true } -reth-interfaces = { workspace = true } -reth-provider = { workspace = true } +reth-primitives.workspace = true +reth-interfaces.workspace = true +reth-provider.workspace = true [dev-dependencies] reth-interfaces = { workspace = true, features = ["test-utils"] } diff --git a/crates/interfaces/Cargo.toml b/crates/interfaces/Cargo.toml index be012c7aaf5d..c0525a778412 100644 --- a/crates/interfaces/Cargo.toml +++ b/crates/interfaces/Cargo.toml @@ -9,27 +9,27 @@ repository.workspace = true [dependencies] reth-codecs = { path = "../storage/codecs" } -reth-primitives = { workspace = true } -reth-rpc-types = { workspace = true } -reth-network-api = { workspace = true } +reth-primitives.workspace = true +reth-rpc-types.workspace = true +reth-network-api.workspace = true # TODO(onbjerg): We only need this for [BlockBody] reth-eth-wire = { path = "../net/eth-wire" } # eth -revm-primitives = { workspace = true } +revm-primitives.workspace = true parity-scale-codec = { version = "3.2.1", features = ["bytes"] } # async -async-trait = { workspace = true } -futures = { workspace = true } +async-trait.workspace = true +futures.workspace = true tokio = { workspace = true, features = ["sync"] } -tokio-stream = { workspace = true } +tokio-stream.workspace = true # misc auto_impl = "1.0" -thiserror = { workspace = true } -tracing = { workspace = true } -rand = { workspace = true } +thiserror.workspace = true +tracing.workspace = true +rand.workspace = true arbitrary = { version = "1.1.7", features = ["derive"], optional = true } secp256k1 = { workspace = true, default-features = false, features = [ "alloc", diff --git a/crates/net/common/Cargo.toml b/crates/net/common/Cargo.toml index 998c9212f808..17c953dff8c8 100644 --- a/crates/net/common/Cargo.toml +++ b/crates/net/common/Cargo.toml @@ -12,8 +12,8 @@ Types shared across network code [dependencies] # reth -reth-primitives = { workspace = true } +reth-primitives.workspace = true # async -pin-project = { workspace = true } +pin-project.workspace = true tokio = { workspace = true, features = ["full"] } diff --git a/crates/net/discv4/Cargo.toml b/crates/net/discv4/Cargo.toml index 1775e98ebef3..2c5bd92cbe98 100644 --- a/crates/net/discv4/Cargo.toml +++ b/crates/net/discv4/Cargo.toml @@ -12,8 +12,8 @@ Ethereum network discovery [dependencies] # reth -reth-primitives = { workspace = true } -reth-rlp = { workspace = true } +reth-primitives.workspace = true +reth-rlp.workspace = true reth-rlp-derive = { path = "../../rlp/rlp-derive" } reth-net-common = { path = "../common" } reth-net-nat = { path = "../nat" } @@ -25,18 +25,18 @@ enr = { version = "0.8.1", default-features = false, features = ["rust-secp256k1 # async/futures tokio = { workspace = true, features = ["io-util", "net", "time"] } -tokio-stream = { workspace = true } +tokio-stream.workspace = true # misc -tracing = { workspace = true } -thiserror = { workspace = true } +tracing.workspace = true +thiserror.workspace = true hex = "0.4" rand = { workspace = true, optional = true } generic-array = "0.14" serde = { workspace = true, optional = true } [dev-dependencies] -rand = { workspace = true } +rand.workspace = true tokio = { workspace = true, features = 
["macros"] } reth-tracing = { path = "../../tracing" } diff --git a/crates/net/dns/Cargo.toml b/crates/net/dns/Cargo.toml index 4fe6d4c4534b..14054205de95 100644 --- a/crates/net/dns/Cargo.toml +++ b/crates/net/dns/Cargo.toml @@ -10,9 +10,9 @@ description = "Support for EIP-1459 Node Discovery via DNS" [dependencies] # reth -reth-primitives = { workspace = true } +reth-primitives.workspace = true reth-net-common = { path = "../common" } -reth-rlp = { workspace = true } +reth-rlp.workspace = true # ethereum secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery", "serde"] } @@ -20,18 +20,18 @@ enr = { version = "0.8.1", default-features = false, features = ["rust-secp256k1 # async/futures tokio = { workspace = true, features = ["io-util", "net", "time"] } -tokio-stream = { workspace = true } +tokio-stream.workspace = true # trust-dns trust-dns-resolver = "0.22" # misc data-encoding = "2" -async-trait = { workspace = true } +async-trait.workspace = true linked_hash_set = "0.1" schnellru = "0.2" -thiserror = { workspace = true } -tracing = { workspace = true } +thiserror.workspace = true +tracing.workspace = true parking_lot = "0.12" serde = { workspace = true, optional = true } serde_with = { version = "2.1.0", optional = true } diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index 36179fe07747..b5ca3d6b5b02 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -10,24 +10,24 @@ description = "Implementations of various block downloaders" [dependencies] # reth -reth-interfaces = { workspace = true } -reth-primitives = { workspace = true } +reth-interfaces.workspace = true +reth-primitives.workspace = true reth-db = { path = "../../storage/db" } -reth-tasks = { workspace = true } -reth-metrics = { workspace = true } +reth-tasks.workspace = true +reth-metrics.workspace = true # async -futures = { workspace = true } -futures-util = { workspace = true } -pin-project = { workspace = true } +futures.workspace = true +futures-util.workspace = true +pin-project.workspace = true tokio = { workspace = true, features = ["sync"] } -tokio-stream = { workspace = true } +tokio-stream.workspace = true tokio-util = { workspace = true, features = ["codec"] } # misc -tracing = { workspace = true } -rayon = { workspace = true } -thiserror = { workspace = true } +tracing.workspace = true +rayon.workspace = true +thiserror.workspace = true # optional deps for the test-utils feature reth-rlp = { workspace = true, optional = true } @@ -41,7 +41,7 @@ reth-tracing = { path = "../../tracing" } assert_matches = "1.5.0" tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } -reth-rlp = { workspace = true } +reth-rlp.workspace = true itertools = "0.10" tempfile = "3.3" diff --git a/crates/net/ecies/Cargo.toml b/crates/net/ecies/Cargo.toml index 67e146d68544..9a83e64bc7e3 100644 --- a/crates/net/ecies/Cargo.toml +++ b/crates/net/ecies/Cargo.toml @@ -9,18 +9,18 @@ repository.workspace = true [dependencies] reth-rlp = { workspace = true, features = ["derive", "ethereum-types", "std"] } -reth-primitives = { workspace = true } +reth-primitives.workspace = true reth-net-common = { path = "../common" } -futures = { workspace = true } -thiserror = { workspace = true } +futures.workspace = true +thiserror.workspace = true tokio = { workspace = true, features = ["full"] } -tokio-stream = { workspace = true } +tokio-stream.workspace = true tokio-util = { workspace = true, features = ["codec"] } -pin-project = { 
workspace = true } +pin-project.workspace = true educe = "0.4.19" -tracing = { workspace = true } +tracing.workspace = true # HeaderBytes generic-array = "0.14.6" @@ -28,7 +28,7 @@ typenum = "1.15.0" byteorder = "1.4.3" # crypto -rand = { workspace = true } +rand.workspace = true ctr = "0.9.2" digest = "0.10.5" secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index 987f7505d9c3..19a78317c8aa 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -10,12 +10,12 @@ repository.workspace = true [dependencies] bytes.workspace = true -thiserror = { workspace = true } +thiserror.workspace = true serde = { workspace = true, optional = true } # reth reth-codecs = { path = "../../storage/codecs" } -reth-primitives = { workspace = true } +reth-primitives.workspace = true reth-ecies = { path = "../ecies" } reth-rlp = { workspace = true, features = [ "alloc", @@ -24,20 +24,20 @@ reth-rlp = { workspace = true, features = [ "ethereum-types", "smol_str", ] } -reth-metrics = { workspace = true } +reth-metrics.workspace = true # used for Chain and builders ethers-core = { workspace = true, default-features = false } tokio = { workspace = true, features = ["full"] } tokio-util = { workspace = true, features = ["io", "codec"] } -futures = { workspace = true } -tokio-stream = { workspace = true } -pin-project = { workspace = true } -tracing = { workspace = true } +futures.workspace = true +tokio-stream.workspace = true +pin-project.workspace = true +tracing.workspace = true snap = "1.0.5" smol_str = "0.2" -async-trait = { workspace = true } +async-trait.workspace = true # arbitrary utils arbitrary = { version = "1.1.7", features = ["derive"], optional = true } @@ -53,7 +53,7 @@ test-fuzz = "4" tokio-util = { workspace = true, features = ["io", "codec"] } hex-literal = "0.3" hex = "0.4" -rand = { workspace = true } +rand.workspace = true secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } arbitrary = { version = "1.1.7", features = ["derive"] } diff --git a/crates/net/nat/Cargo.toml b/crates/net/nat/Cargo.toml index f0fe8f3e2556..ee127ea40277 100644 --- a/crates/net/nat/Cargo.toml +++ b/crates/net/nat/Cargo.toml @@ -18,10 +18,10 @@ public-ip = "0.2" igd = { git = "https://github.com/stevefan1999-personal/rust-igd", features = ["aio", "tokio1"] } # misc -tracing = { workspace = true } +tracing.workspace = true pin-project-lite = "0.2.9" tokio = { workspace = true, features = ["time"] } -thiserror = { workspace = true } +thiserror.workspace = true serde_with = { version = "2.1.0", optional = true } [dev-dependencies] diff --git a/crates/net/network-api/Cargo.toml b/crates/net/network-api/Cargo.toml index 0769c5daec54..acd433d78521 100644 --- a/crates/net/network-api/Cargo.toml +++ b/crates/net/network-api/Cargo.toml @@ -10,16 +10,16 @@ description = "Network interfaces" [dependencies] # reth -reth-primitives = { workspace = true } +reth-primitives.workspace = true reth-eth-wire = { path = "../eth-wire" } -reth-rpc-types = { workspace = true } +reth-rpc-types.workspace = true # io serde = { workspace = true, features = ["derive"], optional = true } # misc -async-trait = { workspace = true } -thiserror = { workspace = true } +async-trait.workspace = true +thiserror.workspace = true tokio = { workspace = true, features = ["sync"] } [features] diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index 
3e5195c1cc29..a4008f5caabd 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -18,27 +18,27 @@ normal = [ [dependencies] # reth -reth-interfaces = { workspace = true } -reth-primitives = { workspace = true } +reth-interfaces.workspace = true +reth-primitives.workspace = true reth-net-common = { path = "../common" } -reth-network-api = { workspace = true } +reth-network-api.workspace = true reth-discv4 = { path = "../discv4" } reth-dns-discovery = { path = "../dns" } reth-eth-wire = { path = "../eth-wire" } reth-ecies = { path = "../ecies" } -reth-rlp = { workspace = true } +reth-rlp.workspace = true reth-rlp-derive = { path = "../../rlp/rlp-derive" } -reth-tasks = { workspace = true } -reth-transaction-pool = { workspace = true } -reth-provider = { workspace = true } +reth-tasks.workspace = true +reth-transaction-pool.workspace = true +reth-provider.workspace = true reth-metrics = { workspace = true, features = ["common"] } -reth-rpc-types = { workspace = true } +reth-rpc-types.workspace = true # async/futures -futures = { workspace = true } -pin-project = { workspace = true } +futures.workspace = true +pin-project.workspace = true tokio = { workspace = true, features = ["io-util", "net", "macros", "rt-multi-thread", "time"] } -tokio-stream = { workspace = true } +tokio-stream.workspace = true tokio-util = { workspace = true, features = ["codec"] } # io @@ -49,14 +49,14 @@ serde_json = { workspace = true, optional = true } # misc auto_impl = "1" aquamarine = "0.3.0" -tracing = { workspace = true } +tracing.workspace = true fnv = "1.0" -thiserror = { workspace = true } +thiserror.workspace = true parking_lot = "0.12" -async-trait = { workspace = true } +async-trait.workspace = true linked_hash_set = "0.1" linked-hash-map = "0.5.6" -rand = { workspace = true } +rand.workspace = true secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } enr = { version = "0.8.1", features = ["rust-secp256k1"], optional = true } diff --git a/crates/payload/basic/Cargo.toml b/crates/payload/basic/Cargo.toml index a2a9f3288aff..874c9842d830 100644 --- a/crates/payload/basic/Cargo.toml +++ b/crates/payload/basic/Cargo.toml @@ -10,22 +10,22 @@ description = "A basic payload builder for reth that uses the txpool API to buil [dependencies] ## reth -reth-primitives = { workspace = true } +reth-primitives.workspace = true reth-revm = { path = "../../revm" } -reth-transaction-pool = { workspace = true } -reth-rlp = { workspace = true } -reth-provider = { workspace = true } -reth-payload-builder = { workspace = true } -reth-tasks = { workspace = true } -reth-metrics = { workspace = true } +reth-transaction-pool.workspace = true +reth-rlp.workspace = true +reth-provider.workspace = true +reth-payload-builder.workspace = true +reth-tasks.workspace = true +reth-metrics.workspace = true ## ethereum -revm = { workspace = true } +revm.workspace = true ## async tokio = { workspace = true, features = ["sync", "time"] } futures-core = "0.3" -futures-util = { workspace = true } +futures-util.workspace = true ## misc -tracing = { workspace = true } +tracing.workspace = true diff --git a/crates/payload/builder/Cargo.toml b/crates/payload/builder/Cargo.toml index b83236fd9b35..664fae7039a2 100644 --- a/crates/payload/builder/Cargo.toml +++ b/crates/payload/builder/Cargo.toml @@ -10,25 +10,25 @@ description = "reth payload builder" [dependencies] ## reth -reth-primitives = { workspace = true } -reth-rpc-types = { workspace = true } -reth-rlp = { workspace = true } 
-reth-interfaces = { workspace = true } +reth-primitives.workspace = true +reth-rpc-types.workspace = true +reth-rlp.workspace = true +reth-interfaces.workspace = true reth-revm-primitives = { path = "../../revm/revm-primitives" } -reth-metrics = { workspace = true } +reth-metrics.workspace = true ## ethereum -revm-primitives = { workspace = true } +revm-primitives.workspace = true ## async tokio = { workspace = true, features = ["sync"] } -tokio-stream = { workspace = true } -futures-util = { workspace = true } +tokio-stream.workspace = true +futures-util.workspace = true ## misc -thiserror = { workspace = true } +thiserror.workspace = true sha2 = { version = "0.10", default-features = false } -tracing = { workspace = true } +tracing.workspace = true [features] test-utils = [] diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 10e47b7a3a3f..cd4913698e21 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -35,18 +35,18 @@ secp256k1 = { workspace = true, default-features = false, features = [ crc = "3" # tracing -tracing = { workspace = true } +tracing.workspace = true # tokio tokio = { workspace = true, default-features = false, features = ["sync"] } -tokio-stream = { workspace = true } +tokio-stream.workspace = true # misc bytes.workspace = true -serde = { workspace = true } -serde_json = { workspace = true } +serde.workspace = true +serde_json.workspace = true serde_with = "2.1.0" -thiserror = { workspace = true } +thiserror.workspace = true sucds = "0.5.0" hex = "0.4" hex-literal = "0.3" @@ -71,10 +71,10 @@ proptest-derive = { version = "0.3", optional = true } strum = { workspace = true, features = ["derive"] } [dev-dependencies] -serde_json = { workspace = true } +serde_json.workspace = true hex-literal = "0.3" test-fuzz = "4" -rand = { workspace = true } +rand.workspace = true revm-primitives = { workspace = true, features = ["arbitrary"] } arbitrary = { version = "1.1.7", features = ["derive"] } proptest = { version = "1.0" } @@ -84,7 +84,7 @@ toml = "0.7.4" # necessary so we don't hit a "undeclared 'std'": # https://github.com/paradigmxyz/reth/pull/177#discussion_r1021172198 -secp256k1 = { workspace = true } +secp256k1.workspace = true criterion = "0.5" pprof = { version = "0.12", features = ["flamegraph", "frame-pointer", "criterion"] } diff --git a/crates/prune/Cargo.toml b/crates/prune/Cargo.toml index 8db44ce6b31a..13d5ae4f9169 100644 --- a/crates/prune/Cargo.toml +++ b/crates/prune/Cargo.toml @@ -12,14 +12,14 @@ Pruning implementation [dependencies] # reth -reth-primitives = { workspace = true } -reth-db = { workspace = true } -reth-provider = { workspace = true } -reth-interfaces = { workspace = true } +reth-primitives.workspace = true +reth-db.workspace = true +reth-provider.workspace = true +reth-interfaces.workspace = true # misc -tracing = { workspace = true } -thiserror = { workspace = true } +tracing.workspace = true +thiserror.workspace = true itertools = "0.10" rayon = "1.6.0" diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index e80ab708c0f3..037a717d41af 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -10,19 +10,19 @@ description = "reth specific revm utilities" [dependencies] # reth -reth-primitives = { workspace = true } -reth-interfaces = { workspace = true } -reth-provider = { workspace = true } +reth-primitives.workspace = true +reth-interfaces.workspace = true +reth-provider.workspace = true reth-revm-primitives = { path = "./revm-primitives" } reth-revm-inspectors = { 
path = "./revm-inspectors" } reth-consensus-common = { path = "../consensus/common" } # revm -revm = { workspace = true } +revm.workspace = true # common -tracing = { workspace = true } +tracing.workspace = true [dev-dependencies] -reth-rlp = { workspace = true } +reth-rlp.workspace = true once_cell = "1.17.0" diff --git a/crates/revm/revm-inspectors/Cargo.toml b/crates/revm/revm-inspectors/Cargo.toml index 874f41dfce15..0c411cccdb07 100644 --- a/crates/revm/revm-inspectors/Cargo.toml +++ b/crates/revm/revm-inspectors/Cargo.toml @@ -10,10 +10,10 @@ description = "revm inspector implementations used by reth" [dependencies] # reth -reth-primitives = { workspace = true } -reth-rpc-types = { workspace = true } +reth-primitives.workspace = true +reth-rpc-types.workspace = true -revm = { workspace = true } +revm.workspace = true # remove from reth and reexport from revm hashbrown = "0.13" diff --git a/crates/revm/revm-primitives/Cargo.toml b/crates/revm/revm-primitives/Cargo.toml index 96efb46ff864..a17ca3cfb42d 100644 --- a/crates/revm/revm-primitives/Cargo.toml +++ b/crates/revm/revm-primitives/Cargo.toml @@ -10,6 +10,6 @@ description = "core reth specific revm utilities" [dependencies] # reth -reth-primitives = { workspace = true } +reth-primitives.workspace = true -revm = { workspace = true } +revm.workspace = true diff --git a/crates/rpc/ipc/Cargo.toml b/crates/rpc/ipc/Cargo.toml index 3c6d3832b532..e78c8f5dee3e 100644 --- a/crates/rpc/ipc/Cargo.toml +++ b/crates/rpc/ipc/Cargo.toml @@ -13,21 +13,21 @@ IPC support for reth [dependencies] # async/net -futures = { workspace = true } +futures.workspace = true parity-tokio-ipc = "0.9.0" tokio = { workspace = true, features = ["net", "time", "rt-multi-thread"] } tokio-util = { workspace = true, features = ["codec"] } -tokio-stream = { workspace = true } -async-trait = { workspace = true } -pin-project = { workspace = true } +tokio-stream.workspace = true +async-trait.workspace = true +pin-project.workspace = true tower = "0.4" # misc jsonrpsee = { version = "0.18", features = ["server", "client"] } -serde_json = { workspace = true } -tracing = { workspace = true } -bytes = { workspace = true } -thiserror = { workspace = true } +serde_json.workspace = true +tracing.workspace = true +bytes.workspace = true +thiserror.workspace = true [dev-dependencies] tokio-stream = { workspace = true, features = ["sync"] } diff --git a/crates/rpc/rpc-api/Cargo.toml b/crates/rpc/rpc-api/Cargo.toml index a6dde25c1a7c..7b220ba11232 100644 --- a/crates/rpc/rpc-api/Cargo.toml +++ b/crates/rpc/rpc-api/Cargo.toml @@ -12,12 +12,12 @@ Reth RPC interfaces [dependencies] # reth -reth-primitives = { workspace = true } -reth-rpc-types = { workspace = true } +reth-primitives.workspace = true +reth-rpc-types.workspace = true # misc jsonrpsee = { version = "0.18", features = ["server", "macros"] } -serde_json = { workspace = true } +serde_json.workspace = true [features] client = ["jsonrpsee/client", "jsonrpsee/async-client"] diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index 07753d13317a..aeb1d300d233 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -10,17 +10,17 @@ description = "Helpers for configuring RPC" [dependencies] # reth -reth-primitives = { workspace = true } +reth-primitives.workspace = true reth-ipc = { path = "../ipc" } -reth-interfaces = { workspace = true } -reth-network-api = { workspace = true } -reth-provider = { workspace = true } +reth-interfaces.workspace = true 
+reth-network-api.workspace = true +reth-provider.workspace = true reth-rpc = { path = "../rpc" } reth-rpc-api = { path = "../rpc-api" } reth-rpc-engine-api = { path = "../rpc-engine-api" } -reth-rpc-types = { workspace = true } -reth-tasks = { workspace = true } -reth-transaction-pool = { workspace = true } +reth-rpc-types.workspace = true +reth-tasks.workspace = true +reth-transaction-pool.workspace = true reth-metrics = { workspace = true, features = ["common"] } # rpc/net @@ -32,10 +32,10 @@ hyper = "0.14" # misc strum = { workspace = true, features = ["derive"] } serde = { workspace = true, features = ["derive"] } -thiserror = { workspace = true } -tracing = { workspace = true } -rayon = { workspace = true } -pin-project = { workspace = true } +thiserror.workspace = true +tracing.workspace = true +rayon.workspace = true +pin-project.workspace = true tokio = { workspace = true, features = ["sync"] } [dev-dependencies] @@ -43,10 +43,10 @@ reth-tracing = { path = "../../tracing" } reth-rpc-api = { path = "../rpc-api", features = ["client"] } reth-transaction-pool = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } -reth-network-api = { workspace = true } +reth-network-api.workspace = true reth-interfaces = { workspace = true, features = ["test-utils"] } reth-beacon-consensus = { path = "../../consensus/beacon" } reth-payload-builder = { workspace = true, features = ["test-utils"] } tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } -serde_json = { workspace = true } +serde_json.workspace = true diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index 219edaca6f4b..fc09c60010a4 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -10,24 +10,24 @@ description = "Implementation of Engine API" [dependencies] # reth -reth-primitives = { workspace = true } -reth-interfaces = { workspace = true } -reth-provider = { workspace = true } -reth-rpc-types = { workspace = true } +reth-primitives.workspace = true +reth-interfaces.workspace = true +reth-provider.workspace = true +reth-rpc-types.workspace = true reth-rpc-api = { path = "../rpc-api" } reth-beacon-consensus = { path = "../../consensus/beacon" } -reth-payload-builder = { workspace = true } -reth-tasks = { workspace = true } +reth-payload-builder.workspace = true +reth-tasks.workspace = true # async tokio = { workspace = true, features = ["sync"] } # misc -async-trait = { workspace = true } -thiserror = { workspace = true } +async-trait.workspace = true +thiserror.workspace = true jsonrpsee-types = "0.18" jsonrpsee-core = "0.18" -tracing = { workspace = true } +tracing.workspace = true [dev-dependencies] reth-interfaces = { workspace = true, features = ["test-utils"] } diff --git a/crates/rpc/rpc-testing-util/Cargo.toml b/crates/rpc/rpc-testing-util/Cargo.toml index 7fc6976a87e8..93415adb6816 100644 --- a/crates/rpc/rpc-testing-util/Cargo.toml +++ b/crates/rpc/rpc-testing-util/Cargo.toml @@ -12,13 +12,13 @@ Reth RPC testing helpers [dependencies] # reth -reth-primitives = { workspace = true } -reth-rpc-types = { workspace = true } +reth-primitives.workspace = true +reth-rpc-types.workspace = true reth-rpc-api = { path = "../rpc-api", default-features = false, features = ["client"] } # async -async-trait = { workspace = true } -futures = { workspace = true } +async-trait.workspace = true +futures.workspace = true # misc jsonrpsee = { version = "0.18", features = ["client", 
"async-client"] } diff --git a/crates/rpc/rpc-types/Cargo.toml b/crates/rpc/rpc-types/Cargo.toml index 5db05498bac2..8ca59c36ca8d 100644 --- a/crates/rpc/rpc-types/Cargo.toml +++ b/crates/rpc/rpc-types/Cargo.toml @@ -12,15 +12,15 @@ Reth RPC types [dependencies] # reth -reth-primitives = { workspace = true } -reth-rlp = { workspace = true } +reth-primitives.workspace = true +reth-rlp.workspace = true # errors -thiserror = { workspace = true } +thiserror.workspace = true # misc serde = { workspace = true, features = ["derive"] } -serde_json = { workspace = true } +serde_json.workspace = true jsonrpsee-types = { version = "0.18" } [dev-dependencies] @@ -28,6 +28,6 @@ jsonrpsee-types = { version = "0.18" } reth-interfaces = { workspace = true, features = ["test-utils"] } # misc -rand = { workspace = true } +rand.workspace = true assert_matches = "1.5" similar-asserts = "1.4" diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 11111cafbc69..bfb9421f92d8 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -11,18 +11,18 @@ Reth RPC implementation """ [dependencies] # reth -reth-interfaces = { workspace = true } -reth-primitives = { workspace = true } +reth-interfaces.workspace = true +reth-primitives.workspace = true reth-rpc-api = { path = "../rpc-api" } -reth-rlp = { workspace = true } -reth-rpc-types = { workspace = true } +reth-rlp.workspace = true +reth-rpc-types.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } reth-transaction-pool = { workspace = true, features = ["test-utils"] } -reth-network-api = { workspace = true } +reth-network-api.workspace = true reth-rpc-engine-api = { path = "../rpc-engine-api" } reth-revm = { path = "../../revm" } -reth-tasks = { workspace = true } -reth-metrics = { workspace = true } +reth-tasks.workspace = true +reth-metrics.workspace = true reth-consensus-common = { path = "../../consensus/common" } # eth @@ -42,24 +42,24 @@ hyper = "0.14.24" jsonwebtoken = "8" # async -async-trait = { workspace = true } +async-trait.workspace = true tokio = { workspace = true, features = ["sync"] } tower = "0.4" tokio-stream = { workspace = true, features = ["sync"] } tokio-util = "0.7" -pin-project = { workspace = true } +pin-project.workspace = true bytes.workspace = true secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } serde = { workspace = true, features = ["derive"] } -serde_json = { workspace = true } -thiserror = { workspace = true } +serde_json.workspace = true +thiserror.workspace = true hex = "0.4" -rand = { workspace = true } -tracing = { workspace = true } +rand.workspace = true +tracing.workspace = true tracing-futures = "0.2" schnellru = "0.2" -futures = { workspace = true } +futures.workspace = true [dev-dependencies] jsonrpsee = { version = "0.18", features = ["client"] } diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index c8236c3a713c..9d975e28e8ca 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -16,32 +16,32 @@ normal = [ [dependencies] # reth -reth-primitives = { workspace = true } -reth-interfaces = { workspace = true } +reth-primitives.workspace = true +reth-interfaces.workspace = true reth-db = { path = "../storage/db" } reth-codecs = { path = "../storage/codecs" } -reth-provider = { workspace = true } -reth-metrics = { workspace = true } +reth-provider.workspace = true +reth-metrics.workspace = true reth-trie = { path = "../trie" } # async tokio = { workspace = true, features = ["sync"] } 
-tokio-stream = { workspace = true } -async-trait = { workspace = true } -futures-util = { workspace = true } -pin-project = { workspace = true } +tokio-stream.workspace = true +async-trait.workspace = true +futures-util.workspace = true +pin-project.workspace = true # observability -tracing = { workspace = true } +tracing.workspace = true # io -serde = { workspace = true } +serde.workspace = true # misc -thiserror = { workspace = true } +thiserror.workspace = true aquamarine = "0.3.0" itertools = "0.10.5" -rayon = { workspace = true } +rayon.workspace = true num-traits = "0.2.15" [dev-dependencies] @@ -52,14 +52,14 @@ reth-interfaces = { workspace = true, features = ["test-utils"] } reth-downloaders = { path = "../net/downloaders" } reth-eth-wire = { path = "../net/eth-wire" } # TODO(onbjerg): We only need this for [BlockBody] reth-blockchain-tree = { path = "../blockchain-tree" } -reth-rlp = { workspace = true } +reth-rlp.workspace = true reth-revm = { path = "../revm" } reth-trie = { path = "../trie", features = ["test-utils"] } itertools = "0.10.5" tokio = { workspace = true, features = ["rt", "sync", "macros"] } assert_matches = "1.5.0" -rand = { workspace = true } +rand.workspace = true paste = "1.0" # Stage benchmarks @@ -67,7 +67,7 @@ pprof = { version = "0.12", features = ["flamegraph", "frame-pointer", "criterio criterion = { version = "0.5", features = ["async_futures"] } # io -serde_json = { workspace = true } +serde_json.workspace = true [features] test-utils = ["reth-interfaces/test-utils"] diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index 4141cabef87b..9e2d405b1973 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -10,20 +10,20 @@ description = "Staged syncing primitives used in reth." 
[dependencies] # reth -reth-primitives = { workspace = true } -reth-interfaces = { workspace = true } +reth-primitives.workspace = true +reth-interfaces.workspace = true reth-codecs = { path = "../codecs" } reth-libmdbx = { path = "../libmdbx-rs", optional = true, features = ["return-borrowed"] } -reth-metrics = { workspace = true } +reth-metrics.workspace = true # codecs serde = { workspace = true, default-features = false } postcard = { version = "1.0.2", features = ["alloc"] } heapless = "0.7.16" parity-scale-codec = { version = "3.2.1", features = ["bytes"] } -futures = { workspace = true } -tokio-stream = { workspace = true } -rand = { workspace = true } +futures.workspace = true +tokio-stream.workspace = true +rand.workspace = true secp256k1 = { workspace = true, default-features = false, features = [ "alloc", "recovery", @@ -34,7 +34,7 @@ modular-bitfield = "0.11.2" # misc bytes.workspace = true page_size = "0.4.2" -thiserror = { workspace = true } +thiserror.workspace = true tempfile = { version = "3.3.0", optional = true } parking_lot = "0.12" derive_more = "0.99" @@ -49,7 +49,7 @@ proptest-derive = { version = "0.3", optional = true } # reth libs with arbitrary reth-primitives = { workspace = true, features = ["arbitrary"] } reth-codecs = { path = "../codecs", features = ["arbitrary"] } -reth-interfaces = { workspace = true } +reth-interfaces.workspace = true tempfile = "3.3.0" test-fuzz = "4" @@ -61,15 +61,15 @@ tokio = { workspace = true, features = ["full"] } reth-db = { path = ".", features = ["test-utils", "bench"] } # needed for test-fuzz to work properly, see https://github.com/paradigmxyz/reth/pull/177#discussion_r1021172198 -secp256k1 = { workspace = true } +secp256k1.workspace = true -async-trait = { workspace = true } +async-trait.workspace = true arbitrary = { version = "1.1.7", features = ["derive"] } proptest = { version = "1.0" } proptest-derive = "0.3" -serde_json = { workspace = true } +serde_json.workspace = true paste = "1.0" diff --git a/crates/storage/libmdbx-rs/Cargo.toml b/crates/storage/libmdbx-rs/Cargo.toml index 669b63a54eab..2895c56f2ed0 100644 --- a/crates/storage/libmdbx-rs/Cargo.toml +++ b/crates/storage/libmdbx-rs/Cargo.toml @@ -18,7 +18,7 @@ derive_more = "0.99" indexmap = "1" libc = "0.2" parking_lot = "0.12" -thiserror = { workspace = true } +thiserror.workspace = true ffi = { package = "reth-mdbx-sys", path = "./mdbx-sys" } @@ -31,7 +31,7 @@ return-borrowed = [] [dev-dependencies] pprof = { version = "0.12", features = ["flamegraph", "frame-pointer", "criterion"] } criterion = "0.5" -rand = { workspace = true } +rand.workspace = true rand_xorshift = "0.3" tempfile = "3" diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 07a2a9a02f20..25b1fd3ccddd 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -10,8 +10,8 @@ description = "Reth storage provider." 
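The Cargo.toml hunks above and below are a mechanical rewrite of workspace-inherited dependencies from inline-table syntax to the equivalent dotted-key syntax. A minimal TOML sketch of the two spellings (dependency names borrowed from these hunks; note the hunks only convert entries where `workspace = true` is the sole key, while entries with extra keys such as `features` keep the inline-table form):

```toml
[dependencies]
# before the rewrite: tracing = { workspace = true }
tracing.workspace = true

# entries carrying additional keys stay in inline-table form
tokio = { workspace = true, features = ["sync"] }
```

Both forms resolve the version and source from the `[workspace.dependencies]` table in the root Cargo.toml; the dotted form is simply shorter.
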
[dependencies] # reth -reth-primitives = { workspace = true } -reth-interfaces = { workspace = true } +reth-primitives.workspace = true +reth-interfaces.workspace = true reth-revm-primitives = { path = "../../revm/revm-primitives" } reth-db = { path = "../db" } reth-trie = { path = "../../trie" } @@ -21,12 +21,12 @@ tokio = { workspace = true, features = ["sync", "macros", "rt-multi-thread"] } tokio-stream = { workspace = true, features = ["sync"] } # tracing -tracing = { workspace = true } +tracing.workspace = true # misc auto_impl = "1.0" itertools = "0.10" -pin-project = { workspace = true } +pin-project.workspace = true derive_more = "0.99" parking_lot = "0.12" @@ -36,7 +36,7 @@ reth-rlp = { workspace = true, optional = true } [dev-dependencies] reth-db = { path = "../db", features = ["test-utils"] } reth-primitives = { workspace = true, features = ["arbitrary", "test-utils"] } -reth-rlp = { workspace = true } +reth-rlp.workspace = true reth-trie = { path = "../../trie", features = ["test-utils"] } parking_lot = "0.12" tempfile = "3.3" diff --git a/crates/tasks/Cargo.toml b/crates/tasks/Cargo.toml index 183224bbda6a..7a754014bac8 100644 --- a/crates/tasks/Cargo.toml +++ b/crates/tasks/Cargo.toml @@ -13,15 +13,15 @@ description = "Task management" ## async tokio = { workspace = true, features = ["sync", "rt"] } tracing-futures = "0.2" -futures-util = { workspace = true } +futures-util.workspace = true ## misc -tracing = { workspace = true } -thiserror = { workspace = true } +tracing.workspace = true +thiserror.workspace = true dyn-clone = "1.0" ## rpc/metrics -reth-metrics = { workspace = true } +reth-metrics.workspace = true [dev-dependencies] tokio = { workspace = true, features = ["sync", "rt", "rt-multi-thread", "time", "macros"] } diff --git a/crates/tracing/Cargo.toml b/crates/tracing/Cargo.toml index 63531a08ff65..444f1b23afd0 100644 --- a/crates/tracing/Cargo.toml +++ b/crates/tracing/Cargo.toml @@ -9,7 +9,7 @@ repository.workspace = true description = "tracing helpers" [dependencies] -tracing = { workspace = true } +tracing.workspace = true tracing-subscriber = { version = "0.3", default-features = false, features = ["env-filter", "fmt"] } tracing-appender = "0.2" tracing-journald = "0.3" diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index c562df71cf5e..6866cf1ea20b 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -18,24 +18,24 @@ normal = [ [dependencies] # reth -reth-primitives = { workspace = true } -reth-provider = { workspace = true } -reth-interfaces = { workspace = true } -reth-rlp = { workspace = true } -reth-metrics = { workspace = true } -reth-tasks = { workspace = true } +reth-primitives.workspace = true +reth-provider.workspace = true +reth-interfaces.workspace = true +reth-rlp.workspace = true +reth-metrics.workspace = true +reth-tasks.workspace = true # async/futures -async-trait = { workspace = true} -futures-util = { workspace = true } +async-trait.workspace = true +futures-util.workspace = true parking_lot = "0.12" tokio = { workspace = true, default-features = false, features = ["sync"] } tokio-stream.workspace = true # misc aquamarine = "0.3.0" -thiserror = { workspace = true } -tracing = { workspace = true } +thiserror.workspace = true +tracing.workspace = true serde = { workspace = true, features = ["derive", "rc"], optional = true } fnv = "1.0.7" bitflags = "1.3" diff --git a/crates/trie/Cargo.toml b/crates/trie/Cargo.toml index 9664d3192c27..428561838694 100644 --- 
a/crates/trie/Cargo.toml +++ b/crates/trie/Cargo.toml @@ -12,20 +12,20 @@ Merkle trie implementation [dependencies] # reth -reth-primitives = { workspace = true } -reth-interfaces = { workspace = true } -reth-rlp = { workspace = true } +reth-primitives.workspace = true +reth-interfaces.workspace = true +reth-rlp.workspace = true reth-db = { path = "../storage/db" } # tokio tokio = { workspace = true, default-features = false, features = ["sync"] } # tracing -tracing = { workspace = true } +tracing.workspace = true # misc hex = "0.4" -thiserror = { workspace = true } +thiserror.workspace = true derive_more = "0.99" # test-utils @@ -35,7 +35,7 @@ triehash = { version = "0.8", optional = true } # reth reth-primitives = { workspace = true, features = ["test-utils", "arbitrary"] } reth-db = { path = "../storage/db", features = ["test-utils"] } -reth-provider = { workspace = true } +reth-provider.workspace = true # trie triehash = "0.8" @@ -43,7 +43,7 @@ triehash = "0.8" # misc proptest = "1.0" tokio = { workspace = true, default-features = false, features = ["sync", "rt", "macros"] } -tokio-stream = { workspace = true } +tokio-stream.workspace = true criterion = "0.5" [features] diff --git a/testing/ef-tests/Cargo.toml b/testing/ef-tests/Cargo.toml index c394b9a8d48b..d415acde44c8 100644 --- a/testing/ef-tests/Cargo.toml +++ b/testing/ef-tests/Cargo.toml @@ -12,16 +12,16 @@ repository.workspace = true ef-tests = [] [dependencies] -reth-primitives = { workspace = true } +reth-primitives.workspace = true reth-db = { path = "../../crates/storage/db", features = ["mdbx", "test-utils"] } -reth-provider = { workspace = true } +reth-provider.workspace = true reth-stages = { path = "../../crates/stages" } -reth-rlp = { workspace = true } -reth-interfaces = { workspace = true } +reth-rlp.workspace = true +reth-interfaces.workspace = true reth-revm = { path = "../../crates/revm" } tokio = "1.28.1" walkdir = "2.3.3" serde = "1.0.163" -serde_json = { workspace = true } -thiserror = { workspace = true } +serde_json.workspace = true +thiserror.workspace = true serde_bytes = "0.11.9" \ No newline at end of file From 49e112789ba0d769ce58648c8c10a2eec821bced Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 26 Jul 2023 18:33:59 +0200 Subject: [PATCH 253/722] feat: use tracing pool for tracing calls (#3914) --- Cargo.lock | 3 +- crates/rpc/rpc-builder/Cargo.toml | 3 - crates/rpc/rpc-builder/src/auth.rs | 3 +- crates/rpc/rpc-builder/src/eth.rs | 4 +- crates/rpc/rpc-builder/src/lib.rs | 33 ++- crates/rpc/rpc/Cargo.toml | 1 + crates/rpc/rpc/src/call_guard.rs | 26 -- crates/rpc/rpc/src/debug.rs | 147 ++++------- crates/rpc/rpc/src/eth/api/mod.rs | 8 + crates/rpc/rpc/src/eth/api/server.rs | 3 +- crates/rpc/rpc/src/eth/api/state.rs | 7 +- crates/rpc/rpc/src/eth/api/transactions.rs | 151 ++++++----- crates/rpc/rpc/src/lib.rs | 4 +- crates/rpc/rpc/src/trace.rs | 245 +++++++----------- .../src/tracing_call.rs} | 39 ++- 15 files changed, 312 insertions(+), 365 deletions(-) delete mode 100644 crates/rpc/rpc/src/call_guard.rs rename crates/rpc/{rpc-builder/src/tracing_pool.rs => rpc/src/tracing_call.rs} (71%) diff --git a/Cargo.lock b/Cargo.lock index 1d5a1f6b2290..119ae278d573 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5682,6 +5682,7 @@ dependencies = [ "jsonwebtoken", "pin-project", "rand 0.8.5", + "rayon", "reth-consensus-common", "reth-interfaces", "reth-metrics", @@ -5741,8 +5742,6 @@ version = "0.1.0-alpha.4" dependencies = [ "hyper", "jsonrpsee", - "pin-project", - "rayon", "reth-beacon-consensus", 
"reth-interfaces", "reth-ipc", diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index aeb1d300d233..7f8869e64de3 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -34,9 +34,6 @@ strum = { workspace = true, features = ["derive"] } serde = { workspace = true, features = ["derive"] } thiserror.workspace = true tracing.workspace = true -rayon.workspace = true -pin-project.workspace = true -tokio = { workspace = true, features = ["sync"] } [dev-dependencies] reth-tracing = { path = "../../tracing" } diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index 758713bbdebc..d7d14f963d1f 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -17,7 +17,7 @@ use reth_provider::{ use reth_rpc::{ eth::{cache::EthStateCache, gas_oracle::GasPriceOracle}, AuthLayer, Claims, EngineEthApi, EthApi, EthFilter, EthSubscriptionIdProvider, - JwtAuthValidator, JwtSecret, + JwtAuthValidator, JwtSecret, TracingCallPool, }; use reth_rpc_api::{servers::*, EngineApiServer}; use reth_tasks::TaskSpawner; @@ -64,6 +64,7 @@ where gas_oracle, EthConfig::default().rpc_gas_cap, Box::new(executor.clone()), + TracingCallPool::build().expect("failed to build tracing pool"), ); let eth_filter = EthFilter::new( provider, diff --git a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs index 34ec4989c62b..b372d3be77ea 100644 --- a/crates/rpc/rpc-builder/src/eth.rs +++ b/crates/rpc/rpc-builder/src/eth.rs @@ -4,7 +4,7 @@ use reth_rpc::{ gas_oracle::GasPriceOracleConfig, RPC_DEFAULT_GAS_CAP, }, - EthApi, EthFilter, EthPubSub, + EthApi, EthFilter, EthPubSub, TracingCallPool, }; use serde::{Deserialize, Serialize}; @@ -25,6 +25,8 @@ pub struct EthHandlers { pub filter: EthFilter, /// Handler for subscriptions only available for transports that support it (ws, ipc) pub pubsub: EthPubSub, + /// The configured tracing call pool + pub tracing_call_pool: TracingCallPool, } /// Additional config values for the eth namespace diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 4e86181b940a..44af9cb5e1e8 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -122,7 +122,8 @@ use reth_rpc::{ gas_oracle::GasPriceOracle, }, AdminApi, DebugApi, EngineEthApi, EthApi, EthFilter, EthPubSub, EthSubscriptionIdProvider, - NetApi, OtterscanApi, RPCApi, RethApi, TraceApi, TracingCallGuard, TxPoolApi, Web3Api, + NetApi, OtterscanApi, RPCApi, RethApi, TraceApi, TracingCallGuard, TracingCallPool, TxPoolApi, + Web3Api, }; use reth_rpc_api::{servers::*, EngineApiServer}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; @@ -154,9 +155,6 @@ mod eth; /// Common RPC constants. 
pub mod constants; -/// Additional support for tracing related rpc calls -pub mod tracing_pool; - // Rpc server metrics mod metrics; @@ -816,15 +814,9 @@ where let eth = self.eth_handlers(); self.modules.insert( RethRpcModule::Trace, - TraceApi::new( - self.provider.clone(), - eth.api.clone(), - eth.cache, - Box::new(self.executor.clone()), - self.tracing_call_guard.clone(), - ) - .into_rpc() - .into(), + TraceApi::new(self.provider.clone(), eth.api.clone(), self.tracing_call_guard.clone()) + .into_rpc() + .into(), ); self } @@ -895,8 +887,13 @@ where &mut self, namespaces: impl Iterator, ) -> Vec { - let EthHandlers { api: eth_api, cache: eth_cache, filter: eth_filter, pubsub: eth_pubsub } = - self.with_eth(|eth| eth.clone()); + let EthHandlers { + api: eth_api, + filter: eth_filter, + pubsub: eth_pubsub, + cache: _, + tracing_call_pool: _, + } = self.with_eth(|eth| eth.clone()); // Create a copy, so we can list out all the methods for rpc_ api let namespaces: Vec<_> = namespaces.collect(); @@ -933,8 +930,6 @@ where RethRpcModule::Trace => TraceApi::new( self.provider.clone(), eth_api.clone(), - eth_cache.clone(), - Box::new(self.executor.clone()), self.tracing_call_guard.clone(), ) .into_rpc() @@ -997,6 +992,7 @@ where ); let executor = Box::new(self.executor.clone()); + let tracing_call_pool = TracingCallPool::build().expect("failed to build tracing pool"); let api = EthApi::with_spawner( self.provider.clone(), self.pool.clone(), @@ -1005,6 +1001,7 @@ where gas_oracle, self.config.eth.rpc_gas_cap, executor.clone(), + tracing_call_pool.clone(), ); let filter = EthFilter::new( self.provider.clone(), @@ -1022,7 +1019,7 @@ where executor, ); - let eth = EthHandlers { api, cache, filter, pubsub }; + let eth = EthHandlers { api, cache, filter, pubsub, tracing_call_pool }; self.eth = Some(eth); } f(self.eth.as_ref().expect("exists; qed")) diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index bfb9421f92d8..dfd86b271070 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -48,6 +48,7 @@ tower = "0.4" tokio-stream = { workspace = true, features = ["sync"] } tokio-util = "0.7" pin-project.workspace = true +rayon.workspace = true bytes.workspace = true secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } diff --git a/crates/rpc/rpc/src/call_guard.rs b/crates/rpc/rpc/src/call_guard.rs deleted file mode 100644 index bec4ed3ca988..000000000000 --- a/crates/rpc/rpc/src/call_guard.rs +++ /dev/null @@ -1,26 +0,0 @@ -use std::sync::Arc; -use tokio::sync::{AcquireError, OwnedSemaphorePermit, Semaphore}; - -/// RPC Tracing call guard semaphore. -/// -/// This is used to restrict the number of concurrent RPC requests to tracing methods like -/// `debug_traceTransaction` because they can consume a lot of memory and CPU. -#[derive(Clone, Debug)] -pub struct TracingCallGuard(Arc); - -impl TracingCallGuard { - /// Create a new `TracingCallGuard` with the given maximum number of tracing calls in parallel. 
- pub fn new(max_tracing_requests: u32) -> Self { - Self(Arc::new(Semaphore::new(max_tracing_requests as usize))) - } - - /// See also [Semaphore::acquire_owned] - pub async fn acquire_owned(self) -> Result { - self.0.acquire_owned().await - } - - /// See also [Semaphore::acquire_many_owned] - pub async fn acquire_many_owned(self, n: u32) -> Result { - self.0.acquire_many_owned(n).await - } -} diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index d6abad21fe22..3c2da93f61db 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -40,8 +40,8 @@ use revm_primitives::{ db::{DatabaseCommit, DatabaseRef}, BlockEnv, CfgEnv, }; -use std::{future::Future, sync::Arc}; -use tokio::sync::{mpsc, oneshot, AcquireError, OwnedSemaphorePermit}; +use std::sync::Arc; +use tokio::sync::{mpsc, AcquireError, OwnedSemaphorePermit}; use tokio_stream::{wrappers::ReceiverStream, StreamExt}; /// `debug` API implementation. @@ -74,30 +74,13 @@ where Provider: BlockReaderIdExt + HeaderProvider + 'static, Eth: EthTransactions + 'static, { - /// Executes the future on a new blocking task. - async fn on_blocking_task(&self, c: C) -> EthResult - where - C: FnOnce(Self) -> F, - F: Future> + Send + 'static, - R: Send + 'static, - { - let (tx, rx) = oneshot::channel(); - let this = self.clone(); - let f = c(this); - self.inner.task_spawner.spawn_blocking(Box::pin(async move { - let res = f.await; - let _ = tx.send(res); - })); - rx.await.map_err(|_| EthApiError::InternalTracingError)? - } - /// Acquires a permit to execute a tracing call. async fn acquire_trace_permit(&self) -> Result { self.inner.tracing_call_guard.clone().acquire_owned().await } - /// Trace the entire block - fn trace_block_with_sync( + /// Trace the entire block asynchronously + async fn trace_block_with( &self, at: BlockId, transactions: Vec, @@ -107,43 +90,31 @@ where ) -> EthResult> { // replay all transactions of the block let this = self.clone(); - self.inner.eth_api.with_state_at_block(at, move |state| { - let mut results = Vec::with_capacity(transactions.len()); - let mut db = SubState::new(State::new(state)); - - let mut transactions = transactions.into_iter().peekable(); - while let Some(tx) = transactions.next() { - let tx = tx.into_ecrecovered().ok_or(BlockError::InvalidSignature)?; - let tx = tx_env_with_recovered(&tx); - let env = Env { cfg: cfg.clone(), block: block_env.clone(), tx }; - let (result, state_changes) = - this.trace_transaction(opts.clone(), env, at, &mut db)?; - results.push(TraceResult::Success { result }); - - if transactions.peek().is_some() { - // need to apply the state changes of this transaction before executing the next - // transaction - db.commit(state_changes) - } - } + self.inner + .eth_api + .spawn_with_state_at_block(at, move |state| { + let mut results = Vec::with_capacity(transactions.len()); + let mut db = SubState::new(State::new(state)); - Ok(results) - }) - } + let mut transactions = transactions.into_iter().peekable(); + while let Some(tx) = transactions.next() { + let tx = tx.into_ecrecovered().ok_or(BlockError::InvalidSignature)?; + let tx = tx_env_with_recovered(&tx); + let env = Env { cfg: cfg.clone(), block: block_env.clone(), tx }; + let (result, state_changes) = + this.trace_transaction(opts.clone(), env, at, &mut db)?; + results.push(TraceResult::Success { result }); + + if transactions.peek().is_some() { + // need to apply the state changes of this transaction before executing the + // next transaction + db.commit(state_changes) + } + } - /// Trace the 
entire block asynchronously - async fn trace_block_with( - &self, - at: BlockId, - transactions: Vec, - cfg: CfgEnv, - block_env: BlockEnv, - opts: GethDebugTracingOptions, - ) -> EthResult> { - self.on_blocking_task(|this| async move { - this.trace_block_with_sync(at, transactions, cfg, block_env, opts) - }) - .await + Ok(results) + }) + .await } /// Replays the given block and returns the trace of each transaction. @@ -171,17 +142,6 @@ where &self, block_id: BlockId, opts: GethDebugTracingOptions, - ) -> EthResult> { - self.on_blocking_task( - |this| async move { this.try_debug_trace_block(block_id, opts).await }, - ) - .await - } - - async fn try_debug_trace_block( - &self, - block_id: BlockId, - opts: GethDebugTracingOptions, ) -> EthResult> { let block_hash = self .inner @@ -199,7 +159,7 @@ where // its parent block's state let state_at = block.parent_hash; - self.trace_block_with_sync(state_at.into(), block.body, cfg, block_env, opts) + self.trace_block_with(state_at.into(), block.body, cfg, block_env, opts).await } /// Trace the transaction according to the provided options. @@ -221,8 +181,10 @@ where let state_at: BlockId = block.parent_hash.into(); let block_txs = block.body; - self.on_blocking_task(|this| async move { - this.inner.eth_api.with_state_at_block(state_at, |state| { + let this = self.clone(); + self.inner + .eth_api + .spawn_with_state_at_block(state_at, move |state| { // configure env for the target transaction let tx = transaction.into_recovered(); @@ -239,8 +201,7 @@ where let env = Env { cfg, block: block_env, tx: tx_env_with_recovered(&tx) }; this.trace_transaction(opts, env, state_at, &mut db).map(|(trace, _)| trace) }) - }) - .await + .await } /// The debug_traceCall method lets you run an `eth_call` within the context of the given block @@ -250,22 +211,6 @@ where call: CallRequest, block_id: Option, opts: GethDebugTracingCallOptions, - ) -> EthResult { - self.on_blocking_task(|this| async move { - this.try_debug_trace_call(call, block_id, opts).await - }) - .await - } - - /// The debug_traceCall method lets you run an `eth_call` within the context of the given block - /// execution using the final state of parent block as the base. - /// - /// Caution: while this is async, this may still be blocking on necessary DB io. 
- async fn try_debug_trace_call( - &self, - call: CallRequest, - block_id: Option, - opts: GethDebugTracingCallOptions, ) -> EthResult { let at = block_id.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)); let GethDebugTracingCallOptions { tracing_options, state_overrides, block_overrides } = @@ -278,10 +223,13 @@ where GethDebugTracerType::BuiltInTracer(tracer) => match tracer { GethDebugBuiltInTracerType::FourByteTracer => { let mut inspector = FourByteInspector::default(); - let (_res, _) = self + let inspector = self .inner .eth_api - .inspect_call_at(call, at, overrides, &mut inspector) + .spawn_with_call_at(call, at, overrides, move |db, env| { + inspect(db, env, &mut inspector)?; + Ok(inspector) + }) .await?; return Ok(FourByteFrame::from(inspector).into()) } @@ -295,10 +243,13 @@ where .set_record_logs(call_config.with_log.unwrap_or_default()), ); - let _ = self + let inspector = self .inner .eth_api - .inspect_call_at(call, at, overrides, &mut inspector) + .spawn_with_call_at(call, at, overrides, move |db, env| { + inspect(db, env, &mut inspector)?; + Ok(inspector) + }) .await?; let frame = inspector.into_geth_builder().geth_call_traces(call_config); @@ -351,8 +302,14 @@ where let mut inspector = TracingInspector::new(inspector_config); - let (res, _) = - self.inner.eth_api.inspect_call_at(call, at, overrides, &mut inspector).await?; + let (res, inspector) = self + .inner + .eth_api + .spawn_with_call_at(call, at, overrides, move |db, env| { + let (res, _) = inspect(db, env, &mut inspector)?; + Ok((res, inspector)) + }) + .await?; let gas_used = res.result.gas_used(); let return_value = result_output(&res.result).unwrap_or_default().into(); let frame = inspector.into_geth_builder().geth_traces(gas_used, return_value, config); @@ -365,6 +322,8 @@ where /// Returns the trace frame and the state that got updated after executing the transaction. /// /// Note: this does not apply any state overrides if they're configured in the `opts`. + /// + /// Caution: this is blocking and should be performed on a blocking task. fn trace_transaction( &self, opts: GethDebugTracingOptions, diff --git a/crates/rpc/rpc/src/eth/api/mod.rs b/crates/rpc/rpc/src/eth/api/mod.rs index de5d12f8d105..20ecf114e2a9 100644 --- a/crates/rpc/rpc/src/eth/api/mod.rs +++ b/crates/rpc/rpc/src/eth/api/mod.rs @@ -37,6 +37,7 @@ mod sign; mod state; mod transactions; +use crate::TracingCallPool; pub use transactions::{EthTransactions, TransactionSource}; /// `Eth` API trait. @@ -88,6 +89,7 @@ where eth_cache: EthStateCache, gas_oracle: GasPriceOracle, gas_cap: impl Into, + tracing_call_pool: TracingCallPool, ) -> Self { Self::with_spawner( provider, @@ -97,10 +99,12 @@ where gas_oracle, gas_cap.into().into(), Box::::default(), + tracing_call_pool, ) } /// Creates a new, shareable instance. 
+ #[allow(clippy::too_many_arguments)] pub fn with_spawner( provider: Provider, pool: Pool, @@ -109,6 +113,7 @@ where gas_oracle: GasPriceOracle, gas_cap: u64, task_spawner: Box, + tracing_call_pool: TracingCallPool, ) -> Self { // get the block number of the latest block let latest_block = provider @@ -129,6 +134,7 @@ where starting_block: U256::from(latest_block), task_spawner, pending_block: Default::default(), + tracing_call_pool, }; Self { inner: Arc::new(inner) } } @@ -421,4 +427,6 @@ struct EthApiInner { task_spawner: Box, /// Cached pending block if any pending_block: Mutex>, + /// A pool dedicated to tracing calls + tracing_call_pool: TracingCallPool, } diff --git a/crates/rpc/rpc/src/eth/api/server.rs b/crates/rpc/rpc/src/eth/api/server.rs index 1cca7addc90c..663308cd895c 100644 --- a/crates/rpc/rpc/src/eth/api/server.rs +++ b/crates/rpc/rpc/src/eth/api/server.rs @@ -392,7 +392,7 @@ where mod tests { use crate::{ eth::{cache::EthStateCache, gas_oracle::GasPriceOracle}, - EthApi, + EthApi, TracingCallPool, }; use jsonrpsee::types::error::INVALID_PARAMS_CODE; use reth_interfaces::test_utils::{generators, generators::Rng}; @@ -428,6 +428,7 @@ mod tests { cache.clone(), GasPriceOracle::new(provider, Default::default(), cache), ETHEREUM_BLOCK_GAS_LIMIT, + TracingCallPool::build().expect("failed to build tracing pool"), ) } diff --git a/crates/rpc/rpc/src/eth/api/state.rs b/crates/rpc/rpc/src/eth/api/state.rs index 0930bf0b6c50..2887ac58fb8f 100644 --- a/crates/rpc/rpc/src/eth/api/state.rs +++ b/crates/rpc/rpc/src/eth/api/state.rs @@ -146,7 +146,10 @@ where #[cfg(test)] mod tests { use super::*; - use crate::eth::{cache::EthStateCache, gas_oracle::GasPriceOracle}; + use crate::{ + eth::{cache::EthStateCache, gas_oracle::GasPriceOracle}, + TracingCallPool, + }; use reth_primitives::{constants::ETHEREUM_BLOCK_GAS_LIMIT, StorageKey, StorageValue}; use reth_provider::test_utils::{ExtendedAccount, MockEthProvider, NoopProvider}; use reth_transaction_pool::test_utils::testing_pool; @@ -165,6 +168,7 @@ mod tests { cache.clone(), GasPriceOracle::new(NoopProvider::default(), Default::default(), cache), ETHEREUM_BLOCK_GAS_LIMIT, + TracingCallPool::build().expect("failed to build tracing pool"), ); let address = Address::random(); let storage = eth_api.storage_at(address, U256::ZERO.into(), None).unwrap(); @@ -186,6 +190,7 @@ mod tests { cache.clone(), GasPriceOracle::new(mock_provider, Default::default(), cache), ETHEREUM_BLOCK_GAS_LIMIT, + TracingCallPool::build().expect("failed to build tracing pool"), ); let storage_key: U256 = storage_key.into(); diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 514d436b3ab7..6d869daa6048 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -40,7 +40,10 @@ use revm_primitives::{utilities::create_address, Env, ResultAndState, SpecId}; /// Helper alias type for the state's [CacheDB] pub(crate) type StateCacheDB<'r> = CacheDB>>; -/// Commonly used transaction related functions for the [EthApi] type in the `eth_` namespace +/// Commonly used transaction related functions for the [EthApi] type in the `eth_` namespace. +/// +/// Async functions that are spawned onto the +/// [TracingCallPool](crate::tracing_call::TracingCallPool) begin with `spawn_` #[async_trait::async_trait] pub trait EthTransactions: Send + Sync { /// Returns default gas limit to use for `eth_call` and tracing RPC methods. 
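The `spawn_`-prefixed methods introduced in this trait all funnel blocking work onto the `TracingCallPool`. The pool's implementation is not shown in this patch (only the `tracing_pool.rs` to `tracing_call.rs` rename appears in the diffstat), but the `rayon` dependency added to `crates/rpc/rpc/Cargo.toml` above suggests the usual rayon-plus-oneshot shape. A minimal sketch of that pattern, with illustrative names rather than reth's actual API:

```rust
// Sketch of a dedicated blocking pool: CPU-heavy closures run on a rayon
// thread pool while the async caller awaits the result over a tokio
// oneshot channel, keeping the tokio workers responsive.
use std::sync::Arc;
use tokio::sync::oneshot;

#[derive(Clone)]
pub struct BlockingPool {
    pool: Arc<rayon::ThreadPool>,
}

impl BlockingPool {
    pub fn build() -> Result<Self, rayon::ThreadPoolBuildError> {
        Ok(Self { pool: Arc::new(rayon::ThreadPoolBuilder::new().build()?) })
    }

    /// Runs `f` on the pool; the returned future resolves once `f` finishes.
    /// An error means the result was never sent back (e.g. the closure panicked).
    pub async fn spawn<F, R>(&self, f: F) -> Result<R, oneshot::error::RecvError>
    where
        F: FnOnce() -> R + Send + 'static,
        R: Send + 'static,
    {
        let (tx, rx) = oneshot::channel();
        self.pool.spawn(move || {
            let _ = tx.send(f());
        });
        rx.await
    }
}
```

This is the kind of primitive that lets a method like `spawn_trace_transaction_in_block` replay all earlier transactions of a block and trace the target one without stalling the async executor.
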
@@ -54,6 +57,12 @@ pub trait EthTransactions: Send + Sync { where F: FnOnce(StateProviderBox<'_>) -> EthResult; + /// Executes the closure with the state that corresponds to the given [BlockId] on a new task + async fn spawn_with_state_at_block(&self, at: BlockId, f: F) -> EthResult + where + F: FnOnce(StateProviderBox<'_>) -> EthResult + Send + 'static, + T: Send + 'static; + /// Returns the revm evm env for the requested [BlockId] /// /// If the [BlockId] this will return the [BlockId::Hash] of the block the env was configured @@ -121,8 +130,8 @@ pub trait EthTransactions: Send + Sync { async fn send_transaction(&self, request: TransactionRequest) -> EthResult; /// Prepares the state and env for the given [CallRequest] at the given [BlockId] and executes - /// the closure. - async fn with_call_at( + /// the closure on a new task returning the result of the closure. + async fn spawn_with_call_at( &self, request: CallRequest, at: BlockId, @@ -130,7 +139,8 @@ pub trait EthTransactions: Send + Sync { f: F, ) -> EthResult where - F: for<'r> FnOnce(StateCacheDB<'r>, Env) -> EthResult + Send; + F: for<'r> FnOnce(StateCacheDB<'r>, Env) -> EthResult + Send + 'static, + R: Send + 'static; /// Executes the call request at the given [BlockId]. async fn transact_call_at( @@ -140,8 +150,9 @@ pub trait EthTransactions: Send + Sync { overrides: EvmOverrides, ) -> EthResult<(ResultAndState, Env)>; - /// Executes the call request at the given [BlockId] - async fn inspect_call_at( + /// Executes the call request at the given [BlockId] on a new task and returns the result of the + /// inspect call. + async fn spawn_inspect_call_at( &self, request: CallRequest, at: BlockId, @@ -149,24 +160,15 @@ pub trait EthTransactions: Send + Sync { inspector: I, ) -> EthResult<(ResultAndState, Env)> where - I: for<'r> Inspector> + Send; - - /// Executes the call request at the given [BlockId] - async fn inspect_call_at_and_return_state<'a, I>( - &'a self, - request: CallRequest, - at: BlockId, - overrides: EvmOverrides, - inspector: I, - ) -> EthResult<(ResultAndState, Env, StateCacheDB<'a>)> - where - I: Inspector> + Send; + I: for<'r> Inspector> + Send + 'static; /// Executes the transaction on top of the given [BlockId] with a tracer configured by the /// config. /// /// The callback is then called with the [TracingInspector] and the [ResultAndState] after the /// configured [Env] was inspected. + /// + /// Caution: this is blocking fn trace_at( &self, env: Env, @@ -184,7 +186,7 @@ pub trait EthTransactions: Send + Sync { /// /// The callback is then called with the [TracingInspector] and the [ResultAndState] after the /// configured [Env] was inspected. - fn trace_at_with_state( + async fn spawn_trace_at_with_state( &self, env: Env, config: TracingInspectorConfig, @@ -192,7 +194,10 @@ pub trait EthTransactions: Send + Sync { f: F, ) -> EthResult where - F: for<'a> FnOnce(TracingInspector, ResultAndState, StateCacheDB<'a>) -> EthResult; + F: for<'a> FnOnce(TracingInspector, ResultAndState, StateCacheDB<'a>) -> EthResult + + Send + + 'static, + R: Send + 'static; /// Fetches the transaction and the transaction's block async fn transaction_and_block( @@ -206,7 +211,10 @@ pub trait EthTransactions: Send + Sync { /// state by executing them first. /// The callback `f` is invoked with the [ResultAndState] after the transaction was executed and /// the database that points to the beginning of the transaction. 
- async fn trace_transaction_in_block( + /// + /// Note: Implementers should use a threadpool where blocking is allowed, such as + /// [TracingCallPool](crate::tracing_call::TracingCallPool). + async fn spawn_trace_transaction_in_block( &self, hash: H256, config: TracingInspectorConfig, @@ -219,7 +227,9 @@ pub trait EthTransactions: Send + Sync { ResultAndState, StateCacheDB<'a>, ) -> EthResult - + Send; + + Send + + 'static, + R: Send + 'static; } #[async_trait] @@ -245,6 +255,22 @@ where f(state) } + async fn spawn_with_state_at_block(&self, at: BlockId, f: F) -> EthResult + where + F: FnOnce(StateProviderBox<'_>) -> EthResult + Send + 'static, + T: Send + 'static, + { + let this = self.clone(); + self.inner + .tracing_call_pool + .spawn(move || { + let state = this.state_at(at)?; + f(state) + }) + .await + .map_err(|_| EthApiError::InternalTracingError)? + } + async fn evm_env_at(&self, at: BlockId) -> EthResult<(CfgEnv, BlockEnv, BlockId)> { if at.is_pending() { let PendingBlockEnv { cfg, block_env, origin } = self.pending_block_env_and_cfg()?; @@ -473,7 +499,7 @@ where Ok(hash) } - async fn with_call_at( + async fn spawn_with_call_at( &self, request: CallRequest, at: BlockId, @@ -481,15 +507,29 @@ where f: F, ) -> EthResult where - F: for<'r> FnOnce(StateCacheDB<'r>, Env) -> EthResult + Send, + F: for<'r> FnOnce(StateCacheDB<'r>, Env) -> EthResult + Send + 'static, + R: Send + 'static, { let (cfg, block_env, at) = self.evm_env_at(at).await?; - let state = self.state_at(at)?; - let mut db = SubState::new(State::new(state)); - - let env = - prepare_call_env(cfg, block_env, request, self.call_gas_limit(), &mut db, overrides)?; - f(db, env) + let this = self.clone(); + self.inner + .tracing_call_pool + .spawn(move || { + let state = this.state_at(at)?; + let mut db = SubState::new(State::new(state)); + + let env = prepare_call_env( + cfg, + block_env, + request, + this.call_gas_limit(), + &mut db, + overrides, + )?; + f(db, env) + }) + .await + .map_err(|_| EthApiError::InternalTracingError)? 
} async fn transact_call_at( @@ -498,10 +538,11 @@ where at: BlockId, overrides: EvmOverrides, ) -> EthResult<(ResultAndState, Env)> { - self.with_call_at(request, at, overrides, |mut db, env| transact(&mut db, env)).await + self.spawn_with_call_at(request, at, overrides, move |mut db, env| transact(&mut db, env)) + .await } - async fn inspect_call_at( + async fn spawn_inspect_call_at( &self, request: CallRequest, at: BlockId, @@ -509,28 +550,10 @@ where inspector: I, ) -> EthResult<(ResultAndState, Env)> where - I: for<'r> Inspector> + Send, - { - self.with_call_at(request, at, overrides, |db, env| inspect(db, env, inspector)).await - } - - async fn inspect_call_at_and_return_state<'a, I>( - &'a self, - request: CallRequest, - at: BlockId, - overrides: EvmOverrides, - inspector: I, - ) -> EthResult<(ResultAndState, Env, StateCacheDB<'a>)> - where - I: Inspector> + Send, + I: for<'r> Inspector> + Send + 'static, { - let (cfg, block_env, at) = self.evm_env_at(at).await?; - let state = self.state_at(at)?; - let mut db = SubState::new(State::new(state)); - - let env = - prepare_call_env(cfg, block_env, request, self.call_gas_limit(), &mut db, overrides)?; - inspect_and_return_db(db, env, inspector) + self.spawn_with_call_at(request, at, overrides, move |db, env| inspect(db, env, inspector)) + .await } fn trace_at( @@ -553,7 +576,7 @@ where }) } - fn trace_at_with_state( + async fn spawn_trace_at_with_state( &self, env: Env, config: TracingInspectorConfig, @@ -561,15 +584,19 @@ where f: F, ) -> EthResult where - F: for<'a> FnOnce(TracingInspector, ResultAndState, StateCacheDB<'a>) -> EthResult, + F: for<'a> FnOnce(TracingInspector, ResultAndState, StateCacheDB<'a>) -> EthResult + + Send + + 'static, + R: Send + 'static, { - self.with_state_at_block(at, |state| { + self.spawn_with_state_at_block(at, move |state| { let db = SubState::new(State::new(state)); let mut inspector = TracingInspector::new(config); let (res, _, db) = inspect_and_return_db(db, env, &mut inspector)?; f(inspector, res, db) }) + .await } async fn transaction_and_block( @@ -590,7 +617,7 @@ where Ok(block.map(|block| (transaction, block.seal(block_hash)))) } - async fn trace_transaction_in_block( + async fn spawn_trace_transaction_in_block( &self, hash: H256, config: TracingInspectorConfig, @@ -603,7 +630,9 @@ where ResultAndState, StateCacheDB<'a>, ) -> EthResult - + Send, + + Send + + 'static, + R: Send + 'static, { let (transaction, block) = match self.transaction_and_block(hash).await? 
{ None => return Ok(None), @@ -618,7 +647,7 @@ where let parent_block = block.parent_hash; let block_txs = block.body; - self.with_state_at_block(parent_block.into(), |state| { + self.spawn_with_state_at_block(parent_block.into(), move |state| { let mut db = SubState::new(State::new(state)); // replay all transactions prior to the targeted transaction @@ -630,6 +659,7 @@ where let (res, _, db) = inspect_and_return_db(db, env, &mut inspector)?; f(tx_info, inspector, res, db) }) + .await .map(Some) } } @@ -878,7 +908,7 @@ mod tests { use super::*; use crate::{ eth::{cache::EthStateCache, gas_oracle::GasPriceOracle}, - EthApi, + EthApi, TracingCallPool, }; use reth_network_api::noop::NoopNetwork; use reth_primitives::{constants::ETHEREUM_BLOCK_GAS_LIMIT, hex_literal::hex, Bytes}; @@ -900,6 +930,7 @@ mod tests { cache.clone(), GasPriceOracle::new(noop_provider, Default::default(), cache), ETHEREUM_BLOCK_GAS_LIMIT, + TracingCallPool::build().expect("failed to build tracing pool"), ); // https://etherscan.io/tx/0xa694b71e6c128a2ed8e2e0f6770bddbe52e3bb8f10e8472f9a79ab81497a8b5d diff --git a/crates/rpc/rpc/src/lib.rs b/crates/rpc/rpc/src/lib.rs index b885608139a7..e1818523f296 100644 --- a/crates/rpc/rpc/src/lib.rs +++ b/crates/rpc/rpc/src/lib.rs @@ -31,7 +31,6 @@ //! disk-io, hence these calls are spawned as futures to a blocking task manually. mod admin; -mod call_guard; mod debug; mod engine; pub mod eth; @@ -41,11 +40,11 @@ mod otterscan; mod reth; mod rpc; mod trace; +pub mod tracing_call; mod txpool; mod web3; pub use admin::AdminApi; -pub use call_guard::TracingCallGuard; pub use debug::DebugApi; pub use engine::{EngineApi, EngineEthApi}; pub use eth::{EthApi, EthApiSpec, EthFilter, EthPubSub, EthSubscriptionIdProvider}; @@ -55,6 +54,7 @@ pub use otterscan::OtterscanApi; pub use reth::RethApi; pub use rpc::RPCApi; pub use trace::TraceApi; +pub use tracing_call::{TracingCallGuard, TracingCallPool}; pub use txpool::TxPoolApi; pub use web3::Web3Api; diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index ff2d09e50c73..494741097c7b 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -1,8 +1,7 @@ use crate::{ eth::{ - cache::EthStateCache, error::{EthApiError, EthResult}, - revm_utils::{inspect, prepare_call_env, EvmOverrides}, + revm_utils::{inspect, inspect_and_return_db, prepare_call_env, EvmOverrides}, utils::recover_raw_transaction, EthTransactions, }, @@ -29,11 +28,10 @@ use reth_rpc_types::{ trace::{filter::TraceFilter, parity::*}, BlockError, BlockOverrides, CallRequest, Index, TransactionInfo, }; -use reth_tasks::TaskSpawner; use revm::{db::CacheDB, primitives::Env}; use revm_primitives::{db::DatabaseCommit, ExecutionResult, ResultAndState}; -use std::{collections::HashSet, future::Future, sync::Arc}; -use tokio::sync::{oneshot, AcquireError, OwnedSemaphorePermit}; +use std::{collections::HashSet, sync::Arc}; +use tokio::sync::{AcquireError, OwnedSemaphorePermit}; /// `trace` API implementation. 
/// @@ -51,20 +49,8 @@ impl TraceApi { } /// Create a new instance of the [TraceApi] - pub fn new( - provider: Provider, - eth_api: Eth, - eth_cache: EthStateCache, - task_spawner: Box, - tracing_call_guard: TracingCallGuard, - ) -> Self { - let inner = Arc::new(TraceApiInner { - provider, - eth_api, - eth_cache, - task_spawner, - tracing_call_guard, - }); + pub fn new(provider: Provider, eth_api: Eth, tracing_call_guard: TracingCallGuard) -> Self { + let inner = Arc::new(TraceApiInner { provider, eth_api, tracing_call_guard }); Self { inner } } @@ -83,23 +69,6 @@ where Provider: BlockReader + StateProviderFactory + EvmEnvProvider + ChainSpecProvider + 'static, Eth: EthTransactions + 'static, { - /// Executes the future on a new blocking task. - async fn on_blocking_task(&self, c: C) -> EthResult - where - C: FnOnce(Self) -> F, - F: Future> + Send + 'static, - R: Send + 'static, - { - let (tx, rx) = oneshot::channel(); - let this = self.clone(); - let f = c(this); - self.inner.task_spawner.spawn_blocking(Box::pin(async move { - let res = f.await; - let _ = tx.send(res); - })); - rx.await.map_err(|_| EthApiError::InternalTracingError)? - } - /// Executes the given call and returns a number of possible traces for it. pub async fn trace_call( &self, @@ -108,43 +77,23 @@ where block_id: Option, state_overrides: Option, block_overrides: Option>, - ) -> EthResult { - self.on_blocking_task(|this| async move { - this.try_trace_call( - call, - trace_types, - block_id, - EvmOverrides::new(state_overrides, block_overrides), - ) - .await - }) - .await - } - - async fn try_trace_call( - &self, - call: CallRequest, - trace_types: HashSet, - block_id: Option, - overrides: EvmOverrides, ) -> EthResult { let at = block_id.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)); let config = tracing_config(&trace_types); + let overrides = EvmOverrides::new(state_overrides, block_overrides); let mut inspector = TracingInspector::new(config); - - let (res, _, db) = self - .inner + self.inner .eth_api - .inspect_call_at_and_return_state(call, at, overrides, &mut inspector) - .await?; - - let trace_res = inspector.into_parity_builder().into_trace_results_with_state( - res, - &trace_types, - &db, - )?; - - Ok(trace_res) + .spawn_with_call_at(call, at, overrides, move |db, env| { + let (res, _, db) = inspect_and_return_db(db, env, &mut inspector)?; + let trace_res = inspector.into_parity_builder().into_trace_results_with_state( + res, + &trace_types, + &db, + )?; + Ok(trace_res) + }) + .await } /// Traces a call to `eth_sendRawTransaction` without making the call, returning the traces. @@ -166,16 +115,16 @@ where let config = tracing_config(&trace_types); - self.on_blocking_task(|this| async move { - this.inner.eth_api.trace_at_with_state(env, config, at, |inspector, res, db| { + self.inner + .eth_api + .spawn_trace_at_with_state(env, config, at, move |inspector, res, db| { Ok(inspector.into_parity_builder().into_trace_results_with_state( res, &trace_types, &db, )?) }) - }) - .await + .await } /// Performs multiple call traces on top of the same block. i.e. 
transaction n will be executed @@ -190,10 +139,11 @@ where let at = block_id.unwrap_or(BlockId::Number(BlockNumberOrTag::Pending)); let (cfg, block_env, at) = self.inner.eth_api.evm_env_at(at).await?; - self.on_blocking_task(|this| async move { - let gas_limit = this.inner.eth_api.call_gas_limit(); - // execute all transactions on top of each other and record the traces - this.inner.eth_api.with_state_at_block(at, move |state| { + let gas_limit = self.inner.eth_api.call_gas_limit(); + // execute all transactions on top of each other and record the traces + self.inner + .eth_api + .spawn_with_state_at_block(at, move |state| { let mut results = Vec::with_capacity(calls.len()); let mut db = SubState::new(State::new(state)); @@ -239,8 +189,7 @@ where Ok(results) }) - }) - .await + .await } /// Replays a transaction, returning the traces. @@ -250,22 +199,19 @@ where trace_types: HashSet, ) -> EthResult { let config = tracing_config(&trace_types); - self.on_blocking_task(|this| async move { - this.inner - .eth_api - .trace_transaction_in_block(hash, config, |_, inspector, res, db| { - let trace_res = inspector.into_parity_builder().into_trace_results_with_state( - res, - &trace_types, - &db, - )?; - Ok(trace_res) - }) - .await - .transpose() - .ok_or_else(|| EthApiError::TransactionNotFound)? - }) - .await + self.inner + .eth_api + .spawn_trace_transaction_in_block(hash, config, move |_, inspector, res, db| { + let trace_res = inspector.into_parity_builder().into_trace_results_with_state( + res, + &trace_types, + &db, + )?; + Ok(trace_res) + }) + .await + .transpose() + .ok_or_else(|| EthApiError::TransactionNotFound)? } /// Returns transaction trace objects at the given index @@ -308,22 +254,18 @@ where &self, hash: H256, ) -> EthResult>> { - self.on_blocking_task(|this| async move { - this.inner - .eth_api - .trace_transaction_in_block( - hash, - TracingInspectorConfig::default_parity(), - |tx_info, inspector, _, _| { - let traces = inspector - .into_parity_builder() - .into_localized_transaction_traces(tx_info); - Ok(traces) - }, - ) - .await - }) - .await + self.inner + .eth_api + .spawn_trace_transaction_in_block( + hash, + TracingInspectorConfig::default_parity(), + move |tx_info, inspector, _, _| { + let traces = + inspector.into_parity_builder().into_localized_transaction_traces(tx_info); + Ok(traces) + }, + ) + .await } /// Executes all transactions of a block and returns a list of callback results. 
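The `spawn_*` helpers introduced above all follow the same shape: move a CPU-bound tracing closure onto a dedicated rayon pool (the `tracing_call.rs` module further below) and await the result from tokio over a oneshot channel, which is also why the closures are now `move` and carry `'static` bounds. A minimal sketch of that mechanism, assuming `rayon` and `tokio` as dependencies and with simplified error handling; the `spawn_trace_task` name is illustrative, not part of the patch:

```rust
use tokio::sync::oneshot;

/// Run a CPU-bound tracing closure on a rayon pool and await it from async code.
async fn spawn_trace_task<F, R>(pool: &rayon::ThreadPool, f: F) -> R
where
    F: FnOnce() -> R + Send + 'static,
    R: Send + 'static,
{
    let (tx, rx) = oneshot::channel();
    pool.spawn(move || {
        // Runs on a rayon worker thread, off the tokio runtime.
        let _ = tx.send(f());
    });
    // If the closure panics, the sender is dropped; a real implementation would
    // surface that as an error instead of panicking here.
    rx.await.expect("tracing task failed")
}
```

Because the closure outlives the RPC future's stack frame once handed to the pool, borrowed state (inspectors, database handles) must be owned or `move`d in, which is exactly the change made to the call sites in this patch.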
@@ -371,48 +313,46 @@ where let block_hash = block.hash; let transactions = block.body; - self.on_blocking_task(|this| async move { - // replay all transactions of the block - this.inner - .eth_api - .with_state_at_block(state_at.into(), move |state| { - let mut results = Vec::with_capacity(transactions.len()); - let mut db = SubState::new(State::new(state)); - - let mut transactions = transactions.into_iter().enumerate().peekable(); - - while let Some((idx, tx)) = transactions.next() { - let tx = tx.into_ecrecovered().ok_or(BlockError::InvalidSignature)?; - let tx_info = TransactionInfo { - hash: Some(tx.hash()), - index: Some(idx as u64), - block_hash: Some(block_hash), - block_number: Some(block_env.number.try_into().unwrap_or(u64::MAX)), - base_fee: Some(block_env.basefee.try_into().unwrap_or(u64::MAX)), - }; - - let tx = tx_env_with_recovered(&tx); - let env = Env { cfg: cfg.clone(), block: block_env.clone(), tx }; - - let mut inspector = TracingInspector::new(config); - let (res, _) = inspect(&mut db, env, &mut inspector)?; - let ResultAndState { result, state } = res; - results.push(f(tx_info, inspector, result, &state, &db)?); - - // need to apply the state changes of this transaction before executing the - // next transaction - if transactions.peek().is_some() { - // need to apply the state changes of this transaction before executing - // the next transaction - db.commit(state) - } + // replay all transactions of the block + self.inner + .eth_api + .spawn_with_state_at_block(state_at.into(), move |state| { + let mut results = Vec::with_capacity(transactions.len()); + let mut db = SubState::new(State::new(state)); + + let mut transactions = transactions.into_iter().enumerate().peekable(); + + while let Some((idx, tx)) = transactions.next() { + let tx = tx.into_ecrecovered().ok_or(BlockError::InvalidSignature)?; + let tx_info = TransactionInfo { + hash: Some(tx.hash()), + index: Some(idx as u64), + block_hash: Some(block_hash), + block_number: Some(block_env.number.try_into().unwrap_or(u64::MAX)), + base_fee: Some(block_env.basefee.try_into().unwrap_or(u64::MAX)), + }; + + let tx = tx_env_with_recovered(&tx); + let env = Env { cfg: cfg.clone(), block: block_env.clone(), tx }; + + let mut inspector = TracingInspector::new(config); + let (res, _) = inspect(&mut db, env, &mut inspector)?; + let ResultAndState { result, state } = res; + results.push(f(tx_info, inspector, result, &state, &db)?); + + // need to apply the state changes of this transaction before executing the + // next transaction + if transactions.peek().is_some() { + // need to apply the state changes of this transaction before executing + // the next transaction + db.commit(state) } + } - Ok(results) - }) - .map(Some) - }) - .await + Ok(results) + }) + .await + .map(Some) } /// Returns traces created at given block. @@ -626,11 +566,6 @@ struct TraceApiInner { provider: Provider, /// Access to commonly used code of the `eth` namespace eth_api: Eth, - /// The async cache frontend for eth-related data - #[allow(unused)] // we need this for trace_filter eventually - eth_cache: EthStateCache, - /// The type that can spawn tasks which would otherwise be blocking. 
-    task_spawner: Box<dyn TaskSpawner>,
     // restrict the number of concurrent calls to `trace_*`
     tracing_call_guard: TracingCallGuard,
 }
 
diff --git a/crates/rpc/rpc-builder/src/tracing_pool.rs b/crates/rpc/rpc/src/tracing_call.rs
similarity index 71%
rename from crates/rpc/rpc-builder/src/tracing_pool.rs
rename to crates/rpc/rpc/src/tracing_call.rs
index dd3056117801..26956ae2d50f 100644
--- a/crates/rpc/rpc-builder/src/tracing_pool.rs
+++ b/crates/rpc/rpc/src/tracing_call.rs
@@ -8,9 +8,46 @@ use std::{
     task::{ready, Context, Poll},
     thread,
 };
-use tokio::sync::oneshot;
+use tokio::sync::{oneshot, AcquireError, OwnedSemaphorePermit, Semaphore};
+
+/// RPC Tracing call guard semaphore.
+///
+/// This is used to restrict the number of concurrent RPC requests to tracing methods like
+/// `debug_traceTransaction` because they can consume a lot of memory and CPU.
+///
+/// This type serves as an entry guard for the [TracingCallPool] and is used to rate limit
+/// parallel tracing calls on the pool.
+#[derive(Clone, Debug)]
+pub struct TracingCallGuard(Arc<Semaphore>);
+
+impl TracingCallGuard {
+    /// Create a new `TracingCallGuard` with the given maximum number of tracing calls in parallel.
+    pub fn new(max_tracing_requests: u32) -> Self {
+        Self(Arc::new(Semaphore::new(max_tracing_requests as usize)))
+    }
+
+    /// See also [Semaphore::acquire_owned]
+    pub async fn acquire_owned(self) -> Result<OwnedSemaphorePermit, AcquireError> {
+        self.0.acquire_owned().await
+    }
+
+    /// See also [Semaphore::acquire_many_owned]
+    pub async fn acquire_many_owned(self, n: u32) -> Result<OwnedSemaphorePermit, AcquireError> {
+        self.0.acquire_many_owned(n).await
+    }
+}
 
 /// Used to execute tracing calls on a rayon threadpool from within a tokio runtime.
+///
+/// This is a dedicated threadpool for tracing calls which are CPU bound.
+/// RPC calls that perform blocking IO (disk lookups) are not executed on this pool but on the
+/// tokio runtime's blocking pool, which performs poorly with CPU bound tasks. Once the tokio
+/// blocking pool is saturated, additional tasks are queued; tracing calls could then clog that
+/// queue and block other RPC calls.
+///
+/// See also [tokio-docs] for more information.
+/// +/// [tokio-docs]: https://docs.rs/tokio/latest/tokio/index.html#cpu-bound-tasks-and-blocking-code #[derive(Clone, Debug)] pub struct TracingCallPool { pool: Arc, From 1c4d12748863f30ec8307d054335908573ed5d4c Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 26 Jul 2023 17:36:46 +0100 Subject: [PATCH 254/722] feat(bin): `db clear` (#3934) --- bin/reth/src/db/clear.rs | 41 ++++++++++++++++++++++++++++++++++++++++ bin/reth/src/db/mod.rs | 7 +++++++ 2 files changed, 48 insertions(+) create mode 100644 bin/reth/src/db/clear.rs diff --git a/bin/reth/src/db/clear.rs b/bin/reth/src/db/clear.rs new file mode 100644 index 000000000000..0bd816db8b45 --- /dev/null +++ b/bin/reth/src/db/clear.rs @@ -0,0 +1,41 @@ +use clap::Parser; + +use reth_db::{ + database::Database, + table::Table, + transaction::{DbTx, DbTxMut}, + TableViewer, Tables, +}; + +/// The arguments for the `reth db clear` command +#[derive(Parser, Debug)] +pub struct Command { + /// Table name + #[arg()] + pub table: Tables, +} + +impl Command { + /// Execute `db clear` command + pub fn execute(self, db: &DB) -> eyre::Result<()> { + self.table.view(&ClearViewer { db })?; + + Ok(()) + } +} + +struct ClearViewer<'a, DB: Database> { + db: &'a DB, +} + +impl TableViewer<()> for ClearViewer<'_, DB> { + type Error = eyre::Report; + + fn view(&self) -> Result<(), Self::Error> { + let tx = self.db.tx_mut()?; + tx.clear::()?; + tx.commit()?; + + Ok(()) + } +} diff --git a/bin/reth/src/db/mod.rs b/bin/reth/src/db/mod.rs index 4670683c43d5..04b51a15462d 100644 --- a/bin/reth/src/db/mod.rs +++ b/bin/reth/src/db/mod.rs @@ -17,6 +17,7 @@ use reth_db::{ use reth_primitives::ChainSpec; use std::sync::Arc; +mod clear; mod get; mod list; /// DB List TUI @@ -71,6 +72,8 @@ pub enum Subcommands { Get(get::Command), /// Deletes all database entries Drop, + /// Deletes all table entries + Clear(clear::Command), /// Lists current and local database versions Version, /// Returns the full database path @@ -172,6 +175,10 @@ impl Command { let mut tool = DbTool::new(&db, self.chain.clone())?; tool.drop(db_path)?; } + Subcommands::Clear(command) => { + let db = open_db(&db_path, self.db.log_level)?; + command.execute(&db)?; + } Subcommands::Version => { let local_db_version = match get_db_version(&db_path) { Ok(version) => Some(version), From 0cfc424291bb2ce83597875083dd52958fbe29e7 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 26 Jul 2023 17:38:36 +0100 Subject: [PATCH 255/722] feat(pruner): percentage progress and prune only if key exists (#3932) --- crates/prune/src/pruner.rs | 33 ++++++++++--------- .../src/providers/database/provider.rs | 7 ++-- 2 files changed, 22 insertions(+), 18 deletions(-) diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs index de020dcd272a..96f41c9ae213 100644 --- a/crates/prune/src/pruner.rs +++ b/crates/prune/src/pruner.rs @@ -160,14 +160,18 @@ impl Pruner { return Ok(()) } }; + let total = range.clone().count(); + let mut processed = 0; provider.prune_table_in_batches::( range, self.batch_sizes.receipts, - |receipts| { + |entries| { + processed += entries; trace!( target: "pruner", - %receipts, + %entries, + progress = format!("{:.1}%", 100.0 * processed as f64 / total as f64), "Pruned receipts" ); }, @@ -201,6 +205,8 @@ impl Pruner { } }; let last_tx_num = *range.end(); + let total = range.clone().count(); + let mut processed = 0; for i in range.step_by(self.batch_sizes.transaction_lookup) { // The `min` ensures that the transaction range doesn't exceed the last transaction @@ -223,19 
+229,16 @@ impl Pruner { } // Pre-sort hashes to prune them in order - hashes.sort(); - - provider.prune_table_in_batches::( - hashes, - self.batch_sizes.transaction_lookup, - |entries| { - trace!( - target: "pruner", - %entries, - "Pruned transaction lookup" - ); - }, - )?; + hashes.sort_unstable(); + + let entries = provider.prune_table::(hashes)?; + processed += entries; + trace!( + target: "pruner", + %entries, + progress = format!("{:.1}%", 100.0 * processed as f64 / total as f64), + "Pruned transaction lookup" + ); } provider.save_prune_checkpoint( diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 1f871cfa29aa..446065457c3c 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -640,7 +640,7 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> DatabaseProvider<'this, TX> { &self, keys: impl IntoIterator, batch_size: usize, - batch_callback: impl Fn(usize), + mut batch_callback: impl FnMut(usize), ) -> std::result::Result where T: Table, @@ -650,8 +650,9 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> DatabaseProvider<'this, TX> { let mut deleted = 0; for key in keys { - cursor.seek_exact(key)?; - cursor.delete_current()?; + if cursor.seek_exact(key)?.is_some() { + cursor.delete_current()?; + } deleted += 1; if deleted % batch_size == 0 { From 96b108f25a0ea14a812c78efce523ba90fd3bb59 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 26 Jul 2023 18:39:38 +0200 Subject: [PATCH 256/722] feat: _V3 engine api skeletons (#3931) --- crates/rpc/rpc-api/src/engine.rs | 19 +++++++++++++++++++ crates/rpc/rpc-engine-api/src/engine_api.rs | 15 ++++++++++++++- .../rpc/rpc-types/src/eth/engine/payload.rs | 12 ++++++++++++ 3 files changed, 45 insertions(+), 1 deletion(-) diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index 756cb5473c7c..65b5039965ac 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -21,6 +21,17 @@ pub trait EngineApi { #[method(name = "newPayloadV2")] async fn new_payload_v2(&self, payload: ExecutionPayload) -> RpcResult; + /// Post Cancun payload handler + /// + /// See also + #[method(name = "newPayloadV3")] + async fn new_payload_v3( + &self, + payload: ExecutionPayload, + versioned_hashes: Vec, + parent_beacon_block_root: H256, + ) -> RpcResult; + /// See also /// /// Caution: This should not accept the `withdrawals` field @@ -59,6 +70,14 @@ pub trait EngineApi { #[method(name = "getPayloadV2")] async fn get_payload_v2(&self, payload_id: PayloadId) -> RpcResult; + /// Post Cancun payload handler which also returns a blobs bundle. + /// + /// Returns the most recent version of the payload that is available in the corresponding + /// payload build process at the time of receiving this call. Note: + /// > Provider software MAY stop the corresponding build process after serving this call. 
+ #[method(name = "getPayloadV3")] + async fn get_payload_v3(&self, payload_id: PayloadId) -> RpcResult; + /// See also #[method(name = "getPayloadBodiesByHashV1")] async fn get_payload_bodies_by_hash_v1( diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 2a0581595f49..5cb0b9bedac2 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -4,7 +4,7 @@ use jsonrpsee_core::RpcResult; use reth_beacon_consensus::BeaconConsensusEngineHandle; use reth_interfaces::consensus::ForkchoiceState; use reth_payload_builder::PayloadStore; -use reth_primitives::{BlockHash, BlockHashOrNumber, BlockNumber, ChainSpec, Hardfork, U64}; +use reth_primitives::{BlockHash, BlockHashOrNumber, BlockNumber, ChainSpec, Hardfork, H256, U64}; use reth_provider::{BlockReader, EvmEnvProvider, HeaderProvider, StateProviderFactory}; use reth_rpc_api::EngineApiServer; use reth_rpc_types::engine::{ @@ -355,6 +355,15 @@ where Ok(EngineApi::new_payload_v2(self, payload).await?) } + async fn new_payload_v3( + &self, + _payload: ExecutionPayload, + _versioned_hashes: Vec, + _parent_beacon_block_root: H256, + ) -> RpcResult { + Err(jsonrpsee_types::error::ErrorCode::MethodNotFound.into()) + } + /// Handler for `engine_forkchoiceUpdatedV1` /// See also /// @@ -409,6 +418,10 @@ where Ok(EngineApi::get_payload_v2(self, payload_id).await?) } + async fn get_payload_v3(&self, _payload_id: PayloadId) -> RpcResult { + Err(jsonrpsee_types::error::ErrorCode::MethodNotFound.into()) + } + /// Handler for `engine_getPayloadBodiesByHashV1` /// See also async fn get_payload_bodies_by_hash_v1( diff --git a/crates/rpc/rpc-types/src/eth/engine/payload.rs b/crates/rpc/rpc-types/src/eth/engine/payload.rs index 5b63e1b8cc5c..6085377bee59 100644 --- a/crates/rpc/rpc-types/src/eth/engine/payload.rs +++ b/crates/rpc/rpc-types/src/eth/engine/payload.rs @@ -46,6 +46,10 @@ pub struct ExecutionPayloadEnvelope { /// The expected value to be received by the feeRecipient in wei #[serde(rename = "blockValue")] pub block_value: U256, + // + // // TODO(mattsse): for V3 + // #[serde(rename = "blobsBundle", skip_serializing_if = "Option::is_none")] + // pub blobs_bundle: Option, } impl ExecutionPayloadEnvelope { @@ -187,6 +191,14 @@ impl TryFrom for SealedBlock { } } +/// This includes all bundled blob related data of an executed payload. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct BlobsBundleV1 { + pub commitments: Vec, + pub proofs: Vec, + pub blobs: Vec, +} + /// Error that can occur when handling payloads. 
#[derive(thiserror::Error, Debug)] pub enum PayloadError { From aa5d39dd6d985fe9773f2d86c28fb1812af8767b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 26 Jul 2023 18:42:05 +0200 Subject: [PATCH 257/722] feat: add Hardfork::Cancun (#3933) Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> --- bin/reth/src/init.rs | 2 +- crates/primitives/src/chain/spec.rs | 27 +++++++++++++++++++++++++++ crates/primitives/src/hardfork.rs | 6 +++++- 3 files changed, 33 insertions(+), 2 deletions(-) diff --git a/bin/reth/src/init.rs b/bin/reth/src/init.rs index a70949dbbe9d..cb6f885801b1 100644 --- a/bin/reth/src/init.rs +++ b/bin/reth/src/init.rs @@ -269,7 +269,7 @@ mod tests { ..Default::default() }, hardforks: BTreeMap::default(), - fork_timestamps: ForkTimestamps { shanghai: None }, + fork_timestamps: ForkTimestamps::default(), genesis_hash: None, paris_block_and_final_difficulty: None, }); diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index 356c9409b7e0..d86d375a44b3 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -306,6 +306,15 @@ impl ChainSpec { .unwrap_or_else(|| self.is_fork_active_at_timestamp(Hardfork::Shanghai, timestamp)) } + /// Convenience method to check if [Hardfork::Cancun] is active at a given timestamp. + #[inline] + pub fn is_cancun_activated_at_timestamp(&self, timestamp: u64) -> bool { + self.fork_timestamps + .cancun + .map(|cancun| timestamp >= cancun) + .unwrap_or_else(|| self.is_fork_active_at_timestamp(Hardfork::Cancun, timestamp)) + } + /// Creates a [`ForkFilter`](crate::ForkFilter) for the block described by [Head]. pub fn fork_filter(&self, head: Head) -> ForkFilter { let forks = self.forks_iter().filter_map(|(_, condition)| { @@ -433,6 +442,8 @@ impl From for ChainSpec { pub struct ForkTimestamps { /// The timestamp of the shanghai fork pub shanghai: Option, + /// The timestamp of the cancun fork + pub cancun: Option, } impl ForkTimestamps { @@ -442,6 +453,9 @@ impl ForkTimestamps { if let Some(shanghai) = forks.get(&Hardfork::Shanghai).and_then(|f| f.as_timestamp()) { timestamps = timestamps.shanghai(shanghai); } + if let Some(cancun) = forks.get(&Hardfork::Cancun).and_then(|f| f.as_timestamp()) { + timestamps = timestamps.cancun(cancun); + } timestamps } @@ -450,6 +464,12 @@ impl ForkTimestamps { self.shanghai = Some(shanghai); self } + + /// Sets the given cancun timestamp + pub fn cancun(mut self, cancun: u64) -> Self { + self.cancun = Some(cancun); + self + } } /// A helper type for compatibility with geth's config @@ -614,6 +634,13 @@ impl ChainSpecBuilder { self } + /// Enable Cancun at genesis. + pub fn cancun_activated(mut self) -> Self { + self = self.paris_activated(); + self.hardforks.insert(Hardfork::Cancun, ForkCondition::Timestamp(0)); + self + } + /// Build the resulting [`ChainSpec`]. /// /// # Panics diff --git a/crates/primitives/src/hardfork.rs b/crates/primitives/src/hardfork.rs index f26ffbcc4032..ba87a53ef77a 100644 --- a/crates/primitives/src/hardfork.rs +++ b/crates/primitives/src/hardfork.rs @@ -39,6 +39,8 @@ pub enum Hardfork { Paris, /// Shanghai. Shanghai, + /// Cancun. 
+ Cancun, } impl Hardfork { @@ -82,6 +84,7 @@ impl FromStr for Hardfork { "grayglacier" => Hardfork::GrayGlacier, "paris" => Hardfork::Paris, "shanghai" => Hardfork::Shanghai, + "cancun" => Hardfork::Cancun, _ => return Err(format!("Unknown hardfork: {s}")), }; Ok(hardfork) @@ -97,7 +100,6 @@ impl Display for Hardfork { #[cfg(test)] mod tests { use super::*; - use crate::{Chain, Genesis}; use std::collections::BTreeMap; @@ -120,6 +122,7 @@ mod tests { "grayglacier", "PARIS", "ShAnGhAI", + "CaNcUn", ]; let expected_hardforks = [ Hardfork::Frontier, @@ -138,6 +141,7 @@ mod tests { Hardfork::GrayGlacier, Hardfork::Paris, Hardfork::Shanghai, + Hardfork::Cancun, ]; let hardforks: Vec = From 74bbe5afa86ce1917a75e8f23cbee78d22457eff Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 26 Jul 2023 17:44:46 +0100 Subject: [PATCH 258/722] fix(stages): transaction lookup stage checkpoint calculation (#3909) --- crates/stages/src/stages/tx_lookup.rs | 79 +++++++++++++++++++++++++-- 1 file changed, 73 insertions(+), 6 deletions(-) diff --git a/crates/stages/src/stages/tx_lookup.rs b/crates/stages/src/stages/tx_lookup.rs index 09e0e6d674fa..211266d45d81 100644 --- a/crates/stages/src/stages/tx_lookup.rs +++ b/crates/stages/src/stages/tx_lookup.rs @@ -11,9 +11,9 @@ use reth_db::{ use reth_primitives::{ keccak256, stage::{EntitiesCheckpoint, StageCheckpoint, StageId}, - TransactionSignedNoHash, TxNumber, H256, + PrunePart, TransactionSignedNoHash, TxNumber, H256, }; -use reth_provider::DatabaseProviderRW; +use reth_provider::{BlockReader, DatabaseProviderRW, PruneCheckpointReader}; use tokio::sync::mpsc; use tracing::*; @@ -183,9 +183,20 @@ fn calculate_hash( fn stage_checkpoint( provider: &DatabaseProviderRW<'_, &DB>, -) -> Result { +) -> Result { + let pruned_entries = provider + .get_prune_checkpoint(PrunePart::TransactionLookup)? + .map(|checkpoint| provider.block_body_indices(checkpoint.block_number)) + .transpose()? + .flatten() + // +1 is needed because TxNumber is 0-indexed + .map(|body| body.last_tx_num() + 1) + .unwrap_or_default(); Ok(EntitiesCheckpoint { - processed: provider.tx_ref().entries::()? as u64, + // If `TxHashNumber` table was pruned, we will have a number of entries in it not matching + // the actual number of processed transactions. To fix that, we add the number of pruned + // `TxHashNumber` entries. + processed: provider.tx_ref().entries::()? as u64 + pruned_entries, total: provider.tx_ref().entries::()? as u64, }) } @@ -202,8 +213,13 @@ mod tests { generators, generators::{random_block, random_block_range}, }; - use reth_primitives::{stage::StageUnitCheckpoint, BlockNumber, SealedBlock, H256}; - use reth_provider::{BlockReader, ProviderError, TransactionsProvider}; + use reth_primitives::{ + stage::StageUnitCheckpoint, BlockNumber, PruneCheckpoint, PruneMode, SealedBlock, H256, + MAINNET, + }; + use reth_provider::{ + BlockReader, ProviderError, ProviderFactory, PruneCheckpointWriter, TransactionsProvider, + }; // Implement stage test suite. 
stage_test_suite_ext!(TransactionLookupTestRunner, transaction_lookup); @@ -321,6 +337,57 @@ mod tests { assert!(runner.validate_execution(first_input, result.ok()).is_ok(), "validation failed"); } + #[test] + fn stage_checkpoint_pruned() { + let tx = TestTransaction::default(); + let mut rng = generators::rng(); + + let blocks = random_block_range(&mut rng, 0..=100, H256::zero(), 0..10); + tx.insert_blocks(blocks.iter(), None).expect("insert blocks"); + + let max_pruned_block = 30; + let max_processed_block = 70; + + let mut tx_hash_numbers = Vec::new(); + let mut tx_hash_number = 0; + for block in &blocks[..=max_processed_block] { + for transaction in &block.body { + if block.number > max_pruned_block { + tx_hash_numbers.push((transaction.hash, tx_hash_number)); + } + tx_hash_number += 1; + } + } + tx.insert_tx_hash_numbers(tx_hash_numbers).expect("insert tx hash numbers"); + + let provider = tx.inner_rw(); + provider + .save_prune_checkpoint( + PrunePart::TransactionLookup, + PruneCheckpoint { + block_number: max_pruned_block as BlockNumber, + prune_mode: PruneMode::Full, + }, + ) + .expect("save stage checkpoint"); + provider.commit().expect("commit"); + + let db = tx.inner_raw(); + let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone()); + let provider = factory.provider_rw().expect("provider rw"); + + assert_eq!( + stage_checkpoint(&provider).expect("stage checkpoint"), + EntitiesCheckpoint { + processed: blocks[..=max_processed_block] + .iter() + .map(|block| block.body.len() as u64) + .sum::(), + total: blocks.iter().map(|block| block.body.len() as u64).sum::() + } + ); + } + struct TransactionLookupTestRunner { tx: TestTransaction, threshold: u64, From a71ad1e8d78445505db1c7b8a198d7f67bf55cdb Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 26 Jul 2023 18:02:21 +0100 Subject: [PATCH 259/722] docs: `db clear` CLI (#3936) --- book/cli/db.md | 153 ++++++++++++++++++++++++++++++++++++------------- 1 file changed, 113 insertions(+), 40 deletions(-) diff --git a/book/cli/db.md b/book/cli/db.md index 95627871c42d..5772fe612cf1 100644 --- a/book/cli/db.md +++ b/book/cli/db.md @@ -16,6 +16,8 @@ Commands: Gets the content of a table for the given key drop Deletes all database entries + clear + Deletes all table entries version Lists current and local database versions path @@ -95,14 +97,85 @@ Display: Silence all log output ``` -## `reth db drop` +## `reth db stats` -Deletes all database entries +Lists all the tables, their entry count and their size ```bash -$ reth db drop --help +$ reth db stats --help -Usage: reth db drop [OPTIONS] +Usage: reth db stats [OPTIONS] + +Options: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --chain + The chain this node is running. + + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + - mainnet + - goerli + - sepolia + + [default: mainnet] + + -h, --help + Print help (see a summary with '-h') + +Logging: + --log.persistent + The flag to enable persistent logs + + --log.directory + The path to put log files in + + [default: /reth/logs] + + --log.journald + Log events to journald + + --log.filter + The filter to use for logs written to the log file + + [default: error] + +Display: + -v, --verbosity... 
+ Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output +``` + +## `reth db list` + +Lists the contents of a table + +```bash +$ reth db list --help + +Usage: reth db list [OPTIONS]
+
+Arguments:
+  <TABLE>
+ The table name Options: --datadir @@ -116,6 +189,11 @@ Options: [default: default] + -s, --skip + Skip first N entries + + [default: 0] + --chain The chain this node is running. @@ -128,6 +206,17 @@ Options: [default: mainnet] + -r, --reverse + Reverse the order of the entries. If enabled last table entries are read + + -l, --len + How many items to take from the walker + + [default: 5] + + -j, --json + Dump as JSON instead of using TUI + -h, --help Print help (see a summary with '-h') @@ -238,18 +327,14 @@ Display: Silence all log output ``` -## `reth db list` +## `reth db drop` -Lists the contents of a table +Deletes all database entries ```bash -$ reth db list --help - -Usage: reth db list [OPTIONS]
+$ reth db drop --help -Arguments: -
- The table name +Usage: reth db drop [OPTIONS] Options: --datadir @@ -263,11 +348,6 @@ Options: [default: default] - -s, --skip - Skip first N entries - - [default: 0] - --chain The chain this node is running. @@ -280,17 +360,6 @@ Options: [default: mainnet] - -r, --reverse - Reverse the order of the entries. If enabled last table entries are read - - -l, --len - How many items to take from the walker - - [default: 5] - - -j, --json - Dump as JSON instead of using TUI - -h, --help Print help (see a summary with '-h') @@ -325,14 +394,18 @@ Display: Silence all log output ``` -## `reth db path` +## `reth db clear` -Returns the full database path +Deletes all table entries ```bash -$ reth db path --help +$ reth db clear --help -Usage: reth db path [OPTIONS] +Usage: reth db clear [OPTIONS]
+
+Arguments:
+  <TABLE>
+ Table name Options: --datadir @@ -392,14 +465,14 @@ Display: Silence all log output ``` -## `reth db stats` +## `reth db version` -Lists all the tables, their entry count and their size +Lists current and local database versions ```bash -$ reth db stats --help +$ reth db version --help -Usage: reth db stats [OPTIONS] +Usage: reth db version [OPTIONS] Options: --datadir @@ -459,14 +532,14 @@ Display: Silence all log output ``` -## `reth db version` +## `reth db path` -Lists current and local database versions +Returns the full database path ```bash -$ reth db version --help +$ reth db path --help -Usage: reth db version [OPTIONS] +Usage: reth db path [OPTIONS] Options: --datadir From adce7eae0e10eeaf72773dc62cf5ce786e484768 Mon Sep 17 00:00:00 2001 From: int88 <106391185+int88@users.noreply.github.com> Date: Thu, 27 Jul 2023 01:48:06 +0800 Subject: [PATCH 260/722] test: cover index storage history stage with stage_test_suite_ext tests (#3898) --- .../src/stages/index_storage_history.rs | 156 +++++++++++++++++- 1 file changed, 153 insertions(+), 3 deletions(-) diff --git a/crates/stages/src/stages/index_storage_history.rs b/crates/stages/src/stages/index_storage_history.rs index 945bafc5f33a..a17c5f14e7c9 100644 --- a/crates/stages/src/stages/index_storage_history.rs +++ b/crates/stages/src/stages/index_storage_history.rs @@ -73,17 +73,29 @@ mod tests { use std::collections::BTreeMap; use super::*; - use crate::test_utils::TestTransaction; + use crate::test_utils::{ + stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, TestRunnerError, + TestTransaction, UnwindStageTestRunner, + }; + use itertools::Itertools; use reth_db::{ + cursor::DbCursorRO, models::{ + sharded_key, storage_sharded_key::{StorageShardedKey, NUM_OF_INDICES_IN_SHARD}, BlockNumberAddress, ShardedKey, StoredBlockBodyIndices, }, tables, - transaction::DbTxMut, + transaction::{DbTx, DbTxMut}, BlockNumberList, }; - use reth_primitives::{hex_literal::hex, StorageEntry, H160, H256, MAINNET, U256}; + use reth_interfaces::test_utils::{ + generators, + generators::{random_block_range, random_contract_account_range, random_transition_range}, + }; + use reth_primitives::{ + hex_literal::hex, Address, BlockNumber, StorageEntry, H160, H256, MAINNET, U256, + }; const ADDRESS: H160 = H160(hex!("0000000000000000000000000000000000000001")); const STORAGE_KEY: H256 = @@ -368,4 +380,142 @@ mod tests { ]) ); } + + stage_test_suite_ext!(IndexStorageHistoryTestRunner, index_storage_history); + + struct IndexStorageHistoryTestRunner { + pub(crate) tx: TestTransaction, + commit_threshold: u64, + } + + impl Default for IndexStorageHistoryTestRunner { + fn default() -> Self { + Self { tx: TestTransaction::default(), commit_threshold: 1000 } + } + } + + impl StageTestRunner for IndexStorageHistoryTestRunner { + type S = IndexStorageHistoryStage; + + fn tx(&self) -> &TestTransaction { + &self.tx + } + + fn stage(&self) -> Self::S { + Self::S { commit_threshold: self.commit_threshold } + } + } + + impl ExecuteStageTestRunner for IndexStorageHistoryTestRunner { + type Seed = (); + + fn seed_execution(&mut self, input: ExecInput) -> Result { + let stage_process = input.checkpoint().block_number; + let start = stage_process + 1; + let end = input.target(); + let mut rng = generators::rng(); + + let num_of_accounts = 31; + let accounts = random_contract_account_range(&mut rng, &mut (0..num_of_accounts)) + .into_iter() + .collect::>(); + + let blocks = random_block_range(&mut rng, start..=end, H256::zero(), 0..3); + + let (transitions, _) = 
random_transition_range( + &mut rng, + blocks.iter(), + accounts.into_iter().map(|(addr, acc)| (addr, (acc, Vec::new()))), + 0..3, + 0..256, + ); + + // add block changeset from block 1. + self.tx.insert_transitions(transitions, Some(start))?; + + Ok(()) + } + + fn validate_execution( + &self, + input: ExecInput, + output: Option, + ) -> Result<(), TestRunnerError> { + if let Some(output) = output { + let start_block = input.next_block(); + let end_block = output.checkpoint.block_number; + if start_block > end_block { + return Ok(()) + } + + assert_eq!( + output, + ExecOutput { checkpoint: StageCheckpoint::new(input.target()), done: true } + ); + + let provider = self.tx.inner(); + let mut changeset_cursor = + provider.tx_ref().cursor_read::()?; + + let storage_transitions = changeset_cursor + .walk_range(BlockNumberAddress::range(start_block..=end_block))? + .try_fold( + BTreeMap::new(), + |mut storages: BTreeMap<(Address, H256), Vec>, + entry| + -> Result<_, TestRunnerError> { + let (index, storage) = entry?; + storages + .entry((index.address(), storage.key)) + .or_default() + .push(index.block_number()); + Ok(storages) + }, + )?; + + let mut result = BTreeMap::new(); + for (partial_key, indices) in storage_transitions { + // chunk indices and insert them in shards of N size. + let mut chunks = indices + .iter() + .chunks(sharded_key::NUM_OF_INDICES_IN_SHARD) + .into_iter() + .map(|chunks| chunks.map(|i| *i as usize).collect::>()) + .collect::>(); + let last_chunk = chunks.pop(); + + chunks.into_iter().for_each(|list| { + result.insert( + StorageShardedKey::new( + partial_key.0, + partial_key.1, + *list.last().expect("Chuck does not return empty list") + as BlockNumber, + ), + list, + ); + }); + + if let Some(last_list) = last_chunk { + result.insert( + StorageShardedKey::new(partial_key.0, partial_key.1, u64::MAX), + last_list, + ); + }; + } + + let table = cast(self.tx.table::().unwrap()); + assert_eq!(table, result); + } + Ok(()) + } + } + + impl UnwindStageTestRunner for IndexStorageHistoryTestRunner { + fn validate_unwind(&self, _input: UnwindInput) -> Result<(), TestRunnerError> { + let table = self.tx.table::().unwrap(); + assert!(table.is_empty()); + Ok(()) + } + } } From 9adab0ba7c1624946ce0e6174a58be74619e1256 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 26 Jul 2023 18:51:31 +0100 Subject: [PATCH 261/722] Revert "test(ethereum): ignore invalid string sequence in ethereum state tests (#3307)" (#3937) --- Cargo.lock | 10 ---------- testing/ef-tests/Cargo.toml | 3 +-- testing/ef-tests/src/models.rs | 14 -------------- 3 files changed, 1 insertion(+), 26 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 119ae278d573..f2ff533ab902 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1742,7 +1742,6 @@ dependencies = [ "reth-rlp", "reth-stages", "serde", - "serde_bytes", "serde_json", "thiserror", "tokio", @@ -6376,15 +6375,6 @@ dependencies = [ "serde_derive", ] -[[package]] -name = "serde_bytes" -version = "0.11.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "416bda436f9aab92e02c8e10d49a15ddd339cea90b6e340fe51ed97abb548294" -dependencies = [ - "serde", -] - [[package]] name = "serde_derive" version = "1.0.171" diff --git a/testing/ef-tests/Cargo.toml b/testing/ef-tests/Cargo.toml index d415acde44c8..7145273a9310 100644 --- a/testing/ef-tests/Cargo.toml +++ b/testing/ef-tests/Cargo.toml @@ -23,5 +23,4 @@ tokio = "1.28.1" walkdir = "2.3.3" serde = "1.0.163" serde_json.workspace = true -thiserror.workspace = true -serde_bytes = "0.11.9" \ 
No newline at end of file +thiserror.workspace = true \ No newline at end of file diff --git a/testing/ef-tests/src/models.rs b/testing/ef-tests/src/models.rs index 52da1fc02599..bca586bd866b 100644 --- a/testing/ef-tests/src/models.rs +++ b/testing/ef-tests/src/models.rs @@ -36,20 +36,6 @@ pub struct BlockchainTest { #[serde(default)] /// Engine spec. pub self_engine: SealEngine, - #[serde(rename = "_info")] - #[allow(unused)] - info: BlockchainTestInfo, -} - -#[derive(Debug, PartialEq, Eq, Deserialize)] -struct BlockchainTestInfo { - #[serde(rename = "filling-rpc-server")] - #[allow(unused)] - // One test has an invalid string in this field, which breaks our CI: - // https://github.com/ethereum/tests/blob/6c252923bdd1bd5a70f680df1214f866f76839db/GeneralStateTests/stTransactionTest/ValueOverflow.json#L5 - // By using `serde_bytes::ByteBuf`, we ignore the validation of this field as a string. - // TODO(alexey): remove when `ethereum/tests` is fixed - filling_rpc_server: serde_bytes::ByteBuf, } /// A block header in an Ethereum blockchain test. From b5a44aeb3764265eb26b5c9794371710513f4875 Mon Sep 17 00:00:00 2001 From: int88 <106391185+int88@users.noreply.github.com> Date: Thu, 27 Jul 2023 07:00:38 +0800 Subject: [PATCH 262/722] feat: metric of reorg depth of blockchain tree (#3860) --- crates/blockchain-tree/src/blockchain_tree.rs | 10 +++++++++- crates/blockchain-tree/src/metrics.rs | 2 ++ crates/storage/provider/src/chain.rs | 5 +++++ 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index ce92f48b2839..8edee773badf 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1001,9 +1001,12 @@ impl BlockchainTree old: Arc::new(old_canon_chain.clone()), new: Arc::new(new_canon_chain.clone()), }; + let reorg_depth = old_canon_chain.len(); + // insert old canon chain self.insert_chain(AppendableChain::new(old_canon_chain)); - self.metrics.reorgs.increment(1); + + self.update_reorg_metrics(reorg_depth as f64); } else { // error here to confirm that we are reverting nothing from db. error!(target: "blockchain_tree", "Reverting nothing from db on block: #{:?}", block_hash); @@ -1094,6 +1097,11 @@ impl BlockchainTree } } + fn update_reorg_metrics(&mut self, reorg_depth: f64) { + self.metrics.reorgs.increment(1); + self.metrics.latest_reorg_depth.set(reorg_depth); + } + /// Update blockchain tree chains (canonical and sidechains) and sync metrics. /// /// NOTE: this method should not be called during the pipeline sync, because otherwise the sync diff --git a/crates/blockchain-tree/src/metrics.rs b/crates/blockchain-tree/src/metrics.rs index fd48307d4be2..b49ad3c5b921 100644 --- a/crates/blockchain-tree/src/metrics.rs +++ b/crates/blockchain-tree/src/metrics.rs @@ -13,6 +13,8 @@ pub struct TreeMetrics { pub canonical_chain_height: Gauge, /// The number of reorgs pub reorgs: Counter, + /// The latest reorg depth + pub latest_reorg_depth: Gauge, /// Longest sidechain height pub longest_sidechain_height: Gauge, } diff --git a/crates/storage/provider/src/chain.rs b/crates/storage/provider/src/chain.rs index b5d596c0164b..1633d360f065 100644 --- a/crates/storage/provider/src/chain.rs +++ b/crates/storage/provider/src/chain.rs @@ -136,6 +136,11 @@ impl Chain { Self { state, blocks: block_num_hash } } + /// Returns length of the chain. + pub fn len(&self) -> usize { + self.blocks.len() + } + /// Get all receipts for the given block. 
pub fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option<&[Receipt]> { let num = self.block_number(block_hash)?; From 72ab361d67b2cddf767956f9a9d93805c3c3fb64 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 27 Jul 2023 08:44:05 +0200 Subject: [PATCH 263/722] feat: add eip4844 tx type id (#3928) --- crates/primitives/src/transaction/mod.rs | 13 +++++++------ crates/primitives/src/transaction/tx_type.rs | 4 ++++ 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 5a22b60f9bca..92df183f2ba8 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1,5 +1,3 @@ -use std::mem; - use crate::{ compression::{TRANSACTION_COMPRESSOR, TRANSACTION_DECOMPRESSOR}, keccak256, Address, Bytes, ChainId, TxHash, H256, @@ -15,6 +13,7 @@ use reth_rlp::{ }; use serde::{Deserialize, Serialize}; pub use signature::Signature; +use std::mem; pub use tx_type::{TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, LEGACY_TX_TYPE_ID}; mod access_list; @@ -265,10 +264,12 @@ pub struct TxEip4844 { pub access_list: AccessList, /// It contains a vector of fixed size hash(32 bytes) - pub blob_hashes: Vec, + pub blob_versioned_hashes: Vec, /// Max fee per data gas - pub max_fee_per_blob: u128, + /// + /// aka BlobFeeCap + pub max_fee_per_blob_gas: u128, /// Input has two uses depending if transaction is Create or Call (if `to` field is None or /// Some). pub init: An unlimited size byte array specifying the @@ -291,8 +292,8 @@ impl TxEip4844 { mem::size_of::() + // value self.access_list.size() + // access_list self.input.len() + // input - self.blob_hashes.capacity() * mem::size_of::() + // blob hashes size - mem::size_of::() // blob fee cap + self.blob_versioned_hashes.capacity() * mem::size_of::() + // blob hashes size + mem::size_of::() // max_fee_per_data_gas } } diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index 428e06f45347..c0a6b71a0e80 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -12,6 +12,10 @@ pub const EIP2930_TX_TYPE_ID: u8 = 1; /// Identifier for [TxEip1559](crate::TxEip1559) transaction. pub const EIP1559_TX_TYPE_ID: u8 = 2; +/// Identifier for [TxEip4844](crate::TxEip4844) transaction. 
+#[allow(unused)] +pub(crate) const EIP4844_TX_TYPE_ID: u8 = 3; + /// Transaction Type #[derive_arbitrary(compact)] #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Serialize, Deserialize)] From f98b1524bba6ef3e48fbd421eb540ef363db8b27 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 27 Jul 2023 10:15:24 +0200 Subject: [PATCH 264/722] feat: add fcu v3 skeleton (#3940) --- crates/consensus/beacon/src/engine/message.rs | 1 + crates/rpc/rpc-api/src/engine.rs | 18 ++++++++++ crates/rpc/rpc-engine-api/src/engine_api.rs | 33 ++++++++++++++++++- crates/rpc/rpc-engine-api/src/message.rs | 6 ++++ .../rpc/rpc-types/src/eth/engine/payload.rs | 19 +++++++---- 5 files changed, 69 insertions(+), 8 deletions(-) diff --git a/crates/consensus/beacon/src/engine/message.rs b/crates/consensus/beacon/src/engine/message.rs index 2e5e542aab17..76808558595f 100644 --- a/crates/consensus/beacon/src/engine/message.rs +++ b/crates/consensus/beacon/src/engine/message.rs @@ -145,6 +145,7 @@ impl Future for PendingPayloadId { /// A message for the beacon engine from other components of the node (engine RPC API invoked by the /// consensus layer). #[derive(Debug)] +#[allow(clippy::large_enum_variant)] pub enum BeaconEngineMessage { /// Message with new payload. NewPayload { diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index 65b5039965ac..9905957e91da 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -50,6 +50,16 @@ pub trait EngineApi { payload_attributes: Option, ) -> RpcResult; + /// Same as `forkchoiceUpdatedV2` but supports additional [PayloadAttributes] field. + /// + /// See also + #[method(name = "forkchoiceUpdatedV3")] + async fn fork_choice_updated_v3( + &self, + fork_choice_state: ForkchoiceState, + payload_attributes: Option, + ) -> RpcResult; + /// See also /// /// Returns the most recent version of the payload that is available in the corresponding @@ -72,6 +82,8 @@ pub trait EngineApi { /// Post Cancun payload handler which also returns a blobs bundle. /// + /// See also + /// /// Returns the most recent version of the payload that is available in the corresponding /// payload build process at the time of receiving this call. Note: /// > Provider software MAY stop the corresponding build process after serving this call. @@ -105,6 +117,12 @@ pub trait EngineApi { ) -> RpcResult; /// See also + /// + /// Note: This method will be deprecated after the cancun hardfork: + /// + /// > Consensus and execution layer clients MAY remove support of this method after Cancun. If + /// > no longer supported, this method MUST be removed from the engine_exchangeCapabilities + /// > request or response list depending on whether it is consensus or execution layer client. #[method(name = "exchangeTransitionConfigurationV1")] async fn exchange_transition_configuration( &self, diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 5cb0b9bedac2..56ddc71cdf90 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -130,6 +130,26 @@ where Ok(self.inner.beacon_consensus.fork_choice_updated(state, payload_attrs).await?) } + /// Sends a message to the beacon consensus engine to update the fork choice _with_ withdrawals, + /// but only _after_ cancun. 
+    ///
+    /// See also
+    pub async fn fork_choice_updated_v3(
+        &self,
+        state: ForkchoiceState,
+        payload_attrs: Option<PayloadAttributes>,
+    ) -> EngineApiResult<ForkchoiceUpdated> {
+        if let Some(ref attrs) = payload_attrs {
+            self.validate_withdrawals_presence(
+                EngineApiMessageVersion::V3,
+                attrs.timestamp.as_u64(),
+                attrs.withdrawals.is_some(),
+            )?;
+        }
+
+        Ok(self.inner.beacon_consensus.fork_choice_updated(state, payload_attrs).await?)
+    }
+
     /// Returns the most recent version of the payload that is available in the corresponding
     /// payload build process at the time of receiving this call.
     ///
@@ -321,7 +341,7 @@ where
                     return Err(EngineApiError::NoWithdrawalsPostShanghai)
                 }
             }
-            EngineApiMessageVersion::V2 => {
+            EngineApiMessageVersion::V2 | EngineApiMessageVersion::V3 => {
                 if is_shanghai && !has_withdrawals {
                     return Err(EngineApiError::NoWithdrawalsPostShanghai)
                 }
@@ -388,6 +408,17 @@ where
         Ok(EngineApi::fork_choice_updated_v2(self, fork_choice_state, payload_attributes).await?)
     }
 
+    /// Handler for `engine_forkchoiceUpdatedV3`
+    ///
+    /// See also
+    async fn fork_choice_updated_v3(
+        &self,
+        _fork_choice_state: ForkchoiceState,
+        _payload_attributes: Option<PayloadAttributes>,
+    ) -> RpcResult<ForkchoiceUpdated> {
+        Err(jsonrpsee_types::error::ErrorCode::MethodNotFound.into())
+    }
+
     /// Handler for `engine_getPayloadV1`
     ///
     /// Returns the most recent version of the payload that is available in the corresponding
diff --git a/crates/rpc/rpc-engine-api/src/message.rs b/crates/rpc/rpc-engine-api/src/message.rs
index 9d000ed7440f..c0d6b85d5118 100644
--- a/crates/rpc/rpc-engine-api/src/message.rs
+++ b/crates/rpc/rpc-engine-api/src/message.rs
@@ -4,5 +4,11 @@ pub enum EngineApiMessageVersion {
     /// Version 1
     V1,
     /// Version 2
+    ///
+    /// Added for shanghai hardfork.
     V2,
+    /// Version 3
+    ///
+    /// Added for cancun hardfork.
+    V3,
 }
diff --git a/crates/rpc/rpc-types/src/eth/engine/payload.rs b/crates/rpc/rpc-types/src/eth/engine/payload.rs
index 6085377bee59..f187ec662a99 100644
--- a/crates/rpc/rpc-types/src/eth/engine/payload.rs
+++ b/crates/rpc/rpc-types/src/eth/engine/payload.rs
@@ -267,6 +267,11 @@ pub struct PayloadAttributes {
     /// See
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub withdrawals: Option<Vec<Withdrawal>>,
+    /// Root of the parent beacon block enabled with V3.
+    ///
+    /// See also
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub parent_beacon_block_root: Option<H256>,
 }
 
 /// This structure contains the result of processing a payload or fork choice update.
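For orientation, this is roughly how a caller could populate the extended attributes once the V3 skeletons are filled in. A hedged sketch only: the diff above confirms `withdrawals` and `parent_beacon_block_root`; the remaining field names (`timestamp`, `prev_randao`, `suggested_fee_recipient`) and the import paths are assumed from the rest of the `PayloadAttributes` struct, which this excerpt does not show:

```rust
use reth_primitives::{Address, H256};
use reth_rpc_types::engine::PayloadAttributes;

// Assumed field set; only the last two fields are confirmed by the diff above.
fn v3_payload_attributes(parent_beacon_block_root: H256) -> PayloadAttributes {
    PayloadAttributes {
        timestamp: 1_690_000_000u64.into(),
        prev_randao: H256::zero(),
        suggested_fee_recipient: Address::zero(),
        // Post-Shanghai attributes must carry withdrawals (possibly empty), which
        // `validate_withdrawals_presence` enforces for V2 and V3 alike.
        withdrawals: Some(vec![]),
        // New in V3: the parent beacon block root (EIP-4788).
        parent_beacon_block_root: Some(parent_beacon_block_root),
    }
}
```

Until the skeletons are implemented, both `engine_forkchoiceUpdatedV3` and `engine_getPayloadV3` answer with `MethodNotFound`, so this is forward-looking wiring only.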
@@ -347,25 +352,25 @@ impl From for PayloadStatusEnum { #[serde(tag = "status", rename_all = "SCREAMING_SNAKE_CASE")] pub enum PayloadStatusEnum { /// VALID is returned by the engine API in the following calls: - /// - newPayloadV1: if the payload was already known or was just validated and executed - /// - forkchoiceUpdateV1: if the chain accepted the reorg (might ignore if it's stale) + /// - newPayload: if the payload was already known or was just validated and executed + /// - forkchoiceUpdate: if the chain accepted the reorg (might ignore if it's stale) Valid, /// INVALID is returned by the engine API in the following calls: - /// - newPayloadV1: if the payload failed to execute on top of the local chain - /// - forkchoiceUpdateV1: if the new head is unknown, pre-merge, or reorg to it fails + /// - newPayload: if the payload failed to execute on top of the local chain + /// - forkchoiceUpdate: if the new head is unknown, pre-merge, or reorg to it fails Invalid { #[serde(rename = "validationError")] validation_error: String, }, /// SYNCING is returned by the engine API in the following calls: - /// - newPayloadV1: if the payload was accepted on top of an active sync - /// - forkchoiceUpdateV1: if the new head was seen before, but not part of the chain + /// - newPayload: if the payload was accepted on top of an active sync + /// - forkchoiceUpdate: if the new head was seen before, but not part of the chain Syncing, /// ACCEPTED is returned by the engine API in the following calls: - /// - newPayloadV1: if the payload was accepted, but not processed (side chain) + /// - newPayload: if the payload was accepted, but not processed (side chain) Accepted, } From 0147e5033001069f258aa1e03fa1a7166f989e4c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 27 Jul 2023 11:56:53 +0200 Subject: [PATCH 265/722] feat: more blobtx functions (#3943) --- crates/primitives/src/transaction/mod.rs | 40 +++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 92df183f2ba8..1a0a752322eb 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1,5 +1,6 @@ use crate::{ compression::{TRANSACTION_COMPRESSOR, TRANSACTION_DECOMPRESSOR}, + constants::eip4844::DATA_GAS_PER_BLOB, keccak256, Address, Bytes, ChainId, TxHash, H256, }; pub use access_list::{AccessList, AccessListItem, AccessListWithGasUsed}; @@ -165,12 +166,16 @@ pub struct TxEip1559 { /// As ethereum circulation is around 120mil eth as of 2022 that is around /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: /// 340282366920938463463374607431768211455 + /// + /// This is also known as `GasFeeCap` pub max_fee_per_gas: u128, /// Max Priority fee that transaction is paying /// /// As ethereum circulation is around 120mil eth as of 2022 that is around /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: /// 340282366920938463463374607431768211455 + /// + /// This is also known as `GasTipCap` pub max_priority_fee_per_gas: u128, /// The 160-bit address of the message call’s recipient or, for a contract creation /// transaction, ∅, used here to denote the only member of B0 ; formally Tt. 
@@ -214,6 +219,8 @@ impl TxEip1559 { } } +/// [EIP-4844 Blob Transaction](https://eips.ethereum.org/EIPS/eip-4844#blob-transaction) +/// /// A transaction with blob hashes and max blob fee #[main_codec] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default)] @@ -237,12 +244,16 @@ pub struct TxEip4844 { /// As ethereum circulation is around 120mil eth as of 2022 that is around /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: /// 340282366920938463463374607431768211455 + /// + /// This is also known as `GasFeeCap` pub max_fee_per_gas: u128, /// Max Priority fee that transaction is paying /// /// As ethereum circulation is around 120mil eth as of 2022 that is around /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: /// 340282366920938463463374607431768211455 + /// + /// This is also known as `GasTipCap` pub max_priority_fee_per_gas: u128, /// The 160-bit address of the message call’s recipient or, for a contract creation /// transaction, ∅, used here to denote the only member of B0 ; formally Tt. @@ -280,6 +291,33 @@ pub struct TxEip4844 { } impl TxEip4844 { + /// Returns the effective gas price for the given `base_fee`. + /// + /// Note: this is the same as [TxEip1559::effective_gas_price]. + pub fn effective_gas_price(&self, base_fee: Option) -> u128 { + match base_fee { + None => self.max_fee_per_gas, + Some(base_fee) => { + // if the tip is greater than the max priority fee per gas, set it to the max + // priority fee per gas + base fee + let tip = self.max_fee_per_gas.saturating_sub(base_fee as u128); + if tip > self.max_priority_fee_per_gas { + self.max_priority_fee_per_gas + base_fee as u128 + } else { + // otherwise return the max fee per gas + self.max_fee_per_gas + } + } + } + } + + /// Returns the total gas for all blobs in this transaction. + #[inline] + pub fn blob_gas(&self) -> u64 { + // SAFETY: we don't expect u64::MAX / DATA_GAS_PER_BLOB hashes in a single transaction + self.blob_versioned_hashes.len() as u64 * DATA_GAS_PER_BLOB + } + /// Calculates a heuristic for the in-memory size of the [TxEip4844] transaction. #[inline] pub fn size(&self) -> usize { @@ -849,7 +887,7 @@ impl TxEip1559 { Some(base_fee) => { // if the tip is greater than the max priority fee per gas, set it to the max // priority fee per gas + base fee - let tip = self.max_fee_per_gas - base_fee as u128; + let tip = self.max_fee_per_gas.saturating_sub(base_fee as u128); if tip > self.max_priority_fee_per_gas { self.max_priority_fee_per_gas + base_fee as u128 } else { From 4ab924c5d361bbfdcdad9f997d16d67b4a1730b7 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Thu, 27 Jul 2023 12:31:01 +0200 Subject: [PATCH 266/722] feat(doc): Document `JsTracerBuilder` (#3949) --- crates/rpc/rpc-testing-util/src/debug.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/crates/rpc/rpc-testing-util/src/debug.rs b/crates/rpc/rpc-testing-util/src/debug.rs index 2aad67e1754f..aedb83ac16fa 100644 --- a/crates/rpc/rpc-testing-util/src/debug.rs +++ b/crates/rpc/rpc-testing-util/src/debug.rs @@ -40,11 +40,20 @@ impl DebugApiExt for T { /// A helper type that can be used to build a javascript tracer. #[derive(Debug, Clone, Default)] pub struct JsTracerBuilder { + /// `setup_body` is invoked once at the beginning, during the construction of a given + /// transaction. setup_body: Option, + /// `fault_body` is invoked when an error happens during the execution of an opcode which + /// wasn't reported in step. 
fault_body: Option,
+ /// `result_body` returns a JSON-serializable value to the RPC caller.
 result_body: Option,
+ /// `enter_body` is invoked when stepping into an internal call.
 enter_body: Option,
+ /// `step_body` is called for each step of the EVM, or when an error occurs, as the specified
+ /// transaction is traced.
 step_body: Option,
+ /// `exit_body` is invoked when stepping out of an internal call.
 exit_body: Option,
 }

From f577e147807a783438a3f16aad968b4396274483 Mon Sep 17 00:00:00 2001
From: Peter Davies
Date: Thu, 27 Jul 2023 13:25:49 +0100
Subject: [PATCH 267/722] refactor(storage): historical state lookup (better
 comments) (#3867)

Co-authored-by: Alexey Shekhirin
Co-authored-by: Matthias Seitz
---
 .../src/providers/state/historical.rs | 81 ++++++++++---------
 1 file changed, 42 insertions(+), 39 deletions(-)

diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs
index f62ab7b98c17..458a4c6421ea 100644
--- a/crates/storage/provider/src/providers/state/historical.rs
+++ b/crates/storage/provider/src/providers/state/historical.rs
@@ -5,8 +5,10 @@ use crate::{
 use reth_db::{
 cursor::{DbCursorRO, DbDupCursorRO},
 models::{storage_sharded_key::StorageShardedKey, ShardedKey},
+ table::Table,
 tables,
 transaction::DbTx,
+ BlockNumberList,
 };
 use reth_interfaces::Result;
 use reth_primitives::{
@@ -17,11 +19,11 @@ use std::marker::PhantomData;

 /// State provider for a given transition id which takes a tx reference.
 ///
 /// Historical state provider reads the following tables:
-/// [tables::AccountHistory]
-/// [tables::Bytecodes]
-/// [tables::StorageHistory]
-/// [tables::AccountChangeSet]
-/// [tables::StorageChangeSet]
+/// - [tables::AccountHistory]
+/// - [tables::Bytecodes]
+/// - [tables::StorageHistory]
+/// - [tables::AccountChangeSet]
+/// - [tables::StorageChangeSet]
 pub struct HistoricalStateProviderRef<'a, 'b, TX: DbTx<'a>> {
 /// Transaction
 tx: &'b TX,
@@ -32,7 +34,7 @@ pub struct HistoricalStateProviderRef<'a, 'b, TX: DbTx<'a>> {
 }

 pub enum HistoryInfo {
- NotWritten,
+ NotYetWritten,
 InChangeset(u64),
 InPlainState,
 }
@@ -47,24 +49,7 @@ impl<'a, 'b, TX: DbTx<'a>> HistoricalStateProviderRef<'a, 'b, TX> {
 pub fn account_history_lookup(&self, address: Address) -> Result<HistoryInfo> {
 // history key to search IntegerList of block number changesets.
 let history_key = ShardedKey::new(address, self.block_number);
- let mut cursor = self.tx.cursor_read::<tables::AccountHistory>()?;
-
- if let Some(chunk) =
- cursor.seek(history_key)?.filter(|(key, _)| key.key == address).map(|x| x.1 .0)
- {
- let chunk = chunk.enable_rank();
- let rank = chunk.rank(self.block_number as usize);
- if rank == 0 && !cursor.prev()?.is_some_and(|(key, _)| key.key == address) {
- return Ok(HistoryInfo::NotWritten)
- }
- if rank < chunk.len() {
- Ok(HistoryInfo::InChangeset(chunk.select(rank) as u64))
- } else {
- Ok(HistoryInfo::InPlainState)
- }
- } else {
- Ok(HistoryInfo::NotWritten)
- }
+ self.history_info::<tables::AccountHistory, _>(history_key, |key| key.key == address)
 }

 /// Lookup a storage key in the StorageHistory table
@@ -75,29 +60,47 @@ impl<'a, 'b, TX: DbTx<'a>> HistoricalStateProviderRef<'a, 'b, TX> {
 ) -> Result<HistoryInfo> {
 // history key to search IntegerList of block number changesets.
 let history_key = StorageShardedKey::new(address, storage_key, self.block_number);
- let mut cursor = self.tx.cursor_read::<tables::StorageHistory>()?;
+ self.history_info::<tables::StorageHistory, _>(history_key, |key| {
+ key.address == address && key.sharded_key.key == storage_key
+ })
+ }

- if let Some(chunk) = cursor
- .seek(history_key)?
- .filter(|(key, _)| key.address == address && key.sharded_key.key == storage_key)
- .map(|x| x.1 .0)
- {
+
+ fn history_info<T, K>(&self, key: K, key_filter: impl Fn(&K) -> bool) -> Result<HistoryInfo>
+ where
+ T: Table<Key = K, Value = BlockNumberList>,
+ {
+ let mut cursor = self.tx.cursor_read::<T>()?;
+
+ // Lookup the history chunk in the history index. If the key does not appear in the
+ // index, the first chunk for the next key will be returned, so we filter out chunks
+ // that have a different key.
+ if let Some(chunk) = cursor.seek(key)?.filter(|(key, _)| key_filter(key)).map(|x| x.1 .0) {
 let chunk = chunk.enable_rank();
+
+ // Get the rank of the first entry after our block.
 let rank = chunk.rank(self.block_number as usize);
- if rank == 0 &&
- !cursor.prev()?.is_some_and(|(key, _)| {
- key.address == address && key.sharded_key.key == storage_key
- })
- {
- return Ok(HistoryInfo::NotWritten)
+
+ // If our block is before the first entry in the index chunk, it might be before
+ // the first write ever. To check, we look at the previous entry and check if the
+ // key is the same.
+ // This check is worth it: the `cursor.prev()` call is rarely triggered (the `if`
+ // short-circuits), and when the check passes we save a full seek into the
+ // changeset/plain state table.
+ if rank == 0 && !cursor.prev()?.is_some_and(|(key, _)| key_filter(&key)) {
+ // The key is written to, but only after our block.
+ return Ok(HistoryInfo::NotYetWritten)
 }
 if rank < chunk.len() {
+ // The chunk contains an entry for a write after our block, return it.
 Ok(HistoryInfo::InChangeset(chunk.select(rank) as u64))
 } else {
+ // The chunk does not contain an entry for a write after our block. This can only
+ // happen if this is the last chunk, so we need to look in the plain state.
 Ok(HistoryInfo::InPlainState)
 }
 } else {
- Ok(HistoryInfo::NotWritten)
+ // The key has not been written to at all.
+ Ok(HistoryInfo::NotYetWritten)
 }
 }
 }
@@ -106,7 +109,7 @@ impl<'a, 'b, TX: DbTx<'a>> AccountReader for HistoricalStateProviderRef<'a, 'b,
 /// Get basic account information.
 fn basic_account(&self, address: Address) -> Result<Option<Account>> {
 match self.account_history_lookup(address)? {
- HistoryInfo::NotWritten => Ok(None),
+ HistoryInfo::NotYetWritten => Ok(None),
 HistoryInfo::InChangeset(changeset_block_number) => Ok(self
 .tx
 .cursor_dup_read::<tables::AccountChangeSet>()?
@@ -152,7 +155,7 @@ impl<'a, 'b, TX: DbTx<'a>> StateProvider for HistoricalStateProviderRef<'a, 'b,
 /// Get storage.
 fn storage(&self, address: Address, storage_key: StorageKey) -> Result<Option<StorageValue>> {
 match self.storage_history_lookup(address, storage_key)? {
- HistoryInfo::NotWritten => Ok(None),
+ HistoryInfo::NotYetWritten => Ok(None),
 HistoryInfo::InChangeset(changeset_block_number) => Ok(Some(
 self.tx
 .cursor_dup_read::<tables::StorageChangeSet>()?
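The lookup above is easier to follow with the index shape in mind: each sharded history key maps to a compressed, sorted list of block numbers at which the key was written, and the rank query finds the first write at or after the target block. A self-contained sketch of the same decision, using a plain sorted slice in place of the `BlockNumberList` chunk (the `partition_point` stand-in and the flattened, shard-free list are simplifications of the sharded index):

```rust
/// Outcome of a history-index lookup, mirroring `HistoryInfo` from the patch above.
#[derive(Debug, PartialEq)]
enum HistoryInfo {
    NotYetWritten,
    InChangeset(u64),
    InPlainState,
}

/// `writes` stands in for the complete, sorted list of block numbers at which
/// a key was written (the real index shards and compresses this list).
fn history_info(writes: &[u64], block_number: u64) -> HistoryInfo {
    // Rank of the first write at or after our block; `partition_point` plays
    // the role of `chunk.rank` here.
    let rank = writes.partition_point(|&b| b < block_number);
    if rank == 0 {
        // Every recorded write is at or after our block (or there is none), so
        // the key had no value yet at our block. The patch additionally has to
        // peek at the previous shard before concluding this.
        HistoryInfo::NotYetWritten
    } else if rank < writes.len() {
        // A write happened after our block; the changeset entry stored at that
        // block holds the value the key had at our block.
        HistoryInfo::InChangeset(writes[rank])
    } else {
        // No write after our block: the current plain-state value still applies.
        HistoryInfo::InPlainState
    }
}

fn main() {
    let writes = [100, 200, 300];
    assert_eq!(history_info(&writes, 50), HistoryInfo::NotYetWritten);
    assert_eq!(history_info(&writes, 150), HistoryInfo::InChangeset(200));
    assert_eq!(history_info(&writes, 350), HistoryInfo::InPlainState);
}
```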
From 717bad8b6a1e3ac1647fd3645186c6ddc3f36c3f Mon Sep 17 00:00:00 2001
From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com>
Date: Thu, 27 Jul 2023 16:09:28 +0200
Subject: [PATCH 268/722] feat(rpc): perform js inspector on
 `spawn_with_call_at` async tracing task (#3957)

---
 crates/rpc/rpc/src/debug.rs | 29 +++++++++++++----------------
 1 file changed, 13 insertions(+), 16 deletions(-)

diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs
index 3c2da93f61db..fa29fbce3985 100644
--- a/crates/rpc/rpc/src/debug.rs
+++ b/crates/rpc/rpc/src/debug.rs
@@ -2,8 +2,7 @@ use crate::{
 eth::{
 error::{EthApiError, EthResult},
 revm_utils::{
- clone_into_empty_db, inspect, prepare_call_env, replay_transactions_until,
- result_output, EvmOverrides,
+ clone_into_empty_db, inspect, replay_transactions_until, result_output, EvmOverrides,
 },
 EthTransactions, TransactionSource,
 },
@@ -266,18 +265,10 @@ where

 // for JS tracing we need to set up all async work before we can start tracing
 // because JSTracer and all JS types are not Send
- let (cfg, block_env, at) = self.inner.eth_api.evm_env_at(at).await?;
+ let (_, _, at) = self.inner.eth_api.evm_env_at(at).await?;
 let state = self.inner.eth_api.state_at(at)?;
- let mut db = SubState::new(State::new(state));
+ let db = SubState::new(State::new(state));
 let has_state_overrides = overrides.has_state();
- let env = prepare_call_env(
- cfg,
- block_env,
- call,
- self.inner.eth_api.call_gas_limit(),
- &mut db,
- overrides,
- )?;

 // If the caller provided state overrides we need to clone the DB so the js
 // service has access to these modifications
@@ -288,11 +279,17 @@ where

 let to_db_service = self.spawn_js_trace_service(at, maybe_override_db)?;

- let mut inspector = JsInspector::new(code, config, to_db_service)?;
- let (res, env) = inspect(db, env, &mut inspector)?;
+ let res = self
+ .inner
+ .eth_api
+ .spawn_with_call_at(call, at, overrides, move |db, env| {
+ let mut inspector = JsInspector::new(code, config, to_db_service)?;
+ let (res, _) = inspect(db, env.clone(), &mut inspector)?;
+ Ok(inspector.json_result(res, &env)?)
+ }) + .await?; - let result = inspector.json_result(res, &env)?; - Ok(GethTrace::JS(result)) + Ok(GethTrace::JS(res)) } } } From fc431bc545fb2def2b956c6524afe0c4494b4d2f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 27 Jul 2023 17:28:16 +0200 Subject: [PATCH 269/722] chore: make auth-port default (#3962) --- bin/reth/src/args/rpc_server_args.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bin/reth/src/args/rpc_server_args.rs b/bin/reth/src/args/rpc_server_args.rs index d05ce42b431e..7e46c36ce779 100644 --- a/bin/reth/src/args/rpc_server_args.rs +++ b/bin/reth/src/args/rpc_server_args.rs @@ -108,8 +108,8 @@ pub struct RpcServerArgs { pub auth_addr: Option, /// Auth server port to listen on - #[arg(long = "authrpc.port")] - pub auth_port: Option, + #[arg(long = "authrpc.port", default_value_t = constants::DEFAULT_AUTH_PORT)] + pub auth_port: u16, /// Path to a JWT secret to use for authenticated RPC endpoints #[arg(long = "authrpc.jwtsecret", value_name = "PATH", global = true, required = false)] @@ -358,7 +358,7 @@ impl RpcServerArgs { { let socket_address = SocketAddr::new( self.auth_addr.unwrap_or(IpAddr::V4(Ipv4Addr::LOCALHOST)), - self.auth_port.unwrap_or(constants::DEFAULT_AUTH_PORT), + self.auth_port, ); reth_rpc_builder::auth::launch( @@ -459,7 +459,7 @@ impl RpcServerArgs { fn auth_server_config(&self, jwt_secret: JwtSecret) -> Result { let address = SocketAddr::new( self.auth_addr.unwrap_or(IpAddr::V4(Ipv4Addr::LOCALHOST)), - self.auth_port.unwrap_or(constants::DEFAULT_AUTH_PORT), + self.auth_port, ); Ok(AuthServerConfig::builder(jwt_secret).socket_addr(address).build()) From 2aae8c82e7b82421b353d5557bf512a69f62756f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 27 Jul 2023 18:13:45 +0200 Subject: [PATCH 270/722] chore: move some functions to Cli struct (#3961) --- Cargo.toml | 1 + bin/reth/src/cli.rs | 93 +++++++++++++++++++++++---------------- crates/tracing/Cargo.toml | 2 +- crates/tracing/src/lib.rs | 1 + 4 files changed, 58 insertions(+), 39 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index ff1fe7c89eeb..ee547d2256a4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -110,6 +110,7 @@ ethers-middleware = { version = "2.0.8", default-features = false } ## misc bytes = "1.4" tracing = "0.1.0" +tracing-appender = "0.2" thiserror = "1.0.37" serde_json = "1.0.94" serde = { version = "1.0", default-features = false } diff --git a/bin/reth/src/cli.rs b/bin/reth/src/cli.rs index c32b1ae17ed2..ce5d4a42ca7c 100644 --- a/bin/reth/src/cli.rs +++ b/bin/reth/src/cli.rs @@ -14,33 +14,64 @@ use reth_tracing::{ BoxedLayer, FileWorkerGuard, }; -/// Parse CLI options, set up logging and run the chosen command. 
-pub fn run() -> eyre::Result<()> { - let opt = Cli::parse(); - - let mut layers = vec![reth_tracing::stdout(opt.verbosity.directive())]; - let _guard = opt.logs.layer()?.map(|(layer, guard)| { - layers.push(layer); - guard - }); - - reth_tracing::init(layers); - - let runner = CliRunner::default(); - - match opt.command { - Commands::Node(command) => runner.run_command_until_exit(|ctx| command.execute(ctx)), - Commands::Init(command) => runner.run_blocking_until_ctrl_c(command.execute()), - Commands::Import(command) => runner.run_blocking_until_ctrl_c(command.execute()), - Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute()), - Commands::Stage(command) => runner.run_blocking_until_ctrl_c(command.execute()), - Commands::P2P(command) => runner.run_until_ctrl_c(command.execute()), - Commands::TestVectors(command) => runner.run_until_ctrl_c(command.execute()), - Commands::Config(command) => runner.run_until_ctrl_c(command.execute()), - Commands::Debug(command) => runner.run_command_until_exit(|ctx| command.execute(ctx)), +/// The main reth cli interface. +/// +/// This is the entrypoint to the executable. +#[derive(Debug, Parser)] +#[command(author, version = SHORT_VERSION, long_version = LONG_VERSION, about = "Reth", long_about = None)] +pub struct Cli { + /// The command to run + #[clap(subcommand)] + command: Commands, + + #[clap(flatten)] + logs: Logs, + + #[clap(flatten)] + verbosity: Verbosity, +} + +impl Cli { + /// Execute the configured cli command. + pub fn run(self) -> eyre::Result<()> { + let _guard = self.init_tracing()?; + + let runner = CliRunner::default(); + match self.command { + Commands::Node(command) => runner.run_command_until_exit(|ctx| command.execute(ctx)), + Commands::Init(command) => runner.run_blocking_until_ctrl_c(command.execute()), + Commands::Import(command) => runner.run_blocking_until_ctrl_c(command.execute()), + Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute()), + Commands::Stage(command) => runner.run_blocking_until_ctrl_c(command.execute()), + Commands::P2P(command) => runner.run_until_ctrl_c(command.execute()), + Commands::TestVectors(command) => runner.run_until_ctrl_c(command.execute()), + Commands::Config(command) => runner.run_until_ctrl_c(command.execute()), + Commands::Debug(command) => runner.run_command_until_exit(|ctx| command.execute(ctx)), + } + } + + /// Initializes tracing with the configured options. + /// + /// If file logging is enabled, this function returns a guard that must be kept alive to ensure + /// that all logs are flushed to disk. + pub fn init_tracing(&self) -> eyre::Result> { + let mut layers = vec![reth_tracing::stdout(self.verbosity.directive())]; + let guard = self.logs.layer()?.map(|(layer, guard)| { + layers.push(layer); + guard + }); + + reth_tracing::init(layers); + Ok(guard.flatten()) } } +/// Convenience function for parsing CLI options, set up logging and run the chosen command. +#[inline] +pub fn run() -> eyre::Result<()> { + Cli::parse().run() +} + /// Commands to be executed #[derive(Debug, Subcommand)] pub enum Commands { @@ -73,20 +104,6 @@ pub enum Commands { Debug(debug_cmd::Command), } -#[derive(Debug, Parser)] -#[command(author, version = SHORT_VERSION, long_version = LONG_VERSION, about = "Reth", long_about = None)] -struct Cli { - /// The command to run - #[clap(subcommand)] - command: Commands, - - #[clap(flatten)] - logs: Logs, - - #[clap(flatten)] - verbosity: Verbosity, -} - /// The log configuration. 
#[derive(Debug, Args)] #[command(next_help_heading = "Logging")] diff --git a/crates/tracing/Cargo.toml b/crates/tracing/Cargo.toml index 444f1b23afd0..8f652da2592a 100644 --- a/crates/tracing/Cargo.toml +++ b/crates/tracing/Cargo.toml @@ -11,5 +11,5 @@ description = "tracing helpers" [dependencies] tracing.workspace = true tracing-subscriber = { version = "0.3", default-features = false, features = ["env-filter", "fmt"] } -tracing-appender = "0.2" +tracing-appender.workspace = true tracing-journald = "0.3" diff --git a/crates/tracing/src/lib.rs b/crates/tracing/src/lib.rs index 1695c336e2e4..074b6dc9f072 100644 --- a/crates/tracing/src/lib.rs +++ b/crates/tracing/src/lib.rs @@ -68,6 +68,7 @@ where /// /// The boxed layer and a guard is returned. When the guard is dropped the buffer for the log /// file is immediately flushed to disk. Any events after the guard is dropped may be missed. +#[must_use = "tracing guard must be kept alive to flush events to disk"] pub fn file( filter: EnvFilter, dir: impl AsRef, From e651a184d1b0e47cb907c9cb63523b388e36a942 Mon Sep 17 00:00:00 2001 From: Panagiotis Ganelis <50522617+PanGan21@users.noreply.github.com> Date: Thu, 27 Jul 2023 19:20:17 +0300 Subject: [PATCH 271/722] refactor(transactions): move TxEip1559, TxEip1559 and TxEip4844 to separate files (#3946) --- crates/primitives/src/transaction/eip1559.rs | 123 +++++ crates/primitives/src/transaction/eip2930.rs | 127 ++++++ crates/primitives/src/transaction/eip4844.rs | 118 +++++ crates/primitives/src/transaction/legacy.rs | 101 +++++ crates/primitives/src/transaction/mod.rs | 445 +------------------ 5 files changed, 482 insertions(+), 432 deletions(-) create mode 100644 crates/primitives/src/transaction/eip1559.rs create mode 100644 crates/primitives/src/transaction/eip2930.rs create mode 100644 crates/primitives/src/transaction/eip4844.rs create mode 100644 crates/primitives/src/transaction/legacy.rs diff --git a/crates/primitives/src/transaction/eip1559.rs b/crates/primitives/src/transaction/eip1559.rs new file mode 100644 index 000000000000..3108c6e51e69 --- /dev/null +++ b/crates/primitives/src/transaction/eip1559.rs @@ -0,0 +1,123 @@ +use super::access_list::AccessList; +use crate::{Bytes, ChainId, TransactionKind}; +use reth_codecs::{main_codec, Compact}; +use std::mem; + +/// A transaction with a priority fee ([EIP-1559](https://eips.ethereum.org/EIPS/eip-1559)). +#[main_codec] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Default)] +pub struct TxEip1559 { + /// Added as EIP-pub 155: Simple replay attack protection + pub chain_id: u64, + /// A scalar value equal to the number of transactions sent by the sender; formally Tn. + pub nonce: u64, + /// A scalar value equal to the maximum + /// amount of gas that should be used in executing + /// this transaction. This is paid up-front, before any + /// computation is done and may not be increased + /// later; formally Tg. + pub gas_limit: u64, + /// A scalar value equal to the maximum + /// amount of gas that should be used in executing + /// this transaction. This is paid up-front, before any + /// computation is done and may not be increased + /// later; formally Tg. 
+ /// + /// As ethereum circulation is around 120mil eth as of 2022 that is around + /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: + /// 340282366920938463463374607431768211455 + /// + /// This is also known as `GasFeeCap` + pub max_fee_per_gas: u128, + /// Max Priority fee that transaction is paying + /// + /// As ethereum circulation is around 120mil eth as of 2022 that is around + /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: + /// 340282366920938463463374607431768211455 + /// + /// This is also known as `GasTipCap` + pub max_priority_fee_per_gas: u128, + /// The 160-bit address of the message call’s recipient or, for a contract creation + /// transaction, ∅, used here to denote the only member of B0 ; formally Tt. + pub to: TransactionKind, + /// A scalar value equal to the number of Wei to + /// be transferred to the message call’s recipient or, + /// in the case of contract creation, as an endowment + /// to the newly created account; formally Tv. + /// + /// As ethereum circulation is around 120mil eth as of 2022 that is around + /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: + /// 340282366920938463463374607431768211455 + pub value: u128, + /// The accessList specifies a list of addresses and storage keys; + /// these addresses and storage keys are added into the `accessed_addresses` + /// and `accessed_storage_keys` global sets (introduced in EIP-2929). + /// A gas cost is charged, though at a discount relative to the cost of + /// accessing outside the list. + pub access_list: AccessList, + /// Input has two uses depending if transaction is Create or Call (if `to` field is None or + /// Some). pub init: An unlimited size byte array specifying the + /// EVM-code for the account initialisation procedure CREATE, + /// data: An unlimited size byte array specifying the + /// input data of the message call, formally Td. + pub input: Bytes, +} + +impl TxEip1559 { + /// Calculates a heuristic for the in-memory size of the [TxEip1559] transaction. 
+ #[inline] + pub fn size(&self) -> usize { + mem::size_of::() + // chain_id + mem::size_of::() + // nonce + mem::size_of::() + // gas_limit + mem::size_of::() + // max_fee_per_gas + mem::size_of::() + // max_priority_fee_per_gas + self.to.size() + // to + mem::size_of::() + // value + self.access_list.size() + // access_list + self.input.len() // input + } +} + +#[cfg(test)] +mod tests { + use super::TxEip1559; + use crate::{ + transaction::{signature::Signature, TransactionKind}, + AccessList, Address, Transaction, TransactionSigned, H256, U256, + }; + use std::str::FromStr; + + #[test] + fn recover_signer_eip1559() { + use crate::hex_literal::hex; + + let signer: Address = hex!("dd6b8b3dc6b7ad97db52f08a275ff4483e024cea").into(); + let hash: H256 = + hex!("0ec0b6a2df4d87424e5f6ad2a654e27aaeb7dac20ae9e8385cc09087ad532ee0").into(); + + let tx = Transaction::Eip1559( TxEip1559 { + chain_id: 1, + nonce: 0x42, + gas_limit: 44386, + to: TransactionKind::Call( hex!("6069a6c32cf691f5982febae4faf8a6f3ab2f0f6").into()), + value: 0, + input: hex!("a22cb4650000000000000000000000005eee75727d804a2b13038928d36f8b188945a57a0000000000000000000000000000000000000000000000000000000000000000").into(), + max_fee_per_gas: 0x4a817c800, + max_priority_fee_per_gas: 0x3b9aca00, + access_list: AccessList::default(), + }); + + let sig = Signature { + r: U256::from_str("0x840cfc572845f5786e702984c2a582528cad4b49b2a10b9db1be7fca90058565") + .unwrap(), + s: U256::from_str("0x25e7109ceb98168d95b09b18bbf6b685130e0562f233877d492b94eee0c5b6d1") + .unwrap(), + odd_y_parity: false, + }; + + let signed_tx = TransactionSigned::from_transaction_and_signature(tx, sig); + assert_eq!(signed_tx.hash(), hash, "Expected same hash"); + assert_eq!(signed_tx.recover_signer(), Some(signer), "Recovering signer should pass."); + } +} diff --git a/crates/primitives/src/transaction/eip2930.rs b/crates/primitives/src/transaction/eip2930.rs new file mode 100644 index 000000000000..ecd6ecb7ec20 --- /dev/null +++ b/crates/primitives/src/transaction/eip2930.rs @@ -0,0 +1,127 @@ +use super::access_list::AccessList; +use crate::{Bytes, ChainId, TransactionKind}; +use reth_codecs::{main_codec, Compact}; +use std::mem; + +/// Transaction with an [`AccessList`] ([EIP-2930](https://eips.ethereum.org/EIPS/eip-2930)). +#[main_codec] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Default)] +pub struct TxEip2930 { + /// Added as EIP-pub 155: Simple replay attack protection + pub chain_id: ChainId, + /// A scalar value equal to the number of transactions sent by the sender; formally Tn. + pub nonce: u64, + /// A scalar value equal to the number of + /// Wei to be paid per unit of gas for all computation + /// costs incurred as a result of the execution of this transaction; formally Tp. + /// + /// As ethereum circulation is around 120mil eth as of 2022 that is around + /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: + /// 340282366920938463463374607431768211455 + pub gas_price: u128, + /// A scalar value equal to the maximum + /// amount of gas that should be used in executing + /// this transaction. This is paid up-front, before any + /// computation is done and may not be increased + /// later; formally Tg. + pub gas_limit: u64, + /// The 160-bit address of the message call’s recipient or, for a contract creation + /// transaction, ∅, used here to denote the only member of B0 ; formally Tt. 
+ pub to: TransactionKind, + /// A scalar value equal to the number of Wei to + /// be transferred to the message call’s recipient or, + /// in the case of contract creation, as an endowment + /// to the newly created account; formally Tv. + /// + /// As ethereum circulation is around 120mil eth as of 2022 that is around + /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: + /// 340282366920938463463374607431768211455 + pub value: u128, + /// The accessList specifies a list of addresses and storage keys; + /// these addresses and storage keys are added into the `accessed_addresses` + /// and `accessed_storage_keys` global sets (introduced in EIP-2929). + /// A gas cost is charged, though at a discount relative to the cost of + /// accessing outside the list. + pub access_list: AccessList, + /// Input has two uses depending if transaction is Create or Call (if `to` field is None or + /// Some). pub init: An unlimited size byte array specifying the + /// EVM-code for the account initialisation procedure CREATE, + /// data: An unlimited size byte array specifying the + /// input data of the message call, formally Td. + pub input: Bytes, +} + +impl TxEip2930 { + /// Calculates a heuristic for the in-memory size of the [TxEip2930] transaction. + #[inline] + pub fn size(&self) -> usize { + mem::size_of::() + // chain_id + mem::size_of::() + // nonce + mem::size_of::() + // gas_price + mem::size_of::() + // gas_limit + self.to.size() + // to + mem::size_of::() + // value + self.access_list.size() + // access_list + self.input.len() // input + } +} + +#[cfg(test)] +mod tests { + use super::TxEip2930; + use crate::{ + transaction::{signature::Signature, TransactionKind}, + Address, Bytes, Transaction, TransactionSigned, U256, + }; + use bytes::BytesMut; + use reth_rlp::{Decodable, Encodable}; + + #[test] + fn test_decode_create() { + // tests that a contract creation tx encodes and decodes properly + let request = Transaction::Eip2930(TxEip2930 { + chain_id: 1u64, + nonce: 0, + gas_price: 1, + gas_limit: 2, + to: TransactionKind::Create, + value: 3, + input: Bytes::from(vec![1, 2]), + access_list: Default::default(), + }); + let signature = Signature { odd_y_parity: true, r: U256::default(), s: U256::default() }; + let tx = TransactionSigned::from_transaction_and_signature(request, signature); + + let mut encoded = BytesMut::new(); + tx.encode(&mut encoded); + assert_eq!(encoded.len(), tx.length()); + + let decoded = TransactionSigned::decode(&mut &*encoded).unwrap(); + assert_eq!(decoded, tx); + } + + #[test] + fn test_decode_call() { + let request = Transaction::Eip2930(TxEip2930 { + chain_id: 1u64, + nonce: 0, + gas_price: 1, + gas_limit: 2, + to: TransactionKind::Call(Address::default()), + value: 3, + input: Bytes::from(vec![1, 2]), + access_list: Default::default(), + }); + + let signature = Signature { odd_y_parity: true, r: U256::default(), s: U256::default() }; + + let tx = TransactionSigned::from_transaction_and_signature(request, signature); + + let mut encoded = BytesMut::new(); + tx.encode(&mut encoded); + assert_eq!(encoded.len(), tx.length()); + + let decoded = TransactionSigned::decode(&mut &*encoded).unwrap(); + assert_eq!(decoded, tx); + } +} diff --git a/crates/primitives/src/transaction/eip4844.rs b/crates/primitives/src/transaction/eip4844.rs new file mode 100644 index 000000000000..34b43e19bb66 --- /dev/null +++ b/crates/primitives/src/transaction/eip4844.rs @@ -0,0 +1,118 @@ +use super::access_list::AccessList; +use 
crate::{constants::eip4844::DATA_GAS_PER_BLOB, Bytes, ChainId, TransactionKind, H256}; +use reth_codecs::{main_codec, Compact}; +use std::mem; + +/// [EIP-4844 Blob Transaction](https://eips.ethereum.org/EIPS/eip-4844#blob-transaction) +/// +/// A transaction with blob hashes and max blob fee +#[main_codec] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Default)] +pub struct TxEip4844 { + /// Added as EIP-pub 155: Simple replay attack protection + pub chain_id: u64, + /// A scalar value equal to the number of transactions sent by the sender; formally Tn. + pub nonce: u64, + /// A scalar value equal to the maximum + /// amount of gas that should be used in executing + /// this transaction. This is paid up-front, before any + /// computation is done and may not be increased + /// later; formally Tg. + pub gas_limit: u64, + /// A scalar value equal to the maximum + /// amount of gas that should be used in executing + /// this transaction. This is paid up-front, before any + /// computation is done and may not be increased + /// later; formally Tg. + /// + /// As ethereum circulation is around 120mil eth as of 2022 that is around + /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: + /// 340282366920938463463374607431768211455 + /// + /// This is also known as `GasFeeCap` + pub max_fee_per_gas: u128, + /// Max Priority fee that transaction is paying + /// + /// As ethereum circulation is around 120mil eth as of 2022 that is around + /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: + /// 340282366920938463463374607431768211455 + /// + /// This is also known as `GasTipCap` + pub max_priority_fee_per_gas: u128, + /// The 160-bit address of the message call’s recipient or, for a contract creation + /// transaction, ∅, used here to denote the only member of B0 ; formally Tt. + pub to: TransactionKind, + /// A scalar value equal to the number of Wei to + /// be transferred to the message call’s recipient or, + /// in the case of contract creation, as an endowment + /// to the newly created account; formally Tv. + /// + /// As ethereum circulation is around 120mil eth as of 2022 that is around + /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: + /// 340282366920938463463374607431768211455 + pub value: u128, + /// The accessList specifies a list of addresses and storage keys; + /// these addresses and storage keys are added into the `accessed_addresses` + /// and `accessed_storage_keys` global sets (introduced in EIP-2929). + /// A gas cost is charged, though at a discount relative to the cost of + /// accessing outside the list. + pub access_list: AccessList, + + /// It contains a vector of fixed size hash(32 bytes) + pub blob_versioned_hashes: Vec, + + /// Max fee per data gas + /// + /// aka BlobFeeCap + pub max_fee_per_blob_gas: u128, + + /// Input has two uses depending if transaction is Create or Call (if `to` field is None or + /// Some). pub init: An unlimited size byte array specifying the + /// EVM-code for the account initialisation procedure CREATE, + /// data: An unlimited size byte array specifying the + /// input data of the message call, formally Td. + pub input: Bytes, +} + +impl TxEip4844 { + /// Returns the effective gas price for the given `base_fee`. 
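+ ///
+ /// Note: this is the same as [TxEip1559::effective_gas_price].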
+ pub fn effective_gas_price(&self, base_fee: Option) -> u128 { + match base_fee { + None => self.max_fee_per_gas, + Some(base_fee) => { + // if the tip is greater than the max priority fee per gas, set it to the max + // priority fee per gas + base fee + let tip = self.max_fee_per_gas.saturating_sub(base_fee as u128); + if tip > self.max_priority_fee_per_gas { + self.max_priority_fee_per_gas + base_fee as u128 + } else { + // otherwise return the max fee per gas + self.max_fee_per_gas + } + } + } + } + + /// Returns the total gas for all blobs in this transaction. + #[inline] + pub fn blob_gas(&self) -> u64 { + // SAFETY: we don't expect u64::MAX / DATA_GAS_PER_BLOB hashes in a single transaction + self.blob_versioned_hashes.len() as u64 * DATA_GAS_PER_BLOB + } + + /// Calculates a heuristic for the in-memory size of the [TxEip4844] transaction. + #[inline] + pub fn size(&self) -> usize { + mem::size_of::() + // chain_id + mem::size_of::() + // nonce + mem::size_of::() + // gas_limit + mem::size_of::() + // max_fee_per_gas + mem::size_of::() + // max_priority_fee_per_gas + self.to.size() + // to + mem::size_of::() + // value + self.access_list.size() + // access_list + self.input.len() + // input + self.blob_versioned_hashes.capacity() * mem::size_of::() + // blob hashes size + mem::size_of::() // max_fee_per_data_gas + } +} diff --git a/crates/primitives/src/transaction/legacy.rs b/crates/primitives/src/transaction/legacy.rs new file mode 100644 index 000000000000..cd324732bd5e --- /dev/null +++ b/crates/primitives/src/transaction/legacy.rs @@ -0,0 +1,101 @@ +use crate::{Bytes, ChainId, TransactionKind}; +use reth_codecs::{main_codec, Compact}; +use std::mem; + +/// Legacy transaction. +#[main_codec] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Default)] +pub struct TxLegacy { + /// Added as EIP-155: Simple replay attack protection + pub chain_id: Option, + /// A scalar value equal to the number of transactions sent by the sender; formally Tn. + pub nonce: u64, + /// A scalar value equal to the number of + /// Wei to be paid per unit of gas for all computation + /// costs incurred as a result of the execution of this transaction; formally Tp. + /// + /// As ethereum circulation is around 120mil eth as of 2022 that is around + /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: + /// 340282366920938463463374607431768211455 + pub gas_price: u128, + /// A scalar value equal to the maximum + /// amount of gas that should be used in executing + /// this transaction. This is paid up-front, before any + /// computation is done and may not be increased + /// later; formally Tg. + pub gas_limit: u64, + /// The 160-bit address of the message call’s recipient or, for a contract creation + /// transaction, ∅, used here to denote the only member of B0 ; formally Tt. + pub to: TransactionKind, + /// A scalar value equal to the number of Wei to + /// be transferred to the message call’s recipient or, + /// in the case of contract creation, as an endowment + /// to the newly created account; formally Tv. + /// + /// As ethereum circulation is around 120mil eth as of 2022 that is around + /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: + /// 340282366920938463463374607431768211455 + pub value: u128, + /// Input has two uses depending if transaction is Create or Call (if `to` field is None or + /// Some). 
pub init: An unlimited size byte array specifying the + /// EVM-code for the account initialisation procedure CREATE, + /// data: An unlimited size byte array specifying the + /// input data of the message call, formally Td. + pub input: Bytes, +} + +impl TxLegacy { + /// Calculates a heuristic for the in-memory size of the [TxLegacy] transaction. + #[inline] + pub fn size(&self) -> usize { + mem::size_of::>() + // chain_id + mem::size_of::() + // nonce + mem::size_of::() + // gas_price + mem::size_of::() + // gas_limit + self.to.size() + // to + mem::size_of::() + // value + self.input.len() // input + } +} + +#[cfg(test)] +mod tests { + use super::TxLegacy; + use crate::{ + transaction::{signature::Signature, TransactionKind}, + Address, Transaction, TransactionSigned, H256, U256, + }; + + #[test] + fn recover_signer_legacy() { + use crate::hex_literal::hex; + + let signer: Address = hex!("398137383b3d25c92898c656696e41950e47316b").into(); + let hash: H256 = + hex!("bb3a336e3f823ec18197f1e13ee875700f08f03e2cab75f0d0b118dabb44cba0").into(); + + let tx = Transaction::Legacy(TxLegacy { + chain_id: Some(1), + nonce: 0x18, + gas_price: 0xfa56ea00, + gas_limit: 119902, + to: TransactionKind::Call( hex!("06012c8cf97bead5deae237070f9587f8e7a266d").into()), + value: 0x1c6bf526340000u64.into(), + input: hex!("f7d8c88300000000000000000000000000000000000000000000000000000000000cee6100000000000000000000000000000000000000000000000000000000000ac3e1").into(), + }); + + let sig = Signature { + r: U256::from_be_bytes(hex!( + "2a378831cf81d99a3f06a18ae1b6ca366817ab4d88a70053c41d7a8f0368e031" + )), + s: U256::from_be_bytes(hex!( + "450d831a05b6e418724436c05c155e0a1b7b921015d0fbc2f667aed709ac4fb5" + )), + odd_y_parity: false, + }; + + let signed_tx = TransactionSigned::from_transaction_and_signature(tx, sig); + assert_eq!(signed_tx.hash(), hash, "Expected same hash"); + assert_eq!(signed_tx.recover_signer(), Some(signer), "Recovering signer should pass."); + } +} diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 1a0a752322eb..4f6fdc0335e8 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1,14 +1,13 @@ use crate::{ compression::{TRANSACTION_COMPRESSOR, TRANSACTION_DECOMPRESSOR}, - constants::eip4844::DATA_GAS_PER_BLOB, - keccak256, Address, Bytes, ChainId, TxHash, H256, + keccak256, Address, Bytes, TxHash, H256, }; pub use access_list::{AccessList, AccessListItem, AccessListWithGasUsed}; use bytes::{Buf, BytesMut}; use derive_more::{AsRef, Deref}; pub use error::InvalidTransactionError; pub use meta::TransactionMeta; -use reth_codecs::{add_arbitrary_tests, derive_arbitrary, main_codec, Compact}; +use reth_codecs::{add_arbitrary_tests, derive_arbitrary, Compact}; use reth_rlp::{ length_of_length, Decodable, DecodeError, Encodable, Header, EMPTY_LIST_CODE, EMPTY_STRING_CODE, }; @@ -17,324 +16,22 @@ pub use signature::Signature; use std::mem; pub use tx_type::{TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, LEGACY_TX_TYPE_ID}; +pub use eip1559::TxEip1559; +pub use eip2930::TxEip2930; +pub use eip4844::TxEip4844; +pub use legacy::TxLegacy; + mod access_list; +mod eip1559; +mod eip2930; +mod eip4844; mod error; +mod legacy; mod meta; mod signature; mod tx_type; pub(crate) mod util; -/// Legacy transaction. 
-#[main_codec] -#[derive(Debug, Clone, PartialEq, Eq, Hash, Default)] -pub struct TxLegacy { - /// Added as EIP-155: Simple replay attack protection - pub chain_id: Option, - /// A scalar value equal to the number of transactions sent by the sender; formally Tn. - pub nonce: u64, - /// A scalar value equal to the number of - /// Wei to be paid per unit of gas for all computation - /// costs incurred as a result of the execution of this transaction; formally Tp. - /// - /// As ethereum circulation is around 120mil eth as of 2022 that is around - /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: - /// 340282366920938463463374607431768211455 - pub gas_price: u128, - /// A scalar value equal to the maximum - /// amount of gas that should be used in executing - /// this transaction. This is paid up-front, before any - /// computation is done and may not be increased - /// later; formally Tg. - pub gas_limit: u64, - /// The 160-bit address of the message call’s recipient or, for a contract creation - /// transaction, ∅, used here to denote the only member of B0 ; formally Tt. - pub to: TransactionKind, - /// A scalar value equal to the number of Wei to - /// be transferred to the message call’s recipient or, - /// in the case of contract creation, as an endowment - /// to the newly created account; formally Tv. - /// - /// As ethereum circulation is around 120mil eth as of 2022 that is around - /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: - /// 340282366920938463463374607431768211455 - pub value: u128, - /// Input has two uses depending if transaction is Create or Call (if `to` field is None or - /// Some). pub init: An unlimited size byte array specifying the - /// EVM-code for the account initialisation procedure CREATE, - /// data: An unlimited size byte array specifying the - /// input data of the message call, formally Td. - pub input: Bytes, -} - -impl TxLegacy { - /// Calculates a heuristic for the in-memory size of the [TxLegacy] transaction. - #[inline] - fn size(&self) -> usize { - mem::size_of::>() + // chain_id - mem::size_of::() + // nonce - mem::size_of::() + // gas_price - mem::size_of::() + // gas_limit - self.to.size() + // to - mem::size_of::() + // value - self.input.len() // input - } -} - -/// Transaction with an [`AccessList`] ([EIP-2930](https://eips.ethereum.org/EIPS/eip-2930)). -#[main_codec] -#[derive(Debug, Clone, PartialEq, Eq, Hash, Default)] -pub struct TxEip2930 { - /// Added as EIP-pub 155: Simple replay attack protection - pub chain_id: ChainId, - /// A scalar value equal to the number of transactions sent by the sender; formally Tn. - pub nonce: u64, - /// A scalar value equal to the number of - /// Wei to be paid per unit of gas for all computation - /// costs incurred as a result of the execution of this transaction; formally Tp. - /// - /// As ethereum circulation is around 120mil eth as of 2022 that is around - /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: - /// 340282366920938463463374607431768211455 - pub gas_price: u128, - /// A scalar value equal to the maximum - /// amount of gas that should be used in executing - /// this transaction. This is paid up-front, before any - /// computation is done and may not be increased - /// later; formally Tg. - pub gas_limit: u64, - /// The 160-bit address of the message call’s recipient or, for a contract creation - /// transaction, ∅, used here to denote the only member of B0 ; formally Tt. 
- pub to: TransactionKind, - /// A scalar value equal to the number of Wei to - /// be transferred to the message call’s recipient or, - /// in the case of contract creation, as an endowment - /// to the newly created account; formally Tv. - /// - /// As ethereum circulation is around 120mil eth as of 2022 that is around - /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: - /// 340282366920938463463374607431768211455 - pub value: u128, - /// The accessList specifies a list of addresses and storage keys; - /// these addresses and storage keys are added into the `accessed_addresses` - /// and `accessed_storage_keys` global sets (introduced in EIP-2929). - /// A gas cost is charged, though at a discount relative to the cost of - /// accessing outside the list. - pub access_list: AccessList, - /// Input has two uses depending if transaction is Create or Call (if `to` field is None or - /// Some). pub init: An unlimited size byte array specifying the - /// EVM-code for the account initialisation procedure CREATE, - /// data: An unlimited size byte array specifying the - /// input data of the message call, formally Td. - pub input: Bytes, -} - -impl TxEip2930 { - /// Calculates a heuristic for the in-memory size of the [TxEip2930] transaction. - #[inline] - pub fn size(&self) -> usize { - mem::size_of::() + // chain_id - mem::size_of::() + // nonce - mem::size_of::() + // gas_price - mem::size_of::() + // gas_limit - self.to.size() + // to - mem::size_of::() + // value - self.access_list.size() + // access_list - self.input.len() // input - } -} - -/// A transaction with a priority fee ([EIP-1559](https://eips.ethereum.org/EIPS/eip-1559)). -#[main_codec] -#[derive(Debug, Clone, PartialEq, Eq, Hash, Default)] -pub struct TxEip1559 { - /// Added as EIP-pub 155: Simple replay attack protection - pub chain_id: u64, - /// A scalar value equal to the number of transactions sent by the sender; formally Tn. - pub nonce: u64, - /// A scalar value equal to the maximum - /// amount of gas that should be used in executing - /// this transaction. This is paid up-front, before any - /// computation is done and may not be increased - /// later; formally Tg. - pub gas_limit: u64, - /// A scalar value equal to the maximum - /// amount of gas that should be used in executing - /// this transaction. This is paid up-front, before any - /// computation is done and may not be increased - /// later; formally Tg. - /// - /// As ethereum circulation is around 120mil eth as of 2022 that is around - /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: - /// 340282366920938463463374607431768211455 - /// - /// This is also known as `GasFeeCap` - pub max_fee_per_gas: u128, - /// Max Priority fee that transaction is paying - /// - /// As ethereum circulation is around 120mil eth as of 2022 that is around - /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: - /// 340282366920938463463374607431768211455 - /// - /// This is also known as `GasTipCap` - pub max_priority_fee_per_gas: u128, - /// The 160-bit address of the message call’s recipient or, for a contract creation - /// transaction, ∅, used here to denote the only member of B0 ; formally Tt. - pub to: TransactionKind, - /// A scalar value equal to the number of Wei to - /// be transferred to the message call’s recipient or, - /// in the case of contract creation, as an endowment - /// to the newly created account; formally Tv. 
- /// - /// As ethereum circulation is around 120mil eth as of 2022 that is around - /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: - /// 340282366920938463463374607431768211455 - pub value: u128, - /// The accessList specifies a list of addresses and storage keys; - /// these addresses and storage keys are added into the `accessed_addresses` - /// and `accessed_storage_keys` global sets (introduced in EIP-2929). - /// A gas cost is charged, though at a discount relative to the cost of - /// accessing outside the list. - pub access_list: AccessList, - /// Input has two uses depending if transaction is Create or Call (if `to` field is None or - /// Some). pub init: An unlimited size byte array specifying the - /// EVM-code for the account initialisation procedure CREATE, - /// data: An unlimited size byte array specifying the - /// input data of the message call, formally Td. - pub input: Bytes, -} - -impl TxEip1559 { - /// Calculates a heuristic for the in-memory size of the [TxEip1559] transaction. - #[inline] - pub fn size(&self) -> usize { - mem::size_of::() + // chain_id - mem::size_of::() + // nonce - mem::size_of::() + // gas_limit - mem::size_of::() + // max_fee_per_gas - mem::size_of::() + // max_priority_fee_per_gas - self.to.size() + // to - mem::size_of::() + // value - self.access_list.size() + // access_list - self.input.len() // input - } -} - -/// [EIP-4844 Blob Transaction](https://eips.ethereum.org/EIPS/eip-4844#blob-transaction) -/// -/// A transaction with blob hashes and max blob fee -#[main_codec] -#[derive(Debug, Clone, PartialEq, Eq, Hash, Default)] -pub struct TxEip4844 { - /// Added as EIP-pub 155: Simple replay attack protection - pub chain_id: u64, - /// A scalar value equal to the number of transactions sent by the sender; formally Tn. - pub nonce: u64, - /// A scalar value equal to the maximum - /// amount of gas that should be used in executing - /// this transaction. This is paid up-front, before any - /// computation is done and may not be increased - /// later; formally Tg. - pub gas_limit: u64, - /// A scalar value equal to the maximum - /// amount of gas that should be used in executing - /// this transaction. This is paid up-front, before any - /// computation is done and may not be increased - /// later; formally Tg. - /// - /// As ethereum circulation is around 120mil eth as of 2022 that is around - /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: - /// 340282366920938463463374607431768211455 - /// - /// This is also known as `GasFeeCap` - pub max_fee_per_gas: u128, - /// Max Priority fee that transaction is paying - /// - /// As ethereum circulation is around 120mil eth as of 2022 that is around - /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: - /// 340282366920938463463374607431768211455 - /// - /// This is also known as `GasTipCap` - pub max_priority_fee_per_gas: u128, - /// The 160-bit address of the message call’s recipient or, for a contract creation - /// transaction, ∅, used here to denote the only member of B0 ; formally Tt. - pub to: TransactionKind, - /// A scalar value equal to the number of Wei to - /// be transferred to the message call’s recipient or, - /// in the case of contract creation, as an endowment - /// to the newly created account; formally Tv. 
- /// - /// As ethereum circulation is around 120mil eth as of 2022 that is around - /// 120000000000000000000000000 wei we are safe to use u128 as its max number is: - /// 340282366920938463463374607431768211455 - pub value: u128, - /// The accessList specifies a list of addresses and storage keys; - /// these addresses and storage keys are added into the `accessed_addresses` - /// and `accessed_storage_keys` global sets (introduced in EIP-2929). - /// A gas cost is charged, though at a discount relative to the cost of - /// accessing outside the list. - pub access_list: AccessList, - - /// It contains a vector of fixed size hash(32 bytes) - pub blob_versioned_hashes: Vec, - - /// Max fee per data gas - /// - /// aka BlobFeeCap - pub max_fee_per_blob_gas: u128, - - /// Input has two uses depending if transaction is Create or Call (if `to` field is None or - /// Some). pub init: An unlimited size byte array specifying the - /// EVM-code for the account initialisation procedure CREATE, - /// data: An unlimited size byte array specifying the - /// input data of the message call, formally Td. - pub input: Bytes, -} - -impl TxEip4844 { - /// Returns the effective gas price for the given `base_fee`. - /// - /// Note: this is the same as [TxEip1559::effective_gas_price]. - pub fn effective_gas_price(&self, base_fee: Option) -> u128 { - match base_fee { - None => self.max_fee_per_gas, - Some(base_fee) => { - // if the tip is greater than the max priority fee per gas, set it to the max - // priority fee per gas + base fee - let tip = self.max_fee_per_gas.saturating_sub(base_fee as u128); - if tip > self.max_priority_fee_per_gas { - self.max_priority_fee_per_gas + base_fee as u128 - } else { - // otherwise return the max fee per gas - self.max_fee_per_gas - } - } - } - } - - /// Returns the total gas for all blobs in this transaction. - #[inline] - pub fn blob_gas(&self) -> u64 { - // SAFETY: we don't expect u64::MAX / DATA_GAS_PER_BLOB hashes in a single transaction - self.blob_versioned_hashes.len() as u64 * DATA_GAS_PER_BLOB - } - - /// Calculates a heuristic for the in-memory size of the [TxEip4844] transaction. - #[inline] - pub fn size(&self) -> usize { - mem::size_of::() + // chain_id - mem::size_of::() + // nonce - mem::size_of::() + // gas_limit - mem::size_of::() + // max_fee_per_gas - mem::size_of::() + // max_priority_fee_per_gas - self.to.size() + // to - mem::size_of::() + // value - self.access_list.size() + // access_list - self.input.len() + // input - self.blob_versioned_hashes.capacity() * mem::size_of::() + // blob hashes size - mem::size_of::() // max_fee_per_data_gas - } -} - /// A raw transaction. /// /// Transaction types were introduced in [EIP-2718](https://eips.ethereum.org/EIPS/eip-2718). 
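For context on the envelope these hunks leave behind in `mod.rs`: [EIP-2718](https://eips.ethereum.org/EIPS/eip-2718) frames every typed transaction as a one-byte type id followed by the type-specific payload, while legacy transactions remain raw RLP lists (their first byte is always 0xc0 or higher, so it can never collide with a type id). A minimal sketch of that framing (illustrative; not the crate's actual codec):

```rust
/// Illustrative EIP-2718 framing: `type_id || payload`.
fn encode_typed(type_id: u8, payload: &[u8]) -> Vec<u8> {
    // EIP-2718 reserves type ids in the range 0x00..=0x7f.
    assert!(type_id < 0x80, "invalid EIP-2718 transaction type");
    let mut out = Vec::with_capacity(1 + payload.len());
    out.push(type_id);
    out.extend_from_slice(payload);
    out
}

fn main() {
    // 0x01 = EIP-2930, 0x02 = EIP-1559, 0x03 = EIP-4844 (payload bytes are illustrative)
    let envelope = encode_typed(0x02, &[0xc0]);
    assert_eq!(envelope, vec![0x02, 0xc0]);
}
```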
@@ -1523,9 +1220,8 @@ impl IntoRecoveredTransaction for TransactionSignedEcRecovered { #[cfg(test)] mod tests { use crate::{ - transaction::{signature::Signature, TransactionKind, TxEip1559, TxEip2930, TxLegacy}, - AccessList, Address, Bytes, Transaction, TransactionSigned, TransactionSignedEcRecovered, - H256, U256, + transaction::{signature::Signature, TransactionKind, TxEip1559, TxLegacy}, + Address, Bytes, Transaction, TransactionSigned, TransactionSignedEcRecovered, H256, U256, }; use bytes::BytesMut; use ethers_core::utils::hex; @@ -1539,30 +1235,6 @@ mod tests { assert_eq!(DecodeError::InputTooShort, res); } - #[test] - fn test_decode_create() { - // tests that a contract creation tx encodes and decodes properly - let request = Transaction::Eip2930(TxEip2930 { - chain_id: 1u64, - nonce: 0, - gas_price: 1, - gas_limit: 2, - to: TransactionKind::Create, - value: 3, - input: Bytes::from(vec![1, 2]), - access_list: Default::default(), - }); - let signature = Signature { odd_y_parity: true, r: U256::default(), s: U256::default() }; - let tx = TransactionSigned::from_transaction_and_signature(request, signature); - - let mut encoded = BytesMut::new(); - tx.encode(&mut encoded); - assert_eq!(encoded.len(), tx.length()); - - let decoded = TransactionSigned::decode(&mut &*encoded).unwrap(); - assert_eq!(decoded, tx); - } - #[test] fn test_decode_create_goerli() { // test that an example create tx from goerli decodes properly @@ -1579,31 +1251,6 @@ mod tests { assert_eq!(tx_bytes, encoded); } - #[test] - fn test_decode_call() { - let request = Transaction::Eip2930(TxEip2930 { - chain_id: 1u64, - nonce: 0, - gas_price: 1, - gas_limit: 2, - to: TransactionKind::Call(Address::default()), - value: 3, - input: Bytes::from(vec![1, 2]), - access_list: Default::default(), - }); - - let signature = Signature { odd_y_parity: true, r: U256::default(), s: U256::default() }; - - let tx = TransactionSigned::from_transaction_and_signature(request, signature); - - let mut encoded = BytesMut::new(); - tx.encode(&mut encoded); - assert_eq!(encoded.len(), tx.length()); - - let decoded = TransactionSigned::decode(&mut &*encoded).unwrap(); - assert_eq!(decoded, tx); - } - #[test] fn decode_transaction_consumes_buffer() { let bytes = &mut &hex::decode("b87502f872041a8459682f008459682f0d8252089461815774383099e24810ab832a5b2a5425c154d58829a2241af62c000080c001a059e6b67f48fb32e7e570dfb11e042b5ad2e55e3ce3ce9cd989c7e06e07feeafda0016b83f4f980694ed2eee4d10667242b1f40dc406901b34125b008d334d47469").unwrap()[..]; @@ -1765,72 +1412,6 @@ mod tests { assert_eq!(tx.recover_signer(), Some(signer), "Recovering signer should pass."); } - #[test] - fn recover_signer_legacy() { - use crate::hex_literal::hex; - - let signer: Address = hex!("398137383b3d25c92898c656696e41950e47316b").into(); - let hash: H256 = - hex!("bb3a336e3f823ec18197f1e13ee875700f08f03e2cab75f0d0b118dabb44cba0").into(); - - let tx = Transaction::Legacy(TxLegacy { - chain_id: Some(1), - nonce: 0x18, - gas_price: 0xfa56ea00, - gas_limit: 119902, - to: TransactionKind::Call( hex!("06012c8cf97bead5deae237070f9587f8e7a266d").into()), - value: 0x1c6bf526340000u64.into(), - input: hex!("f7d8c88300000000000000000000000000000000000000000000000000000000000cee6100000000000000000000000000000000000000000000000000000000000ac3e1").into(), - }); - - let sig = Signature { - r: U256::from_be_bytes(hex!( - "2a378831cf81d99a3f06a18ae1b6ca366817ab4d88a70053c41d7a8f0368e031" - )), - s: U256::from_be_bytes(hex!( - "450d831a05b6e418724436c05c155e0a1b7b921015d0fbc2f667aed709ac4fb5" - )), 
- odd_y_parity: false, - }; - - let signed_tx = TransactionSigned::from_transaction_and_signature(tx, sig); - assert_eq!(signed_tx.hash(), hash, "Expected same hash"); - assert_eq!(signed_tx.recover_signer(), Some(signer), "Recovering signer should pass."); - } - - #[test] - fn recover_signer_eip1559() { - use crate::hex_literal::hex; - - let signer: Address = hex!("dd6b8b3dc6b7ad97db52f08a275ff4483e024cea").into(); - let hash: H256 = - hex!("0ec0b6a2df4d87424e5f6ad2a654e27aaeb7dac20ae9e8385cc09087ad532ee0").into(); - - let tx = Transaction::Eip1559( TxEip1559 { - chain_id: 1, - nonce: 0x42, - gas_limit: 44386, - to: TransactionKind::Call( hex!("6069a6c32cf691f5982febae4faf8a6f3ab2f0f6").into()), - value: 0, - input: hex!("a22cb4650000000000000000000000005eee75727d804a2b13038928d36f8b188945a57a0000000000000000000000000000000000000000000000000000000000000000").into(), - max_fee_per_gas: 0x4a817c800, - max_priority_fee_per_gas: 0x3b9aca00, - access_list: AccessList::default(), - }); - - let sig = Signature { - r: U256::from_str("0x840cfc572845f5786e702984c2a582528cad4b49b2a10b9db1be7fca90058565") - .unwrap(), - s: U256::from_str("0x25e7109ceb98168d95b09b18bbf6b685130e0562f233877d492b94eee0c5b6d1") - .unwrap(), - odd_y_parity: false, - }; - - let signed_tx = TransactionSigned::from_transaction_and_signature(tx, sig); - assert_eq!(signed_tx.hash(), hash, "Expected same hash"); - assert_eq!(signed_tx.recover_signer(), Some(signer), "Recovering signer should pass."); - } - #[test] fn test_envelop_encode() { // random tx: From a298756d9578f32fe66e0cee2794b3a55c8f8d83 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 27 Jul 2023 19:36:07 +0300 Subject: [PATCH 272/722] bench(txpool): reordering (#3882) --- Cargo.lock | 2 + crates/transaction-pool/Cargo.toml | 8 + crates/transaction-pool/benches/reorder.rs | 220 ++++++++++++++++++ crates/transaction-pool/src/lib.rs | 3 +- .../transaction-pool/src/test_utils/mock.rs | 77 +++++- crates/transaction-pool/src/traits.rs | 14 ++ crates/transaction-pool/src/validate/mod.rs | 8 + 7 files changed, 329 insertions(+), 3 deletions(-) create mode 100644 crates/transaction-pool/benches/reorder.rs diff --git a/Cargo.lock b/Cargo.lock index f2ff533ab902..9919f959ae5c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5870,10 +5870,12 @@ dependencies = [ "async-trait", "auto_impl", "bitflags 1.3.2", + "criterion", "fnv", "futures-util", "parking_lot 0.12.1", "paste", + "proptest", "rand 0.8.5", "reth-interfaces", "reth-metrics", diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 6866cf1ea20b..0275c0313d9f 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -44,12 +44,20 @@ auto_impl = "1.0" # testing rand = { workspace = true, optional = true } paste = { version = "1.0", optional = true } +proptest = { version = "1.0", optional = true } [dev-dependencies] paste = "1.0" rand = "0.8" +proptest = "1.0" +criterion = "0.5" [features] default = ["serde"] serde = ["dep:serde"] test-utils = ["rand", "paste", "serde"] +arbitrary = ["proptest", "reth-primitives/arbitrary"] + +[[bench]] +name = "reorder" +harness = false diff --git a/crates/transaction-pool/benches/reorder.rs b/crates/transaction-pool/benches/reorder.rs new file mode 100644 index 000000000000..129f1c17ff56 --- /dev/null +++ b/crates/transaction-pool/benches/reorder.rs @@ -0,0 +1,220 @@ +use criterion::{ + black_box, criterion_group, criterion_main, measurement::WallTime, BenchmarkGroup, Criterion, +}; +use proptest::{ + prelude::*, 
+ strategy::{Strategy, ValueTree}, + test_runner::TestRunner, +}; +use reth_transaction_pool::test_utils::MockTransaction; + +/// Transaction Pool trait for benching. +pub trait BenchTxPool: Default { + fn add_transaction(&mut self, tx: MockTransaction); + fn reorder(&mut self, base_fee: u64); +} + +pub fn txpool_reordering(c: &mut Criterion) { + let mut group = c.benchmark_group("Transaction Pool Reordering"); + + for seed_size in [1_000, 10_000, 50_000, 100_000] { + for input_size in [10, 100, 1_000] { + let (txs, new_txs, base_fee) = generate_test_data(seed_size, input_size); + + use implementations::*; + + // Vanilla sorting of unsorted collection + txpool_reordering_bench::( + &mut group, + "VecTxPoolSortStable", + txs.clone(), + new_txs.clone(), + base_fee, + ); + + // Unstable sorting of unsorted collection + txpool_reordering_bench::( + &mut group, + "VecTxPoolSortUnstable", + txs.clone(), + new_txs.clone(), + base_fee, + ); + + // BinaryHeap that is resorted on each update + txpool_reordering_bench::( + &mut group, + "BinaryHeapTxPool", + txs, + new_txs, + base_fee, + ); + } + } +} + +fn txpool_reordering_bench( + group: &mut BenchmarkGroup, + description: &str, + seed: Vec, + new_txs: Vec, + base_fee: u64, +) { + let setup = || { + let mut txpool = T::default(); + txpool.reorder(base_fee); + + for tx in seed.iter() { + txpool.add_transaction(tx.clone()); + } + (txpool, new_txs.clone()) + }; + + let group_id = format!( + "txpool | seed size: {} | input size: {} | {}", + seed.len(), + new_txs.len(), + description + ); + group.bench_function(group_id, |b| { + b.iter_with_setup(setup, |(mut txpool, new_txs)| { + black_box({ + // Reorder with new base fee + let bigger_base_fee = base_fee.saturating_add(10); + txpool.reorder(bigger_base_fee); + + // Reorder with new base fee after adding transactions. + for new_tx in new_txs { + txpool.add_transaction(new_tx); + } + let smaller_base_fee = base_fee.saturating_sub(10); + txpool.reorder(smaller_base_fee); + }) + }); + }); +} + +fn generate_test_data( + seed_size: usize, + input_size: usize, +) -> (Vec, Vec, u64) { + let config = ProptestConfig::default(); + let mut runner = TestRunner::new(config); + + let txs = prop::collection::vec(any::(), seed_size) + .new_tree(&mut runner) + .unwrap() + .current(); + + let new_txs = prop::collection::vec(any::(), input_size) + .new_tree(&mut runner) + .unwrap() + .current(); + + let base_fee = any::().new_tree(&mut runner).unwrap().current(); + + (txs, new_txs, base_fee) +} + +mod implementations { + use super::*; + use reth_transaction_pool::PoolTransaction; + use std::collections::BinaryHeap; + + /// This implementation appends the transactions and uses [Vec::sort_by] function for sorting. + #[derive(Default)] + pub struct VecTxPoolSortStable { + inner: Vec, + } + + impl BenchTxPool for VecTxPoolSortStable { + fn add_transaction(&mut self, tx: MockTransaction) { + self.inner.push(tx); + } + + fn reorder(&mut self, base_fee: u64) { + self.inner.sort_by(|a, b| { + a.effective_tip_per_gas(base_fee) + .expect("exists") + .cmp(&b.effective_tip_per_gas(base_fee).expect("exists")) + }) + } + } + + /// This implementation appends the transactions and uses [Vec::sort_unstable_by] function for + /// sorting. 
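// --- A standalone sketch, not part of this patch: the pattern `generate_test_data`
// above relies on is drawing concrete values from proptest strategies *outside*
// of a `proptest!` test, via `TestRunner` and `ValueTree`. The value types here
// are arbitrary stand-ins for illustration.
use proptest::{
    prelude::*,
    strategy::{Strategy, ValueTree},
    test_runner::TestRunner,
};

fn main() {
    let mut runner = TestRunner::new(ProptestConfig::default());

    // Sample one random value from a strategy.
    let base_fee = any::<u64>().new_tree(&mut runner).unwrap().current();

    // Sample a fixed-size batch, mirroring the seed/input generation above.
    let fees = prop::collection::vec(any::<u64>(), 100)
        .new_tree(&mut runner)
        .unwrap()
        .current();

    assert_eq!(fees.len(), 100);
    let _ = base_fee;
}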
+ #[derive(Default)] + pub struct VecTxPoolSortUnstable { + inner: Vec, + } + + impl BenchTxPool for VecTxPoolSortUnstable { + fn add_transaction(&mut self, tx: MockTransaction) { + self.inner.push(tx); + } + + fn reorder(&mut self, base_fee: u64) { + self.inner.sort_unstable_by(|a, b| { + a.effective_tip_per_gas(base_fee) + .expect("exists") + .cmp(&b.effective_tip_per_gas(base_fee).expect("exists")) + }) + } + } + + struct MockTransactionWithPriority { + tx: MockTransaction, + priority: u128, + } + + impl PartialEq for MockTransactionWithPriority { + fn eq(&self, other: &Self) -> bool { + self.priority.eq(&other.priority) + } + } + + impl Eq for MockTransactionWithPriority {} + + impl PartialOrd for MockTransactionWithPriority { + fn partial_cmp(&self, other: &Self) -> Option { + self.priority.partial_cmp(&other.priority) + } + } + + impl Ord for MockTransactionWithPriority { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.priority.cmp(&other.priority) + } + } + + /// This implementation uses BinaryHeap which is drained and reconstructed on each reordering. + #[derive(Default)] + pub struct BinaryHeapTxPool { + inner: BinaryHeap, + base_fee: Option, + } + + impl BenchTxPool for BinaryHeapTxPool { + fn add_transaction(&mut self, tx: MockTransaction) { + let priority = self + .base_fee + .as_ref() + .map(|bf| tx.effective_tip_per_gas(*bf).expect("set")) + .unwrap_or_default(); + self.inner.push(MockTransactionWithPriority { tx, priority }); + } + + fn reorder(&mut self, base_fee: u64) { + self.base_fee = Some(base_fee); + + let drained = self.inner.drain(); + self.inner = BinaryHeap::from_iter(drained.map(|mock| { + let priority = mock.tx.effective_tip_per_gas(base_fee).expect("set"); + MockTransactionWithPriority { tx: mock.tx, priority } + })); + } + } +} + +criterion_group!(reorder, txpool_reordering); +criterion_main!(reorder); diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index d4c25f047e75..81b5e224f731 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -10,8 +10,7 @@ rust_2018_idioms, unreachable_pub, missing_debug_implementations, - rustdoc::broken_intra_doc_links, - unused_crate_dependencies + rustdoc::broken_intra_doc_links )] #![doc(test( no_crate_inject, diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 62be5c66ef11..94d305032342 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -14,7 +14,7 @@ use rand::{ use reth_primitives::{ constants::MIN_PROTOCOL_BASE_FEE, hex, Address, FromRecoveredTransaction, IntoRecoveredTransaction, Signature, Transaction, TransactionKind, TransactionSigned, - TransactionSignedEcRecovered, TxEip1559, TxHash, TxLegacy, TxType, H256, U128, U256, + TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxHash, TxLegacy, TxType, H256, U128, U256, }; use std::{ops::Range, sync::Arc, time::Instant}; @@ -360,6 +360,21 @@ impl PoolTransaction for MockTransaction { } } + fn effective_tip_per_gas(&self, base_fee: u64) -> Option { + let base_fee = base_fee as u128; + let max_fee_per_gas = self.max_fee_per_gas(); + if max_fee_per_gas < base_fee { + return None + } + + let fee = max_fee_per_gas - base_fee; + if let Some(priority_fee) = self.max_priority_fee_per_gas() { + return Some(fee.min(priority_fee)) + } + + Some(fee) + } + fn kind(&self) -> &TransactionKind { match self { MockTransaction::Legacy { to, .. 
} => to, @@ -461,6 +476,66 @@ impl IntoRecoveredTransaction for MockTransaction { } } +#[cfg(any(test, feature = "arbitrary"))] +impl proptest::arbitrary::Arbitrary for MockTransaction { + type Parameters = (); + type Strategy = proptest::strategy::BoxedStrategy; + + fn arbitrary_with(_: Self::Parameters) -> Self::Strategy { + use proptest::prelude::{any, Strategy}; + + any::<(Transaction, Address, H256)>() + .prop_map(|(tx, sender, tx_hash)| match &tx { + Transaction::Legacy(TxLegacy { + nonce, + gas_price, + gas_limit, + to, + value, + input, + .. + }) | + Transaction::Eip2930(TxEip2930 { + nonce, + gas_price, + gas_limit, + to, + value, + input, + .. + }) => MockTransaction::Legacy { + sender, + hash: tx_hash, + nonce: *nonce, + gas_price: *gas_price, + gas_limit: *gas_limit, + to: *to, + value: U256::from(*value), + }, + Transaction::Eip1559(TxEip1559 { + nonce, + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + to, + value, + input, + .. + }) => MockTransaction::Eip1559 { + sender, + hash: tx_hash, + nonce: *nonce, + max_fee_per_gas: *max_fee_per_gas, + max_priority_fee_per_gas: *max_priority_fee_per_gas, + gas_limit: *gas_limit, + to: *to, + value: U256::from(*value), + }, + }) + .boxed() + } +} + #[derive(Default)] pub struct MockTransactionFactory { pub(crate) ids: SenderIdentifiers, diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 9d3319832ab2..c4c1b513a44c 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -491,6 +491,12 @@ pub trait PoolTransaction: /// This will return `None` for non-EIP1559 transactions fn max_priority_fee_per_gas(&self) -> Option; + /// Returns the effective tip for this transaction. + /// + /// For EIP-1559 transactions: `min(max_fee_per_gas - base_fee, max_priority_fee_per_gas)`. + /// For legacy transactions: `gas_price - base_fee`. + fn effective_tip_per_gas(&self, base_fee: u64) -> Option; + /// Returns the transaction's [`TransactionKind`], which is the address of the recipient or /// [`TransactionKind::Create`] if the transaction is a contract creation. fn kind(&self) -> &TransactionKind; @@ -599,6 +605,14 @@ impl PoolTransaction for PooledTransaction { } } + /// Returns the effective tip for this transaction. + /// + /// For EIP-1559 transactions: `min(max_fee_per_gas - base_fee, max_priority_fee_per_gas)`. + /// For legacy transactions: `gas_price - base_fee`. + fn effective_tip_per_gas(&self, base_fee: u64) -> Option { + self.transaction.effective_tip_per_gas(base_fee) + } + /// Returns the transaction's [`TransactionKind`], which is the address of the recipient or /// [`TransactionKind::Create`] if the transaction is a contract creation. fn kind(&self) -> &TransactionKind { diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 2e96099f9354..3b374e5ab811 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -179,6 +179,14 @@ impl ValidPoolTransaction { self.transaction.max_fee_per_gas() } + /// Returns the effective tip for this transaction. + /// + /// For EIP-1559 transactions: `min(max_fee_per_gas - base_fee, max_priority_fee_per_gas)`. + /// For legacy transactions: `gas_price - base_fee`. + pub fn effective_tip_per_gas(&self, base_fee: u64) -> Option { + self.transaction.effective_tip_per_gas(base_fee) + } + /// Maximum amount of gas that the transaction is allowed to consume. 
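// --- A minimal, self-contained sketch (not reth's code) of the effective-tip
// rule documented above, with fee fields as plain integers in arbitrary units:
//   EIP-1559: min(max_fee_per_gas - base_fee, max_priority_fee_per_gas)
//   legacy:   gas_price - base_fee (no priority-fee cap)
fn effective_tip_per_gas(
    max_fee_per_gas: u128,
    max_priority_fee_per_gas: Option<u128>,
    base_fee: u64,
) -> Option<u128> {
    let base_fee = base_fee as u128;
    // A transaction that cannot cover the base fee has no effective tip.
    if max_fee_per_gas < base_fee {
        return None;
    }
    let fee = max_fee_per_gas - base_fee;
    // Cap by the priority fee when one is set (EIP-1559); otherwise the
    // whole remainder is the tip (legacy).
    Some(max_priority_fee_per_gas.map_or(fee, |tip| fee.min(tip)))
}

fn main() {
    // max fee 30, priority fee 2, base fee 25 -> tip = min(30 - 25, 2) = 2
    assert_eq!(effective_tip_per_gas(30, Some(2), 25), Some(2));
    // base fee above the max fee -> no effective tip
    assert_eq!(effective_tip_per_gas(20, None, 25), None);
}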
pub fn gas_limit(&self) -> u64 { self.transaction.gas_limit() From 68a74b2e104c4c976afa1d0c3c5c5fcec733b4ba Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 27 Jul 2023 20:57:21 +0200 Subject: [PATCH 273/722] chore: move some tests to engine crate (#3968) --- Cargo.lock | 3 +- crates/rpc/rpc-engine-api/Cargo.toml | 1 + crates/rpc/rpc-engine-api/src/lib.rs | 7 + crates/rpc/rpc-engine-api/tests/it/main.rs | 3 + crates/rpc/rpc-engine-api/tests/it/payload.rs | 127 ++++++++++++++++++ crates/rpc/rpc-types/Cargo.toml | 4 - .../rpc/rpc-types/src/eth/engine/payload.rs | 124 ----------------- 7 files changed, 139 insertions(+), 130 deletions(-) create mode 100644 crates/rpc/rpc-engine-api/tests/it/main.rs create mode 100644 crates/rpc/rpc-engine-api/tests/it/payload.rs diff --git a/Cargo.lock b/Cargo.lock index 9919f959ae5c..f41fa41feacb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5779,6 +5779,7 @@ dependencies = [ "reth-payload-builder", "reth-primitives", "reth-provider", + "reth-rlp", "reth-rpc-api", "reth-rpc-types", "reth-tasks", @@ -5791,10 +5792,8 @@ dependencies = [ name = "reth-rpc-types" version = "0.1.0-alpha.4" dependencies = [ - "assert_matches", "jsonrpsee-types", "rand 0.8.5", - "reth-interfaces", "reth-primitives", "reth-rlp", "serde", diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index fc09c60010a4..aa5157add0d0 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -30,6 +30,7 @@ jsonrpsee-core = "0.18" tracing.workspace = true [dev-dependencies] +reth-rlp.workspace = true reth-interfaces = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-payload-builder = { workspace = true, features = ["test-utils"] } diff --git a/crates/rpc/rpc-engine-api/src/lib.rs b/crates/rpc/rpc-engine-api/src/lib.rs index fa440801b118..ba36670c5aa3 100644 --- a/crates/rpc/rpc-engine-api/src/lib.rs +++ b/crates/rpc/rpc-engine-api/src/lib.rs @@ -29,3 +29,10 @@ pub use message::EngineApiMessageVersion; // re-export server trait for convenience pub use reth_rpc_api::EngineApiServer; + +#[cfg(test)] +#[allow(unused_imports)] +mod tests { + // silence unused import warning + use reth_rlp as _; +} diff --git a/crates/rpc/rpc-engine-api/tests/it/main.rs b/crates/rpc/rpc-engine-api/tests/it/main.rs new file mode 100644 index 000000000000..cf02e709823c --- /dev/null +++ b/crates/rpc/rpc-engine-api/tests/it/main.rs @@ -0,0 +1,3 @@ +mod payload; + +fn main() {} diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs new file mode 100644 index 000000000000..bc9f1f4249ae --- /dev/null +++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ -0,0 +1,127 @@ +//! 
Some payload tests + +use assert_matches::assert_matches; +use reth_interfaces::test_utils::generators::{ + self, random_block, random_block_range, random_header, +}; +use reth_primitives::{ + bytes::{Bytes, BytesMut}, + proofs::{self}, + Block, SealedBlock, TransactionSigned, H256, U256, +}; +use reth_rlp::{Decodable, DecodeError}; +use reth_rpc_types::engine::{ExecutionPayload, ExecutionPayloadBodyV1, PayloadError}; + +fn transform_block Block>(src: SealedBlock, f: F) -> ExecutionPayload { + let unsealed = src.unseal(); + let mut transformed: Block = f(unsealed); + // Recalculate roots + transformed.header.transactions_root = proofs::calculate_transaction_root(&transformed.body); + transformed.header.ommers_hash = proofs::calculate_ommers_root(&transformed.ommers); + SealedBlock { + header: transformed.header.seal_slow(), + body: transformed.body, + ommers: transformed.ommers, + withdrawals: transformed.withdrawals, + } + .into() +} + +#[test] +fn payload_body_roundtrip() { + let mut rng = generators::rng(); + for block in random_block_range(&mut rng, 0..=99, H256::default(), 0..2) { + let unsealed = block.clone().unseal(); + let payload_body: ExecutionPayloadBodyV1 = unsealed.into(); + + assert_eq!( + Ok(block.body), + payload_body + .transactions + .iter() + .map(|x| TransactionSigned::decode(&mut &x[..])) + .collect::, _>>(), + ); + + assert_eq!(block.withdrawals, payload_body.withdrawals); + } +} + +#[test] +fn payload_validation() { + let mut rng = generators::rng(); + let block = random_block(&mut rng, 100, Some(H256::random()), Some(3), Some(0)); + + // Valid extra data + let block_with_valid_extra_data = transform_block(block.clone(), |mut b| { + b.header.extra_data = BytesMut::zeroed(32).freeze().into(); + b + }); + assert_matches!(TryInto::::try_into(block_with_valid_extra_data), Ok(_)); + + // Invalid extra data + let block_with_invalid_extra_data: Bytes = BytesMut::zeroed(33).freeze(); + let invalid_extra_data_block = transform_block(block.clone(), |mut b| { + b.header.extra_data = block_with_invalid_extra_data.clone().into(); + b + }); + assert_matches!( + TryInto::::try_into(invalid_extra_data_block), + Err(PayloadError::ExtraData(data)) if data == block_with_invalid_extra_data + ); + + // Zero base fee + let block_with_zero_base_fee = transform_block(block.clone(), |mut b| { + b.header.base_fee_per_gas = Some(0); + b + }); + assert_matches!( + TryInto::::try_into(block_with_zero_base_fee), + Err(PayloadError::BaseFee(val)) if val == U256::ZERO + ); + + // Invalid encoded transactions + let mut payload_with_invalid_txs: ExecutionPayload = block.clone().into(); + payload_with_invalid_txs.transactions.iter_mut().for_each(|tx| { + *tx = Bytes::new().into(); + }); + assert_matches!( + TryInto::::try_into(payload_with_invalid_txs), + Err(PayloadError::Decode(DecodeError::InputTooShort)) + ); + + // Non empty ommers + let block_with_ommers = transform_block(block.clone(), |mut b| { + b.ommers.push(random_header(&mut rng, 100, None).unseal()); + b + }); + assert_matches!( + TryInto::::try_into(block_with_ommers.clone()), + Err(PayloadError::BlockHash { consensus, .. }) + if consensus == block_with_ommers.block_hash + ); + + // None zero difficulty + let block_with_difficulty = transform_block(block.clone(), |mut b| { + b.header.difficulty = U256::from(1); + b + }); + assert_matches!( + TryInto::::try_into(block_with_difficulty.clone()), + Err(PayloadError::BlockHash { consensus, .. 
}) if consensus == block_with_difficulty.block_hash + ); + + // None zero nonce + let block_with_nonce = transform_block(block.clone(), |mut b| { + b.header.nonce = 1; + b + }); + assert_matches!( + TryInto::::try_into(block_with_nonce.clone()), + Err(PayloadError::BlockHash { consensus, .. }) if consensus == block_with_nonce.block_hash + ); + + // Valid block + let valid_block = block; + assert_matches!(TryInto::::try_into(valid_block), Ok(_)); +} diff --git a/crates/rpc/rpc-types/Cargo.toml b/crates/rpc/rpc-types/Cargo.toml index 8ca59c36ca8d..71c1b80be372 100644 --- a/crates/rpc/rpc-types/Cargo.toml +++ b/crates/rpc/rpc-types/Cargo.toml @@ -24,10 +24,6 @@ serde_json.workspace = true jsonrpsee-types = { version = "0.18" } [dev-dependencies] -# reth -reth-interfaces = { workspace = true, features = ["test-utils"] } - # misc rand.workspace = true -assert_matches = "1.5" similar-asserts = "1.4" diff --git a/crates/rpc/rpc-types/src/eth/engine/payload.rs b/crates/rpc/rpc-types/src/eth/engine/payload.rs index f187ec662a99..037d5739ecfd 100644 --- a/crates/rpc/rpc-types/src/eth/engine/payload.rs +++ b/crates/rpc/rpc-types/src/eth/engine/payload.rs @@ -446,130 +446,6 @@ pub enum PayloadValidationError { #[cfg(test)] mod tests { use super::*; - use assert_matches::assert_matches; - use reth_interfaces::test_utils::generators::{ - self, random_block, random_block_range, random_header, - }; - use reth_primitives::{ - bytes::{Bytes, BytesMut}, - TransactionSigned, H256, - }; - use reth_rlp::{Decodable, DecodeError}; - - fn transform_block Block>(src: SealedBlock, f: F) -> ExecutionPayload { - let unsealed = src.unseal(); - let mut transformed: Block = f(unsealed); - // Recalculate roots - transformed.header.transactions_root = - proofs::calculate_transaction_root(&transformed.body); - transformed.header.ommers_hash = proofs::calculate_ommers_root(&transformed.ommers); - SealedBlock { - header: transformed.header.seal_slow(), - body: transformed.body, - ommers: transformed.ommers, - withdrawals: transformed.withdrawals, - } - .into() - } - - #[test] - fn payload_body_roundtrip() { - let mut rng = generators::rng(); - for block in random_block_range(&mut rng, 0..=99, H256::default(), 0..2) { - let unsealed = block.clone().unseal(); - let payload_body: ExecutionPayloadBodyV1 = unsealed.into(); - - assert_eq!( - Ok(block.body), - payload_body - .transactions - .iter() - .map(|x| TransactionSigned::decode(&mut &x[..])) - .collect::, _>>(), - ); - - assert_eq!(block.withdrawals, payload_body.withdrawals); - } - } - - #[test] - fn payload_validation() { - let mut rng = generators::rng(); - let block = random_block(&mut rng, 100, Some(H256::random()), Some(3), Some(0)); - - // Valid extra data - let block_with_valid_extra_data = transform_block(block.clone(), |mut b| { - b.header.extra_data = BytesMut::zeroed(32).freeze().into(); - b - }); - assert_matches!(TryInto::::try_into(block_with_valid_extra_data), Ok(_)); - - // Invalid extra data - let block_with_invalid_extra_data: Bytes = BytesMut::zeroed(33).freeze(); - let invalid_extra_data_block = transform_block(block.clone(), |mut b| { - b.header.extra_data = block_with_invalid_extra_data.clone().into(); - b - }); - assert_matches!( - TryInto::::try_into(invalid_extra_data_block), - Err(PayloadError::ExtraData(data)) if data == block_with_invalid_extra_data - ); - - // Zero base fee - let block_with_zero_base_fee = transform_block(block.clone(), |mut b| { - b.header.base_fee_per_gas = Some(0); - b - }); - assert_matches!( - 
TryInto::::try_into(block_with_zero_base_fee), - Err(PayloadError::BaseFee(val)) if val == U256::ZERO - ); - - // Invalid encoded transactions - let mut payload_with_invalid_txs: ExecutionPayload = block.clone().into(); - payload_with_invalid_txs.transactions.iter_mut().for_each(|tx| { - *tx = Bytes::new().into(); - }); - assert_matches!( - TryInto::::try_into(payload_with_invalid_txs), - Err(PayloadError::Decode(DecodeError::InputTooShort)) - ); - - // Non empty ommers - let block_with_ommers = transform_block(block.clone(), |mut b| { - b.ommers.push(random_header(&mut rng, 100, None).unseal()); - b - }); - assert_matches!( - TryInto::::try_into(block_with_ommers.clone()), - Err(PayloadError::BlockHash { consensus, .. }) - if consensus == block_with_ommers.block_hash - ); - - // None zero difficulty - let block_with_difficulty = transform_block(block.clone(), |mut b| { - b.header.difficulty = U256::from(1); - b - }); - assert_matches!( - TryInto::::try_into(block_with_difficulty.clone()), - Err(PayloadError::BlockHash { consensus, .. }) if consensus == block_with_difficulty.block_hash - ); - - // None zero nonce - let block_with_nonce = transform_block(block.clone(), |mut b| { - b.header.nonce = 1; - b - }); - assert_matches!( - TryInto::::try_into(block_with_nonce.clone()), - Err(PayloadError::BlockHash { consensus, .. }) if consensus == block_with_nonce.block_hash - ); - - // Valid block - let valid_block = block; - assert_matches!(TryInto::::try_into(valid_block), Ok(_)); - } #[test] fn serde_payload_status() { From 465f0dc2a846c9669d40ad561d757b6fe89fea84 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 27 Jul 2023 22:51:27 +0300 Subject: [PATCH 274/722] fix(cli): eta div by zero (#3971) --- bin/reth/src/node/events.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/bin/reth/src/node/events.rs b/bin/reth/src/node/events.rs index 4f0c673e097a..cc8fe657b54b 100644 --- a/bin/reth/src/node/events.rs +++ b/bin/reth/src/node/events.rs @@ -290,9 +290,10 @@ impl Eta { let elapsed = last_checkpoint_time.elapsed(); let per_second = processed_since_last as f64 / elapsed.as_secs_f64(); - self.eta = Some(Duration::from_secs_f64( - (current.total - current.processed) as f64 / per_second, - )); + self.eta = Duration::try_from_secs_f64( + ((current.total - current.processed) as f64) / per_second, + ) + .ok(); } self.last_checkpoint = current; From 369b9a79c5dafde63c394d4e335fa29f49f0e17a Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Thu, 27 Jul 2023 22:00:57 +0200 Subject: [PATCH 275/722] feat(eip4844): add `EIP4844` to the `TxType` enum (#3953) --- crates/primitives/src/receipt.rs | 8 +++++++- crates/primitives/src/transaction/tx_type.rs | 7 ++++++- crates/rpc/rpc-types/src/eth/transaction/mod.rs | 2 +- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 15a5a2308dfb..57cbdcc6d0d5 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -147,6 +147,9 @@ impl Decodable for ReceiptWithBloom { } else if receipt_type == 0x02 { buf.advance(1); Self::decode_receipt(buf, TxType::EIP1559) + } else if receipt_type == 0x03 { + buf.advance(1); + Self::decode_receipt(buf, TxType::EIP4844) } else { Err(reth_rlp::DecodeError::Custom("invalid receipt type")) } @@ -251,6 +254,9 @@ impl<'a> ReceiptWithBloomEncoder<'a> { TxType::EIP1559 => { out.put_u8(0x02); } + TxType::EIP4844 => { + out.put_u8(0x03); + } _ => 
unreachable!("legacy handled; qed."),
         }
         out.put_slice(payload.as_ref());
@@ -270,7 +276,7 @@ impl<'a> Encodable for ReceiptWithBloomEncoder<'a> {
     fn length(&self) -> usize {
         let mut payload_len = self.receipt_length();
         // account for eip-2718 type prefix and set the list
-        if matches!(self.receipt.tx_type, TxType::EIP1559 | TxType::EIP2930) {
+        if matches!(self.receipt.tx_type, TxType::EIP1559 | TxType::EIP2930 | TxType::EIP4844) {
             payload_len += 1;
             // we include a string header for typed receipts, so include the length here
             payload_len += length_of_length(payload_len);
diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs
index c0a6b71a0e80..64788e46568c 100644
--- a/crates/primitives/src/transaction/tx_type.rs
+++ b/crates/primitives/src/transaction/tx_type.rs
@@ -27,6 +27,8 @@ pub enum TxType {
     EIP2930 = 1_isize,
     /// Transaction with Priority fee
     EIP1559 = 2_isize,
+    /// Shard Blob Transactions - EIP-4844
+    EIP4844 = 3_isize,
 }
 
 impl From<TxType> for u8 {
@@ -35,6 +37,7 @@ impl From<TxType> for u8 {
             TxType::Legacy => LEGACY_TX_TYPE_ID,
             TxType::EIP2930 => EIP2930_TX_TYPE_ID,
             TxType::EIP1559 => EIP1559_TX_TYPE_ID,
+            TxType::EIP4844 => EIP4844_TX_TYPE_ID,
         }
     }
 }
@@ -54,6 +57,7 @@ impl Compact for TxType {
             TxType::Legacy => 0,
             TxType::EIP2930 => 1,
             TxType::EIP1559 => 2,
+            TxType::EIP4844 => 3,
         }
     }
 
@@ -62,7 +66,8 @@ impl Compact for TxType {
         match identifier {
             0 => TxType::Legacy,
             1 => TxType::EIP2930,
-            _ => TxType::EIP1559,
+            2 => TxType::EIP1559,
+            _ => TxType::EIP4844,
         },
         buf,
     )
diff --git a/crates/rpc/rpc-types/src/eth/transaction/mod.rs b/crates/rpc/rpc-types/src/eth/transaction/mod.rs
index 24005376c82f..cb920df7cf0a 100644
--- a/crates/rpc/rpc-types/src/eth/transaction/mod.rs
+++ b/crates/rpc/rpc-types/src/eth/transaction/mod.rs
@@ -112,7 +112,7 @@ impl Transaction {
         let (gas_price, max_fee_per_gas) = match signed_tx.tx_type() {
             TxType::Legacy => (Some(U128::from(signed_tx.max_fee_per_gas())), None),
             TxType::EIP2930 => (Some(U128::from(signed_tx.max_fee_per_gas())), None),
-            TxType::EIP1559 => {
+            TxType::EIP1559 | TxType::EIP4844 => {
                 // the gas price field for EIP1559 is set to `min(tip, gasFeeCap - baseFee) +
                 // baseFee`
                 let gas_price = base_fee

From f3a7ae12589feea29a526dad3cca8748d84b22fe Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Thu, 27 Jul 2023 23:58:50 +0200
Subject: [PATCH 276/722] perf(rpc): move frame gen into task (#3950)

---
 crates/rpc/rpc/src/debug.rs | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs
index fa29fbce3985..5a72e5eedb7a 100644
--- a/crates/rpc/rpc/src/debug.rs
+++ b/crates/rpc/rpc/src/debug.rs
@@ -242,18 +242,17 @@ where
                         .set_record_logs(call_config.with_log.unwrap_or_default()),
                 );
 
-                let inspector = self
+                let frame = self
                     .inner
                     .eth_api
                     .spawn_with_call_at(call, at, overrides, move |db, env| {
                         inspect(db, env, &mut inspector)?;
-                        Ok(inspector)
+                        let frame =
+                            inspector.into_geth_builder().geth_call_traces(call_config);
+                        Ok(frame.into())
                     })
                     .await?;
-
-                let frame = inspector.into_geth_builder().geth_call_traces(call_config);
-
-                return Ok(frame.into())
+                return Ok(frame)
             }
             GethDebugBuiltInTracerType::PreStateTracer => {
                 Err(EthApiError::Unsupported("pre state tracer currently unsupported."))

From 3601e7dfa1af31cd1106aecd0d20301741e8125f Mon Sep 17 00:00:00 2001
From: Roman Krasiuk
Date: Fri, 28 Jul 2023 12:03:40 +0300
Subject: [PATCH 277/722] fix(txpool): pending pool reordering (#3955)

---
 crates/transaction-pool/src/lib.rs            |
8 +- crates/transaction-pool/src/ordering.rs | 53 +++++-- crates/transaction-pool/src/pool/best.rs | 17 ++- crates/transaction-pool/src/pool/parked.rs | 5 +- crates/transaction-pool/src/pool/pending.rs | 129 +++++++++--------- crates/transaction-pool/src/pool/txpool.rs | 14 +- .../transaction-pool/src/test_utils/mock.rs | 32 ++--- crates/transaction-pool/src/traits.rs | 30 ++-- crates/transaction-pool/src/validate/mod.rs | 14 +- examples/network-txpool.rs | 4 +- 10 files changed, 170 insertions(+), 136 deletions(-) diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 81b5e224f731..768bb0b5cd09 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -157,7 +157,7 @@ pub use crate::{ TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, }, error::PoolResult, - ordering::{GasCostOrdering, TransactionOrdering}, + ordering::{CoinbaseTipOrdering, Priority, TransactionOrdering}, pool::{ state::SubPool, AllTransactionsEvents, FullTransactionEvent, TransactionEvent, TransactionEvents, @@ -280,12 +280,12 @@ where } impl - Pool, GasCostOrdering> + Pool, CoinbaseTipOrdering> where Client: StateProviderFactory + Clone + 'static, { /// Returns a new [Pool] that uses the default [EthTransactionValidator] when validating - /// [PooledTransaction]s and ords via [GasCostOrdering] + /// [PooledTransaction]s and ords via [CoinbaseTipOrdering] /// /// # Example /// @@ -305,7 +305,7 @@ where validator: EthTransactionValidator, config: PoolConfig, ) -> Self { - Self::new(validator, GasCostOrdering::default(), config) + Self::new(validator, CoinbaseTipOrdering::default(), config) } } diff --git a/crates/transaction-pool/src/ordering.rs b/crates/transaction-pool/src/ordering.rs index 40ee1700661d..ffa185f1f0e9 100644 --- a/crates/transaction-pool/src/ordering.rs +++ b/crates/transaction-pool/src/ordering.rs @@ -2,6 +2,26 @@ use crate::traits::PoolTransaction; use reth_primitives::U256; use std::{fmt, marker::PhantomData}; +/// Priority of the transaction that can be missing. +/// +/// Transactions with missing priorities are ranked lower. +#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug)] +pub enum Priority { + /// The value of the priority of the transaction. + Value(T), + /// Missing priority due to ordering internals. + None, +} + +impl From> for Priority { + fn from(value: Option) -> Self { + match value { + Some(val) => Priority::Value(val), + None => Priority::None, + } + } +} + /// Transaction ordering trait to determine the order of transactions. /// /// Decides how transactions should be ordered within the pool, depending on a `Priority` value. @@ -11,42 +31,53 @@ pub trait TransactionOrdering: Send + Sync + 'static { /// Priority of a transaction. /// /// Higher is better. - type Priority: Ord + Clone + Default + fmt::Debug + Send + Sync; + type PriorityValue: Ord + Clone + Default + fmt::Debug + Send + Sync; /// The transaction type to determine the priority of. type Transaction: PoolTransaction; /// Returns the priority score for the given transaction. - fn priority(&self, transaction: &Self::Transaction) -> Self::Priority; + fn priority( + &self, + transaction: &Self::Transaction, + base_fee: u64, + ) -> Priority; } /// Default ordering for the pool. /// -/// The transactions are ordered by their gas cost. The higher the gas cost, -/// the higher the priority of this transaction is. +/// The transactions are ordered by their coinbase tip. 
+/// The higher the coinbase tip is, the higher the priority of the transaction. #[derive(Debug)] #[non_exhaustive] -pub struct GasCostOrdering(PhantomData); +pub struct CoinbaseTipOrdering(PhantomData); -impl TransactionOrdering for GasCostOrdering +impl TransactionOrdering for CoinbaseTipOrdering where T: PoolTransaction + 'static, { - type Priority = U256; + type PriorityValue = U256; type Transaction = T; - fn priority(&self, transaction: &Self::Transaction) -> Self::Priority { - transaction.gas_cost() + /// Source: . + /// + /// NOTE: The implementation is incomplete for missing base fee. + fn priority( + &self, + transaction: &Self::Transaction, + base_fee: u64, + ) -> Priority { + transaction.effective_tip_per_gas(base_fee).map(U256::from).into() } } -impl Default for GasCostOrdering { +impl Default for CoinbaseTipOrdering { fn default() -> Self { Self(Default::default()) } } -impl Clone for GasCostOrdering { +impl Clone for CoinbaseTipOrdering { fn clone(&self) -> Self { Self::default() } diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 071063aeda5d..c5190327f09c 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -1,7 +1,6 @@ use crate::{ - identifier::TransactionId, - pool::pending::{PendingTransaction, PendingTransactionRef}, - PoolTransaction, TransactionOrdering, ValidPoolTransaction, + identifier::TransactionId, pool::pending::PendingTransaction, PoolTransaction, + TransactionOrdering, ValidPoolTransaction, }; use reth_primitives::H256 as TxHash; use std::{ @@ -54,12 +53,12 @@ impl Iterator for BestTransactionsWithBasefee { pub(crate) struct BestTransactions { /// Contains a copy of _all_ transactions of the pending pool at the point in time this /// iterator was created. - pub(crate) all: BTreeMap>>, + pub(crate) all: BTreeMap>, /// Transactions that can be executed right away: these have the expected nonce. /// /// Once an `independent` transaction with the nonce `N` is returned, it unlocks `N+1`, which /// then can be moved from the `all` set to the `independent` set. - pub(crate) independent: BTreeSet>, + pub(crate) independent: BTreeSet>, /// There might be the case where a yielded transactions is invalid, this will track it. pub(crate) invalid: HashSet, } @@ -74,7 +73,7 @@ impl BestTransactions { /// /// Note: for a transaction with nonce higher than the current on chain nonce this will always /// return an ancestor since all transaction in this pool are gapless. - pub(crate) fn ancestor(&self, id: &TransactionId) -> Option<&Arc>> { + pub(crate) fn ancestor(&self, id: &TransactionId) -> Option<&PendingTransaction> { self.all.get(&id.unchecked_ancestor()?) } } @@ -106,7 +105,7 @@ impl Iterator for BestTransactions { // Insert transactions that just got unlocked. 
if let Some(unlocked) = self.all.get(&best.unlocks()) { - self.independent.insert(unlocked.transaction.clone()); + self.independent.insert(unlocked.clone()); } return Some(best.transaction) @@ -133,7 +132,7 @@ mod tests { for nonce in 0..num_tx { let tx = tx.clone().rng_hash().with_nonce(nonce); let valid_tx = f.validated(tx); - pool.add_transaction(Arc::new(valid_tx)); + pool.add_transaction(Arc::new(valid_tx), 0); } let mut best = pool.best(); @@ -159,7 +158,7 @@ mod tests { for nonce in 0..num_tx { let tx = tx.clone().rng_hash().with_nonce(nonce); let valid_tx = f.validated(tx); - pool.add_transaction(Arc::new(valid_tx)); + pool.add_transaction(Arc::new(valid_tx), 0); } let mut best = pool.best(); diff --git a/crates/transaction-pool/src/pool/parked.rs b/crates/transaction-pool/src/pool/parked.rs index 1965fe8c8c11..740af02a9717 100644 --- a/crates/transaction-pool/src/pool/parked.rs +++ b/crates/transaction-pool/src/pool/parked.rs @@ -306,10 +306,11 @@ pub(crate) struct QueuedOrd(Arc>); impl_ord_wrapper!(QueuedOrd); +// TODO: temporary solution for ordering the queued pool. impl Ord for QueuedOrd { fn cmp(&self, other: &Self) -> Ordering { - // Higher cost is better - self.gas_cost().cmp(&other.gas_cost()).then_with(|| + // Higher price is better + self.max_fee_per_gas().cmp(&self.max_fee_per_gas()).then_with(|| // Lower timestamp is better other.timestamp.cmp(&self.timestamp)) } diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index dd77997dcc77..0ec5be69b56e 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -1,7 +1,7 @@ use crate::{ identifier::TransactionId, pool::{best::BestTransactions, size::SizeTracker}, - TransactionOrdering, ValidPoolTransaction, + Priority, TransactionOrdering, ValidPoolTransaction, }; use crate::pool::best::BestTransactionsWithBasefee; @@ -30,14 +30,14 @@ pub(crate) struct PendingPool { /// This way we can determine when transactions where submitted to the pool. submission_id: u64, /// _All_ Transactions that are currently inside the pool grouped by their identifier. - by_id: BTreeMap>>, + by_id: BTreeMap>, /// _All_ transactions sorted by priority - all: BTreeSet>, + all: BTreeSet>, /// Independent transactions that can be included directly and don't require other /// transactions. /// /// Sorted by their scoring value. - independent_transactions: BTreeSet>, + independent_transactions: BTreeSet>, /// Keeps track of the size of this pool. /// /// See also [`PoolTransaction::size`](crate::traits::PoolTransaction::size). 
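// --- Editorial sketch, separate from the diff above: how a `BTreeSet` keyed on
// (priority, submission id) yields the pending pool's "best first" ordering.
// `Entry` is a simplified stand-in for `PendingTransaction`; the tie-break
// direction (earlier submission ranks higher) mirrors the comparator the
// hunks below describe.
use std::{cmp::Ordering, collections::BTreeSet};

#[derive(Debug, PartialEq, Eq)]
struct Entry {
    priority: u128,
    submission_id: u64,
}

impl PartialOrd for Entry {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for Entry {
    fn cmp(&self, other: &Self) -> Ordering {
        // Compare by priority first; only on an exact tie fall back to the
        // unique submission id, so two distinct entries are never `Equal`
        // (a `BTreeSet` would otherwise silently drop one of them).
        self.priority
            .cmp(&other.priority)
            .then_with(|| other.submission_id.cmp(&self.submission_id))
    }
}

fn main() {
    let mut all = BTreeSet::new();
    all.insert(Entry { priority: 10, submission_id: 1 });
    all.insert(Entry { priority: 10, submission_id: 2 });
    all.insert(Entry { priority: 7, submission_id: 3 });

    // All three survive despite the priority tie, and the maximum element
    // is the higher-priority, earlier-submitted entry.
    assert_eq!(all.len(), 3);
    assert_eq!(all.iter().next_back().unwrap().submission_id, 1);
}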
@@ -103,19 +103,19 @@ impl PendingPool { pub(crate) fn best_with_unlocked( &self, unlocked: Vec>>, + base_fee: u64, ) -> BestTransactions { let mut best = self.best(); let mut submission_id = self.submission_id; for tx in unlocked { submission_id += 1; debug_assert!(!best.all.contains_key(tx.id()), "transaction already included"); - let priority = self.ordering.priority(&tx.transaction); + let priority = self.ordering.priority(&tx.transaction, base_fee); let tx_id = *tx.id(); - let transaction = PendingTransactionRef { submission_id, transaction: tx, priority }; + let transaction = PendingTransaction { submission_id, transaction: tx, priority }; if best.ancestor(&tx_id).is_none() { best.independent.insert(transaction.clone()); } - let transaction = Arc::new(PendingTransaction { transaction }); best.all.insert(tx_id, transaction); } @@ -126,25 +126,32 @@ impl PendingPool { pub(crate) fn all( &self, ) -> impl Iterator>> + '_ { - self.by_id.values().map(|tx| tx.transaction.transaction.clone()) + self.by_id.values().map(|tx| tx.transaction.clone()) } - /// Removes all transactions and their dependent transaction from the subpool that no longer - /// satisfy the given basefee (`tx.fee < basefee`) + /// Updates the pool with the new base fee. Reorders transactions by new priorities. Removes + /// from the subpool all transactions and their dependents that no longer satisfy the given + /// base fee (`tx.fee < base_fee`). /// /// Note: the transactions are not returned in a particular order. - pub(crate) fn enforce_basefee( + /// + /// # Returns + /// + /// Removed transactions that no longer satisfy the base fee. + pub(crate) fn update_base_fee( &mut self, - basefee: u64, + base_fee: u64, ) -> Vec>> { + // Create a collection for txs to remove . let mut to_remove = Vec::new(); + // Iterate over transactions, find the ones we need to remove and update others in place. { - let mut iter = self.by_id.iter().peekable(); + let mut iter = self.by_id.iter_mut().peekable(); while let Some((id, tx)) = iter.next() { - if tx.transaction.transaction.max_fee_per_gas() < basefee as u128 { - // this transaction no longer satisfies the basefee: remove it and all its - // descendants + if tx.transaction.max_fee_per_gas() < base_fee as u128 { + // This transaction no longer satisfies the basefee: remove it and all its + // descendants. to_remove.push(*id); 'this: while let Some((peek, _)) = iter.peek() { if peek.sender != id.sender { @@ -153,6 +160,13 @@ impl PendingPool { to_remove.push(**peek); iter.next(); } + } else { + // Update the transaction with new priority. + let new_priority = + self.ordering.priority(&tx.transaction.transaction, base_fee); + tx.priority = new_priority; + + self.all.insert(tx.clone()); } } } @@ -162,6 +176,10 @@ impl PendingPool { removed.push(self.remove_transaction(&id).expect("transaction exists")); } + // Clear ordered lists since the priority would be changed. + self.independent_transactions.clear(); + self.all.clear(); + removed } @@ -169,7 +187,7 @@ impl PendingPool { /// /// Note: for a transaction with nonce higher than the current on chain nonce this will always /// return an ancestor since all transaction in this pool are gapless. - fn ancestor(&self, id: &TransactionId) -> Option<&Arc>> { + fn ancestor(&self, id: &TransactionId) -> Option<&PendingTransaction> { self.by_id.get(&id.unchecked_ancestor()?) 
} @@ -178,33 +196,34 @@ impl PendingPool { /// # Panics /// /// if the transaction is already included - pub(crate) fn add_transaction(&mut self, tx: Arc>) { + pub(crate) fn add_transaction( + &mut self, + tx: Arc>, + base_fee: u64, + ) { assert!( !self.by_id.contains_key(tx.id()), "transaction already included {:?}", self.by_id.contains_key(tx.id()) ); - let tx_id = *tx.id(); - let submission_id = self.next_id(); - - let priority = self.ordering.priority(&tx.transaction); - // keep track of size self.size_of += tx.size(); - let transaction = PendingTransactionRef { submission_id, transaction: tx, priority }; + let tx_id = *tx.id(); + + let submission_id = self.next_id(); + let priority = self.ordering.priority(&tx.transaction, base_fee); + let tx = PendingTransaction { submission_id, transaction: tx, priority }; // If there's __no__ ancestor in the pool, then this transaction is independent, this is // guaranteed because this pool is gapless. if self.ancestor(&tx_id).is_none() { - self.independent_transactions.insert(transaction.clone()); + self.independent_transactions.insert(tx.clone()); } - self.all.insert(transaction.clone()); - - let transaction = Arc::new(PendingTransaction { transaction }); + self.all.insert(tx.clone()); - self.by_id.insert(tx_id, transaction); + self.by_id.insert(tx_id, tx); } /// Removes a _mined_ transaction from the pool. @@ -216,7 +235,7 @@ impl PendingPool { ) -> Option>> { // mark the next as independent if it exists if let Some(unlocked) = self.by_id.get(&id.descendant()) { - self.independent_transactions.insert(unlocked.transaction.clone()); + self.independent_transactions.insert(unlocked.clone()); }; self.remove_transaction(id) } @@ -229,10 +248,10 @@ impl PendingPool { id: &TransactionId, ) -> Option>> { let tx = self.by_id.remove(id)?; - self.all.remove(&tx.transaction); - self.size_of -= tx.transaction.transaction.size(); - self.independent_transactions.remove(&tx.transaction); - Some(tx.transaction.transaction.clone()) + self.all.remove(&tx); + self.size_of -= tx.transaction.size(); + self.independent_transactions.remove(&tx); + Some(tx.transaction.clone()) } fn next_id(&mut self) -> u64 { @@ -266,34 +285,22 @@ impl PendingPool { /// A transaction that is ready to be included in a block. pub(crate) struct PendingTransaction { - /// Reference to the actual transaction. - pub(crate) transaction: PendingTransactionRef, -} - -impl Clone for PendingTransaction { - fn clone(&self) -> Self { - Self { transaction: self.transaction.clone() } - } -} - -/// A transaction that is ready to be included in a block. -pub(crate) struct PendingTransactionRef { /// Identifier that tags when transaction was submitted in the pool. pub(crate) submission_id: u64, /// Actual transaction. pub(crate) transaction: Arc>, /// The priority value assigned by the used `Ordering` function. 
- pub(crate) priority: T::Priority, + pub(crate) priority: Priority, } -impl PendingTransactionRef { +impl PendingTransaction { /// The next transaction of the sender: `nonce + 1` pub(crate) fn unlocks(&self) -> TransactionId { self.transaction.transaction_id.descendant() } } -impl Clone for PendingTransactionRef { +impl Clone for PendingTransaction { fn clone(&self) -> Self { Self { submission_id: self.submission_id, @@ -303,21 +310,21 @@ impl Clone for PendingTransactionRef { } } -impl Eq for PendingTransactionRef {} +impl Eq for PendingTransaction {} -impl PartialEq for PendingTransactionRef { +impl PartialEq for PendingTransaction { fn eq(&self, other: &Self) -> bool { self.cmp(other) == Ordering::Equal } } -impl PartialOrd for PendingTransactionRef { +impl PartialOrd for PendingTransaction { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } -impl Ord for PendingTransactionRef { +impl Ord for PendingTransaction { fn cmp(&self, other: &Self) -> Ordering { // This compares by `priority` and only if two tx have the exact same priority this compares // the unique `submission_id`. This ensures that transactions with same priority are not @@ -338,15 +345,15 @@ mod tests { let mut f = MockTransactionFactory::default(); let mut pool = PendingPool::new(MockOrdering::default()); let tx = f.validated_arc(MockTransaction::eip1559().inc_price()); - pool.add_transaction(tx.clone()); + pool.add_transaction(tx.clone(), 0); assert!(pool.by_id.contains_key(tx.id())); assert_eq!(pool.len(), 1); - let removed = pool.enforce_basefee(0); + let removed = pool.update_base_fee(0); assert!(removed.is_empty()); - let removed = pool.enforce_basefee((tx.max_fee_per_gas() + 1) as u64); + let removed = pool.update_base_fee((tx.max_fee_per_gas() + 1) as u64); assert_eq!(removed.len(), 1); assert!(pool.is_empty()); } @@ -357,10 +364,10 @@ mod tests { let mut pool = PendingPool::new(MockOrdering::default()); let t = MockTransaction::eip1559().inc_price_by(10); let root_tx = f.validated_arc(t.clone()); - pool.add_transaction(root_tx.clone()); + pool.add_transaction(root_tx.clone(), 0); let descendant_tx = f.validated_arc(t.inc_nonce().decr_price()); - pool.add_transaction(descendant_tx.clone()); + pool.add_transaction(descendant_tx.clone(), 0); assert!(pool.by_id.contains_key(root_tx.id())); assert!(pool.by_id.contains_key(descendant_tx.id())); @@ -368,14 +375,14 @@ mod tests { assert_eq!(pool.independent_transactions.len(), 1); - let removed = pool.enforce_basefee(0); + let removed = pool.update_base_fee(0); assert!(removed.is_empty()); // two dependent tx in the pool with decreasing fee { let mut pool2 = pool.clone(); - let removed = pool2.enforce_basefee((descendant_tx.max_fee_per_gas() + 1) as u64); + let removed = pool2.update_base_fee((descendant_tx.max_fee_per_gas() + 1) as u64); assert_eq!(removed.len(), 1); assert_eq!(pool2.len(), 1); // descendant got popped @@ -384,7 +391,7 @@ mod tests { } // remove root transaction via fee - let removed = pool.enforce_basefee((root_tx.max_fee_per_gas() + 1) as u64); + let removed = pool.update_base_fee((root_tx.max_fee_per_gas() + 1) as u64); assert_eq!(removed.len(), 2); assert!(pool.is_empty()); } diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index cd0d607387d9..c42ad7dfc014 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -149,7 +149,8 @@ impl TxPool { } Ordering::Greater => { // increased base fee: recheck pending pool and remove all 
that are no longer valid - for tx in self.pending_pool.enforce_basefee(pending_basefee) { + let removed = self.pending_pool.update_base_fee(pending_basefee); + for tx in removed { let to = { let tx = self.all_transactions.txs.get_mut(tx.id()).expect("tx exists in set"); @@ -162,7 +163,8 @@ impl TxPool { } Ordering::Less => { // decreased base fee: recheck basefee pool and promote all that are now valid - for tx in self.basefee_pool.enforce_basefee(pending_basefee) { + let removed = self.basefee_pool.enforce_basefee(pending_basefee); + for tx in removed { let to = { let tx = self.all_transactions.txs.get_mut(tx.id()).expect("tx exists in set"); @@ -183,6 +185,7 @@ impl TxPool { let BlockInfo { last_seen_block_hash, last_seen_block_number, pending_basefee } = info; self.all_transactions.last_seen_block_hash = last_seen_block_hash; self.all_transactions.last_seen_block_number = last_seen_block_number; + self.all_transactions.pending_basefee = pending_basefee; self.update_basefee(pending_basefee) } @@ -211,7 +214,10 @@ impl TxPool { // base fee decreased, we need to move transactions from the basefee pool to the // pending pool let unlocked = self.basefee_pool.satisfy_base_fee_transactions(basefee); - Box::new(self.pending_pool.best_with_unlocked(unlocked)) + Box::new( + self.pending_pool + .best_with_unlocked(unlocked, self.all_transactions.pending_basefee), + ) } } } @@ -545,7 +551,7 @@ impl TxPool { self.queued_pool.add_transaction(tx); } SubPool::Pending => { - self.pending_pool.add_transaction(tx); + self.pending_pool.add_transaction(tx, self.all_transactions.pending_basefee); } SubPool::BaseFee => { self.basefee_pool.add_transaction(tx); diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 94d305032342..ed38faf49d27 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -4,7 +4,7 @@ use crate::{ identifier::{SenderIdentifiers, TransactionId}, pool::txpool::TxPool, traits::TransactionOrigin, - PoolTransaction, TransactionOrdering, ValidPoolTransaction, + PoolTransaction, Priority, TransactionOrdering, ValidPoolTransaction, }; use paste::paste; use rand::{ @@ -329,17 +329,6 @@ impl PoolTransaction for MockTransaction { } } - fn gas_cost(&self) -> U256 { - match self { - MockTransaction::Legacy { gas_price, gas_limit, .. } => { - U256::from(*gas_limit) * U256::from(*gas_price) - } - MockTransaction::Eip1559 { max_fee_per_gas, gas_limit, .. } => { - U256::from(*gas_limit) * U256::from(*max_fee_per_gas) - } - } - } - fn gas_limit(&self) -> u64 { self.get_gas_limit() } @@ -375,6 +364,13 @@ impl PoolTransaction for MockTransaction { Some(fee) } + fn priority_fee_or_price(&self) -> u128 { + match self { + MockTransaction::Legacy { gas_price, .. } => *gas_price, + MockTransaction::Eip1559 { max_priority_fee_per_gas, .. } => *max_priority_fee_per_gas, + } + } + fn kind(&self) -> &TransactionKind { match self { MockTransaction::Legacy { to, .. 
} => to, @@ -588,11 +584,15 @@ impl MockTransactionFactory { pub struct MockOrdering; impl TransactionOrdering for MockOrdering { - type Priority = U256; + type PriorityValue = U256; type Transaction = MockTransaction; - fn priority(&self, transaction: &Self::Transaction) -> Self::Priority { - transaction.gas_cost() + fn priority( + &self, + transaction: &Self::Transaction, + base_fee: u64, + ) -> Priority { + transaction.effective_tip_per_gas(base_fee).map(U256::from).into() } } @@ -634,5 +634,5 @@ fn test_mock_priority() { let o = MockOrdering; let lo = MockTransaction::eip1559().with_gas_limit(100_000); let hi = lo.next().inc_price(); - assert!(o.priority(&hi) > o.priority(&lo)); + assert!(o.priority(&hi, 0) > o.priority(&lo, 0)); } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index c4c1b513a44c..2599b0db0d98 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -470,12 +470,6 @@ pub trait PoolTransaction: /// For legacy transactions: `gas_price * gas_limit + tx_value`. fn cost(&self) -> U256; - /// Returns the gas cost for this transaction. - /// - /// For EIP-1559 transactions: `max_fee_per_gas * gas_limit`. - /// For legacy transactions: `gas_price * gas_limit`. - fn gas_cost(&self) -> U256; - /// Amount of gas that should be used in executing this transaction. This is paid up-front. fn gas_limit(&self) -> u64; @@ -497,6 +491,10 @@ pub trait PoolTransaction: /// For legacy transactions: `gas_price - base_fee`. fn effective_tip_per_gas(&self, base_fee: u64) -> Option; + /// Returns the max priority fee per gas if the transaction is an EIP-1559 transaction, and + /// otherwise returns the gas price. + fn priority_fee_or_price(&self) -> u128; + /// Returns the transaction's [`TransactionKind`], which is the address of the recipient or /// [`TransactionKind::Create`] if the transaction is a contract creation. fn kind(&self) -> &TransactionKind; @@ -531,10 +529,6 @@ pub struct PooledTransaction { /// For EIP-1559 transactions: `max_fee_per_gas * gas_limit + tx_value`. /// For legacy transactions: `gas_price * gas_limit + tx_value`. pub(crate) cost: U256, - - /// For EIP-1559 transactions: `max_fee_per_gas * gas_limit`. - /// For legacy transactions: `gas_price * gas_limit`. - pub(crate) gas_cost: U256, } impl PooledTransaction { @@ -568,14 +562,6 @@ impl PoolTransaction for PooledTransaction { self.cost } - /// Returns the gas cost for this transaction. - /// - /// For EIP-1559 transactions: `max_fee_per_gas * gas_limit + tx_value`. - /// For legacy transactions: `gas_price * gas_limit + tx_value`. - fn gas_cost(&self) -> U256 { - self.gas_cost - } - /// Amount of gas that should be used in executing this transaction. This is paid up-front. fn gas_limit(&self) -> u64 { self.transaction.gas_limit() @@ -613,6 +599,12 @@ impl PoolTransaction for PooledTransaction { self.transaction.effective_tip_per_gas(base_fee) } + /// Returns the max priority fee per gas if the transaction is an EIP-1559 transaction, and + /// otherwise returns the gas price. + fn priority_fee_or_price(&self) -> u128 { + self.transaction.priority_fee_or_price() + } + /// Returns the transaction's [`TransactionKind`], which is the address of the recipient or /// [`TransactionKind::Create`] if the transaction is a contract creation. 
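// --- Illustrative only, with simplified stand-in types rather than reth's:
// the rule the new `priority_fee_or_price` method encodes for the two
// transaction kinds handled above.
enum Tx {
    Legacy { gas_price: u128 },
    Eip1559 { max_priority_fee_per_gas: u128 },
}

fn priority_fee_or_price(tx: &Tx) -> u128 {
    match tx {
        // Legacy transactions have no separate tip, so the gas price stands in.
        Tx::Legacy { gas_price } => *gas_price,
        // EIP-1559 transactions report their priority fee cap.
        Tx::Eip1559 { max_priority_fee_per_gas } => *max_priority_fee_per_gas,
    }
}

fn main() {
    assert_eq!(priority_fee_or_price(&Tx::Legacy { gas_price: 7 }), 7);
    assert_eq!(priority_fee_or_price(&Tx::Eip1559 { max_priority_fee_per_gas: 2 }), 2);
}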
fn kind(&self) -> &TransactionKind { @@ -649,7 +641,7 @@ impl FromRecoveredTransaction for PooledTransaction { }; let cost = gas_cost + U256::from(tx.value()); - PooledTransaction { transaction: tx, cost, gas_cost } + PooledTransaction { transaction: tx, cost } } } diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 3b374e5ab811..78c904aa0c10 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -164,14 +164,6 @@ impl ValidPoolTransaction { self.transaction.cost() } - /// Returns the effective tip for this transaction. - /// - /// For EIP-1559 transactions: `max_fee_per_gas * gas_limit`. - /// For legacy transactions: `gas_price * gas_limit`. - pub fn gas_cost(&self) -> U256 { - self.transaction.gas_cost() - } - /// Returns the EIP-1559 Max base fee the caller is willing to pay. /// /// For legacy transactions this is `gas_price`. @@ -187,6 +179,12 @@ impl ValidPoolTransaction { self.transaction.effective_tip_per_gas(base_fee) } + /// Returns the max priority fee per gas if the transaction is an EIP-1559 transaction, and + /// otherwise returns the gas price. + pub fn priority_fee_or_price(&self) -> u128 { + self.transaction.priority_fee_or_price() + } + /// Maximum amount of gas that the transaction is allowed to consume. pub fn gas_limit(&self) -> u64 { self.transaction.gas_limit() diff --git a/examples/network-txpool.rs b/examples/network-txpool.rs index d85426d1fa6f..afabcb17f1af 100644 --- a/examples/network-txpool.rs +++ b/examples/network-txpool.rs @@ -10,7 +10,7 @@ use reth_network::{config::rng_secret_key, NetworkConfig, NetworkManager}; use reth_provider::test_utils::NoopProvider; use reth_transaction_pool::{ - GasCostOrdering, PoolTransaction, PooledTransaction, TransactionOrigin, TransactionPool, + CoinbaseTipOrdering, PoolTransaction, PooledTransaction, TransactionOrigin, TransactionPool, TransactionValidationOutcome, TransactionValidator, }; @@ -24,7 +24,7 @@ async fn main() -> eyre::Result<()> { let pool = reth_transaction_pool::Pool::new( OkValidator::default(), - GasCostOrdering::default(), + CoinbaseTipOrdering::default(), Default::default(), ); From efd4895c487b6e631def905ef4f54825a521ab82 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 28 Jul 2023 05:39:46 -0400 Subject: [PATCH 278/722] chore(rpc-types): remove EngineRpcError (#3975) --- crates/rpc/rpc-types/src/eth/engine/error.rs | 58 -------------------- crates/rpc/rpc-types/src/eth/engine/mod.rs | 3 +- 2 files changed, 1 insertion(+), 60 deletions(-) delete mode 100644 crates/rpc/rpc-types/src/eth/engine/error.rs diff --git a/crates/rpc/rpc-types/src/eth/engine/error.rs b/crates/rpc/rpc-types/src/eth/engine/error.rs deleted file mode 100644 index 36feba4dfd88..000000000000 --- a/crates/rpc/rpc-types/src/eth/engine/error.rs +++ /dev/null @@ -1,58 +0,0 @@ -//! Commonly used errors for the `engine_` namespace. - -/// List of Engine API errors used in RPC, see -/// -/// Note: These are all errors that can be returned by the `engine_` namespace in the error case. -/// -/// TODO: get rid of this -#[derive(Debug, Copy, PartialEq, Eq, Clone, thiserror::Error)] -pub enum EngineRpcError { - /// Invalid JSON was received by the server. - #[error("Invalid JSON was received by the server")] - ParseError, - /// The JSON sent is not a valid Request object. 
- #[error("The JSON sent is not a valid Request object")] - InvalidRequest, - /// The method does not exist / is not available. - #[error("The method does not exist / is not available")] - MethodNotFound, - /// Invalid method parameter(s). - #[error("Invalid method parameter(s)")] - InvalidParams, - /// Internal JSON-RPC error. - #[error("Internal JSON-RPC error")] - InternalError, - /// Generic client error while processing request. - #[error("Server error")] - ServerError, - /// Payload does not exist / is not available. - #[error("Unknown payload")] - UnknownPayload, - /// Forkchoice state is invalid / inconsistent. - #[error("Invalid forkchoice state")] - InvalidForkchoiceState, - /// Payload attributes are invalid / inconsistent. - #[error("Invalid payload attributes")] - InvalidPayloadAttributes, - /// Number of requested entities is too large. - #[error("Too large request")] - TooLargeRequest, -} - -impl EngineRpcError { - /// Returns the error code as `i32` - pub const fn code(&self) -> i32 { - match *self { - EngineRpcError::ParseError => -32700, - EngineRpcError::InvalidRequest => -32600, - EngineRpcError::MethodNotFound => -32601, - EngineRpcError::InvalidParams => -32602, - EngineRpcError::InternalError => -32603, - EngineRpcError::ServerError => -32000, - EngineRpcError::UnknownPayload => -38001, - EngineRpcError::InvalidForkchoiceState => -38002, - EngineRpcError::InvalidPayloadAttributes => -38003, - EngineRpcError::TooLargeRequest => -38004, - } - } -} diff --git a/crates/rpc/rpc-types/src/eth/engine/mod.rs b/crates/rpc/rpc-types/src/eth/engine/mod.rs index 5212588decca..2a814374fb26 100644 --- a/crates/rpc/rpc-types/src/eth/engine/mod.rs +++ b/crates/rpc/rpc-types/src/eth/engine/mod.rs @@ -2,12 +2,11 @@ #![allow(missing_docs)] -mod error; mod forkchoice; mod payload; mod transition; -pub use self::{error::*, forkchoice::*, payload::*, transition::*}; +pub use self::{forkchoice::*, payload::*, transition::*}; /// The list of supported Engine capabilities pub const CAPABILITIES: [&str; 9] = [ From 4d31b3ae613707ffbe22db38f5fcac06a7ca90b0 Mon Sep 17 00:00:00 2001 From: abdullathedruid Date: Fri, 28 Jul 2023 10:45:07 +0100 Subject: [PATCH 279/722] chore: update link to contributing/code of conduct in bug report (#3978) --- .github/ISSUE_TEMPLATE/bug.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml index d17ebe3c1765..76e9bfcaaa2c 100644 --- a/.github/ISSUE_TEMPLATE/bug.yml +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -76,7 +76,7 @@ body: id: terms attributes: label: Code of Conduct - description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/foundry-rs/reth/blob/main/CODE_OF_CONDUCT.md) + description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/paradigmxyz/reth/blob/main/CONTRIBUTING.md#code-of-conduct) options: - label: I agree to follow the Code of Conduct required: true From d2cdd10ed268737c6a3c86321c8d6e320af9b179 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Altu=C4=9F=20Bakan?= Date: Fri, 28 Jul 2023 11:47:15 +0200 Subject: [PATCH 280/722] Add price bump config (#3967) --- bin/reth/src/args/txpool_args.rs | 7 +++++- crates/transaction-pool/src/config.rs | 27 ++++++++++++++++++++++ crates/transaction-pool/src/lib.rs | 8 +++---- crates/transaction-pool/src/pool/txpool.rs | 5 ++-- 4 files changed, 38 insertions(+), 9 deletions(-) diff --git a/bin/reth/src/args/txpool_args.rs b/bin/reth/src/args/txpool_args.rs index 
d1bd2326b692..2edfb20af698 100644 --- a/bin/reth/src/args/txpool_args.rs +++ b/bin/reth/src/args/txpool_args.rs @@ -2,7 +2,7 @@ use clap::Args; use reth_transaction_pool::{ - PoolConfig, SubPoolLimit, TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, + PoolConfig, SubPoolLimit, DEFAULT_PRICE_BUMP, TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, }; @@ -33,6 +33,10 @@ pub struct TxPoolArgs { /// Max number of executable transaction slots guaranteed per account #[arg(long = "txpool.max_account_slots", help_heading = "TxPool", default_value_t = TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER)] pub max_account_slots: usize, + + /// Price bump (in %) for the transaction pool underpriced check. + #[arg(long = "txpool.price_bump", help_heading = "TxPool", default_value_t = DEFAULT_PRICE_BUMP)] + pub price_bump: u128, } impl TxPoolArgs { @@ -52,6 +56,7 @@ impl TxPoolArgs { max_size: self.queued_max_size * 1024 * 1024, }, max_account_slots: self.max_account_slots, + price_bump: self.price_bump, } } } diff --git a/crates/transaction-pool/src/config.rs b/crates/transaction-pool/src/config.rs index faf0c0156b41..aa6896305b40 100644 --- a/crates/transaction-pool/src/config.rs +++ b/crates/transaction-pool/src/config.rs @@ -7,6 +7,12 @@ pub const TXPOOL_SUBPOOL_MAX_TXS_DEFAULT: usize = 10_000; /// The default maximum allowed size of the given subpool. pub const TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT: usize = 20; +/// Default price bump (in %) for the transaction pool underpriced check. +pub const DEFAULT_PRICE_BUMP: u128 = 10; + +/// Replace blob price bump (in %) for the transaction pool underpriced check. +pub const REPLACE_BLOB_PRICE_BUMP: u128 = 100; + /// Configuration options for the Transaction pool. #[derive(Debug, Clone)] pub struct PoolConfig { @@ -18,6 +24,8 @@ pub struct PoolConfig { pub queued_limit: SubPoolLimit, /// Max number of executable transaction slots guaranteed per account pub max_account_slots: usize, + /// Price bump (in %) for the transaction pool underpriced check. + pub price_bump: u128, } impl Default for PoolConfig { @@ -27,6 +35,7 @@ impl Default for PoolConfig { basefee_limit: Default::default(), queued_limit: Default::default(), max_account_slots: TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, + price_bump: PriceBumpConfig::default().default_price_bump, } } } @@ -57,3 +66,21 @@ impl Default for SubPoolLimit { } } } + +/// Price bump config (in %) for the transaction pool underpriced check. +#[derive(Debug, Clone)] +pub struct PriceBumpConfig { + /// Default price bump (in %) for the transaction pool underpriced check. + pub default_price_bump: u128, + /// Replace blob price bump (in %) for the transaction pool underpriced check. 
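
// (Editor's sketch: how a percentage bump drives the underpriced check.
// `is_replacement_underpriced` is a hypothetical helper for illustration,
// not the pool's actual code:)
//
// fn is_replacement_underpriced(old_fee: u128, new_fee: u128, price_bump: u128) -> bool {
//     new_fee < old_fee + old_fee * price_bump / 100
// }
//
// // With DEFAULT_PRICE_BUMP = 10, a pending transaction paying 100 gwei can
// // only be replaced by one paying at least 110 gwei.
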
+ pub replace_blob_tx_price_bump: u128, +} + +impl Default for PriceBumpConfig { + fn default() -> Self { + Self { + default_price_bump: DEFAULT_PRICE_BUMP, + replace_blob_tx_price_bump: REPLACE_BLOB_PRICE_BUMP, + } + } +} diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 768bb0b5cd09..104be7d9a249 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -153,8 +153,9 @@ use tracing::{instrument, trace}; pub use crate::{ config::{ - PoolConfig, SubPoolLimit, TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, - TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, + PoolConfig, PriceBumpConfig, SubPoolLimit, DEFAULT_PRICE_BUMP, REPLACE_BLOB_PRICE_BUMP, + TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, + TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, }, error::PoolResult, ordering::{CoinbaseTipOrdering, Priority, TransactionOrdering}, @@ -207,9 +208,6 @@ pub(crate) const MAX_CODE_SIZE: usize = 24576; // Maximum initcode to permit in a creation transaction and create instructions pub(crate) const MAX_INIT_CODE_SIZE: usize = 2 * MAX_CODE_SIZE; -// Price bump (in %) for the transaction pool underpriced check -pub(crate) const PRICE_BUMP: u128 = 10; - /// A shareable, generic, customizable `TransactionPool` implementation. #[derive(Debug)] pub struct Pool { diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index c42ad7dfc014..292206998766 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -13,8 +13,7 @@ use crate::{ AddedPendingTransaction, AddedTransaction, OnNewCanonicalStateOutcome, }, traits::{BlockInfo, PoolSize}, - PoolConfig, PoolResult, PoolTransaction, TransactionOrdering, ValidPoolTransaction, PRICE_BUMP, - U256, + PoolConfig, PoolResult, PoolTransaction, TransactionOrdering, ValidPoolTransaction, U256, }; use fnv::FnvHashMap; use reth_primitives::{ @@ -1111,7 +1110,7 @@ impl AllTransactions { if Self::is_underpriced( transaction.as_ref(), entry.get().transaction.as_ref(), - PRICE_BUMP, + PoolConfig::default().price_bump, ) { return Err(InsertErr::Underpriced { transaction: pool_tx.transaction, From 0892833842c3e7f5c4efeb725cafc190b8dc20b0 Mon Sep 17 00:00:00 2001 From: Nicolas Gotchac Date: Fri, 28 Jul 2023 13:00:37 +0200 Subject: [PATCH 281/722] refactor(rpc): simplify the inner definitions of topics & address filters (#3876) Co-authored-by: Matthias Seitz --- Cargo.lock | 1 + crates/primitives/Cargo.toml | 2 +- crates/rpc/rpc-types/Cargo.toml | 1 + crates/rpc/rpc-types/src/eth/filter.rs | 643 +++++++++++-------------- crates/rpc/rpc-types/src/eth/pubsub.rs | 2 +- crates/rpc/rpc/src/eth/filter.rs | 4 +- examples/db-access.rs | 4 +- 7 files changed, 276 insertions(+), 381 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f41fa41feacb..4f6843560f4c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5792,6 +5792,7 @@ dependencies = [ name = "reth-rpc-types" version = "0.1.0-alpha.4" dependencies = [ + "itertools 0.10.5", "jsonrpsee-types", "rand 0.8.5", "reth-primitives", diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index cd4913698e21..d181a462f1f3 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -22,7 +22,7 @@ crunchy = { version = "0.2.2", default-features = false, features = ["limit_256" ruint = { version = "1.9.0", features = ["primitive-types", "rlp"] } # Bloom -fixed-hash = { version = "0.8", default-features = false, features = 
["rustc-hex"] } + fixed-hash = { version = "0.8", default-features = false, features = ["rustc-hex"] } # crypto secp256k1 = { workspace = true, default-features = false, features = [ diff --git a/crates/rpc/rpc-types/Cargo.toml b/crates/rpc/rpc-types/Cargo.toml index 71c1b80be372..0ebeeff0cefb 100644 --- a/crates/rpc/rpc-types/Cargo.toml +++ b/crates/rpc/rpc-types/Cargo.toml @@ -19,6 +19,7 @@ reth-rlp.workspace = true thiserror.workspace = true # misc +itertools = "0.10" serde = { workspace = true, features = ["derive"] } serde_json.workspace = true jsonrpsee-types = { version = "0.18" } diff --git a/crates/rpc/rpc-types/src/eth/filter.rs b/crates/rpc/rpc-types/src/eth/filter.rs index d4b6365ccc1c..29ca1f4124d7 100644 --- a/crates/rpc/rpc-types/src/eth/filter.rs +++ b/crates/rpc/rpc-types/src/eth/filter.rs @@ -1,4 +1,5 @@ use crate::Log as RpcLog; +use itertools::{EitherOrBoth::*, Itertools}; use jsonrpsee_types::SubscriptionId; use reth_primitives::{ bloom::{Bloom, Input}, @@ -9,13 +10,120 @@ use serde::{ ser::SerializeStruct, Deserialize, Deserializer, Serialize, Serializer, }; -use std::ops::{Range, RangeFrom, RangeTo}; +use std::{ + collections::HashSet, + hash::Hash, + ops::{Range, RangeFrom, RangeTo}, +}; /// Helper type to represent a bloom filter used for matching logs. -pub type BloomFilter = Vec>; +#[derive(Default, Debug)] +pub struct BloomFilter(Vec); + +impl From> for BloomFilter { + fn from(src: Vec) -> Self { + BloomFilter(src) + } +} + +impl BloomFilter { + /// Returns whether the given bloom matches the list of Blooms in the current filter. + /// If the filter is empty (the list is empty), then any bloom matches + /// Otherwise, there must be at least one matche for the BloomFilter to match. + pub fn matches(&self, bloom: Bloom) -> bool { + self.0.is_empty() || self.0.iter().any(|a| bloom.contains_bloom(a)) + } +} + +#[derive(Default, Debug, PartialEq, Eq, Clone, Deserialize)] +/// FilterSet is a set of values that will be used to filter logs +pub struct FilterSet(HashSet); + +impl From for FilterSet { + fn from(src: T) -> Self { + FilterSet(HashSet::from([src])) + } +} + +impl From> for FilterSet { + fn from(src: Vec) -> Self { + FilterSet(HashSet::from_iter(src.into_iter().map(Into::into))) + } +} + +impl From> for FilterSet { + fn from(src: ValueOrArray) -> Self { + match src { + ValueOrArray::Value(val) => val.into(), + ValueOrArray::Array(arr) => arr.into(), + } + } +} + +impl From>> for FilterSet { + fn from(src: ValueOrArray>) -> Self { + match src { + ValueOrArray::Value(None) => FilterSet(HashSet::new()), + ValueOrArray::Value(Some(val)) => val.into(), + ValueOrArray::Array(arr) => { + // If the array contains at least one `null` (ie. None), as it's considered + // a "wildcard" value, the whole filter should be treated as matching everything, + // thus is empty. + if arr.iter().contains(&None) { + FilterSet(HashSet::new()) + } else { + // Otherwise, we flatten the array, knowing there are no `None` values + arr.into_iter().flatten().collect::>().into() + } + } + } + } +} + +impl FilterSet { + /// Returns wheter the filter is empty + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Returns whether the given value matches the filter. It the filter is empty + /// any value matches. 
Otherwise, the filter must include the value + pub fn matches(&self, value: &T) -> bool { + self.is_empty() || self.0.contains(value) + } +} + +impl + Eq + Hash> FilterSet { + /// Returns a list of Bloom (BloomFilter) corresponding to the filter's values + pub fn to_bloom_filter(&self) -> BloomFilter { + self.0.iter().map(|a| Input::Raw(a.as_ref()).into()).collect::>().into() + } +} + +impl FilterSet { + /// Returns a ValueOrArray inside an Option, so that: + /// - If the filter is empty, it returns None + /// - If the filter has only 1 value, it returns the single value + /// - Otherwise it returns an array of values + /// This should be useful for serialization + pub fn to_value_or_array(&self) -> Option> { + let mut values = self.0.iter().cloned().collect::>(); + match values.len() { + 0 => None, + 1 => Some(ValueOrArray::Value(values.pop().expect("values length is one"))), + _ => Some(ValueOrArray::Array(values)), + } + } +} /// A single topic -pub type Topic = ValueOrArray>; +pub type Topic = FilterSet; + +impl From for Topic { + fn from(src: U256) -> Self { + Into::::into(src).into() + } +} /// Represents the target range of blocks for the filter #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] @@ -141,16 +249,16 @@ impl FilterBlockOption { } /// Filter for -#[derive(Default, Debug, PartialEq, Eq, Clone, Hash)] +#[derive(Default, Debug, PartialEq, Eq, Clone)] pub struct Filter { /// Filter block options, specifying on which blocks the filter should /// match. // https://eips.ethereum.org/EIPS/eip-234 pub block_option: FilterBlockOption, /// Address - pub address: Option>, + pub address: FilterSet
, /// Topics (maxmimum of 4) - pub topics: [Option; 4], + pub topics: [Topic; 4], } impl Filter { @@ -279,7 +387,7 @@ impl Filter { /// ``` #[must_use] pub fn address>>(mut self, address: T) -> Self { - self.address = Some(address.into()); + self.address = address.into().into(); self } @@ -300,28 +408,28 @@ impl Filter { /// Sets topic0 (the event name for non-anonymous events) #[must_use] pub fn topic0>(mut self, topic: T) -> Self { - self.topics[0] = Some(topic.into()); + self.topics[0] = topic.into(); self } /// Sets the 1st indexed topic #[must_use] pub fn topic1>(mut self, topic: T) -> Self { - self.topics[1] = Some(topic.into()); + self.topics[1] = topic.into(); self } /// Sets the 2nd indexed topic #[must_use] pub fn topic2>(mut self, topic: T) -> Self { - self.topics[2] = Some(topic.into()); + self.topics[2] = topic.into(); self } /// Sets the 3rd indexed topic #[must_use] pub fn topic3>(mut self, topic: T) -> Self { - self.topics[3] = Some(topic.into()); + self.topics[3] = topic.into(); self } @@ -348,64 +456,9 @@ impl Filter { } } - /// Flattens the topics using the cartesian product - fn flatten(&self) -> Vec>> { - fn cartesian(lists: &[Vec>]) -> Vec>> { - let mut res = Vec::new(); - let mut list_iter = lists.iter(); - if let Some(first_list) = list_iter.next() { - for &i in first_list { - res.push(vec![i]); - } - } - for l in list_iter { - let mut tmp = Vec::new(); - for r in res { - for &el in l { - let mut tmp_el = r.clone(); - tmp_el.push(el); - tmp.push(tmp_el); - } - } - res = tmp; - } - res - } - let mut out = Vec::new(); - let mut tmp = Vec::new(); - for v in self.topics.iter() { - let v = if let Some(v) = v { - match v { - ValueOrArray::Value(s) => { - vec![*s] - } - ValueOrArray::Array(s) => { - if s.is_empty() { - vec![None] - } else { - s.clone() - } - } - } - } else { - vec![None] - }; - tmp.push(v); - } - for v in cartesian(&tmp) { - out.push(ValueOrArray::Array(v)); - } - out - } - - /// Returns an iterator over all existing topics - pub fn topics(&self) -> impl Iterator + '_ { - self.topics.iter().flatten() - } - /// Returns true if at least one topic is set pub fn has_topics(&self) -> bool { - self.topics.iter().any(|t| t.is_some()) + self.topics.iter().any(|t| !t.is_empty()) } } @@ -429,27 +482,28 @@ impl Serialize for Filter { FilterBlockOption::AtBlockHash(ref h) => s.serialize_field("blockHash", h)?, } - if let Some(ref address) = self.address { - s.serialize_field("address", address)?; + if let Some(address) = self.address.to_value_or_array() { + s.serialize_field("address", &address)?; } let mut filtered_topics = Vec::new(); - for i in 0..4 { - if self.topics[i].is_some() { - filtered_topics.push(&self.topics[i]); - } else { - // TODO: This can be optimized - if self.topics[i + 1..].iter().any(|x| x.is_some()) { - filtered_topics.push(&None); - } + let mut filtered_topics_len = 0; + for (i, topic) in self.topics.iter().enumerate() { + if !topic.is_empty() { + filtered_topics_len = i + 1; } + filtered_topics.push(topic.to_value_or_array()); } + filtered_topics.truncate(filtered_topics_len); s.serialize_field("topics", &filtered_topics)?; s.end() } } +type RawAddressFilter = ValueOrArray>; +type RawTopicsFilter = Vec>>>; + impl<'de> Deserialize<'de> for Filter { fn deserialize(deserializer: D) -> Result where @@ -471,8 +525,8 @@ impl<'de> Deserialize<'de> for Filter { let mut from_block: Option> = None; let mut to_block: Option> = None; let mut block_hash: Option> = None; - let mut address: Option>> = None; - let mut topics: Option>>> = None; + let mut 
address: Option> = None; + let mut topics: Option> = None; while let Some(key) = map.next_key::()? { match key.as_str() { @@ -534,16 +588,21 @@ impl<'de> Deserialize<'de> for Filter { let from_block = from_block.unwrap_or_default(); let to_block = to_block.unwrap_or_default(); let block_hash = block_hash.unwrap_or_default(); - let address = address.unwrap_or_default(); + let address = address.flatten().map(|a| a.into()).unwrap_or_default(); let topics_vec = topics.flatten().unwrap_or_default(); // maximum allowed filter len if topics_vec.len() > 4 { return Err(serde::de::Error::custom("exceeded maximum topics len")) } - let mut topics: [Option; 4] = [None, None, None, None]; + let mut topics: [Topic; 4] = [ + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ]; for (idx, topic) in topics_vec.into_iter().enumerate() { - topics[idx] = topic; + topics[idx] = topic.map(|t| t.into()).unwrap_or_default(); } let block_option = if let Some(block_hash) = block_hash { @@ -581,47 +640,12 @@ impl From> for ValueOrArray { } } -impl From for Topic { - fn from(src: H256) -> Self { - ValueOrArray::Value(Some(src)) - } -} - impl From> for ValueOrArray { fn from(src: Vec) -> Self { ValueOrArray::Array(src) } } -impl From> for Topic { - fn from(src: ValueOrArray) -> Self { - match src { - ValueOrArray::Value(val) => ValueOrArray::Value(Some(val)), - ValueOrArray::Array(arr) => arr.into(), - } - } -} - -impl> From> for Topic { - fn from(src: Vec) -> Self { - ValueOrArray::Array(src.into_iter().map(Into::into).map(Some).collect()) - } -} - -impl From
for Topic { - fn from(src: Address) -> Self { - let mut bytes = [0; 32]; - bytes[12..32].copy_from_slice(src.as_bytes()); - ValueOrArray::Value(Some(H256::from(bytes))) - } -} - -impl From for Topic { - fn from(src: U256) -> Self { - ValueOrArray::Value(Some(src.into())) - } -} - impl Serialize for ValueOrArray where T: Serialize, @@ -672,8 +696,6 @@ where pub struct FilteredParams { /// The original filter, if any pub filter: Option, - /// Flattened topics of the `filter` used to determine if the the filter matches a log. - pub flat_topics: Vec>>, } impl FilteredParams { @@ -681,86 +703,43 @@ impl FilteredParams { /// for matching pub fn new(filter: Option) -> Self { if let Some(filter) = filter { - let flat_topics = filter.flatten(); - FilteredParams { filter: Some(filter), flat_topics } + FilteredParams { filter: Some(filter) } } else { Default::default() } } /// Returns the [BloomFilter] for the given address - pub fn address_filter(address: &Option>) -> BloomFilter { - address.as_ref().map(address_to_bloom_filter).unwrap_or_default() + pub fn address_filter(address: &FilterSet
) -> BloomFilter { + address.to_bloom_filter() } /// Returns the [BloomFilter] for the given topics - pub fn topics_filter(topics: &Option>>>) -> Vec { - let mut output = Vec::new(); - if let Some(topics) = topics { - output.extend(topics.iter().map(topics_to_bloom_filter)); - } - output + pub fn topics_filter(topics: &[FilterSet]) -> Vec { + topics.iter().map(|t| t.to_bloom_filter()).collect() } /// Returns `true` if the bloom matches the topics - pub fn matches_topics(bloom: Bloom, topic_filters: &[BloomFilter]) -> bool { + pub fn matches_topics(bloom: Bloom, topic_filters: &Vec) -> bool { if topic_filters.is_empty() { return true } - // returns true if a filter matches + // for each filter, iterate through the list of filter blooms. for each set of filter + // (each BloomFilter), the given `bloom` must match at least one of them, unless the list is + // empty (no filters). for filter in topic_filters.iter() { - let mut is_match = false; - for maybe_bloom in filter { - is_match = maybe_bloom.as_ref().map(|b| bloom.contains_bloom(b)).unwrap_or(true); - if !is_match { - break - } - } - if is_match { - return true + if !filter.matches(bloom) { + return false } } - false + true } - /// Returns `true` if the bloom contains the address + /// Returns `true` if the bloom contains one of the address blooms, or the address blooms + /// list is empty (thus, no filters) pub fn matches_address(bloom: Bloom, address_filter: &BloomFilter) -> bool { - if address_filter.is_empty() { - return true - } else { - for maybe_bloom in address_filter { - if maybe_bloom.as_ref().map(|b| bloom.contains_bloom(b)).unwrap_or(true) { - return true - } - } - } - false - } - - /// Replace None values - aka wildcards - for the log input value in that position. - pub fn replace(&self, log: &Log, topic: Topic) -> Option> { - let mut out: Vec = Vec::new(); - match topic { - ValueOrArray::Value(value) => { - if let Some(value) = value { - out.push(value); - } - } - ValueOrArray::Array(value) => { - for (k, v) in value.into_iter().enumerate() { - if let Some(v) = v { - out.push(v); - } else { - out.push(log.topics[k]); - } - } - } - }; - if out.is_empty() { - return None - } - Some(out) + address_filter.matches(bloom) } /// Returns true if the filter matches the given block number @@ -805,117 +784,37 @@ impl FilteredParams { /// Returns `true` if the filter matches the given log. pub fn filter_address(&self, log: &Log) -> bool { - if let Some(input_address) = &self.filter.as_ref().and_then(|f| f.address.clone()) { - match input_address { - ValueOrArray::Value(x) => { - if log.address != *x { - return false - } - } - ValueOrArray::Array(x) => { - if x.is_empty() { - return true - } - if !x.contains(&log.address) { - return false - } - } - } - } - true + self.filter.as_ref().map(|f| f.address.matches(&log.address)).unwrap_or(true) } - /// Returns `true` if the log matches any topic + /// Returns `true` if the log matches the filter's topics pub fn filter_topics(&self, log: &Log) -> bool { - let mut out: bool = true; - for topic in self.flat_topics.iter().cloned() { - match topic { - ValueOrArray::Value(single) => { - if let Some(single) = single { - if !log.topics.starts_with(&[single]) { - out = false; - } - } - } - ValueOrArray::Array(multi) => { - if multi.is_empty() { - out = true; - continue - } - // Shrink the topics until the last item is Some. 
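
// (Editor's aside: the `zip_longest`-based `filter_topics` further below
// replaces all of this wildcard plumbing. A sketch of the new semantics with
// hypothetical `sig: H256` and `log: Log` values: an empty FilterSet position
// is a wildcard, a non-empty one must contain the log's topic at that
// position.)
//
// let filter = Filter::default().topic0(sig); // topics[1..=3] stay empty
// let params = FilteredParams::new(Some(filter));
// assert_eq!(params.filter_topics(&log), log.topics.first() == Some(&sig));
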
- let mut new_multi = multi; - while new_multi.iter().last().unwrap_or(&Some(H256::default())).is_none() { - new_multi.pop(); - } - // We can discard right away any logs with lesser topics than the filter. - if new_multi.len() > log.topics.len() { - out = false; - break - } - let replaced: Option> = - self.replace(log, ValueOrArray::Array(new_multi)); - if let Some(replaced) = replaced { - out = false; - if log.topics.starts_with(&replaced[..]) { - out = true; - break - } + let topics = match self.filter.as_ref() { + None => return true, + Some(f) => &f.topics, + }; + for topic_tuple in topics.iter().zip_longest(log.topics.iter()) { + match topic_tuple { + // We exhausted the `log.topics`, so if there's a filter set for + // this topic index, there is no match. Otherwise (empty filter), continue. + Left(filter_topic) => { + if !filter_topic.is_empty() { + return false } } - } - } - out - } -} - -fn topics_to_bloom_filter(topics: &ValueOrArray>) -> BloomFilter { - let mut blooms = BloomFilter::new(); - match topics { - ValueOrArray::Value(topic) => { - if let Some(topic) = topic { - let bloom: Bloom = Input::Raw(topic.as_ref()).into(); - blooms.push(Some(bloom)); - } else { - blooms.push(None); - } - } - ValueOrArray::Array(topics) => { - if topics.is_empty() { - blooms.push(None); - } else { - for topic in topics.iter() { - if let Some(topic) = topic { - let bloom: Bloom = Input::Raw(topic.as_ref()).into(); - blooms.push(Some(bloom)); - } else { - blooms.push(None); + // We exhausted the filter topics, therefore any subsequent log topic + // will match. + Right(_) => return true, + // Check that `log_topic` is included in `filter_topic` + Both(filter_topic, log_topic) => { + if !filter_topic.matches(log_topic) { + return false } } } } + true } - blooms -} - -fn address_to_bloom_filter(address: &ValueOrArray
) -> BloomFilter { - let mut blooms = BloomFilter::new(); - match address { - ValueOrArray::Value(address) => { - let bloom: Bloom = Input::Raw(address.as_ref()).into(); - blooms.push(Some(bloom)) - } - ValueOrArray::Array(addresses) => { - if addresses.is_empty() { - blooms.push(None); - } else { - for address in addresses.iter() { - let bloom: Bloom = Input::Raw(address.as_ref()).into(); - blooms.push(Some(bloom)); - } - } - } - } - blooms } /// Response of the `eth_getFilterChanges` RPC. @@ -1007,6 +906,7 @@ impl From> for FilterId { #[cfg(test)] mod tests { use super::*; + use reth_primitives::U256; use serde_json::json; fn serialize(t: &T) -> serde_json::Value { @@ -1020,40 +920,36 @@ mod tests { similar_asserts::assert_eq!( filter.topics, [ - Some(ValueOrArray::Array(vec![Some( - "0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925" - .parse() - .unwrap() - ),])), - Some(ValueOrArray::Array(vec![])), - Some(ValueOrArray::Array(vec![Some( - "0x0000000000000000000000000c17e776cd218252adfca8d4e761d3fe757e9778" - .parse() - .unwrap() - )])), - None + "0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925" + .parse::() + .unwrap() + .into(), + Default::default(), + "0x0000000000000000000000000c17e776cd218252adfca8d4e761d3fe757e9778" + .parse::() + .unwrap() + .into(), + Default::default(), ] ); + } - let filtered_params = FilteredParams::new(Some(filter)); - let topics = filtered_params.flat_topics; - assert_eq!( - topics, - vec![ValueOrArray::Array(vec![ - Some( - "0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925" - .parse() - .unwrap() - ), - None, - Some( - "0x0000000000000000000000000c17e776cd218252adfca8d4e761d3fe757e9778" - .parse() - .unwrap() - ), - None - ])] - ) + #[test] + fn test_filter_topics_middle_wildcard() { + let s = r#"{"fromBlock": "0xfc359e", "toBlock": "0xfc359e", "topics": [["0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925"], [], [null, "0x0000000000000000000000000c17e776cd218252adfca8d4e761d3fe757e9778"]]}"#; + let filter = serde_json::from_str::(s).unwrap(); + similar_asserts::assert_eq!( + filter.topics, + [ + "0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925" + .parse::() + .unwrap() + .into(), + Default::default(), + Default::default(), + Default::default(), + ] + ); } #[test] @@ -1076,11 +972,13 @@ mod tests { #[test] fn filter_serialization_test() { - let t1 = "9729a6fbefefc8f6005933898b13dc45c3a2c8b7".parse::
().unwrap(); + let t1 = "0000000000000000000000009729a6fbefefc8f6005933898b13dc45c3a2c8b7" + .parse::() + .unwrap(); let t2 = H256::from([0; 32]); let t3 = U256::from(123); - let t1_padded = H256::from(t1); + let t1_padded = t1; let t3_padded = H256::from({ let mut x = [0; 32]; x[31] = 123; @@ -1143,24 +1041,17 @@ mod tests { block_bloom } - fn topic_filter( - topic1: H256, - topic2: H256, - topic3: H256, - ) -> (Filter, Option>>>) { - let filter = Filter { + fn topic_filter(topic1: H256, topic2: H256, topic3: H256) -> Filter { + Filter { block_option: Default::default(), - address: None, + address: Default::default(), topics: [ - Some(ValueOrArray::Value(Some(topic1))), - Some(ValueOrArray::Array(vec![Some(topic2), Some(topic3)])), - None, - None, + topic1.into(), + vec![topic2, topic3].into(), + Default::default(), + Default::default(), ], - }; - let filtered_params = FilteredParams::new(Some(filter.clone())); - - (filter, Some(filtered_params.flat_topics)) + } } #[test] @@ -1169,7 +1060,7 @@ mod tests { let topic2 = H256::random(); let topic3 = H256::random(); - let (_, topics) = topic_filter(topic1, topic2, topic3); + let topics = topic_filter(topic1, topic2, topic3).topics; let topics_bloom = FilteredParams::topics_filter(&topics); assert!(!FilteredParams::matches_topics( build_bloom(Address::random(), H256::random(), H256::random()), @@ -1183,7 +1074,7 @@ mod tests { let topic2 = H256::random(); let topic3 = H256::random(); - let (_, topics) = topic_filter(topic1, topic2, topic3); + let topics = topic_filter(topic1, topic2, topic3).topics; let _topics_bloom = FilteredParams::topics_filter(&topics); let topics_bloom = FilteredParams::topics_filter(&topics); @@ -1195,11 +1086,12 @@ mod tests { #[test] fn can_match_empty_topics() { - let filter = - Filter { block_option: Default::default(), address: None, topics: Default::default() }; - - let filtered_params = FilteredParams::new(Some(filter)); - let topics = Some(filtered_params.flat_topics); + let filter = Filter { + block_option: Default::default(), + address: Default::default(), + topics: Default::default(), + }; + let topics = filter.topics; let topics_bloom = FilteredParams::topics_filter(&topics); assert!(FilteredParams::matches_topics( @@ -1217,16 +1109,16 @@ mod tests { let filter = Filter { block_option: Default::default(), - address: Some(ValueOrArray::Value(rng_address)), + address: rng_address.into(), topics: [ - Some(ValueOrArray::Value(Some(topic1))), - Some(ValueOrArray::Array(vec![Some(topic2), Some(topic3)])), - None, - None, + topic1.into(), + vec![topic2, topic3].into(), + Default::default(), + Default::default(), ], }; - let filtered_params = FilteredParams::new(Some(filter.clone())); - let topics = Some(filtered_params.flat_topics); + let topics = filter.topics; + let address_filter = FilteredParams::address_filter(&filter.address); let topics_filter = FilteredParams::topics_filter(&topics); assert!( @@ -1248,11 +1140,16 @@ mod tests { let filter = Filter { block_option: Default::default(), - address: None, - topics: [None, Some(ValueOrArray::Array(vec![Some(topic2), Some(topic3)])), None, None], + address: Default::default(), + topics: [ + Default::default(), + vec![topic2, topic3].into(), + Default::default(), + Default::default(), + ], }; - let filtered_params = FilteredParams::new(Some(filter)); - let topics = Some(filtered_params.flat_topics); + let topics = filter.topics; + let topics_bloom = FilteredParams::topics_filter(&topics); assert!(FilteredParams::matches_topics( build_bloom(Address::random(), topic1, 
topic2), @@ -1264,16 +1161,16 @@ mod tests { fn can_match_topics_wildcard_mismatch() { let filter = Filter { block_option: Default::default(), - address: None, + address: Default::default(), topics: [ - None, - Some(ValueOrArray::Array(vec![Some(H256::random()), Some(H256::random())])), - None, - None, + Default::default(), + vec![H256::random(), H256::random()].into(), + Default::default(), + Default::default(), ], }; - let filtered_params = FilteredParams::new(Some(filter)); - let topics_input = Some(filtered_params.flat_topics); + let topics_input = filter.topics; + let topics_bloom = FilteredParams::topics_filter(&topics_input); assert!(!FilteredParams::matches_topics( build_bloom(Address::random(), H256::random(), H256::random()), @@ -1286,7 +1183,7 @@ mod tests { let rng_address = Address::random(); let filter = Filter { block_option: Default::default(), - address: Some(ValueOrArray::Value(rng_address)), + address: rng_address.into(), topics: Default::default(), }; let address_bloom = FilteredParams::address_filter(&filter.address); @@ -1302,7 +1199,7 @@ mod tests { let rng_address = Address::random(); let filter = Filter { block_option: Default::default(), - address: Some(ValueOrArray::Value(rng_address)), + address: rng_address.into(), topics: Default::default(), }; let address_bloom = FilteredParams::address_filter(&filter.address); @@ -1335,26 +1232,24 @@ mod tests { from_block: Some(4365627u64.into()), to_block: Some(4365627u64.into()), }, - address: Some(ValueOrArray::Value( - "0xb59f67a8bff5d8cd03f6ac17265c550ed8f33907".parse().unwrap() - )), + address: "0xb59f67a8bff5d8cd03f6ac17265c550ed8f33907" + .parse::
() + .unwrap() + .into(), topics: [ - Some(ValueOrArray::Value(Some( - "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef" - .parse() - .unwrap(), - ))), - Some(ValueOrArray::Value(Some( - "0x00000000000000000000000000b46c2526e227482e2ebb8f4c69e4674d262e75" - .parse() - .unwrap(), - ))), - Some(ValueOrArray::Value(Some( - "0x00000000000000000000000054a2d42a40f51259dedd1978f6c118a0f0eff078" - .parse() - .unwrap(), - ))), - None, + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef" + .parse::() + .unwrap() + .into(), + "0x00000000000000000000000000b46c2526e227482e2ebb8f4c69e4674d262e75" + .parse::() + .unwrap() + .into(), + "0x00000000000000000000000054a2d42a40f51259dedd1978f6c118a0f0eff078" + .parse::() + .unwrap() + .into(), + Default::default(), ], } ); @@ -1379,8 +1274,8 @@ mod tests { from_block: Some(4365627u64.into()), to_block: Some(4365627u64.into()), }, - address: None, - topics: [None, None, None, None,], + address: Default::default(), + topics: Default::default(), } ); } diff --git a/crates/rpc/rpc-types/src/eth/pubsub.rs b/crates/rpc/rpc-types/src/eth/pubsub.rs index d54f6f849a86..bf2b3b55d44a 100644 --- a/crates/rpc/rpc-types/src/eth/pubsub.rs +++ b/crates/rpc/rpc-types/src/eth/pubsub.rs @@ -97,7 +97,7 @@ pub enum SubscriptionKind { } /// Any additional parameters for a subscription. -#[derive(Debug, Clone, PartialEq, Eq, Hash, Default)] +#[derive(Debug, Clone, PartialEq, Eq, Default)] pub enum Params { /// No parameters passed. #[default] diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index e7b397307998..280bc0817f1a 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -363,11 +363,9 @@ where let mut all_logs = Vec::new(); let filter_params = FilteredParams::new(Some(filter.clone())); - let topics = filter.has_topics().then(|| filter_params.flat_topics.clone()); - // derive bloom filters from filter input let address_filter = FilteredParams::address_filter(&filter.address); - let topics_filter = FilteredParams::topics_filter(&topics); + let topics_filter = FilteredParams::topics_filter(&filter.topics); let is_multi_block_range = from_block != to_block; diff --git a/examples/db-access.rs b/examples/db-access.rs index 5b8aec77f4ff..60089dba75b2 100644 --- a/examples/db-access.rs +++ b/examples/db-access.rs @@ -186,8 +186,8 @@ fn receipts_provider_example Date: Fri, 28 Jul 2023 14:03:07 +0300 Subject: [PATCH 282/722] feat(cli): in-memory merkle debug script (#3895) --- Cargo.lock | 2 + bin/reth/Cargo.toml | 2 + bin/reth/src/debug_cmd/execution.rs | 2 +- bin/reth/src/debug_cmd/in_memory_merkle.rs | 257 +++++++++++++++++++++ bin/reth/src/debug_cmd/mod.rs | 4 + bin/reth/src/utils.rs | 35 ++- crates/trie/src/updates.rs | 2 +- 7 files changed, 301 insertions(+), 3 deletions(-) create mode 100644 bin/reth/src/debug_cmd/in_memory_merkle.rs diff --git a/Cargo.lock b/Cargo.lock index 4f6843560f4c..45770d2efbdd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4969,6 +4969,7 @@ dependencies = [ "reth-beacon-consensus", "reth-blockchain-tree", "reth-config", + "reth-consensus-common", "reth-db", "reth-discv4", "reth-downloaders", @@ -4991,6 +4992,7 @@ dependencies = [ "reth-tasks", "reth-tracing", "reth-transaction-pool", + "reth-trie", "secp256k1", "serde", "serde_json", diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 5655605d9437..6a48f0be5a30 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -21,6 +21,7 @@ reth-interfaces = { workspace = true, features = 
["test-utils", "clap"] } reth-transaction-pool.workspace = true reth-beacon-consensus = { path = "../../crates/consensus/beacon" } reth-auto-seal-consensus = { path = "../../crates/consensus/auto-seal" } +reth-consensus-common = { path = "../../crates/consensus/common" } reth-blockchain-tree = { path = "../../crates/blockchain-tree" } reth-rpc-engine-api = { path = "../../crates/rpc/rpc-engine-api" } reth-rpc-builder = { path = "../../crates/rpc/rpc-builder" } @@ -37,6 +38,7 @@ reth-basic-payload-builder = { path = "../../crates/payload/basic" } reth-discv4 = { path = "../../crates/net/discv4" } reth-metrics.workspace = true reth-prune = { path = "../../crates/prune" } +reth-trie = { path = "../../crates/trie" } # crypto secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } diff --git a/bin/reth/src/debug_cmd/execution.rs b/bin/reth/src/debug_cmd/execution.rs index 88481b1cc147..82de9e055f41 100644 --- a/bin/reth/src/debug_cmd/execution.rs +++ b/bin/reth/src/debug_cmd/execution.rs @@ -42,7 +42,7 @@ use std::{ use tokio::sync::watch; use tracing::*; -/// `reth execution-debug` command +/// `reth debug execution` command #[derive(Debug, Parser)] pub struct Command { /// The path to the data dir for all reth files and subdirectories. diff --git a/bin/reth/src/debug_cmd/in_memory_merkle.rs b/bin/reth/src/debug_cmd/in_memory_merkle.rs new file mode 100644 index 000000000000..701b2196fa57 --- /dev/null +++ b/bin/reth/src/debug_cmd/in_memory_merkle.rs @@ -0,0 +1,257 @@ +//! Command for debugging in-memory merkle trie calculation. +use crate::{ + args::{get_secret_key, utils::genesis_value_parser, DatabaseArgs, NetworkArgs}, + dirs::{DataDirPath, MaybePlatformPath}, + runner::CliContext, + utils::{get_single_body, get_single_header}, +}; +use backon::{ConstantBuilder, Retryable}; +use clap::Parser; +use reth_config::Config; +use reth_db::{init_db, DatabaseEnv}; +use reth_discv4::DEFAULT_DISCOVERY_PORT; +use reth_network::NetworkHandle; +use reth_network_api::NetworkInfo; +use reth_primitives::{fs, stage::StageId, BlockHashOrNumber, ChainSpec}; +use reth_provider::{ + AccountExtReader, BlockExecutor, BlockWriter, ExecutorFactory, HashingWriter, HeaderProvider, + LatestStateProviderRef, ProviderFactory, StageCheckpointReader, StorageReader, +}; +use reth_tasks::TaskExecutor; +use reth_trie::{hashed_cursor::HashedPostStateCursorFactory, updates::TrieKey, StateRoot}; +use std::{ + net::{Ipv4Addr, SocketAddr, SocketAddrV4}, + path::PathBuf, + sync::Arc, +}; +use tracing::*; + +/// `reth debug in-memory-merkle` command +/// This debug routine requires that the node is positioned at the block before the target. +/// The script will then download the block from p2p network and attempt to calculate and verify +/// merkle root for it. +#[derive(Debug, Parser)] +pub struct Command { + /// The path to the data dir for all reth files and subdirectories. + /// + /// Defaults to the OS-specific data directory: + /// + /// - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + /// - Windows: `{FOLDERID_RoamingAppData}/reth/` + /// - macOS: `$HOME/Library/Application Support/reth/` + #[arg(long, value_name = "DATA_DIR", verbatim_doc_comment, default_value_t)] + datadir: MaybePlatformPath, + + /// The chain this node is running. + /// + /// Possible values are either a built-in chain or the path to a chain specification file. 
+ /// + /// Built-in chains: + /// - mainnet + /// - goerli + /// - sepolia + #[arg( + long, + value_name = "CHAIN_OR_PATH", + verbatim_doc_comment, + default_value = "mainnet", + value_parser = genesis_value_parser + )] + chain: Arc, + + #[clap(flatten)] + db: DatabaseArgs, + + #[clap(flatten)] + network: NetworkArgs, + + /// The number of retries per request + #[arg(long, default_value = "5")] + retries: usize, + + /// The depth after which we should start comparing branch nodes + #[arg(long)] + skip_node_depth: Option, +} + +impl Command { + async fn build_network( + &self, + config: &Config, + task_executor: TaskExecutor, + db: Arc, + network_secret_path: PathBuf, + default_peers_path: PathBuf, + ) -> eyre::Result { + let secret_key = get_secret_key(&network_secret_path)?; + let network = self + .network + .network_config(config, self.chain.clone(), secret_key, default_peers_path) + .with_task_executor(Box::new(task_executor)) + .listener_addr(SocketAddr::V4(SocketAddrV4::new( + Ipv4Addr::UNSPECIFIED, + self.network.port.unwrap_or(DEFAULT_DISCOVERY_PORT), + ))) + .discovery_addr(SocketAddr::V4(SocketAddrV4::new( + Ipv4Addr::UNSPECIFIED, + self.network.discovery.port.unwrap_or(DEFAULT_DISCOVERY_PORT), + ))) + .build(ProviderFactory::new(db, self.chain.clone())) + .start_network() + .await?; + info!(target: "reth::cli", peer_id = %network.peer_id(), local_addr = %network.local_addr(), "Connected to P2P network"); + debug!(target: "reth::cli", peer_id = ?network.peer_id(), "Full peer ID"); + Ok(network) + } + + /// Execute `debug in-memory-merkle` command + pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> { + let config = Config::default(); + + // add network name to data dir + let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); + let db_path = data_dir.db_path(); + fs::create_dir_all(&db_path)?; + + // initialize the database + let db = Arc::new(init_db(db_path, self.db.log_level)?); + let factory = ProviderFactory::new(&db, self.chain.clone()); + let provider = factory.provider()?; + + // Look up merkle checkpoint + let merkle_checkpoint = provider + .get_stage_checkpoint(StageId::MerkleExecute)? + .expect("merkle checkpoint exists"); + + let merkle_block_number = merkle_checkpoint.block_number; + + // Configure and build network + let network_secret_path = + self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path()); + let network = self + .build_network( + &config, + ctx.task_executor.clone(), + db.clone(), + network_secret_path, + data_dir.known_peers_path(), + ) + .await?; + + let target_block_number = merkle_block_number + 1; + + info!(target: "reth::cli", target_block_number, "Downloading full block"); + let fetch_client = network.fetch_client().await?; + + let retries = self.retries.max(1); + let backoff = ConstantBuilder::default().with_max_times(retries); + + let client = fetch_client.clone(); + let header = (move || { + get_single_header(client.clone(), BlockHashOrNumber::Number(target_block_number)) + }) + .retry(&backoff) + .notify(|err, _| warn!(target: "reth::cli", "Error requesting header: {err}. Retrying...")) + .await?; + + let client = fetch_client.clone(); + let chain = Arc::clone(&self.chain); + let block = (move || get_single_body(client.clone(), Arc::clone(&chain), header.clone())) + .retry(&backoff) + .notify( + |err, _| warn!(target: "reth::cli", "Error requesting body: {err}. 
Retrying..."),
+            )
+            .await?;
+
+        let executor_factory = reth_revm::Factory::new(self.chain.clone());
+        let mut executor = executor_factory.with_sp(LatestStateProviderRef::new(provider.tx_ref()));
+
+        let merkle_block_td =
+            provider.header_td_by_number(merkle_block_number)?.unwrap_or_default();
+        let block_state = executor.execute_and_verify_receipt(
+            &block.clone().unseal(),
+            merkle_block_td + block.difficulty,
+            None,
+        )?;
+
+        // Unpacked `PostState::state_root_slow` function
+        let hashed_post_state = block_state.hash_state_slow().sorted();
+        let (account_prefix_set, storage_prefix_set) = hashed_post_state.construct_prefix_sets();
+        let tx = provider.tx_ref();
+        let hashed_cursor_factory = HashedPostStateCursorFactory::new(tx, &hashed_post_state);
+        let (in_memory_state_root, in_memory_updates) = StateRoot::new(tx)
+            .with_hashed_cursor_factory(&hashed_cursor_factory)
+            .with_changed_account_prefixes(account_prefix_set)
+            .with_changed_storage_prefixes(storage_prefix_set)
+            .root_with_updates()?;
+
+        if in_memory_state_root == block.state_root {
+            info!(target: "reth::cli", state_root = ?in_memory_state_root, "Computed in-memory state root matches");
+            return Ok(())
+        }
+
+        let provider_rw = factory.provider_rw()?;
+
+        // Insert block, state and hashes
+        provider_rw.insert_block(block.clone(), None)?;
+        block_state.write_to_db(provider_rw.tx_ref(), block.number)?;
+        let storage_lists = provider_rw.changed_storages_with_range(block.number..=block.number)?;
+        let storages = provider_rw.plainstate_storages(storage_lists)?;
+        provider_rw.insert_storage_for_hashing(storages)?;
+        let account_lists = provider_rw.changed_accounts_with_range(block.number..=block.number)?;
+        let accounts = provider_rw.basic_accounts(account_lists)?;
+        provider_rw.insert_account_for_hashing(accounts)?;
+
+        let (state_root, incremental_trie_updates) = StateRoot::incremental_root_with_updates(
+            provider_rw.tx_ref(),
+            block.number..=block.number,
+        )?;
+        if state_root != block.state_root {
+            eyre::bail!(
+                "Computed incremental state root mismatch. Expected: {:?}. Got: {:?}",
+                block.state_root,
+                state_root
+            );
+        }
+
+        // Compare updates
+        let mut in_mem_mismatched = Vec::new();
+        let mut incremental_mismatched = Vec::new();
+        let mut in_mem_updates_iter = in_memory_updates.into_iter().peekable();
+        let mut incremental_updates_iter = incremental_trie_updates.into_iter().peekable();
+
+        while in_mem_updates_iter.peek().is_some() || incremental_updates_iter.peek().is_some() {
+            match (in_mem_updates_iter.next(), incremental_updates_iter.next()) {
+                (Some(in_mem), Some(incr)) => {
+                    pretty_assertions::assert_eq!(in_mem.0, incr.0, "Nibbles don't match");
+                    if in_mem.1 != incr.1 &&
+                        matches!(in_mem.0, TrieKey::AccountNode(ref nibbles) if nibbles.inner.len() > self.skip_node_depth.unwrap_or_default())
+                    {
+                        in_mem_mismatched.push(in_mem);
+                        incremental_mismatched.push(incr);
+                    }
+                }
+                (Some(in_mem), None) => {
+                    warn!(target: "reth::cli", next = ?in_mem, "In-memory trie updates have more entries");
+                }
+                (None, Some(incr)) => {
+                    tracing::warn!(target: "reth::cli", next = ?incr, "Incremental trie updates have more entries");
+                }
+                (None, None) => {
+                    tracing::info!(target: "reth::cli", "Exhausted all trie updates entries");
+                }
+            }
+        }
+
+        pretty_assertions::assert_eq!(
+            incremental_mismatched,
+            in_mem_mismatched,
+            "Mismatched trie updates"
+        );
+
+        // Drop without committing.
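        // (Editor's note: `provider_rw` follows the usual RAII write-transaction
        // pattern; a sketch of the idea under that assumption:)
        //
        //     let provider_rw = factory.provider_rw()?; // opens a write transaction
        //     provider_rw.insert_block(block, None)?;   // staged, not yet visible
        //     drop(provider_rw);                        // no `commit()`, so the staged
        //                                               // writes are rolled back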
+ drop(provider_rw); + + Ok(()) + } +} diff --git a/bin/reth/src/debug_cmd/mod.rs b/bin/reth/src/debug_cmd/mod.rs index 463c423508bd..e624307f68c7 100644 --- a/bin/reth/src/debug_cmd/mod.rs +++ b/bin/reth/src/debug_cmd/mod.rs @@ -4,6 +4,7 @@ use clap::{Parser, Subcommand}; use crate::runner::CliContext; mod execution; +mod in_memory_merkle; mod merkle; /// `reth debug` command @@ -20,6 +21,8 @@ pub enum Subcommands { Execution(execution::Command), /// Debug the clean & incremental state root calculations. Merkle(merkle::Command), + /// Debug in-memory state root calculation. + InMemoryMerkle(in_memory_merkle::Command), } impl Command { @@ -28,6 +31,7 @@ impl Command { match self.command { Subcommands::Execution(command) => command.execute(ctx).await, Subcommands::Merkle(command) => command.execute().await, + Subcommands::InMemoryMerkle(command) => command.execute(ctx).await, } } } diff --git a/bin/reth/src/utils.rs b/bin/reth/src/utils.rs index 1ac4f4d427fb..32cd073ce076 100644 --- a/bin/reth/src/utils.rs +++ b/bin/reth/src/utils.rs @@ -1,6 +1,7 @@ //! Common CLI utility functions. use eyre::Result; +use reth_consensus_common::validation::validate_block_standalone; use reth_db::{ cursor::DbCursorRO, database::Database, @@ -8,10 +9,13 @@ use reth_db::{ transaction::{DbTx, DbTxMut}, }; use reth_interfaces::p2p::{ + bodies::client::BodiesClient, headers::client::{HeadersClient, HeadersRequest}, priority::Priority, }; -use reth_primitives::{fs, BlockHashOrNumber, ChainSpec, HeadersDirection, SealedHeader}; +use reth_primitives::{ + fs, BlockHashOrNumber, ChainSpec, HeadersDirection, SealedBlock, SealedHeader, +}; use std::{ env::VarError, path::{Path, PathBuf}, @@ -56,6 +60,35 @@ where Ok(header) } +/// Get a body from network based on header +pub async fn get_single_body( + client: Client, + chain_spec: Arc, + header: SealedHeader, +) -> Result +where + Client: BodiesClient, +{ + let (peer_id, response) = client.get_block_body(header.hash).await?.split(); + + if response.is_none() { + client.report_bad_message(peer_id); + eyre::bail!("Invalid number of bodies received. Expected: 1. Received: 0") + } + + let block = response.unwrap(); + let block = SealedBlock { + header, + body: block.transactions, + ommers: block.ommers, + withdrawals: block.withdrawals, + }; + + validate_block_standalone(&block, &chain_spec)?; + + Ok(block) +} + /// Wrapper over DB that implements many useful DB queries. pub struct DbTool<'a, DB: Database> { pub(crate) db: &'a DB, diff --git a/crates/trie/src/updates.rs b/crates/trie/src/updates.rs index b60df4aa78a7..dc5c086e0d7a 100644 --- a/crates/trie/src/updates.rs +++ b/crates/trie/src/updates.rs @@ -22,7 +22,7 @@ pub enum TrieKey { } /// The operation to perform on the trie. -#[derive(Debug, Clone)] +#[derive(PartialEq, Eq, Debug, Clone)] pub enum TrieOp { /// Delete the node entry. 
Delete, From c04f3e443f18a4e6b28bea6334bb69e87bf29ecb Mon Sep 17 00:00:00 2001 From: Plamen Hristov Date: Fri, 28 Jul 2023 13:05:38 +0200 Subject: [PATCH 283/722] WIP: Implement prestateTracer (#3923) Co-authored-by: Matthias Seitz --- .../src/tracing/builder/geth.rs | 79 ++++++++++++++++++- .../revm/revm-inspectors/src/tracing/types.rs | 32 +++++++- .../rpc-types/src/eth/trace/geth/pre_state.rs | 17 +++- crates/rpc/rpc/src/debug.rs | 41 +++++++++- 4 files changed, 160 insertions(+), 9 deletions(-) diff --git a/crates/revm/revm-inspectors/src/tracing/builder/geth.rs b/crates/revm/revm-inspectors/src/tracing/builder/geth.rs index a3eaa3233fd3..2f02b32cfa6f 100644 --- a/crates/revm/revm-inspectors/src/tracing/builder/geth.rs +++ b/crates/revm/revm-inspectors/src/tracing/builder/geth.rs @@ -4,8 +4,12 @@ use crate::tracing::{ types::{CallTraceNode, CallTraceStepStackItem}, TracingInspectorConfig, }; -use reth_primitives::{Address, Bytes, H256}; -use reth_rpc_types::trace::geth::*; +use reth_primitives::{Address, Bytes, H256, U256}; +use reth_rpc_types::trace::geth::{ + AccountState, CallConfig, CallFrame, DefaultFrame, DiffMode, GethDefaultTracingOptions, + PreStateConfig, PreStateFrame, PreStateMode, StructLog, +}; +use revm::{db::DatabaseRef, primitives::ResultAndState}; use std::collections::{BTreeMap, HashMap, VecDeque}; /// A type for creating geth style traces @@ -147,4 +151,75 @@ impl GethTraceBuilder { } } } + + /// Returns the accounts necessary for transaction execution. + /// + /// The prestate mode returns the accounts necessary to execute a given transaction. + /// diff_mode returns the differences between the transaction's pre and post-state. + /// + /// * `state` - The state post-transaction execution. + /// * `diff_mode` - if prestate is in diff or prestate mode. + /// * `db` - The database to fetch state pre-transaction execution. + pub fn geth_prestate_traces( + &self, + ResultAndState { state, .. 
}: &ResultAndState, + prestate_config: PreStateConfig, + db: DB, + ) -> Result + where + DB: DatabaseRef, + { + let account_diffs: Vec<_> = + state.into_iter().map(|(addr, acc)| (*addr, &acc.info)).collect(); + + if prestate_config.is_diff_mode() { + let mut prestate = PreStateMode::default(); + for (addr, _) in account_diffs { + let db_acc = db.basic(addr)?.unwrap_or_default(); + prestate.0.insert( + addr, + AccountState { + balance: Some(db_acc.balance), + nonce: Some(U256::from(db_acc.nonce)), + code: db_acc.code.as_ref().map(|code| Bytes::from(code.original_bytes())), + storage: None, + }, + ); + } + self.update_storage_from_trace(&mut prestate.0, false); + Ok(PreStateFrame::Default(prestate)) + } else { + let mut state_diff = DiffMode::default(); + for (addr, changed_acc) in account_diffs { + let db_acc = db.basic(addr)?.unwrap_or_default(); + let pre_state = AccountState { + balance: Some(db_acc.balance), + nonce: Some(U256::from(db_acc.nonce)), + code: db_acc.code.as_ref().map(|code| Bytes::from(code.original_bytes())), + storage: None, + }; + let post_state = AccountState { + balance: Some(changed_acc.balance), + nonce: Some(U256::from(changed_acc.nonce)), + code: changed_acc.code.as_ref().map(|code| Bytes::from(code.original_bytes())), + storage: None, + }; + state_diff.pre.insert(addr, pre_state); + state_diff.post.insert(addr, post_state); + } + self.update_storage_from_trace(&mut state_diff.pre, false); + self.update_storage_from_trace(&mut state_diff.post, true); + Ok(PreStateFrame::Diff(state_diff)) + } + } + + fn update_storage_from_trace( + &self, + account_states: &mut BTreeMap, + post_value: bool, + ) { + for node in self.nodes.iter() { + node.geth_update_account_storage(account_states, post_value); + } + } } diff --git a/crates/revm/revm-inspectors/src/tracing/types.rs b/crates/revm/revm-inspectors/src/tracing/types.rs index 22bebdbb2478..a7cb9ef4aede 100644 --- a/crates/revm/revm-inspectors/src/tracing/types.rs +++ b/crates/revm/revm-inspectors/src/tracing/types.rs @@ -3,7 +3,7 @@ use crate::tracing::{config::TraceStyle, utils::convert_memory}; use reth_primitives::{abi::decode_revert_reason, bytes::Bytes, Address, H256, U256}; use reth_rpc_types::trace::{ - geth::{CallFrame, CallLogFrame, GethDefaultTracingOptions, StructLog}, + geth::{AccountState, CallFrame, CallLogFrame, GethDefaultTracingOptions, StructLog}, parity::{ Action, ActionType, CallAction, CallOutput, CallType, ChangedType, CreateAction, CreateOutput, Delta, SelfdestructAction, StateDiff, TraceOutput, TransactionTrace, @@ -13,7 +13,7 @@ use revm::interpreter::{ opcode, CallContext, CallScheme, CreateScheme, InstructionResult, Memory, OpCode, Stack, }; use serde::{Deserialize, Serialize}; -use std::collections::{btree_map::Entry, VecDeque}; +use std::collections::{btree_map::Entry, BTreeMap, VecDeque}; /// A unified representation of a call #[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Serialize, Deserialize)] @@ -443,6 +443,34 @@ impl CallTraceNode { call_frame } + + /// Adds storage in-place to account state for all accounts that were touched in the trace + /// [CallTrace] execution. + /// + /// * `account_states` - the account map updated in place. + /// * `post_value` - if true, it adds storage values after trace transaction execution, if + /// false, returns the storage values before trace execution. 
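
// (Editor's illustration with made-up values: a step that wrote storage slot
// `k` from 0 to 0x2a carries StorageChange { key: k, value: 0x2a, had_value:
// Some(0) }. The function below then records:)
//
// post_value == true   =>  storage[k] = H256::from(U256::from(0x2a)) // post-state
// post_value == false  =>  storage[k] = H256::from(U256::ZERO)       // pre-state
// // a change with had_value == None is skipped entirely in pre-state mode
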
+ pub(crate) fn geth_update_account_storage( + &self, + account_states: &mut BTreeMap, + post_value: bool, + ) { + let addr = self.trace.address; + let acc_state = account_states.entry(addr).or_insert_with(AccountState::default); + for change in self.trace.steps.iter().filter_map(|s| s.storage_change) { + let StorageChange { key, value, had_value } = change; + let storage_map = acc_state.storage.get_or_insert_with(BTreeMap::new); + let value_to_insert = if post_value { + H256::from(value) + } else { + match had_value { + Some(had_value) => H256::from(had_value), + None => continue, + } + }; + storage_map.insert(key.into(), value_to_insert); + } + } } pub(crate) struct CallTraceStepStackItem<'a> { diff --git a/crates/rpc/rpc-types/src/eth/trace/geth/pre_state.rs b/crates/rpc/rpc-types/src/eth/trace/geth/pre_state.rs index b6301da3f2e1..2f8be5ab36b5 100644 --- a/crates/rpc/rpc-types/src/eth/trace/geth/pre_state.rs +++ b/crates/rpc/rpc-types/src/eth/trace/geth/pre_state.rs @@ -1,4 +1,4 @@ -use reth_primitives::{serde_helper::num::from_int_or_hex_opt, Address, H256, U256}; +use reth_primitives::{serde_helper::num::from_int_or_hex_opt, Address, Bytes, H256, U256}; use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; @@ -29,7 +29,7 @@ pub struct AccountState { )] pub balance: Option, #[serde(default, skip_serializing_if = "Option::is_none")] - pub code: Option, + pub code: Option, #[serde( default, deserialize_with = "from_int_or_hex_opt", @@ -47,6 +47,12 @@ pub struct PreStateConfig { pub diff_mode: Option, } +impl PreStateConfig { + pub fn is_diff_mode(&self) -> bool { + self.diff_mode.unwrap_or_default() + } +} + #[cfg(test)] mod tests { use super::*; @@ -86,4 +92,11 @@ mod tests { _ => unreachable!(), } } + + #[test] + fn test_is_diff_mode() { + assert!(PreStateConfig { diff_mode: Some(true) }.is_diff_mode()); + assert!(!PreStateConfig { diff_mode: Some(false) }.is_diff_mode()); + assert!(!PreStateConfig { diff_mode: None }.is_diff_mode()); + } } diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 5a72e5eedb7a..3fd5bdef06bd 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -2,7 +2,8 @@ use crate::{ eth::{ error::{EthApiError, EthResult}, revm_utils::{ - clone_into_empty_db, inspect, replay_transactions_until, result_output, EvmOverrides, + clone_into_empty_db, inspect, inspect_and_return_db, replay_transactions_until, + result_output, EvmOverrides, }, EthTransactions, TransactionSource, }, @@ -255,7 +256,26 @@ where return Ok(frame) } GethDebugBuiltInTracerType::PreStateTracer => { - Err(EthApiError::Unsupported("pre state tracer currently unsupported.")) + let prestate_config = tracer_config + .into_pre_state_config() + .map_err(|_| EthApiError::InvalidTracerConfig)?; + let mut inspector = TracingInspector::new( + TracingInspectorConfig::from_geth_config(&config), + ); + + let frame = + self.inner + .eth_api + .spawn_with_call_at(call, at, overrides, move |db, env| { + let (res, _, db) = + inspect_and_return_db(db, env, &mut inspector)?; + let frame = inspector + .into_geth_builder() + .geth_prestate_traces(&res, prestate_config, &db)?; + Ok(frame) + }) + .await?; + return Ok(frame.into()) } GethDebugBuiltInTracerType::NoopTracer => Ok(NoopFrame::default().into()), }, @@ -354,7 +374,22 @@ where return Ok((frame.into(), res.state)) } GethDebugBuiltInTracerType::PreStateTracer => { - Err(EthApiError::Unsupported("prestate tracer is unimplemented yet.")) + let prestate_config = tracer_config + .into_pre_state_config() + 
.map_err(|_| EthApiError::InvalidTracerConfig)?; + + let mut inspector = TracingInspector::new( + TracingInspectorConfig::from_geth_config(&config), + ); + let (res, _) = inspect(&mut *db, env, &mut inspector)?; + + let frame = inspector.into_geth_builder().geth_prestate_traces( + &res, + prestate_config, + &*db, + )?; + + return Ok((frame.into(), res.state)) } GethDebugBuiltInTracerType::NoopTracer => { Ok((NoopFrame::default().into(), Default::default())) From c33f93004be0bd1012fa43ca659aeb864795635e Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 28 Jul 2023 14:10:55 +0300 Subject: [PATCH 284/722] chore(txpool): export validation constants (#3979) --- crates/transaction-pool/src/lib.rs | 18 ------------------ .../transaction-pool/src/validate/constants.rs | 17 +++++++++++++++++ crates/transaction-pool/src/validate/eth.rs | 7 +++++-- crates/transaction-pool/src/validate/mod.rs | 4 ++++ 4 files changed, 26 insertions(+), 20 deletions(-) create mode 100644 crates/transaction-pool/src/validate/constants.rs diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 104be7d9a249..8c4ccc502db2 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -190,24 +190,6 @@ mod traits; /// Common test helpers for mocking a pool pub mod test_utils; -// TX_SLOT_SIZE is used to calculate how many data slots a single transaction -// takes up based on its size. The slots are used as DoS protection, ensuring -// that validating a new transaction remains a constant operation (in reality -// O(maxslots), where max slots are 4 currently). -pub(crate) const TX_SLOT_SIZE: usize = 32 * 1024; - -// TX_MAX_SIZE is the maximum size a single transaction can have. This field has -// non-trivial consequences: larger transactions are significantly harder and -// more expensive to propagate; larger transactions also take more resources -// to validate whether they fit into the pool or not. -pub(crate) const TX_MAX_SIZE: usize = 4 * TX_SLOT_SIZE; //128KB - -// Maximum bytecode to permit for a contract -pub(crate) const MAX_CODE_SIZE: usize = 24576; - -// Maximum initcode to permit in a creation transaction and create instructions -pub(crate) const MAX_INIT_CODE_SIZE: usize = 2 * MAX_CODE_SIZE; - /// A shareable, generic, customizable `TransactionPool` implementation. #[derive(Debug)] pub struct Pool { diff --git a/crates/transaction-pool/src/validate/constants.rs b/crates/transaction-pool/src/validate/constants.rs new file mode 100644 index 000000000000..040087bdb020 --- /dev/null +++ b/crates/transaction-pool/src/validate/constants.rs @@ -0,0 +1,17 @@ +/// TX_SLOT_SIZE is used to calculate how many data slots a single transaction +/// takes up based on its size. The slots are used as DoS protection, ensuring +/// that validating a new transaction remains a constant operation (in reality +/// O(maxslots), where max slots are 4 currently). +pub const TX_SLOT_SIZE: usize = 32 * 1024; + +/// TX_MAX_SIZE is the maximum size a single transaction can have. This field has +/// non-trivial consequences: larger transactions are significantly harder and +/// more expensive to propagate; larger transactions also take more resources +/// to validate whether they fit into the pool or not. 
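
// (Editor's sketch of the slot accounting these constants imply; `tx_slots`
// is a hypothetical helper using geth-style ceiling division, not part of
// this patch:)
//
// fn tx_slots(encoded_len: usize) -> usize {
//     (encoded_len + TX_SLOT_SIZE - 1) / TX_SLOT_SIZE
// }
//
// // e.g. a 40 KiB transaction occupies 2 slots; TX_MAX_SIZE below caps any
// // single transaction at 4 slots (128 KiB)
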
+pub const TX_MAX_SIZE: usize = 4 * TX_SLOT_SIZE; // 128KB + +/// Maximum bytecode to permit for a contract +pub const MAX_CODE_SIZE: usize = 24576; + +/// Maximum initcode to permit in a creation transaction and create instructions +pub const MAX_INIT_CODE_SIZE: usize = 2 * MAX_CODE_SIZE; diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 2a4bbbf8ec2e..24e90e77d68c 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -3,8 +3,11 @@ use crate::{ error::InvalidPoolTransactionError, traits::{PoolTransaction, TransactionOrigin}, - validate::{task::ValidationJobSender, TransactionValidatorError, ValidationTask}, - TransactionValidationOutcome, TransactionValidator, MAX_INIT_CODE_SIZE, TX_MAX_SIZE, + validate::{ + task::ValidationJobSender, TransactionValidatorError, ValidationTask, MAX_INIT_CODE_SIZE, + TX_MAX_SIZE, + }, + TransactionValidationOutcome, TransactionValidator, }; use reth_primitives::{ constants::ETHEREUM_BLOCK_GAS_LIMIT, ChainSpec, InvalidTransactionError, EIP1559_TX_TYPE_ID, diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 78c904aa0c10..aabe58845ccf 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -10,6 +10,7 @@ use reth_primitives::{ }; use std::{fmt, time::Instant}; +mod constants; mod eth; mod task; @@ -19,6 +20,9 @@ pub use eth::{EthTransactionValidator, EthTransactionValidatorBuilder}; /// A spawnable task that performs transaction validation. pub use task::ValidationTask; +/// Validation constants. +pub use constants::{MAX_CODE_SIZE, MAX_INIT_CODE_SIZE, TX_MAX_SIZE, TX_SLOT_SIZE}; + /// A Result type returned after checking a transaction's validity. #[derive(Debug)] pub enum TransactionValidationOutcome { From d8677b4146f77c7c82d659c59b79b38caca78778 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 28 Jul 2023 14:57:44 +0300 Subject: [PATCH 285/722] chore(txpool): pooled tx constructor (#3980) --- crates/transaction-pool/src/traits.rs | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 2599b0db0d98..2947cf38fbca 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -532,6 +532,18 @@ pub struct PooledTransaction { } impl PooledTransaction { + /// Create new instance of [Self]. + pub fn new(transaction: TransactionSignedEcRecovered) -> Self { + let gas_cost = match &transaction.transaction { + Transaction::Legacy(t) => U256::from(t.gas_price) * U256::from(t.gas_limit), + Transaction::Eip2930(t) => U256::from(t.gas_price) * U256::from(t.gas_limit), + Transaction::Eip1559(t) => U256::from(t.max_fee_per_gas) * U256::from(t.gas_limit), + }; + let cost = gas_cost + U256::from(transaction.value()); + + Self { transaction, cost } + } + /// Return the reference to the underlying transaction. 
pub fn transaction(&self) -> &TransactionSignedEcRecovered {
         &self.transaction
 
@@ -634,14 +646,7 @@ impl PoolTransaction for PooledTransaction {
 
 impl FromRecoveredTransaction for PooledTransaction {
     fn from_recovered_transaction(tx: TransactionSignedEcRecovered) -> Self {
-        let gas_cost = match &tx.transaction {
-            Transaction::Legacy(t) => U256::from(t.gas_price) * U256::from(t.gas_limit),
-            Transaction::Eip2930(t) => U256::from(t.gas_price) * U256::from(t.gas_limit),
-            Transaction::Eip1559(t) => U256::from(t.max_fee_per_gas) * U256::from(t.gas_limit),
-        };
-        let cost = gas_cost + U256::from(tx.value());
-
-        PooledTransaction { transaction: tx, cost }
+        PooledTransaction::new(tx)
     }
 }
 
From b05b84823a9d0f8f3348da346bd95f5456ac6fc3 Mon Sep 17 00:00:00 2001
From: Alessandro <121622391+alessandromazza98@users.noreply.github.com>
Date: Fri, 28 Jul 2023 16:42:46 +0200
Subject: [PATCH 286/722] add defaults to some rpc server cli args (#3969)

---
 bin/reth/src/args/rpc_server_args.rs | 42 ++++++++++------------------
 1 file changed, 15 insertions(+), 27 deletions(-)

diff --git a/bin/reth/src/args/rpc_server_args.rs b/bin/reth/src/args/rpc_server_args.rs
index 7e46c36ce779..35d9e7945936 100644
--- a/bin/reth/src/args/rpc_server_args.rs
+++ b/bin/reth/src/args/rpc_server_args.rs
@@ -52,7 +52,7 @@ pub(crate) const RPC_DEFAULT_MAX_CONNECTIONS: u32 = 100;
 pub(crate) const RPC_DEFAULT_MAX_TRACING_REQUESTS: u32 = 25;
 
 /// Parameters for configuring the rpc more granularity via CLI
-#[derive(Debug, Args, PartialEq, Eq, Default)]
+#[derive(Debug, Args, PartialEq, Eq)]
 #[command(next_help_heading = "RPC")]
 pub struct RpcServerArgs {
     /// Enable the HTTP-RPC server
@@ -60,12 +60,12 @@ pub struct RpcServerArgs {
     pub http: bool,
 
     /// Http server address to listen on
-    #[arg(long = "http.addr")]
-    pub http_addr: Option<IpAddr>,
+    #[arg(long = "http.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))]
+    pub http_addr: IpAddr,
 
     /// Http server port to listen on
-    #[arg(long = "http.port")]
-    pub http_port: Option<u16>,
+    #[arg(long = "http.port", default_value_t = constants::DEFAULT_HTTP_RPC_PORT)]
+    pub http_port: u16,
 
     /// Rpc Modules to be configured for the HTTP server
     #[arg(long = "http.api", value_parser = RpcModuleSelectionValueParser::default())]
     pub http_api: Option<RpcModuleSelection>,
@@ -80,12 +80,12 @@ pub struct RpcServerArgs {
     pub ws: bool,
 
     /// Ws server address to listen on
-    #[arg(long = "ws.addr")]
-    pub ws_addr: Option<IpAddr>,
+    #[arg(long = "ws.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))]
+    pub ws_addr: IpAddr,
 
     /// Ws server port to listen on
-    #[arg(long = "ws.port")]
-    pub ws_port: Option<u16>,
+    #[arg(long = "ws.port", default_value_t = constants::DEFAULT_WS_RPC_PORT)]
+    pub ws_port: u16,
 
     /// Origins from which to accept WebSocket requests
     #[arg(long = "ws.origins", name = "ws.origins")]
     pub ws_allowed_origins: Option<String>,
 
@@ -104,8 +104,8 @@ pub struct RpcServerArgs {
     pub ipcpath: Option<String>,
 
     /// Auth server address to listen on
-    #[arg(long = "authrpc.addr")]
-    pub auth_addr: Option<IpAddr>,
+    #[arg(long = "authrpc.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))]
+    pub auth_addr: IpAddr,
 
     /// Auth server port to listen on
     #[arg(long = "authrpc.port", default_value_t = constants::DEFAULT_AUTH_PORT)]
     pub auth_port: u16,
@@ -356,10 +356,7 @@ impl RpcServerArgs {
         Network: NetworkInfo + Peers + Clone + 'static,
         Tasks: TaskSpawner + Clone + 'static,
     {
-        let socket_address = SocketAddr::new(
-            self.auth_addr.unwrap_or(IpAddr::V4(Ipv4Addr::LOCALHOST)),
-            self.auth_port,
-        );
+        let socket_address = SocketAddr::new(self.auth_addr, self.auth_port);
 
         reth_rpc_builder::auth::launch(
             provider,
@@ -427,10 +424,7 @@ impl RpcServerArgs {
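For context on the pattern this patch adopts, here is a minimal, self-contained sketch of how clap 4's `default_value_t` materializes defaults at parse time, which is what lets the `unwrap_or(...)` fallbacks below be deleted. The `DemoArgs` struct and the literal `8545` (standing in for `constants::DEFAULT_HTTP_RPC_PORT`) are illustrative assumptions, not reth code:

    use std::net::{IpAddr, Ipv4Addr};
    use clap::Parser;

    #[derive(Debug, Parser)]
    struct DemoArgs {
        /// Http server address to listen on
        #[arg(long = "http.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))]
        http_addr: IpAddr,

        /// Http server port to listen on (8545 is the conventional default)
        #[arg(long = "http.port", default_value_t = 8545)]
        http_port: u16,
    }

    fn main() {
        // No flags passed: clap fills in the defaults, so the fields are
        // plain values rather than Options that each call site must unwrap.
        let args = DemoArgs::parse_from(["demo"]);
        assert_eq!(args.http_addr, IpAddr::V4(Ipv4Addr::LOCALHOST));
        assert_eq!(args.http_port, 8545);
    }

One side effect worth noting: with concrete CLI defaults, a derived `Default` impl would no longer match what parsing produces, which lines up with `Default` being dropped from the derive list above.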
        let mut config = RpcServerConfig::default();
 
         if self.http {
-            let socket_address = SocketAddr::new(
-                self.http_addr.unwrap_or(IpAddr::V4(Ipv4Addr::LOCALHOST)),
-                self.http_port.unwrap_or(constants::DEFAULT_HTTP_RPC_PORT),
-            );
+            let socket_address = SocketAddr::new(self.http_addr, self.http_port);
             config = config
                 .with_http_address(socket_address)
                 .with_http(self.http_ws_server_builder())
@@ -439,10 +433,7 @@ impl RpcServerArgs {
         }
 
         if self.ws {
-            let socket_address = SocketAddr::new(
-                self.ws_addr.unwrap_or(IpAddr::V4(Ipv4Addr::LOCALHOST)),
-                self.ws_port.unwrap_or(constants::DEFAULT_WS_RPC_PORT),
-            );
+            let socket_address = SocketAddr::new(self.ws_addr, self.ws_port);
             config = config.with_ws_address(socket_address).with_ws(self.http_ws_server_builder());
         }
 
@@ -457,10 +448,7 @@ impl RpcServerArgs {
 
     /// Creates the [AuthServerConfig] from cli args.
     fn auth_server_config(&self, jwt_secret: JwtSecret) -> Result<AuthServerConfig, RpcError> {
-        let address = SocketAddr::new(
-            self.auth_addr.unwrap_or(IpAddr::V4(Ipv4Addr::LOCALHOST)),
-            self.auth_port,
-        );
+        let address = SocketAddr::new(self.auth_addr, self.auth_port);
 
         Ok(AuthServerConfig::builder(jwt_secret).socket_addr(address).build())
     }
 
From 15bb1c90b8e60dcaaaa1d2cbc82817d135192cbd Mon Sep 17 00:00:00 2001
From: prames <134806363+0xprames@users.noreply.github.com>
Date: Fri, 28 Jul 2023 10:45:45 -0400
Subject: [PATCH 287/722] feat(txpool) - add flag for local tx propagation
 (#3977)

---
 crates/transaction-pool/src/validate/eth.rs | 29 ++++++++++++++++++++-
 1 file changed, 28 insertions(+), 1 deletion(-)

diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs
index 24e90e77d68c..bfcf04028ad0 100644
--- a/crates/transaction-pool/src/validate/eth.rs
+++ b/crates/transaction-pool/src/validate/eth.rs
@@ -140,6 +140,8 @@ pub struct EthTransactionValidatorBuilder {
     ///
     /// Default is 1
     additional_tasks: usize,
+    /// Toggle to determine if a local transaction should be propagated
+    propagate_local_transactions: bool,
 }
 
 impl EthTransactionValidatorBuilder {
@@ -153,6 +155,8 @@ impl EthTransactionValidatorBuilder {
             block_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT,
             minimum_priority_fee: None,
             additional_tasks: 1,
+            // default to true, can potentially take this as a param in the future
+            propagate_local_transactions: true,
         }
     }
 
@@ -188,6 +192,23 @@ impl EthTransactionValidatorBuilder {
         self.eip1559 = eip1559;
         self
     }
+    /// Sets the toggle to propagate transactions received locally by this client (e.g.
+    /// transactions from eth_sendTransaction to this node's RPC server)
+    ///
+    /// If set to false, only transactions received by network peers (via
+    /// p2p) will be marked as propagated in the local transaction pool and returned on a
+    /// GetPooledTransactions p2p request
+    pub fn set_propagate_local_transactions(mut self, propagate_local_txs: bool) -> Self {
+        self.propagate_local_transactions = propagate_local_txs;
+        self
+    }
+    /// Disables propagating transactions received locally by this client
+    ///
+    /// For more information, check docs for set_propagate_local_transactions
+    pub fn no_local_transaction_propagation(mut self) -> Self {
+        self.propagate_local_transactions = false;
+        self
+    }
 
     /// Sets a minimum priority fee that's enforced for acceptance into the pool.
pub fn with_minimum_priority_fee(mut self, minimum_priority_fee: u128) -> Self {
@@ -222,6 +243,7 @@ impl EthTransactionValidatorBuilder {
             block_gas_limit,
             minimum_priority_fee,
             additional_tasks,
+            propagate_local_transactions,
         } = self;
 
         let inner = EthTransactionValidatorInner {
@@ -232,6 +254,7 @@ impl EthTransactionValidatorBuilder {
             eip1559,
             block_gas_limit,
             minimum_priority_fee,
+            propagate_local_transactions,
             _marker: Default::default(),
         };
@@ -277,6 +300,8 @@ struct EthTransactionValidatorInner<Client, T> {
     minimum_priority_fee: Option<u128>,
     /// Marker for the transaction type
     _marker: PhantomData<T>,
+    /// Toggle to determine if a local transaction should be propagated
+    propagate_local_transactions: bool,
 }
 
 // === impl EthTransactionValidatorInner ===
@@ -435,7 +460,9 @@ where
             balance: account.balance,
             state_nonce: account.nonce,
             transaction,
-            propagate: true,
+            // by this point assume all external transactions should be propagated
+            propagate: matches!(origin, TransactionOrigin::External) ||
+                self.propagate_local_transactions,
         }
     }
 }
 
From 334d6068ad3ff0d383b5e47693945ff980b3ca9d Mon Sep 17 00:00:00 2001
From: Dan Cline <6798349+Rjected@users.noreply.github.com>
Date: Sat, 29 Jul 2023 08:51:16 -0400
Subject: [PATCH 288/722] feat: add 4844 header fields and consensus checks
 (#3972)

---
 crates/consensus/auto-seal/src/lib.rs         |   2 +
 crates/consensus/common/src/validation.rs     |  91 ++++++-
 crates/interfaces/src/consensus.rs            |  21 ++
 crates/net/eth-wire/src/types/blocks.rs       |   8 +
 crates/payload/basic/src/lib.rs               |   4 +
 crates/primitives/src/blobfee.rs              |  12 +
 crates/primitives/src/header.rs               | 226 +++++++++++++++++-
 crates/primitives/src/lib.rs                  |   1 +
 crates/rpc/rpc-types/src/eth/block.rs         |   3 +
 .../rpc/rpc-types/src/eth/engine/payload.rs   |   3 +
 crates/rpc/rpc/src/eth/api/pending_block.rs   |   2 +
 testing/ef-tests/src/models.rs                |   2 +
 12 files changed, 368 insertions(+), 7 deletions(-)
 create mode 100644 crates/primitives/src/blobfee.rs

diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs
index 668a2117f6b9..29cb6f491771 100644
--- a/crates/consensus/auto-seal/src/lib.rs
+++ b/crates/consensus/auto-seal/src/lib.rs
@@ -269,6 +269,8 @@ impl StorageInner {
             nonce: 0,
             base_fee_per_gas,
             extra_data: Default::default(),
+            blob_gas_used: None,
+            excess_blob_gas: None,
         };
 
         header.transactions_root = if transactions.is_empty() {
diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs
index 485d6146737b..fd7c6140d16c 100644
--- a/crates/consensus/common/src/validation.rs
+++ b/crates/consensus/common/src/validation.rs
@@ -1,8 +1,13 @@
 //! Collection of methods for block validation.
 use reth_interfaces::{consensus::ConsensusError, Result as RethResult};
 use reth_primitives::{
-    constants, BlockNumber, ChainSpec, Hardfork, Header, InvalidTransactionError, SealedBlock,
-    SealedHeader, Transaction, TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxLegacy,
+    blobfee::calculate_excess_blob_gas,
+    constants::{
+        self,
+        eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK},
+    },
+    BlockNumber, ChainSpec, Hardfork, Header, InvalidTransactionError, SealedBlock, SealedHeader,
+    Transaction, TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxLegacy,
 };
 use reth_provider::{AccountReader, HeaderProvider, WithdrawalsProvider};
 use std::collections::{hash_map::Entry, HashMap};
@@ -38,6 +43,15 @@ pub fn validate_header_standalone(
         return Err(ConsensusError::WithdrawalsRootUnexpected)
     }
 
+    // Ensures that EIP-4844 fields are valid once cancun is active.
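// A minimal, self-contained model of the gate that follows, with simplified
// stand-in types (MiniHeader and the string errors are hypothetical; the real
// check uses reth's ChainSpec fork schedule and ConsensusError variants):
// blob fields may appear in a header if and only if Cancun is active at the
// header's timestamp.
struct MiniHeader {
    timestamp: u64,
    blob_gas_used: Option<u64>,
    excess_blob_gas: Option<u64>,
}

fn check_blob_fields(cancun_activation: u64, h: &MiniHeader) -> Result<(), &'static str> {
    if h.timestamp >= cancun_activation {
        // post-Cancun both fields must exist; the detailed range checks live
        // in validate_4844_header_standalone further below
        if h.blob_gas_used.is_none() {
            return Err("blob gas used missing")
        }
        if h.excess_blob_gas.is_none() {
            return Err("excess blob gas missing")
        }
    } else if h.blob_gas_used.is_some() {
        return Err("unexpected blob gas used")
    } else if h.excess_blob_gas.is_some() {
        return Err("unexpected excess blob gas")
    }
    Ok(())
}
// For example, a pre-Cancun header that carries blob_gas_used fails the check:
// check_blob_fields(100, &MiniHeader { timestamp: 10, blob_gas_used: Some(0), excess_blob_gas: None })
// returns Err("unexpected blob gas used"). The gate below enforces the same rule.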
+    if chain_spec.fork(Hardfork::Cancun).active_at_timestamp(header.timestamp) {
+        validate_4844_header_standalone(header)?;
+    } else if header.blob_gas_used.is_some() {
+        return Err(ConsensusError::BlobGasUsedUnexpected)
+    } else if header.excess_blob_gas.is_some() {
+        return Err(ConsensusError::ExcessBlobGasUnexpected)
+    }
+
     Ok(())
 }
 
@@ -291,6 +305,11 @@ pub fn validate_header_regarding_parent(
         }
     }
 
+    // ensure that the blob gas fields for this block
+    if chain_spec.fork(Hardfork::Cancun).active_at_timestamp(child.timestamp) {
+        validate_4844_header_with_parent(parent, child)?;
+    }
+
     Ok(())
 }
 
@@ -376,6 +395,72 @@ pub fn full_validation<Provider: HeaderProvider + AccountReader + WithdrawalsProvider>(
+/// Validates that the EIP-4844 header fields are correct with respect to the parent block.
+pub fn validate_4844_header_with_parent(
+    parent: &SealedHeader,
+    child: &SealedHeader,
+) -> Result<(), ConsensusError> {
+    // From [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#header-extension):
+    //
+    // > For the first post-fork block, both parent.blob_gas_used and parent.excess_blob_gas
+    // > are evaluated as 0.
+    //
+    // This means in the first post-fork block, calculate_excess_blob_gas will return 0.
+    let parent_blob_gas_used = parent.blob_gas_used.unwrap_or(0);
+    let parent_excess_blob_gas = parent.excess_blob_gas.unwrap_or(0);
+
+    if child.blob_gas_used.is_none() {
+        return Err(ConsensusError::BlobGasUsedMissing)
+    }
+    let excess_blob_gas = child.excess_blob_gas.ok_or(ConsensusError::ExcessBlobGasMissing)?;
+
+    let expected_excess_blob_gas =
+        calculate_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used);
+    if expected_excess_blob_gas != excess_blob_gas {
+        return Err(ConsensusError::ExcessBlobGasDiff {
+            expected: expected_excess_blob_gas,
+            got: excess_blob_gas,
+            parent_excess_blob_gas,
+            parent_blob_gas_used,
+        })
+    }
+
+    Ok(())
+}
+
+/// Validates that the EIP-4844 header fields exist and conform to the spec. This ensures that:
+///
+/// * `blob_gas_used` exists as a header field
+/// * `excess_blob_gas` exists as a header field
+/// * `blob_gas_used` is less than or equal to `MAX_DATA_GAS_PER_BLOCK`
+/// * `blob_gas_used` is a multiple of `DATA_GAS_PER_BLOB`
+pub fn validate_4844_header_standalone(header: &SealedHeader) -> Result<(), ConsensusError> {
+    let blob_gas_used = header.blob_gas_used.ok_or(ConsensusError::BlobGasUsedMissing)?;
+
+    if header.excess_blob_gas.is_none() {
+        return Err(ConsensusError::ExcessBlobGasMissing)
+    }
+
+    if blob_gas_used > MAX_DATA_GAS_PER_BLOCK {
+        return Err(ConsensusError::BlobGasUsedExceedsMaxBlobGasPerBlock {
+            blob_gas_used,
+            max_blob_gas_per_block: MAX_DATA_GAS_PER_BLOCK,
+        })
+    }
+
+    if blob_gas_used % DATA_GAS_PER_BLOB != 0 {
+        return Err(ConsensusError::BlobGasUsedNotMultipleOfBlobGasPerBlob {
+            blob_gas_used,
+            blob_gas_per_blob: DATA_GAS_PER_BLOB,
+        })
+    }
+
+    Ok(())
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -530,6 +615,8 @@ mod tests {
             nonce: 0x0000000000000000,
             base_fee_per_gas: 0x28f0001df.into(),
             withdrawals_root: None,
+            blob_gas_used: None,
+            excess_blob_gas: None,
         };
 
         // size: 0x9b5
diff --git a/crates/interfaces/src/consensus.rs b/crates/interfaces/src/consensus.rs
index 16f4d6c07ad2..76c2175c9af7 100644
--- a/crates/interfaces/src/consensus.rs
+++ b/crates/interfaces/src/consensus.rs
@@ -113,6 +113,27 @@ pub enum ConsensusError {
     WithdrawalIndexInvalid { got: u64, expected: u64 },
     #[error("Missing withdrawals")]
     BodyWithdrawalsMissing,
+    #[error("Missing blob gas used")]
+    BlobGasUsedMissing,
+    #[error("Unexpected blob gas used")]
+    BlobGasUsedUnexpected,
+    #[error("Missing excess blob gas")]
+    ExcessBlobGasMissing,
+    #[error("Unexpected excess blob gas")]
+    ExcessBlobGasUnexpected,
+    #[error("Blob gas used {blob_gas_used} exceeds maximum allowance
{max_blob_gas_per_block}")] + BlobGasUsedExceedsMaxBlobGasPerBlock { blob_gas_used: u64, max_blob_gas_per_block: u64 }, + #[error( + "Blob gas used {blob_gas_used} is not a multiple of blob gas per blob {blob_gas_per_blob}" + )] + BlobGasUsedNotMultipleOfBlobGasPerBlob { blob_gas_used: u64, blob_gas_per_blob: u64 }, + #[error("Invalid excess blob gas. Expected: {expected}, got: {got}. Parent excess blob gas: {parent_excess_blob_gas}, parent blob gas used: {parent_blob_gas_used}.")] + ExcessBlobGasDiff { + expected: u64, + got: u64, + parent_excess_blob_gas: u64, + parent_blob_gas_used: u64, + }, /// Error for a transaction that violates consensus. #[error(transparent)] InvalidTransaction(#[from] InvalidTransactionError), diff --git a/crates/net/eth-wire/src/types/blocks.rs b/crates/net/eth-wire/src/types/blocks.rs index 47777cd71b12..808c8f4a4609 100644 --- a/crates/net/eth-wire/src/types/blocks.rs +++ b/crates/net/eth-wire/src/types/blocks.rs @@ -258,6 +258,8 @@ mod test { nonce: 0x0000000000000000u64, base_fee_per_gas: None, withdrawals_root: None, + blob_gas_used: None, + excess_blob_gas: None, }, ]), }.encode(&mut data); @@ -289,6 +291,8 @@ mod test { nonce: 0x0000000000000000u64, base_fee_per_gas: None, withdrawals_root: None, + blob_gas_used: None, + excess_blob_gas: None, }, ]), }; @@ -401,6 +405,8 @@ mod test { nonce: 0x0000000000000000u64, base_fee_per_gas: None, withdrawals_root: None, + blob_gas_used: None, + excess_blob_gas: None, }, ], withdrawals: None, @@ -485,6 +491,8 @@ mod test { nonce: 0x0000000000000000u64, base_fee_per_gas: None, withdrawals_root: None, + blob_gas_used: None, + excess_blob_gas: None, }, ], withdrawals: None, diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 84da180060da..3e15444d15da 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -714,6 +714,8 @@ fn build_payload( difficulty: U256::ZERO, gas_used: cumulative_gas_used, extra_data: extra_data.into(), + blob_gas_used: None, + excess_blob_gas: None, }; // seal the block @@ -785,6 +787,8 @@ where difficulty: U256::ZERO, gas_used: 0, extra_data: extra_data.into(), + blob_gas_used: None, + excess_blob_gas: None, }; let block = Block { header, body: vec![], ommers: vec![], withdrawals }; diff --git a/crates/primitives/src/blobfee.rs b/crates/primitives/src/blobfee.rs new file mode 100644 index 000000000000..e82b5d2f8c65 --- /dev/null +++ b/crates/primitives/src/blobfee.rs @@ -0,0 +1,12 @@ +//! Helpers for working with EIP-4844 blob fee + +use crate::constants::eip4844::TARGET_DATA_GAS_PER_BLOCK; + +/// Calculates the excess data gas for the next block, after applying the current set of blobs on +/// top of the excess data gas. 
+///
+/// Specified in [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#header-extension)
+pub fn calculate_excess_blob_gas(parent_excess_blob_gas: u64, parent_blob_gas_used: u64) -> u64 {
+    let excess_blob_gas = parent_excess_blob_gas + parent_blob_gas_used;
+    excess_blob_gas.saturating_sub(TARGET_DATA_GAS_PER_BLOCK)
+}
diff --git a/crates/primitives/src/header.rs b/crates/primitives/src/header.rs
index ba8e8f31deb4..e39c6a8287ad 100644
--- a/crates/primitives/src/header.rs
+++ b/crates/primitives/src/header.rs
@@ -1,5 +1,6 @@
 use crate::{
     basefee::calculate_next_block_base_fee,
+    blobfee::calculate_excess_blob_gas,
     keccak256,
     proofs::{EMPTY_LIST_HASH, EMPTY_ROOT},
     BlockBodyRoots, BlockHash, BlockNumHash, BlockNumber, Bloom, Bytes, H160, H256, H64, U256,
 };
 use bytes::{Buf, BufMut, BytesMut};
 use reth_codecs::{add_arbitrary_tests, derive_arbitrary, main_codec, Compact};
-use reth_rlp::{length_of_length, Decodable, Encodable, EMPTY_STRING_CODE};
+use reth_rlp::{length_of_length, Decodable, Encodable, EMPTY_LIST_CODE, EMPTY_STRING_CODE};
 use serde::{Deserialize, Serialize};
 use std::{
     mem,
@@ -91,6 +92,13 @@ pub struct Header {
     /// above the gas target, and decreasing when blocks are below the gas target. The base fee per
     /// gas is burned.
     pub base_fee_per_gas: Option<u64>,
+    /// The total amount of blob gas consumed by the transactions within the block, added in
+    /// EIP-4844.
+    pub blob_gas_used: Option<u64>,
+    /// A running total of blob gas consumed in excess of the target, prior to the block. Blocks
+    /// with above-target blob gas consumption increase this value, blocks with below-target blob
+    /// gas consumption decrease it (bounded at 0). This was added in EIP-4844.
+    pub excess_blob_gas: Option<u64>,
     /// An arbitrary byte array containing data relevant to this block. This must be 32 bytes or
     /// fewer; formally Hx.
     pub extra_data: Bytes,
@@ -116,6 +124,8 @@ impl Default for Header {
             nonce: 0,
             base_fee_per_gas: None,
             withdrawals_root: None,
+            blob_gas_used: None,
+            excess_blob_gas: None,
         }
     }
 }
@@ -170,6 +180,13 @@ impl Header {
         Some(calculate_next_block_base_fee(self.gas_used, self.gas_limit, self.base_fee_per_gas?))
     }
 
+    /// Calculate excess blob gas for the next block according to the EIP-4844 spec.
+    ///
+    /// Returns `None` if the excess blob gas is not set, i.e. there is no EIP-4844 support.
+    pub fn next_block_excess_blob_gas(&self) -> Option<u64> {
+        Some(calculate_excess_blob_gas(self.excess_blob_gas?, self.blob_gas_used?))
+    }
+
     /// Seal the header with a known hash.
     ///
     /// WARNING: This method does not perform validation whether the hash is correct.
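To make the arithmetic above concrete, here is a small, self-contained worked example, a sketch rather than reth code: the function body mirrors `calculate_excess_blob_gas` from `blobfee.rs` above, and the constant's value (three blobs of 131072 data gas each, i.e. 393216) comes from EIP-4844 itself, not from this diff.

    const TARGET_DATA_GAS_PER_BLOCK: u64 = 393_216; // 3 blobs * 131_072 gas per blob

    fn calculate_excess_blob_gas(parent_excess_blob_gas: u64, parent_blob_gas_used: u64) -> u64 {
        (parent_excess_blob_gas + parent_blob_gas_used).saturating_sub(TARGET_DATA_GAS_PER_BLOCK)
    }

    fn main() {
        // A full parent block (6 blobs = 786_432 gas) with no prior excess
        // leaves 786_432 - 393_216 = 393_216 excess for the child block.
        assert_eq!(calculate_excess_blob_gas(0, 786_432), 393_216);
        // A parent exactly at the 3-blob target carries the excess through unchanged.
        assert_eq!(calculate_excess_blob_gas(393_216, 393_216), 393_216);
        // Below-target usage decays the excess, saturating at zero.
        assert_eq!(calculate_excess_blob_gas(131_072, 131_072), 0);
    }

This is also why `next_block_excess_blob_gas` above returns `None` pre-Cancun: with either parent field absent there is no running total to update.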
@@ -202,6 +219,8 @@ impl Header {
         mem::size_of::<H256>() + // mix hash
         mem::size_of::<u64>() + // nonce
         mem::size_of::<Option<u64>>() + // base fee per gas
+        mem::size_of::<Option<u64>>() + // blob gas used
+        mem::size_of::<Option<u64>>() + // excess blob gas
         self.extra_data.len() // extra data
     }
 
@@ -225,11 +244,34 @@ impl Header {
         if let Some(base_fee) = self.base_fee_per_gas {
             length += U256::from(base_fee).length();
-        } else if self.withdrawals_root.is_some() {
-            length += 1; // EMTY STRING CODE
+        } else if self.withdrawals_root.is_some() ||
+            self.blob_gas_used.is_some() ||
+            self.excess_blob_gas.is_some()
+        {
+            length += 1; // EMPTY STRING CODE
         }
+
         if let Some(root) = self.withdrawals_root {
             length += root.length();
+        } else if self.blob_gas_used.is_some() || self.excess_blob_gas.is_some() {
+            length += 1; // EMPTY STRING CODE
+        }
+
+        if let Some(blob_gas_used) = self.blob_gas_used {
+            length += U256::from(blob_gas_used).length();
+        } else if self.excess_blob_gas.is_some() {
+            length += 1; // EMPTY STRING CODE
+        }
+
+        // Encode excess blob gas length. If new fields are added, the above pattern will need to
+        // be repeated and placeholder length added. Otherwise, it's impossible to tell _which_
+        // fields are missing. This is mainly relevant for contrived cases where a header is
+        // created at random, for example:
+        // * A header is created with a withdrawals root, but no base fee. Shanghai blocks are
+        //   post-London, so this is technically not valid. However, a tool like proptest would
+        //   generate a block like this.
+        if let Some(excess_blob_gas) = self.excess_blob_gas {
+            length += U256::from(excess_blob_gas).length();
         }
 
         length
@@ -261,12 +303,38 @@ impl Encodable for Header {
         // but withdrawals root is present.
         if let Some(ref base_fee) = self.base_fee_per_gas {
             U256::from(*base_fee).encode(out);
-        } else if self.withdrawals_root.is_some() {
+        } else if self.withdrawals_root.is_some() ||
+            self.blob_gas_used.is_some() ||
+            self.excess_blob_gas.is_some()
+        {
             out.put_u8(EMPTY_STRING_CODE);
         }
 
+        // Encode withdrawals root. Put empty string if withdrawals root is missing,
+        // but blob gas used is present.
         if let Some(ref root) = self.withdrawals_root {
             root.encode(out);
+        } else if self.blob_gas_used.is_some() || self.excess_blob_gas.is_some() {
+            out.put_u8(EMPTY_STRING_CODE);
+        }
+
+        // Encode blob gas used. Put empty string if blob gas used is missing,
+        // but excess blob gas is present.
+        if let Some(ref blob_gas_used) = self.blob_gas_used {
+            U256::from(*blob_gas_used).encode(out);
+        } else if self.excess_blob_gas.is_some() {
+            out.put_u8(EMPTY_LIST_CODE);
+        }
+
+        // Encode excess blob gas. If new fields are added, the above pattern will need to be
+        // repeated and placeholders added. Otherwise, it's impossible to tell _which_ fields
+        // are missing. This is mainly relevant for contrived cases where a header is created
+        // at random, for example:
+        // * A header is created with a withdrawals root, but no base fee. Shanghai blocks are
+        //   post-London, so this is technically not valid. However, a tool like proptest would
+        //   generate a block like this.
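// To make the placeholder scheme described above concrete, here is a small,
// self-contained sketch (the tail_slots helper and its string output are
// purely illustrative assumptions; the real encoder emits RLP bytes):
// optional tail fields are written only up to the last one that is present,
// and a hole before a present field gets a one-byte placeholder so a decoder
// can still tell which fields are missing.
fn tail_slots(
    base_fee: Option<u64>,
    withdrawals_root: Option<u64>,
    blob_gas_used: Option<u64>,
    excess_blob_gas: Option<u64>,
) -> Vec<String> {
    let fields = [
        ("base_fee", base_fee),
        ("withdrawals_root", withdrawals_root),
        ("blob_gas_used", blob_gas_used),
        ("excess_blob_gas", excess_blob_gas),
    ];
    // everything after the last present field is simply omitted
    let Some(last) = fields.iter().rposition(|(_, v)| v.is_some()) else {
        return Vec::new()
    };
    fields[..=last]
        .iter()
        .map(|(name, v)| match v {
            Some(x) => format!("{name}={x}"),
            // stands in for the EMPTY_STRING_CODE / EMPTY_LIST_CODE byte
            None => format!("{name}=placeholder"),
        })
        .collect()
}
// tail_slots(Some(7), None, None, Some(0)) keeps all four slots, writing
// placeholders for the two holes, while tail_slots(Some(7), None, None, None)
// yields just ["base_fee=7"]: nothing later is present, so nothing is padded.
// With that picture in mind, the excess blob gas itself is written below.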
+        if let Some(ref excess_blob_gas) = self.excess_blob_gas {
+            U256::from(*excess_blob_gas).encode(out);
         }
     }
 
@@ -303,7 +371,10 @@ impl Decodable for Header {
             nonce: H64::decode(buf)?.to_low_u64_be(),
             base_fee_per_gas: None,
             withdrawals_root: None,
+            blob_gas_used: None,
+            excess_blob_gas: None,
         };
+
         if started_len - buf.len() < rlp_head.payload_length {
             if buf.first().map(|b| *b == EMPTY_STRING_CODE).unwrap_or_default() {
                 buf.advance(1)
@@ -311,9 +382,36 @@ impl Decodable for Header {
                 this.base_fee_per_gas = Some(U256::decode(buf)?.to::<u64>());
             }
         }
+
+        // Withdrawals root for post-shanghai headers
+        if started_len - buf.len() < rlp_head.payload_length {
+            if buf.first().map(|b| *b == EMPTY_STRING_CODE).unwrap_or_default() {
+                buf.advance(1)
+            } else {
+                this.withdrawals_root = Some(Decodable::decode(buf)?);
+            }
+        }
+
+        // Blob gas used and excess blob gas for post-cancun headers
+        if started_len - buf.len() < rlp_head.payload_length {
+            if buf.first().map(|b| *b == EMPTY_LIST_CODE).unwrap_or_default() {
+                buf.advance(1)
+            } else {
+                this.blob_gas_used = Some(U256::decode(buf)?.to::<u64>());
+            }
+        }
+
+        // Decode excess blob gas. If new fields are added, the above pattern will need to be
+        // repeated and placeholders decoded. Otherwise, it's impossible to tell _which_ fields are
+        // missing. This is mainly relevant for contrived cases where a header is created at
+        // random, for example:
+        // * A header is created with a withdrawals root, but no base fee. Shanghai blocks are
+        //   post-London, so this is technically not valid. However, a tool like proptest would
+        //   generate a block like this.
         if started_len - buf.len() < rlp_head.payload_length {
-            this.withdrawals_root = Some(Decodable::decode(buf)?);
+            this.excess_blob_gas = Some(U256::decode(buf)?.to::<u64>());
         }
+
         let consumed = started_len - buf.len();
         if consumed != rlp_head.payload_length {
             return Err(reth_rlp::DecodeError::ListLengthMismatch {
@@ -536,6 +634,8 @@ mod ethers_compat {
             gas_used: block.gas_used.as_u64(),
             withdrawals_root: None,
             logs_bloom: block.logs_bloom.unwrap_or_default().0.into(),
+            blob_gas_used: None,
+            excess_blob_gas: None,
         }
     }
 }
@@ -605,6 +705,8 @@ mod tests {
             nonce: 0,
             base_fee_per_gas: Some(0x036b_u64),
             withdrawals_root: None,
+            blob_gas_used: None,
+            excess_blob_gas: None,
         };
         assert_eq!(header.hash_slow(), expected_hash);
     }
@@ -683,6 +785,120 @@ mod tests {
         assert_eq!(header.hash_slow(), expected_hash);
     }
 
+    // Test vector from: https://github.com/ethereum/tests/blob/7e9e0940c0fcdbead8af3078ede70f969109bd85/BlockchainTests/ValidBlocks/bcExample/cancunExample.json
+    #[test]
+    fn test_decode_block_header_with_blob_fields_ef_tests() {
+        let data =
hex::decode("f90221a03a9b485972e7353edd9152712492f0c58d89ef80623686b6bf947a4a6dce6cb6a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa03c837fc158e3e93eafcaf2e658a02f5d8f99abc9f1c4c66cdea96c0ca26406aea04409cc4b699384ba5f8248d92b784713610c5ff9c1de51e9239da0dac76de9cea046cab26abf1047b5b119ecc2dda1296b071766c8b1307e1381fcecc90d513d86b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008001887fffffffffffffff8302a86582079e42a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b42188000000000000000009a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b4218302000080").unwrap(); + let expected = Header { + parent_hash: H256::from_str( + "3a9b485972e7353edd9152712492f0c58d89ef80623686b6bf947a4a6dce6cb6", + ) + .unwrap(), + ommers_hash: H256::from_str( + "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + ) + .unwrap(), + beneficiary: Address::from_str("2adc25665018aa1fe0e6bc666dac8fc2697ff9ba").unwrap(), + state_root: H256::from_str( + "3c837fc158e3e93eafcaf2e658a02f5d8f99abc9f1c4c66cdea96c0ca26406ae", + ) + .unwrap(), + transactions_root: H256::from_str( + "4409cc4b699384ba5f8248d92b784713610c5ff9c1de51e9239da0dac76de9ce", + ) + .unwrap(), + receipts_root: H256::from_str( + "46cab26abf1047b5b119ecc2dda1296b071766c8b1307e1381fcecc90d513d86", + ) + .unwrap(), + logs_bloom: Default::default(), + difficulty: U256::from(0), + number: 0x1, + gas_limit: 0x7fffffffffffffff, + gas_used: 0x02a865, + timestamp: 0x079e, + extra_data: Bytes::from(vec![0x42]), + mix_hash: H256::from_str( + "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + ) + .unwrap(), + nonce: 0, + base_fee_per_gas: Some(9), + withdrawals_root: Some( + H256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") + .unwrap(), + ), + blob_gas_used: Some(0x020000), + excess_blob_gas: Some(0), + }; + + let header = Header::decode(&mut data.as_slice()).unwrap(); + assert_eq!(header, expected); + + let expected_hash = + H256::from_str("0x10aca3ebb4cf6ddd9e945a5db19385f9c105ede7374380c50d56384c3d233785") + .unwrap(); + assert_eq!(header.hash_slow(), expected_hash); + } + + #[test] + fn test_decode_block_header_with_blob_fields() { + // Block from devnet-7 + let data = 
hex::decode("f90239a013a7ec98912f917b3e804654e37c9866092043c13eb8eab94eb64818e886cff5a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794f97e180c050e5ab072211ad2c213eb5aee4df134a0ec229dbe85b0d3643ad0f471e6ec1a36bbc87deffbbd970762d22a53b35d068aa056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080830305988401c9c380808464c40d5499d883010c01846765746888676f312e32302e35856c696e7578a070ccadc40b16e2094954b1064749cc6fbac783c1712f1b271a8aac3eda2f232588000000000000000007a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421808401600000").unwrap(); + let expected = Header { + parent_hash: H256::from_str( + "13a7ec98912f917b3e804654e37c9866092043c13eb8eab94eb64818e886cff5", + ) + .unwrap(), + ommers_hash: H256::from_str( + "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + ) + .unwrap(), + beneficiary: Address::from_str("f97e180c050e5ab072211ad2c213eb5aee4df134").unwrap(), + state_root: H256::from_str( + "ec229dbe85b0d3643ad0f471e6ec1a36bbc87deffbbd970762d22a53b35d068a", + ) + .unwrap(), + transactions_root: H256::from_str( + "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + ) + .unwrap(), + receipts_root: H256::from_str( + "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + ) + .unwrap(), + logs_bloom: Default::default(), + difficulty: U256::from(0), + number: 0x30598, + gas_limit: 0x1c9c380, + gas_used: 0, + timestamp: 0x64c40d54, + extra_data: Bytes::from( + hex::decode("d883010c01846765746888676f312e32302e35856c696e7578").unwrap(), + ), + mix_hash: H256::from_str( + "70ccadc40b16e2094954b1064749cc6fbac783c1712f1b271a8aac3eda2f2325", + ) + .unwrap(), + nonce: 0, + base_fee_per_gas: Some(7), + withdrawals_root: Some( + H256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") + .unwrap(), + ), + blob_gas_used: Some(0), + excess_blob_gas: Some(0x1600000), + }; + + let header = Header::decode(&mut data.as_slice()).unwrap(); + assert_eq!(header, expected); + + let expected_hash = + H256::from_str("0x539c9ea0a3ca49808799d3964b8b6607037227de26bc51073c6926963127087b") + .unwrap(); + assert_eq!(header.hash_slow(), expected_hash); + } + #[test] fn sanity_direction() { let reverse = true; diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 64faa1e03b11..8e82bdd70730 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -23,6 +23,7 @@ pub mod abi; mod account; pub mod basefee; mod bits; +pub mod blobfee; mod block; pub mod bloom; mod chain; diff --git a/crates/rpc/rpc-types/src/eth/block.rs b/crates/rpc/rpc-types/src/eth/block.rs index ff47937af4ec..a246baf20a7d 100644 --- a/crates/rpc/rpc-types/src/eth/block.rs +++ b/crates/rpc/rpc-types/src/eth/block.rs @@ -267,6 +267,9 @@ impl Header { base_fee_per_gas, extra_data, withdrawals_root, + // TODO: add header fields to the rpc header + blob_gas_used: _, + excess_blob_gas: _, }, hash, } = primitive_header; diff 
--git a/crates/rpc/rpc-types/src/eth/engine/payload.rs b/crates/rpc/rpc-types/src/eth/engine/payload.rs
index 037d5739ecfd..d2b0625577a5 100644
--- a/crates/rpc/rpc-types/src/eth/engine/payload.rs
+++ b/crates/rpc/rpc-types/src/eth/engine/payload.rs
@@ -172,6 +172,9 @@ impl TryFrom<ExecutionPayload> for SealedBlock {
             ommers_hash: EMPTY_LIST_HASH,
             difficulty: Default::default(),
             nonce: Default::default(),
+            // TODO: add conversion once ExecutionPayload has 4844 fields
+            blob_gas_used: None,
+            excess_blob_gas: None,
         }
         .seal_slow();
 
diff --git a/crates/rpc/rpc/src/eth/api/pending_block.rs b/crates/rpc/rpc/src/eth/api/pending_block.rs
index b5b8bc89c410..c9b74a3de7df 100644
--- a/crates/rpc/rpc/src/eth/api/pending_block.rs
+++ b/crates/rpc/rpc/src/eth/api/pending_block.rs
@@ -143,6 +143,8 @@ impl PendingBlockEnv {
             difficulty: U256::ZERO,
             gas_used: cumulative_gas_used,
             extra_data: Default::default(),
+            blob_gas_used: None,
+            excess_blob_gas: None,
         };
 
         // seal the block
diff --git a/testing/ef-tests/src/models.rs b/testing/ef-tests/src/models.rs
index bca586bd866b..989a0dd9e7fa 100644
--- a/testing/ef-tests/src/models.rs
+++ b/testing/ef-tests/src/models.rs
@@ -100,6 +100,8 @@ impl From<Header>
for SealedHeader { parent_hash: value.parent_hash, logs_bloom: value.bloom, withdrawals_root: value.withdrawals_root, + blob_gas_used: None, + excess_blob_gas: None, }; header.seal(value.hash) } From 555a1dd2b53a61d50219284ffc359f7466a0b639 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Sat, 29 Jul 2023 15:37:18 +0200 Subject: [PATCH 289/722] ci: deny warnings only in lint jobs (#3982) --- .github/workflows/bench.yml | 1 - .github/workflows/docker.yml | 1 - .github/workflows/fuzz.yml | 1 - .github/workflows/hive.yml | 1 - .github/workflows/integration.yml | 1 - .github/workflows/release.yml | 1 - .github/workflows/unit.yml | 1 - 7 files changed, 7 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index cd5b3f54d6ae..db26044dfa56 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -5,7 +5,6 @@ on: branches: [main] env: - RUSTFLAGS: -D warnings CARGO_TERM_COLOR: always concurrency: diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 06dbff26f2d0..476d9c821005 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -9,7 +9,6 @@ on: env: REPO_NAME: ${{ github.repository_owner }}/reth IMAGE_NAME: ${{ github.repository_owner }}/reth - RUSTFLAGS: -D warnings CARGO_TERM_COLOR: always DOCKER_IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/reth DOCKER_USERNAME: ${{ github.actor }} diff --git a/.github/workflows/fuzz.yml b/.github/workflows/fuzz.yml index ebe22fedc72d..07a31797afb9 100644 --- a/.github/workflows/fuzz.yml +++ b/.github/workflows/fuzz.yml @@ -5,7 +5,6 @@ on: branches: [main] env: - RUSTFLAGS: -D warnings CARGO_TERM_COLOR: always concurrency: diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml index b564803e4e41..fbc598b95ac5 100644 --- a/.github/workflows/hive.yml +++ b/.github/workflows/hive.yml @@ -4,7 +4,6 @@ on: - cron: '0 0 * * *' env: - RUSTFLAGS: -D warnings CARGO_TERM_COLOR: always concurrency: diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 20886b9e4ba2..e45643ae55ae 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -5,7 +5,6 @@ on: branches: [main] env: - RUSTFLAGS: -D warnings CARGO_TERM_COLOR: always GETH_BUILD: 1.12.0-e501b3b0 SEED: rustethereumethereumrust diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 5a5bedaaf7c7..1417695755f1 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -10,7 +10,6 @@ on: env: REPO_NAME: ${{ github.repository_owner }}/reth IMAGE_NAME: ${{ github.repository_owner }}/reth - RUSTFLAGS: -D warnings CARGO_TERM_COLOR: always jobs: diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml index e46f5a112f2a..6c89dca7160e 100644 --- a/.github/workflows/unit.yml +++ b/.github/workflows/unit.yml @@ -5,7 +5,6 @@ on: branches: [main] env: - RUSTFLAGS: -D warnings CARGO_TERM_COLOR: always SEED: rustethereumethereumrust From f41386d28e89dd436feea872178452e5302314a5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 29 Jul 2023 15:54:36 +0200 Subject: [PATCH 290/722] chore: cargo update (#3988) --- Cargo.lock | 1697 +++++++++++++++++++++++++++------------------------- 1 file changed, 879 insertions(+), 818 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 45770d2efbdd..ca6c689e2304 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14,9 +14,9 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.19.0" +version = 
"0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" +checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3" dependencies = [ "gimli", ] @@ -51,12 +51,12 @@ dependencies = [ [[package]] name = "aes" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "433cfd6710c9986c576a25ca913c39d66a6474107b406f34f91d4a8923395241" +checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" dependencies = [ "cfg-if", - "cipher 0.4.3", + "cipher 0.4.4", "cpufeatures", ] @@ -80,7 +80,7 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", "once_cell", "version_check", ] @@ -92,7 +92,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" dependencies = [ "cfg-if", - "getrandom 0.2.9", + "getrandom 0.2.10", "once_cell", "version_check", ] @@ -108,9 +108,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67fc08ce920c31afb70f013dcce1bfc3a3195de6a228474e45e1f145b36f8d04" +checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" dependencies = [ "memchr", ] @@ -151,22 +151,72 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" +[[package]] +name = "anstream" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is-terminal", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" + +[[package]] +name = "anstyle-parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +dependencies = [ + "windows-sys 0.48.0", +] + +[[package]] +name = "anstyle-wincon" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188" +dependencies = [ + "anstyle", + "windows-sys 0.48.0", +] + [[package]] name = "anyhow" -version = "1.0.71" +version = "1.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" +checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854" [[package]] name = "aquamarine" -version = "0.3.0" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759d98a5db12e9c9d98ef2b92f794ae5c7ded6ec18d21c3fa485c9c65bec237d" +checksum = 
"df752953c49ce90719c7bf1fc587bc8227aed04732ea0c0f85e5397d7fdbd1a1" dependencies = [ + "include_dir", "itertools 0.10.5", "proc-macro-error", "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "syn 1.0.109", ] @@ -179,11 +229,20 @@ dependencies = [ "derive_arbitrary", ] +[[package]] +name = "array-init" +version = "0.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23589ecb866b460d3a0f1278834750268c607e8e28a1b982c907219f3178cd72" +dependencies = [ + "nodrop", +] + [[package]] name = "arrayvec" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" +checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "assert_matches" @@ -193,9 +252,9 @@ checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" [[package]] name = "async-compression" -version = "0.3.15" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "942c7cd7ae39e91bde4820d74132e9862e62c2f386c3aa90ccf55949f5bad63a" +checksum = "62b74f44609f0f91493e3082d3734d98497e094777144380ea4db9f9905dd5b6" dependencies = [ "brotli", "flate2", @@ -203,8 +262,8 @@ dependencies = [ "memchr", "pin-project-lite", "tokio", - "zstd 0.11.2+zstd.1.5.2", - "zstd-safe 5.0.2+zstd.1.5.2", + "zstd", + "zstd-safe", ] [[package]] @@ -218,13 +277,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.68" +version = "0.1.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" +checksum = "cc6dde6e4ed435a4c1ee4e73592f5ba9da2151af10076cc04858746af9352d09" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 2.0.26", + "quote 1.0.32", + "syn 2.0.27", ] [[package]] @@ -249,9 +308,9 @@ dependencies = [ [[package]] name = "atomic-polyfill" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c314e70d181aa6053b26e3f7fbf86d1dfff84f816a6175b967666b3506ef7289" +checksum = "8cf2bce30dfe09ef0bfaef228b9d414faaf7e563035494d7fe092dba54b300f4" dependencies = [ "critical-section", ] @@ -276,7 +335,7 @@ checksum = "fee3da8ef1276b0bee5dd1c7258010d8fffd31801447323115a25560e1327b89" dependencies = [ "proc-macro-error", "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "syn 1.0.109", ] @@ -288,21 +347,21 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "backon" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f34fac4d7cdaefa2deded0eda2d5d59dbfd43370ff3f856209e72340ae84c294" +checksum = "0c1a6197b2120bb2185a267f6515038558b019e92b832bb0320e96d66268dcf9" dependencies = [ - "futures", + "fastrand 1.9.0", + "futures-core", "pin-project", - "rand 0.8.5", "tokio", ] [[package]] name = "backtrace" -version = "0.3.67" +version = "0.3.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" +checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12" dependencies = [ "addr2line", "cc", @@ -327,15 +386,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0ea22880d78093b0cbe17c89f64a7d457941e65759157ec6cb31a31d652b05e5" - -[[package]] -name = "base64" -version = "0.21.0" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" +checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" [[package]] name = "base64ct" @@ -345,9 +398,9 @@ checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "basic-toml" -version = "0.1.1" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e819b667739967cd44d308b8c7b71305d8bb0729ac44a248aa08f33d01950b4" +checksum = "7bfc506e7a2370ec239e1d072507b2a80c833083699d3c6fa176fbb4de8448c6" dependencies = [ "serde", ] @@ -389,7 +442,7 @@ dependencies = [ "lazycell", "peeking_take_while", "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "regex", "rustc-hash", "shlex", @@ -410,11 +463,11 @@ dependencies = [ "peeking_take_while", "prettyplease", "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "regex", "rustc-hash", "shlex", - "syn 2.0.26", + "syn 2.0.27", ] [[package]] @@ -487,9 +540,9 @@ dependencies = [ [[package]] name = "block-padding" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a90ec2df9600c28a01c56c4784c9207a96d2451833aeceb8cc97e4c9548bb78" +checksum = "a8894febbff9f758034a5b8e12d87918f56dfc64a8e1fe757d65e29041538d93" dependencies = [ "generic-array", ] @@ -497,7 +550,7 @@ dependencies = [ [[package]] name = "boa_ast" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#b51e7cfb84fd6a4dc474d103e5654efedfe94a4e" +source = "git+https://github.com/boa-dev/boa#d459ff1b444ac119fe58e46c4e32e874a8135ce3" dependencies = [ "bitflags 2.3.3", "boa_interner", @@ -510,7 +563,7 @@ dependencies = [ [[package]] name = "boa_engine" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#b51e7cfb84fd6a4dc474d103e5654efedfe94a4e" +source = "git+https://github.com/boa-dev/boa#d459ff1b444ac119fe58e46c4e32e874a8135ce3" dependencies = [ "bitflags 2.3.3", "boa_ast", @@ -548,7 +601,7 @@ dependencies = [ [[package]] name = "boa_gc" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#b51e7cfb84fd6a4dc474d103e5654efedfe94a4e" +source = "git+https://github.com/boa-dev/boa#d459ff1b444ac119fe58e46c4e32e874a8135ce3" dependencies = [ "boa_macros", "boa_profiler", @@ -558,7 +611,7 @@ dependencies = [ [[package]] name = "boa_icu_provider" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#b51e7cfb84fd6a4dc474d103e5654efedfe94a4e" +source = "git+https://github.com/boa-dev/boa#d459ff1b444ac119fe58e46c4e32e874a8135ce3" dependencies = [ "icu_collections", "icu_normalizer", @@ -571,7 +624,7 @@ dependencies = [ [[package]] name = "boa_interner" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#b51e7cfb84fd6a4dc474d103e5654efedfe94a4e" +source = "git+https://github.com/boa-dev/boa#d459ff1b444ac119fe58e46c4e32e874a8135ce3" dependencies = [ "boa_gc", "boa_macros", @@ -586,18 +639,18 @@ dependencies = [ [[package]] name = "boa_macros" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#b51e7cfb84fd6a4dc474d103e5654efedfe94a4e" +source = "git+https://github.com/boa-dev/boa#d459ff1b444ac119fe58e46c4e32e874a8135ce3" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 2.0.26", + "quote 1.0.32", + "syn 2.0.27", "synstructure 0.13.0", ] [[package]] name = "boa_parser" version = 
"0.17.0" -source = "git+https://github.com/boa-dev/boa#b51e7cfb84fd6a4dc474d103e5654efedfe94a4e" +source = "git+https://github.com/boa-dev/boa#d459ff1b444ac119fe58e46c4e32e874a8135ce3" dependencies = [ "bitflags 2.3.3", "boa_ast", @@ -617,7 +670,7 @@ dependencies = [ [[package]] name = "boa_profiler" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#b51e7cfb84fd6a4dc474d103e5654efedfe94a4e" +source = "git+https://github.com/boa-dev/boa#d459ff1b444ac119fe58e46c4e32e874a8135ce3" [[package]] name = "brotli" @@ -657,14 +710,14 @@ checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" dependencies = [ "lazy_static", "memchr", - "regex-automata", + "regex-automata 0.1.10", ] [[package]] name = "bstr" -version = "1.3.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ffdb39cb703212f3c11973452c2861b972f757b021158f3516ba10f2fa8b2c1" +checksum = "6798148dccfbff0fae41c7574d2fa8f1ef3492fba0face179de5d8d447d67b05" dependencies = [ "memchr", "serde", @@ -672,9 +725,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" +checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" [[package]] name = "byte-slice-cast" @@ -705,18 +758,18 @@ dependencies = [ [[package]] name = "camino" -version = "1.1.3" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6031a462f977dd38968b6f23378356512feeace69cef817e1a4475108093cec3" +checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c" dependencies = [ "serde", ] [[package]] name = "cargo-platform" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbdb825da8a5df079a43676dbe042702f1707b1109f713a01420fbb4cc71fa27" +checksum = "2cfa25e60aea747ec7e1124f238816749faa93759c6ff5b31f1ccdda137f4479" dependencies = [ "serde", ] @@ -786,9 +839,9 @@ dependencies = [ [[package]] name = "ciborium" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0c137568cc60b904a7724001b35ce2630fd00d5d84805fbb608ab89509d788f" +checksum = "effd91f6c78e5a4ace8a5d3c0b6bfaec9e2baaef55f3efc00e45fb2e477ee926" dependencies = [ "ciborium-io", "ciborium-ll", @@ -797,15 +850,15 @@ dependencies = [ [[package]] name = "ciborium-io" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "346de753af073cc87b52b2083a506b38ac176a44cfb05497b622e27be899b369" +checksum = "cdf919175532b369853f5d5e20b26b43112613fd6fe7aee757e35f7a44642656" [[package]] name = "ciborium-ll" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213030a2b5a4e0c0892b6652260cf6ccac84827b83a85a534e178e3906c4cf1b" +checksum = "defaa24ecc093c77630e6c15e17c51f5e187bf35ee514f4e2d67baaa96dae22b" dependencies = [ "ciborium-io", "half", @@ -822,9 +875,9 @@ dependencies = [ [[package]] name = "cipher" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1873270f8f7942c191139cb8a40fd228da6c3fd2fc376d7e92d47aa14aeb59e" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ "crypto-common", "inout", @@ -832,9 +885,9 @@ dependencies = [ [[package]] name = 
"clang-sys" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ed9a53e5d4d9c573ae844bfac6872b159cb1d1585a83b29e7a64b7eef7332a" +checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" dependencies = [ "glob", "libc", @@ -843,40 +896,44 @@ dependencies = [ [[package]] name = "clap" -version = "4.1.8" +version = "4.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d7ae14b20b94cb02149ed21a86c423859cbe18dc7ed69845cace50e52b40a5" +checksum = "5fd304a20bff958a57f04c4e96a2e7594cc4490a0e809cbd48bb6437edaa452d" dependencies = [ - "bitflags 1.3.2", + "clap_builder", "clap_derive", - "clap_lex", - "is-terminal", "once_cell", +] + +[[package]] +name = "clap_builder" +version = "4.3.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01c6a3f08f1fe5662a35cfe393aec09c4df95f60ee93b7556505260f75eee9e1" +dependencies = [ + "anstream", + "anstyle", + "clap_lex", "strsim 0.10.0", - "termcolor", ] [[package]] name = "clap_derive" -version = "4.1.8" +version = "4.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44bec8e5c9d09e439c4335b1af0abaab56dcf3b94999a936e1bb47b9134288f0" +checksum = "54a9bb5758fc5dfe728d1019941681eccaf0cf8a4189b692a0ee2f2ecf90a050" dependencies = [ "heck", - "proc-macro-error", "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 1.0.109", + "quote 1.0.32", + "syn 2.0.27", ] [[package]] name = "clap_lex" -version = "0.3.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "350b9cf31731f9957399229e9b2adc51eeabdfbe9d71d9a0552275fd12710d09" -dependencies = [ - "os_str_bytes", -] +checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" [[package]] name = "cobs" @@ -891,19 +948,9 @@ dependencies = [ "convert_case 0.6.0", "parity-scale-codec", "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "serde", - "syn 2.0.26", -] - -[[package]] -name = "codespan-reporting" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" -dependencies = [ - "termcolor", - "unicode-width", + "syn 2.0.27", ] [[package]] @@ -915,13 +962,13 @@ dependencies = [ "bincode", "bs58", "coins-core", - "digest 0.10.6", - "getrandom 0.2.9", + "digest 0.10.7", + "getrandom 0.2.10", "hmac", "k256", "lazy_static", "serde", - "sha2 0.10.6", + "sha2 0.10.7", "thiserror", ] @@ -933,12 +980,12 @@ checksum = "84f4d04ee18e58356accd644896aeb2094ddeafb6a713e056cef0c0a8e468c15" dependencies = [ "bitvec 0.17.4", "coins-bip32", - "getrandom 0.2.9", + "getrandom 0.2.10", "hmac", "once_cell", - "pbkdf2 0.12.1", + "pbkdf2 0.12.2", "rand 0.8.5", - "sha2 0.10.6", + "sha2 0.10.7", "thiserror", ] @@ -948,20 +995,26 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b949a1c63fb7eb591eb7ba438746326aedf0ae843e51ec92ba6bec5bb382c4f" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", "bech32", "bs58", - "digest 0.10.6", + "digest 0.10.7", "generic-array", "hex", "ripemd", "serde", "serde_derive", - "sha2 0.10.6", + "sha2 0.10.7", "sha3", "thiserror", ] +[[package]] +name = "colorchoice" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" + [[package]] name = "comfy-table" version = "7.0.1" @@ -1000,9 +1053,9 @@ dependencies = [ 
[[package]] name = "const-oid" -version = "0.9.2" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" +checksum = "795bc6e66a8e340f075fcf6227e417a2dc976b92b91f3cdc778bb858778b6747" [[package]] name = "const-str" @@ -1037,9 +1090,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpp_demangle" @@ -1052,9 +1105,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.6" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "280a9f2d8b3a38871a3c8a46fb80db65e5e5ed97da80c4d08bf27fb63e35e181" +checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" dependencies = [ "libc", ] @@ -1128,9 +1181,9 @@ checksum = "6548a0ad5d2549e111e1f6a11a6c2e2d00ce6a3dafe22948d67c2b443f775e52" [[package]] name = "crossbeam-channel" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c" +checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ "cfg-if", "crossbeam-utils", @@ -1149,9 +1202,9 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.14" +version = "0.9.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" +checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" dependencies = [ "autocfg", "cfg-if", @@ -1162,9 +1215,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.15" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" dependencies = [ "cfg-if", ] @@ -1218,9 +1271,9 @@ checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] name = "crypto-bigint" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c2538c4e68e52548bacb3e83ac549f903d44f011ac9d5abb5e132e67d0808f7" +checksum = "cf4c2f4e1afd912bc40bfd6fed5d9dc1f288e0ba01bfcc835cc5bc3eb13efe15" dependencies = [ "generic-array", "rand_core 0.6.4", @@ -1238,16 +1291,6 @@ dependencies = [ "typenum", ] -[[package]] -name = "ctor" -version = "0.1.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" -dependencies = [ - "quote 1.0.31", - "syn 1.0.109", -] - [[package]] name = "ctr" version = "0.8.0" @@ -1263,66 +1306,35 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" dependencies = [ - "cipher 0.4.3", + "cipher 0.4.4", ] [[package]] name = "curve25519-dalek" -version = "4.0.0-rc.2" +version = "4.0.0-rc.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03d928d978dbec61a1167414f5ec534f24bea0d7a0d24dd9b6233d3d8223e585" +checksum = 
"436ace70fc06e06f7f689d2624dc4e2f0ea666efb5aa704215f7249ae6e047a7" dependencies = [ "cfg-if", - "digest 0.10.6", + "cpufeatures", + "curve25519-dalek-derive", + "digest 0.10.7", "fiat-crypto", - "packed_simd_2", "platforms", + "rustc_version", "subtle", "zeroize", ] [[package]] -name = "cxx" -version = "1.0.91" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86d3488e7665a7a483b57e25bdd90d0aeb2bc7608c8d0346acf2ad3f1caf1d62" -dependencies = [ - "cc", - "cxxbridge-flags", - "cxxbridge-macro", - "link-cplusplus", -] - -[[package]] -name = "cxx-build" -version = "1.0.91" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48fcaf066a053a41a81dfb14d57d99738b767febb8b735c3016e469fac5da690" -dependencies = [ - "cc", - "codespan-reporting", - "once_cell", - "proc-macro2 1.0.66", - "quote 1.0.31", - "scratch", - "syn 1.0.109", -] - -[[package]] -name = "cxxbridge-flags" -version = "1.0.91" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2ef98b8b717a829ca5603af80e1f9e2e48013ab227b68ef37872ef84ee479bf" - -[[package]] -name = "cxxbridge-macro" -version = "1.0.91" +name = "curve25519-dalek-derive" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "086c685979a698443656e5cf7856c95c642295a38599f12fb1ff76fb28d19892" +checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 1.0.109", + "quote 1.0.32", + "syn 2.0.27", ] [[package]] @@ -1337,22 +1349,12 @@ dependencies = [ [[package]] name = "darling" -version = "0.14.3" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0808e1bd8671fb44a113a14e13497557533369847788fa2ae912b6ebfce9fa8" +checksum = "0209d94da627ab5605dcccf08bb18afa5009cfbef48d8a8b7d7bdbc79be25c5e" dependencies = [ - "darling_core 0.14.3", - "darling_macro 0.14.3", -] - -[[package]] -name = "darling" -version = "0.20.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0558d22a7b463ed0241e993f76f09f30b126687447751a8638587b864e4b3944" -dependencies = [ - "darling_core 0.20.1", - "darling_macro 0.20.1", + "darling_core 0.20.3", + "darling_macro 0.20.3", ] [[package]] @@ -1364,37 +1366,23 @@ dependencies = [ "fnv", "ident_case", "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "strsim 0.9.3", "syn 1.0.109", ] [[package]] name = "darling_core" -version = "0.14.3" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "001d80444f28e193f30c2f293455da62dcf9a6b29918a4253152ae2b1de592cb" +checksum = "177e3443818124b357d8e76f53be906d60937f0d3a90773a664fa63fa253e621" dependencies = [ "fnv", "ident_case", "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "strsim 0.10.0", - "syn 1.0.109", -] - -[[package]] -name = "darling_core" -version = "0.20.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab8bfa2e259f8ee1ce5e97824a3c55ec4404a0d772ca7fa96bf19f0752a046eb" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2 1.0.66", - "quote 1.0.31", - "strsim 0.10.0", - "syn 2.0.26", + "syn 2.0.27", ] [[package]] @@ -1404,30 +1392,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9b5a2f4ac4969822c62224815d069952656cadc7084fdca9751e6d959189b72" dependencies = [ "darling_core 0.10.2", - "quote 1.0.31", + "quote 1.0.32", "syn 1.0.109", ] [[package]] name = "darling_macro" -version = "0.14.3" +version = 
"0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b36230598a2d5de7ec1c6f51f72d8a99a9208daff41de2084d06e3fd3ea56685" +checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ - "darling_core 0.14.3", - "quote 1.0.31", - "syn 1.0.109", -] - -[[package]] -name = "darling_macro" -version = "0.20.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" -dependencies = [ - "darling_core 0.20.1", - "quote 1.0.31", - "syn 2.0.26", + "darling_core 0.20.3", + "quote 1.0.32", + "syn 2.0.27", ] [[package]] @@ -1445,9 +1422,9 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.3.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d8666cb01533c39dde32bcbab8e227b4ed6679b2c925eba05feabea39508fb" +checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" [[package]] name = "debugid" @@ -1455,7 +1432,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef552e6f588e446098f6ba40d89ac146c8c7b64aade83c051ee00bb5d2bc18d" dependencies = [ - "uuid 1.3.0", + "uuid 1.4.1", ] [[package]] @@ -1470,9 +1447,9 @@ dependencies = [ [[package]] name = "der" -version = "0.7.3" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82b10af9f9f9f2134a42d3f8aa74658660f2e0234b0eb81bd171df8aa32779ed" +checksum = "0c7ed52955ce76b1554f509074bb357d3fb8ac9b51288a65a3fd480d1dfba946" dependencies = [ "const-oid", "zeroize", @@ -1480,13 +1457,13 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3cdeb9ec472d588e539a818b2dee436825730da08ad0017c4b1a17676bdc8b7" +checksum = "53e0efad4403bfc52dc201159c4b842a246a14b98c64b55dfd0f2d89729dfeb8" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 1.0.109", + "quote 1.0.32", + "syn 2.0.27", ] [[package]] @@ -1498,7 +1475,7 @@ dependencies = [ "darling 0.10.2", "derive_builder_core", "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "syn 1.0.109", ] @@ -1510,7 +1487,7 @@ checksum = "2791ea3e372c8495c0bc2033991d76b512cd799d07491fbd6890124db9458bef" dependencies = [ "darling 0.10.2", "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "syn 1.0.109", ] @@ -1522,7 +1499,7 @@ checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case 0.4.0", "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "rustc_version", "syn 1.0.109", ] @@ -1550,9 +1527,9 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer 0.10.4", "const-oid", @@ -1566,16 +1543,16 @@ version = "4.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f51c5d4ddabd36886dd3e1438cb358cdcb0d7c499cb99cb4ac2e38e18b5cb210" dependencies = [ - "dirs-sys", + "dirs-sys 0.3.7", ] [[package]] name = "dirs" -version = "4.0.0" +version = "5.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059" +checksum = 
"44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" dependencies = [ - "dirs-sys", + "dirs-sys 0.4.1", ] [[package]] @@ -1599,6 +1576,18 @@ dependencies = [ "winapi", ] +[[package]] +name = "dirs-sys" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.48.0", +] + [[package]] name = "dirs-sys-next" version = "0.1.2" @@ -1612,14 +1601,14 @@ dependencies = [ [[package]] name = "discv5" -version = "0.3.0" -source = "git+https://github.com/sigp/discv5#f78d538ef8f3c3b3981cfbb8ce2ba3179295eeab" +version = "0.3.1" +source = "git+https://github.com/sigp/discv5#a9f1e99321aec746fb9d6e8df889aa515a5e1254" dependencies = [ "aes 0.7.5", "aes-gcm", "arrayvec", "delay_map", - "enr", + "enr 0.9.0", "fnv", "futures", "hashlink", @@ -1631,8 +1620,8 @@ dependencies = [ "parking_lot 0.11.2", "rand 0.8.5", "rlp", - "smallvec", - "socket2", + "smallvec 1.11.0", + "socket2 0.4.9", "tokio", "tracing", "tracing-subscriber", @@ -1647,8 +1636,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 2.0.26", + "quote 1.0.32", + "syn 2.0.27", ] [[package]] @@ -1659,7 +1648,7 @@ checksum = "53ecafc952c4528d9b51a458d1a8904b81783feff9fde08ab6ed2545ff396872" dependencies = [ "cfg-if", "libc", - "socket2", + "socket2 0.4.9", "winapi", ] @@ -1671,27 +1660,28 @@ checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" [[package]] name = "dunce" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bd4b30a6560bbd9b4620f4de34c3f14f60848e58a9b7216801afcb4c7b31c3c" +checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" [[package]] name = "dyn-clone" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b0cf012f1230e43cd00ebb729c6bb58707ecfa8ad08b52ef3a4ccd2697fc30" +checksum = "304e6508efa593091e97a9abbc10f90aa7ca635b6d2784feff3c89d41dd12272" [[package]] name = "ecdsa" -version = "0.16.6" +version = "0.16.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a48e5d537b8a30c0b023116d981b16334be1485af7ca68db3a2b7024cbc957fd" +checksum = "a4b1e0c257a9e9f25f90ff76d7a68360ed497ee519c8e428d1825ef0000799d4" dependencies = [ "der", - "digest 0.10.6", + "digest 0.10.7", "elliptic-curve", "rfc6979", "signature", + "spki", ] [[package]] @@ -1706,27 +1696,27 @@ dependencies = [ [[package]] name = "ed25519-dalek" -version = "2.0.0-rc.2" +version = "2.0.0-rc.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "798f704d128510932661a3489b08e3f4c934a01d61c5def59ae7b8e48f19665a" +checksum = "faa8e9049d5d72bfc12acbc05914731b5322f79b5e2f195e9f2d705fca22ab4c" dependencies = [ "curve25519-dalek", "ed25519", "rand_core 0.6.4", "serde", - "sha2 0.10.6", + "sha2 0.10.7", "zeroize", ] [[package]] name = "educe" -version = "0.4.20" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb0188e3c3ba8df5753894d54461f0e39bc91741dc5b22e1c46999ec2c71f4e4" +checksum = "079044df30bb07de7d846d41a184c4b00e66ebdac93ee459253474f3a47e50ae" dependencies = [ "enum-ordinalize", "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "syn 1.0.109", ] 
@@ -1750,19 +1740,19 @@ dependencies = [ [[package]] name = "either" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" +checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" [[package]] name = "elliptic-curve" -version = "0.13.4" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75c71eaa367f2e5d556414a8eea812bc62985c879748d6403edabd9cb03f16e7" +checksum = "968405c8fdc9b3bf4df0a6638858cc0b52462836ab6b1c87377785dd09cf1c0b" dependencies = [ "base16ct", "crypto-bigint", - "digest 0.10.6", + "digest 0.10.7", "ff", "generic-array", "group", @@ -1802,7 +1792,6 @@ checksum = "cf56acd72bb22d2824e66ae8e9e5ada4d0de17a69c7fd35569dde2ada8ec9116" dependencies = [ "base64 0.13.1", "bytes", - "ed25519-dalek", "hex", "k256", "log", @@ -1814,6 +1803,26 @@ dependencies = [ "zeroize", ] +[[package]] +name = "enr" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0be7b2ac146c1f99fe245c02d16af0696450d8e06c135db75e10eeb9e642c20d" +dependencies = [ + "base64 0.21.2", + "bytes", + "ed25519-dalek", + "hex", + "k256", + "log", + "rand 0.8.5", + "rlp", + "serde", + "serde-hex", + "sha3", + "zeroize", +] + [[package]] name = "enum-as-inner" version = "0.3.4" @@ -1822,7 +1831,7 @@ checksum = "570d109b813e904becc80d8d5da38376818a143348413f7149f1340fe04754d4" dependencies = [ "heck", "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "syn 1.0.109", ] @@ -1834,40 +1843,39 @@ checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" dependencies = [ "heck", "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "syn 1.0.109", ] [[package]] name = "enum-ordinalize" -version = "3.1.12" +version = "3.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bb1df8b45ecb7ffa78dca1c17a438fb193eb083db0b1b494d2a61bcb5096a" +checksum = "e4f76552f53cefc9a7f64987c3701b99d982f7690606fd67de1d09712fbf52f1" dependencies = [ "num-bigint", "num-traits", "proc-macro2 1.0.66", - "quote 1.0.31", - "rustc_version", - "syn 1.0.109", + "quote 1.0.32", + "syn 2.0.27", ] [[package]] name = "enumn" -version = "0.1.8" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48016319042fb7c87b78d2993084a831793a897a5cd1a2a67cab9d1eeb4b7d76" +checksum = "b893c4eb2dc092c811165f84dc7447fae16fb66521717968c34c509b39b1a5c5" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 2.0.26", + "quote 1.0.32", + "syn 2.0.27", ] [[package]] name = "equivalent" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88bffebc5d80432c9b140ee17875ff173a8ab62faad5b257da912bd2f6c1c0a1" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" @@ -1907,9 +1915,9 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fda3bf123be441da5260717e0661c25a2fd9cb2b2c1d20bf2e05580047158ab" dependencies = [ - "aes 0.8.2", + "aes 0.8.3", "ctr 0.9.2", - "digest 0.10.6", + "digest 0.10.7", "hex", "hmac", "pbkdf2 0.11.0", @@ -1917,7 +1925,7 @@ dependencies = [ "scrypt", "serde", "serde_json", - "sha2 0.10.6", + "sha2 0.10.7", "sha3", "thiserror", "uuid 0.8.2", @@ -2004,12 +2012,12 @@ dependencies = [ "hex", "prettyplease", "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", 
"regex", "serde", "serde_json", - "syn 2.0.26", - "toml 0.7.5", + "syn 2.0.27", + "toml 0.7.6", "walkdir", ] @@ -2024,9 +2032,9 @@ dependencies = [ "ethers-core", "hex", "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "serde_json", - "syn 2.0.26", + "syn 2.0.27", ] [[package]] @@ -2052,7 +2060,7 @@ dependencies = [ "serde", "serde_json", "strum 0.25.0", - "syn 2.0.26", + "syn 2.0.27", "tempfile", "thiserror", "tiny-keccak", @@ -2109,9 +2117,9 @@ checksum = "b411b119f1cf0efb69e2190883dee731251882bb21270f893ee9513b3a697c48" dependencies = [ "async-trait", "auto_impl", - "base64 0.21.0", + "base64 0.21.2", "bytes", - "enr", + "enr 0.8.1", "ethers-core", "futures-channel", "futures-core", @@ -2152,7 +2160,7 @@ dependencies = [ "ethers-core", "hex", "rand 0.8.5", - "sha2 0.10.6", + "sha2 0.10.7", "thiserror", "tracing", ] @@ -2216,6 +2224,12 @@ dependencies = [ "instant", ] +[[package]] +name = "fastrand" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" + [[package]] name = "fdlimit" version = "0.2.1" @@ -2267,9 +2281,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.25" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841" +checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" dependencies = [ "crc32fast", "miniz_oxide", @@ -2292,9 +2306,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "form_urlencoded" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" +checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" dependencies = [ "percent-encoding", ] @@ -2376,8 +2390,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 2.0.26", + "quote 1.0.32", + "syn 2.0.27", ] [[package]] @@ -2453,9 +2467,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" +checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if", "js-sys", @@ -2476,9 +2490,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.27.2" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" +checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" [[package]] name = "glob" @@ -2488,12 +2502,12 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "globset" -version = "0.4.10" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "029d74589adefde59de1a0c4f4732695c32805624aec7b68d91503d4dba79afc" +checksum = "aca8bbd8e0707c1887a8bbb7e6b40e228f251ff5d62c8220a4a7a53c73aff006" dependencies = [ - "aho-corasick 0.7.20", - "bstr 1.3.0", + "aho-corasick 1.0.2", + "bstr 1.6.0", "fnv", "log", "regex", @@ -2533,9 +2547,9 @@ dependencies = [ [[package]] name = "gloo-utils" -version 
= "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8e8fc851e9c7b9852508bc6e3f690f452f474417e8545ec9857b7f7377036b5" +checksum = "037fcb07216cb3a30f7292bd0176b050b7b9a052ba830ef7d5d65f6dc64ba58e" dependencies = [ "js-sys", "serde", @@ -2557,9 +2571,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.18" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f8a914c2987b688368b5138aa05321db91f4090cf26118185672ad588bce21" +checksum = "97ec8491ebaf99c8eaa73058b045fe58073cd6be7f596ac993ced0b0a0c01049" dependencies = [ "bytes", "fnv", @@ -2679,18 +2693,9 @@ checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" -dependencies = [ - "libc", -] - -[[package]] -name = "hermit-abi" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" +checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" [[package]] name = "hex" @@ -2728,16 +2733,16 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] name = "home" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "747309b4b440c06d57b0b25f2aee03ee9b5e5397d288c60e21fc709bb98a7408" +checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" dependencies = [ - "winapi", + "windows-sys 0.48.0", ] [[package]] @@ -2775,9 +2780,9 @@ dependencies = [ [[package]] name = "http-range-header" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" +checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" [[package]] name = "httparse" @@ -2793,9 +2798,9 @@ checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "human_bytes" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39b528196c838e8b3da8b665e08c30958a6f2ede91d79f2ffcd0d4664b9c64eb" +checksum = "27e2b089f28ad15597b48d8c0a8fe94eeb1c1cb26ca99b6f66ac9582ae10c5e6" [[package]] name = "humantime" @@ -2815,9 +2820,9 @@ dependencies = [ [[package]] name = "hyper" -version = "0.14.25" +version = "0.14.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc5e554ff619822309ffd57d8734d77cd5ce6238bc956f037ea06c58238c9899" +checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" dependencies = [ "bytes", "futures-channel", @@ -2830,7 +2835,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2", + "socket2 0.4.9", "tokio", "tower-service", "tracing", @@ -2839,10 +2844,11 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0646026eb1b3eea4cd9ba47912ea5ce9cc07713d105b1a14698f4e6433d348b7" +checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97" 
dependencies = [ + "futures-util", "http", "hyper", "log", @@ -2874,26 +2880,25 @@ checksum = "71a816c97c42258aa5834d07590b718b4c9a598944cd39a52dc25b351185d678" [[package]] name = "iana-time-zone" -version = "0.1.53" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" +checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "winapi", + "windows", ] [[package]] name = "iana-time-zone-haiku" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" dependencies = [ - "cxx", - "cxx-build", + "cc", ] [[package]] @@ -2930,7 +2935,7 @@ dependencies = [ "icu_collections", "icu_properties", "icu_provider", - "smallvec", + "smallvec 1.11.0", "utf16_iter", "utf8_iter", "write16", @@ -2974,7 +2979,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd8b728b9421e93eff1d9f8681101b78fa745e0748c95c655c83f337044a7e10" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "syn 1.0.109", ] @@ -2997,9 +3002,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -3062,10 +3067,29 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "syn 1.0.109", ] +[[package]] +name = "include_dir" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18762faeff7122e89e0857b02f7ce6fcc0d101d5e9ad2ad7846cc01d61b7f19e" +dependencies = [ + "include_dir_macros", +] + +[[package]] +name = "include_dir_macros" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b139284b5cf57ecfa712bcc66950bb635b31aff41c188e8a4cfc758eca374a3f" +dependencies = [ + "proc-macro2 1.0.66", + "quote 1.0.32", +] + [[package]] name = "indenter" version = "0.3.3" @@ -3132,32 +3156,32 @@ dependencies = [ [[package]] name = "io-lifetimes" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c66c74d2ae7e79a5a8f7ac924adbe38ee42a859c6539ad869eb51f0b52dc220" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi 0.3.1", + "hermit-abi", "libc", "windows-sys 0.48.0", ] [[package]] name = "ipconfig" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd302af1b90f2463a98fa5ad469fc212c8e3175a41c3068601bfa2727591c5be" +checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2", + "socket2 0.5.3", "widestring", - "winapi", - "winreg", + "windows-sys 0.48.0", + "winreg 0.50.0", ] [[package]] name = "ipnet" -version = "2.7.1" +version = "2.8.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146" +checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" [[package]] name = "iri-string" @@ -3171,13 +3195,12 @@ dependencies = [ [[package]] name = "is-terminal" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f" +checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ - "hermit-abi 0.3.1", - "io-lifetimes", - "rustix 0.37.11", + "hermit-abi", + "rustix 0.38.4", "windows-sys 0.48.0", ] @@ -3201,15 +3224,15 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.6" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" +checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] name = "jemalloc-ctl" -version = "0.5.0" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1891c671f3db85d8ea8525dd43ab147f9977041911d24a03e5a36187a7bfde9" +checksum = "7cffc705424a344c054e135d12ee591402f4539245e8bbd64e6c9eaa9458b63c" dependencies = [ "jemalloc-sys", "libc", @@ -3218,9 +3241,9 @@ dependencies = [ [[package]] name = "jemalloc-sys" -version = "0.5.3+5.3.0-patched" +version = "0.5.4+5.3.0-patched" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9bd5d616ea7ed58b571b2e209a65759664d7fb021a0819d7a790afc67e47ca1" +checksum = "ac6c1946e1cea1788cbfde01c993b52a10e2da07f4bac608228d1bed20bfebf2" dependencies = [ "cc", "libc", @@ -3228,9 +3251,9 @@ dependencies = [ [[package]] name = "jemallocator" -version = "0.5.0" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16c2514137880c52b0b4822b563fadd38257c1f380858addb74a400889696ea6" +checksum = "a0de374a9f8e63150e6f5e8a60cc14c668226d7a347d8aee1a45766e3c4dd3bc" dependencies = [ "jemalloc-sys", "libc", @@ -3247,9 +3270,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.61" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730" +checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" dependencies = [ "wasm-bindgen", ] @@ -3349,7 +3372,7 @@ dependencies = [ "heck", "proc-macro-crate", "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "syn 1.0.109", ] @@ -3412,11 +3435,11 @@ dependencies = [ [[package]] name = "jsonwebtoken" -version = "8.2.0" +version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09f4f04699947111ec1733e71778d763555737579e44b85844cae8e1940a1828" +checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.13.1", + "base64 0.21.2", "pem", "ring", "serde", @@ -3434,15 +3457,15 @@ dependencies = [ "ecdsa", "elliptic-curve", "once_cell", - "sha2 0.10.6", + "sha2 0.10.7", "signature", ] [[package]] name = "keccak" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3afef3b6eff9ce9d8ff9b3601125eec7f0c8cbac7abd14f355d053fa56c98768" +checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" dependencies = [ "cpufeatures", ] @@ -3480,15 +3503,9 @@ 
dependencies = [ [[package]] name = "libm" -version = "0.1.4" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fc7aa29613bd6a620df431842069224d8bc9011086b1db4c0e0cd47fa03ec9a" - -[[package]] -name = "libm" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb" +checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" [[package]] name = "libproc" @@ -3510,15 +3527,6 @@ dependencies = [ "bytes", ] -[[package]] -name = "link-cplusplus" -version = "1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5" -dependencies = [ - "cc", -] - [[package]] name = "linked-hash-map" version = "0.5.6" @@ -3542,9 +3550,9 @@ checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" [[package]] name = "linux-raw-sys" -version = "0.3.1" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59d8c75012853d2e872fb56bc8a2e53718e2cafe1a4c823143141c6d90c322f" +checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0" [[package]] name = "litemap" @@ -3579,9 +3587,9 @@ dependencies = [ [[package]] name = "lru" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03f1160296536f10c833a82dca22267d5486734230d47bf00bf435885814ba1e" +checksum = "718e8fae447df0c7e1ba7f5189829e63fd536945c8988d61444c19039f16b670" dependencies = [ "hashbrown 0.13.2", ] @@ -3625,14 +3633,20 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" dependencies = [ - "regex-automata", + "regex-automata 0.1.10", ] [[package]] name = "matches" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" +checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" + +[[package]] +name = "maybe-uninit" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" [[package]] name = "memchr" @@ -3651,9 +3665,9 @@ dependencies = [ [[package]] name = "memoffset" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" dependencies = [ "autocfg", ] @@ -3665,8 +3679,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b9b8653cec6897f73b519a43fba5ee3d50f62fe9af80b428accdcc093b4a849" dependencies = [ "ahash 0.7.6", - "metrics-macros", - "portable-atomic", + "metrics-macros 0.6.0", + "portable-atomic 0.3.20", +] + +[[package]] +name = "metrics" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fde3af1a009ed76a778cb84fdef9e7dbbdf5775ae3e4cc1f434a6a307f6f76c5" +dependencies = [ + "ahash 0.8.3", + "metrics-macros 0.7.0", + "portable-atomic 1.4.2", ] [[package]] @@ -3678,10 +3703,10 @@ dependencies = [ "hyper", "indexmap 1.9.3", "ipnet", - "metrics", + "metrics 0.20.1", "metrics-util", "parking_lot 0.12.1", - "portable-atomic", + "portable-atomic 
0.3.20", "quanta", "thiserror", "tokio", @@ -3695,19 +3720,30 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "731f8ecebd9f3a4aa847dfe75455e4757a45da40a7793d2f0b1f9b6ed18b23f3" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "syn 1.0.109", ] +[[package]] +name = "metrics-macros" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" +dependencies = [ + "proc-macro2 1.0.66", + "quote 1.0.32", + "syn 2.0.27", +] + [[package]] name = "metrics-process" -version = "1.0.9" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99eab79be9f7c18565e889d6eaed6f1ebdafb2b6a88aef446d2fee5e7796ed10" +checksum = "006271a8019ad7a9a28cfac2cc40e3ee104d54be763c4a0901e228a63f49d706" dependencies = [ "libproc", "mach2", - "metrics", + "metrics 0.21.1", "once_cell", "procfs", "rlimit", @@ -3725,11 +3761,11 @@ dependencies = [ "crossbeam-utils", "hashbrown 0.12.3", "indexmap 1.9.3", - "metrics", + "metrics 0.20.1", "num_cpus", "ordered-float", "parking_lot 0.12.1", - "portable-atomic", + "portable-atomic 0.3.20", "quanta", "radix_trie", "sketches-ddsketch", @@ -3737,9 +3773,9 @@ dependencies = [ [[package]] name = "mime" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" @@ -3759,9 +3795,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.6.2" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" +checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" dependencies = [ "adler", ] @@ -3780,9 +3816,9 @@ dependencies = [ [[package]] name = "mockall" -version = "0.11.3" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50e4a1c770583dac7ab5e2f6c139153b783a53a1bbee9729613f193e59828326" +checksum = "4c84490118f2ee2d74570d114f3d0493cbf02790df303d2707606c3e14e07c96" dependencies = [ "cfg-if", "downcast", @@ -3795,13 +3831,13 @@ dependencies = [ [[package]] name = "mockall_derive" -version = "0.11.3" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "832663583d5fa284ca8810bf7015e46c9fff9622d3cf34bd1eea5003fec06dd0" +checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" dependencies = [ "cfg-if", "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "syn 1.0.109", ] @@ -3822,7 +3858,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a7d5f7076603ebc68de2dc6a650ec331a062a13abaa346975be747bbfa4b789" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "syn 1.0.109", ] @@ -3838,7 +3874,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77a5d83df9f36fe23f0c3648c6bbb8b0298bb5f1939c8f2704431371f4b84d43" dependencies = [ - "smallvec", + "smallvec 1.11.0", ] [[package]] @@ -3853,6 +3889,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "nodrop" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" + [[package]] name = "nom" version = "7.1.3" @@ -3881,9 +3923,9 @@ dependencies = [ [[package]] name = "num" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43db66d1170d347f9a065114077f7dccb00c1b9478c89384490a3425279a4606" +checksum = "b05180d69e3da0e530ba2a1dae5110317e49e3b7f3d41be227dc5f92e49ee7af" dependencies = [ "num-bigint", "num-complex", @@ -3959,21 +4001,21 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" dependencies = [ "autocfg", - "libm 0.2.6", + "libm", ] [[package]] name = "num_cpus" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.2.6", + "hermit-abi", "libc", ] @@ -3994,15 +4036,24 @@ checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" dependencies = [ "proc-macro-crate", "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 2.0.26", + "quote 1.0.32", + "syn 2.0.27", +] + +[[package]] +name = "num_threads" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" +dependencies = [ + "libc", ] [[package]] name = "object" -version = "0.30.3" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" +checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1" dependencies = [ "memchr", ] @@ -4013,7 +4064,7 @@ version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" dependencies = [ - "atomic-polyfill 1.0.2", + "atomic-polyfill 1.0.3", "critical-section", ] @@ -4050,7 +4101,7 @@ checksum = "003b2be5c6c53c1cfeb0a238b8a1c3915cd410feb684457a36c10038f764bb1c" dependencies = [ "bytes", "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "syn 1.0.109", ] @@ -4060,6 +4111,12 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + [[package]] name = "ordered-float" version = "2.10.0" @@ -4069,37 +4126,12 @@ dependencies = [ "num-traits", ] -[[package]] -name = "os_str_bytes" -version = "6.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee" - -[[package]] -name = "output_vt100" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "628223faebab4e3e40667ee0b2336d34a5b960ff60ea743ddfdbcf7770bcfb66" -dependencies = [ - "winapi", -] - [[package]] name = "overload" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" -[[package]] -name = "packed_simd_2" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1914cd452d8fccd6f9db48147b29fd4ae05bea9dc5d9ad578509f72415de282" -dependencies = [ - "cfg-if", - "libm 0.1.4", -] - [[package]] name = "page_size" version = "0.4.2" @@ -4112,9 +4144,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.4.0" +version = "3.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "637935964ff85a605d114591d4d2c13c5d1ba2806dae97cea6bf180238a749ac" +checksum = "dd8e946cc0cc711189c0b0249fb8b599cbeeab9784d83c415719368bb8d4ac64" dependencies = [ "arrayvec", "bitvec 1.0.1", @@ -4127,13 +4159,13 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.1.4" +version = "3.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b26a931f824dd4eca30b3e43bb4f31cd5f0d3a403c5f5ff27106b805bfde7b" +checksum = "2a296c3079b5fefbc499e1de58dc26c09b1b9a5952d26694ee89f04a43ebbb3e" dependencies = [ "proc-macro-crate", "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "syn 1.0.109", ] @@ -4182,7 +4214,7 @@ dependencies = [ "instant", "libc", "redox_syscall 0.2.16", - "smallvec", + "smallvec 1.11.0", "winapi", ] @@ -4195,15 +4227,15 @@ dependencies = [ "cfg-if", "libc", "redox_syscall 0.3.5", - "smallvec", + "smallvec 1.11.0", "windows-targets 0.48.1", ] [[package]] name = "paste" -version = "1.0.11" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba" +checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" [[package]] name = "pbkdf2" @@ -4211,16 +4243,16 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] name = "pbkdf2" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0ca0b5a68607598bf3bad68f32227a8164f6254833f84eafaac409cd6746c31" +checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", "hmac", ] @@ -4241,9 +4273,9 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" +checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pharos" @@ -4267,9 +4299,9 @@ dependencies = [ [[package]] name = "phf_generator" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1181c94580fa345f50f19d738aaa39c0ed30a600d95cb2d3e23f94266f14fbf" +checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" dependencies = [ "phf_shared", "rand 0.8.5", @@ -4284,8 +4316,8 @@ dependencies = [ "phf_generator", "phf_shared", "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 2.0.26", + "quote 1.0.32", + "syn 2.0.27", ] [[package]] @@ -4299,22 +4331,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead" +checksum = "030ad2bc4db10a8944cb0d837f158bdfec4d4a4873ab701a95046770d11f8842" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" +checksum = "ec2e072ecce94ec471b13398d5402c188e76ac03cf74dd1a975161b23a3f6d9c" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 2.0.26", + "quote 1.0.32", + "syn 2.0.27", ] [[package]] @@ -4341,9 +4373,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" +checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] name = "plain_hasher" @@ -4362,9 +4394,9 @@ checksum = "e3d7ddaed09e0eb771a79ab0fd64609ba0afb0a8366421957936ad14cbd13630" [[package]] name = "plotters" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2538b639e642295546c50fcd545198c9d64ee2a38620a628724a3b266d5fbf97" +checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45" dependencies = [ "num-traits", "plotters-backend", @@ -4375,15 +4407,15 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "193228616381fecdc1224c62e96946dfbc73ff4384fba576e052ff8c1bea8142" +checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" [[package]] name = "plotters-svg" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a81d2759aae1dae668f783c308bc5c8ebd191ff4184aaa1b37f65a6ae5a56f" +checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" dependencies = [ "plotters-backend", ] @@ -4408,15 +4440,24 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "0.3.19" +version = "0.3.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e30165d31df606f5726b090ec7592c308a0eaf61721ff64c9a3018e344a8753e" +dependencies = [ + "portable-atomic 1.4.2", +] + +[[package]] +name = "portable-atomic" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26f6a7b87c2e435a3241addceeeff740ff8b7e76b74c13bf9acb17fa454ea00b" +checksum = "f32154ba0af3a075eefa1eda8bb414ee928f62303a54ea85b8d6638ff1a6ee9e" [[package]] name = "postcard" -version = "1.0.4" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfa512cd0d087cc9f99ad30a1bf64795b67871edbead083ffc3a4dfafa59aa00" +checksum = "c9ee729232311d3cd113749948b689627618133b1c5012b77342c1950b25eaeb" dependencies = [ "cobs", "heapless", @@ -4425,9 +4466,9 @@ dependencies = [ [[package]] name = "pprof" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b90f8560ad8bd57b207b8293bc5226e48e89039a6e590c12a297d91b84c7e60" +checksum = "978385d59daf9269189d052ca8a84c1acfd0715c0599a5d5188d4acc078ca46a" dependencies = [ "backtrace", "cfg-if", @@ -4439,7 +4480,7 @@ dependencies = [ "nix", "once_cell", "parking_lot 0.12.1", - "smallvec", + "smallvec 1.11.0", "symbolic-demangle", "tempfile", "thiserror", @@ 
-4467,15 +4508,15 @@ dependencies = [ [[package]] name = "predicates-core" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72f883590242d3c6fc5bf50299011695fa6590c2c70eac95ee1bdb9a733ad1a2" +checksum = "b794032607612e7abeb4db69adb4e33590fa6cf1149e95fd7cb00e634b92f174" [[package]] name = "predicates-tree" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54ff541861505aabf6ea722d2131ee980b8276e10a1297b94e896dd8b621850d" +checksum = "368ba315fb8c5052ab692e68a0eefec6ec57b23a36959c14496f0b0df2c0cecf" dependencies = [ "predicates-core", "termtree", @@ -4483,24 +4524,22 @@ dependencies = [ [[package]] name = "pretty_assertions" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a25e9bcb20aa780fd0bb16b72403a9064d6b3f22f026946029acb941a50af755" +checksum = "af7cee1a6c8a5b9208b3cb1061f10c0cb689087b3d8ce85fb9d2dd7a29b6ba66" dependencies = [ - "ctor", "diff", - "output_vt100", "yansi", ] [[package]] name = "prettyplease" -version = "0.2.4" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" +checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62" dependencies = [ "proc-macro2 1.0.66", - "syn 2.0.26", + "syn 2.0.27", ] [[package]] @@ -4535,7 +4574,7 @@ checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "syn 1.0.109", "version_check", ] @@ -4547,7 +4586,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "version_check", ] @@ -4579,21 +4618,20 @@ dependencies = [ "byteorder", "hex", "lazy_static", - "rustix 0.36.11", + "rustix 0.36.15", ] [[package]] name = "proptest" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29f1b898011ce9595050a68e60f90bad083ff2987a695a42357134c8381fba70" +checksum = "4e35c06b98bf36aba164cc17cb25f7e232f5c4aeea73baa14b8a9f0d92dbfa65" dependencies = [ "bit-set", "bitflags 1.3.2", "byteorder", "lazy_static", "num-traits", - "quick-error 2.0.1", "rand 0.8.5", "rand_chacha 0.3.1", "rand_xorshift", @@ -4657,12 +4695,6 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" -[[package]] -name = "quick-error" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" - [[package]] name = "quick-xml" version = "0.26.0" @@ -4683,9 +4715,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.31" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fe8a65d69dd0808184ebb5f836ab526bb259db23c657efa38711b1072ee47f0" +checksum = "50f3b39ccfb720540debaa0164757101c08ecb8d326b15358ce76a62c7e85965" dependencies = [ "proc-macro2 1.0.66", ] @@ -4771,7 +4803,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 
0.2.9", + "getrandom 0.2.10", ] [[package]] @@ -4847,20 +4879,21 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", "redox_syscall 0.2.16", "thiserror", ] [[package]] name = "regex" -version = "1.8.1" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370" +checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575" dependencies = [ - "aho-corasick 1.0.1", + "aho-corasick 1.0.2", "memchr", - "regex-syntax 0.7.1", + "regex-automata 0.3.4", + "regex-syntax 0.7.4", ] [[package]] @@ -4872,6 +4905,17 @@ dependencies = [ "regex-syntax 0.6.29", ] +[[package]] +name = "regex-automata" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7b6d6190b7594385f61bd3911cd1be99dfddcfc365a4160cc2ab5bff4aed294" +dependencies = [ + "aho-corasick 1.0.2", + "memchr", + "regex-syntax 0.7.4", +] + [[package]] name = "regex-syntax" version = "0.6.29" @@ -4880,9 +4924,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.1" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c" +checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" [[package]] name = "regress" @@ -4900,7 +4944,7 @@ version = "0.11.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", "bytes", "encoding_rs", "futures-core", @@ -4925,7 +4969,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "winreg", + "winreg 0.10.1", ] [[package]] @@ -4935,7 +4979,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" dependencies = [ "hostname", - "quick-error 1.2.3", + "quick-error", ] [[package]] @@ -5000,7 +5044,7 @@ dependencies = [ "tempfile", "thiserror", "tokio", - "toml 0.7.5", + "toml 0.7.6", "tracing", "tui", "vergen", @@ -5077,7 +5121,7 @@ dependencies = [ "aquamarine", "assert_matches", "linked_hash_set", - "lru 0.10.0", + "lru 0.10.1", "parking_lot 0.12.1", "reth-db", "reth-interfaces", @@ -5178,7 +5222,7 @@ name = "reth-discv4" version = "0.1.0-alpha.4" dependencies = [ "discv5", - "enr", + "enr 0.8.1", "generic-array", "hex", "rand 0.8.5", @@ -5202,7 +5246,7 @@ version = "0.1.0-alpha.4" dependencies = [ "async-trait", "data-encoding", - "enr", + "enr 0.8.1", "linked_hash_set", "parking_lot 0.12.1", "reth-net-common", @@ -5249,12 +5293,12 @@ dependencies = [ name = "reth-ecies" version = "0.1.0-alpha.4" dependencies = [ - "aes 0.8.2", + "aes 0.8.3", "block-padding", "byteorder", - "cipher 0.4.3", + "cipher 0.4.4", "ctr 0.9.2", - "digest 0.10.6", + "digest 0.10.7", "educe", "futures", "generic-array", @@ -5266,7 +5310,7 @@ dependencies = [ "reth-primitives", "reth-rlp", "secp256k1", - "sha2 0.10.6", + "sha2 0.10.7", "sha3", "thiserror", "tokio", @@ -5390,7 +5434,7 @@ name = "reth-metrics" version = "0.1.0-alpha.4" dependencies = [ "futures", - "metrics", + "metrics 0.20.1", "reth-metrics-derive", "tokio", ] @@ -5399,13 +5443,13 @@ dependencies = 
[ name = "reth-metrics-derive" version = "0.1.0-alpha.4" dependencies = [ - "metrics", + "metrics 0.20.1", "once_cell", "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "regex", "serial_test 0.10.0", - "syn 2.0.26", + "syn 2.0.27", "trybuild", ] @@ -5439,7 +5483,7 @@ dependencies = [ "aquamarine", "async-trait", "auto_impl", - "enr", + "enr 0.8.1", "ethers-core", "ethers-middleware", "ethers-providers", @@ -5507,7 +5551,7 @@ dependencies = [ "reth-rlp", "reth-rpc-types", "revm-primitives", - "sha2 0.10.6", + "sha2 0.10.7", "thiserror", "tokio", "tokio-stream", @@ -5555,11 +5599,11 @@ dependencies = [ "tiny-keccak", "tokio", "tokio-stream", - "toml 0.7.5", + "toml 0.7.6", "tracing", "triehash", "url", - "zstd 0.12.3+zstd.1.5.2", + "zstd", ] [[package]] @@ -5662,8 +5706,8 @@ name = "reth-rlp-derive" version = "0.1.0-alpha.4" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 2.0.26", + "quote 1.0.32", + "syn 2.0.27", ] [[package]] @@ -5944,7 +5988,7 @@ dependencies = [ "revm-primitives", "ripemd", "secp256k1", - "sha2 0.10.6", + "sha2 0.10.7", "sha3", "substrate-bn", ] @@ -6013,7 +6057,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -6043,7 +6087,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "syn 1.0.109", ] @@ -6070,9 +6114,9 @@ checksum = "e666a5496a0b2186dbcd0ff6106e29e093c15591bde62c20d3842007c6978a09" [[package]] name = "rustc-demangle" -version = "0.1.21" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc-hash" @@ -6097,12 +6141,12 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.11" +version = "0.36.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db4165c9963ab29e422d6c26fbc1d37f15bace6b2810221f9d925023480fcf0e" +checksum = "c37f1bd5ef1b5422177b7646cba67430579cfe2ace80f284fee876bca52ad941" dependencies = [ "bitflags 1.3.2", - "errno 0.2.8", + "errno 0.3.1", "io-lifetimes", "libc", "linux-raw-sys 0.1.4", @@ -6111,35 +6155,34 @@ dependencies = [ [[package]] name = "rustix" -version = "0.37.11" +version = "0.38.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85597d61f83914ddeba6a47b3b8ffe7365107221c2e557ed94426489fefb5f77" +checksum = "0a962918ea88d644592894bc6dc55acc6c0956488adcebbfb6e273506b7fd6e5" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.3.3", "errno 0.3.1", - "io-lifetimes", "libc", - "linux-raw-sys 0.3.1", + "linux-raw-sys 0.4.3", "windows-sys 0.48.0", ] [[package]] name = "rustls" -version = "0.21.1" +version = "0.21.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c911ba11bc8433e811ce56fde130ccf32f5127cab0e0194e9c68c5a5b671791e" +checksum = "79ea77c539259495ce8ca47f53e66ae0330a8819f67e23ac96ca02f50e7b7d36" dependencies = [ "log", "ring", - "rustls-webpki", + "rustls-webpki 0.101.2", "sct", ] [[package]] name = "rustls-native-certs" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" dependencies = [ "openssl-probe", "rustls-pemfile", @@ -6149,11 +6192,11 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" +checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", ] [[package]] @@ -6166,6 +6209,16 @@ dependencies = [ "untrusted", ] +[[package]] +name = "rustls-webpki" +version = "0.101.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "513722fd73ad80a71f72b61009ea1b584bcfa1483ca93949c8f290298837fa59" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "rustversion" version = "1.0.14" @@ -6179,16 +6232,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" dependencies = [ "fnv", - "quick-error 1.2.3", + "quick-error", "tempfile", "wait-timeout", ] [[package]] name = "ryu" -version = "1.0.13" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" +checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" [[package]] name = "ryu-js" @@ -6202,7 +6255,7 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97a22f5af31f73a954c10289c93e8a50cc23d971e80ee446f1f6f7137a088213" dependencies = [ - "cipher 0.4.3", + "cipher 0.4.4", ] [[package]] @@ -6216,9 +6269,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.5.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cfdffd972d76b22f3d7f81c8be34b2296afd3a25e0a547bd9abe340a4dbbe97" +checksum = "35c0a159d0c45c12b20c5a844feb1fe4bea86e28f17b92a5f0c42193634d3782" dependencies = [ "cfg-if", "derive_more", @@ -6228,23 +6281,23 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.5.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61fa974aea2d63dd18a4ec3a49d59af9f34178c73a4f56d2f18205628d00681e" +checksum = "912e55f6d20e0e80d63733872b40e1227c0bce1e1ab81ba67d696339bfd7fd29" dependencies = [ "proc-macro-crate", "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "syn 1.0.109", ] [[package]] name = "schannel" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" +checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" dependencies = [ - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] @@ -6260,15 +6313,9 @@ dependencies = [ [[package]] name = "scopeguard" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" - -[[package]] -name = "scratch" -version = "1.0.4" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d5e082f6ea090deaf0e6dd04b68360fd5cddb152af6ce8927c9d25db299f98c" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "scrypt" @@ 
-6279,7 +6326,7 @@ dependencies = [ "hmac", "pbkdf2 0.11.0", "salsa20", - "sha2 0.10.6", + "sha2 0.10.7", ] [[package]] @@ -6294,9 +6341,9 @@ dependencies = [ [[package]] name = "sec1" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0aec48e813d6b90b15f0b8948af3c63483992dee44c03e9930b3eebdabe046e" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ "base16ct", "der", @@ -6328,9 +6375,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.8.2" +version = "2.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" +checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" dependencies = [ "bitflags 1.3.2", "core-foundation", @@ -6341,9 +6388,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.8.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" +checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" dependencies = [ "core-foundation-sys", "libc", @@ -6351,9 +6398,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" +checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" dependencies = [ "serde", ] @@ -6372,29 +6419,40 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "serde" -version = "1.0.171" +version = "1.0.178" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30e27d1e4fd7659406c492fd6cfaf2066ba8773de45ca75e855590f856dc34a9" +checksum = "60363bdd39a7be0266a520dab25fdc9241d2f987b08a01e01f0ec6d06a981348" dependencies = [ "serde_derive", ] +[[package]] +name = "serde-hex" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca37e3e4d1b39afd7ff11ee4e947efae85adfddf4841787bfa47c470e96dc26d" +dependencies = [ + "array-init", + "serde", + "smallvec 0.6.14", +] + [[package]] name = "serde_derive" -version = "1.0.171" +version = "1.0.178" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "389894603bd18c46fa56231694f8d827779c0951a667087194cf9de94ed24682" +checksum = "f28482318d6641454cb273da158647922d1be6b5a2fcc6165cd89ebdd7ed576b" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 2.0.26", + "quote 1.0.32", + "syn 2.0.27", ] [[package]] name = "serde_json" -version = "1.0.103" +version = "1.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d03b412469450d4404fe8499a268edd7f8b79fecb074b0d812ad64ca21f4031b" +checksum = "076066c5f1078eac5b722a31827a8832fe108bed65dfa75e233c89f8206e976c" dependencies = [ "itoa", "ryu", @@ -6424,9 +6482,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "2.2.0" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30d904179146de381af4c93d3af6ca4984b3152db687dacb9c3c35e86f39809c" +checksum = "07ff71d2c147a7b57362cead5e22f772cd52f6ab31cfcd9edcd7f6aeb2a0afbe" dependencies = [ "base64 0.13.1", "chrono", @@ -6440,14 +6498,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "2.2.0" +version = "2.3.3" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1966009f3c05f095697c537312f5415d1e3ed31ce0a56942bac4c771c5c335e" +checksum = "881b6f881b17d13214e5d494c939ebab463d01264ce1811e9d4ac3a882e7695f" dependencies = [ - "darling 0.14.3", + "darling 0.20.3", "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 1.0.109", + "quote 1.0.32", + "syn 2.0.27", ] [[package]] @@ -6485,7 +6543,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b64f9e531ce97c88b4778aad0ceee079216071cffec6ac9b904277f8f92e7fe3" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "syn 1.0.109", ] @@ -6496,8 +6554,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 2.0.26", + "quote 1.0.32", + "syn 2.0.27", ] [[package]] @@ -6521,7 +6579,7 @@ checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -6532,7 +6590,7 @@ checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -6550,22 +6608,22 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" +checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] name = "sha3" -version = "0.10.7" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54c2bb1a323307527314a36bfb73f24febb08ce2b8a554bf4ffd6f51ad15198c" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", "keccak", ] @@ -6580,9 +6638,9 @@ dependencies = [ [[package]] name = "shellexpand" -version = "3.0.0" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd1c7ddea665294d484c39fd0c0d2b7e35bbfe10035c5fe1854741a57f6880e1" +checksum = "da03fa3b94cc19e3ebfc88c4229c49d8f08cdbd1228870a45f0ffdf84988e14b" dependencies = [ "dirs", ] @@ -6595,9 +6653,9 @@ checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" [[package]] name = "signal-hook" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b824b6e687aff278cdbf3b36f07aa52d4bd4099699324d5da86a2ebce3aa00b3" +checksum = "8621587d4798caf8eb44879d42e56b9a93ea5dcd315a6487c357130095b62801" dependencies = [ "libc", "signal-hook-registry", @@ -6629,7 +6687,7 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", "rand_core 0.6.4", ] @@ -6673,9 +6731,9 @@ checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de" [[package]] name = "sketches-ddsketch" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ceb945e54128e09c43d8e4f1277851bd5044c6fc540bbaa2ad888f60b3da9ae7" +checksum = "68a406c1882ed7f29cd5e248c9848a80e7cb6ae0fea82346d2746f2f941c07e1" 
[[package]] name = "slab" @@ -6686,6 +6744,15 @@ dependencies = [ "autocfg", ] +[[package]] +name = "smallvec" +version = "0.6.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97fcaeba89edba30f044a10c6a3cc39df9c3f17d7cd829dd1446cab35f890e0" +dependencies = [ + "maybe-uninit", +] + [[package]] name = "smallvec" version = "1.11.0" @@ -6717,6 +6784,16 @@ dependencies = [ "winapi", ] +[[package]] +name = "socket2" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" +dependencies = [ + "libc", + "windows-sys 0.48.0", +] + [[package]] name = "soketto" version = "0.7.1" @@ -6750,9 +6827,9 @@ dependencies = [ [[package]] name = "spki" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37a5be806ab6f127c3da44b7378837ebf01dadca8510a0e572460216b228bd0e" +checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" dependencies = [ "base64ct", "der", @@ -6817,7 +6894,7 @@ checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck", "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "rustversion", "syn 1.0.109", ] @@ -6830,9 +6907,9 @@ checksum = "6069ca09d878a33f883cc06aaa9718ede171841d3832450354410b718b097232" dependencies = [ "heck", "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "rustversion", - "syn 2.0.26", + "syn 2.0.27", ] [[package]] @@ -6875,21 +6952,21 @@ dependencies = [ [[package]] name = "symbolic-common" -version = "12.2.0" +version = "12.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f7afd8bcd36190409e6b71d89928f7f09d918a7aa3460d847bc49a538d672e" +checksum = "167a4ffd7c35c143fd1030aa3c2caf76ba42220bd5a6b5f4781896434723b8c3" dependencies = [ "debugid", "memmap2", "stable_deref_trait", - "uuid 1.3.0", + "uuid 1.4.1", ] [[package]] name = "symbolic-demangle" -version = "12.2.0" +version = "12.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec64922563a36e3fe686b6d99f06f25dacad2a202ac7502ed642930a188fb20a" +checksum = "e378c50e80686c1c5c205674e1f86a2858bec3d2a7dfdd690331a8a19330f293" dependencies = [ "cpp_demangle", "rustc-demangle", @@ -6914,18 +6991,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.26" +version = "2.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45c3457aacde3c65315de5031ec191ce46604304d2446e803d71ade03308d970" +checksum = "b60f673f44a8255b9c8c657daf66a596d435f2da81a555b06dc644d080ba45e0" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "unicode-ident", ] @@ -6936,7 +7013,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "syn 1.0.109", "unicode-xid 0.2.4", ] @@ -6948,8 +7025,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "285ba80e733fac80aa4270fbcdf83772a79b80aa35c97075320abfee4a915b06" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 2.0.26", + "quote 1.0.32", + "syn 2.0.27", "unicode-xid 0.2.4", ] @@ -6961,15 
+7038,15 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.5.0" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9fbec84f381d5795b08656e4912bec604d162bff9291d6189a78f4c8ab87998" +checksum = "5486094ee78b2e5038a6382ed7645bc084dc2ec433426ca4c3cb61e2007b8998" dependencies = [ "cfg-if", - "fastrand", + "fastrand 2.0.0", "redox_syscall 0.3.5", - "rustix 0.37.11", - "windows-sys 0.45.0", + "rustix 0.38.4", + "windows-sys 0.48.0", ] [[package]] @@ -6983,15 +7060,15 @@ dependencies = [ [[package]] name = "termtree" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95059e91184749cb66be6dc994f67f182b6d897cb3df74a5bf66b5e709295fd8" +checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test-fuzz" -version = "4.0.0" +version = "4.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7bb2f404d5d20140588fb209481f5841920a7e29c36124f3d1ac1041eb1842c" +checksum = "954c28be70cafa409ddf615dd3c14a62478d38b44a94ade29469e82258cd5229" dependencies = [ "serde", "test-fuzz-internal", @@ -7001,40 +7078,40 @@ dependencies = [ [[package]] name = "test-fuzz-internal" -version = "4.0.0" +version = "4.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "385624eb0031d550fe1bf99c08af79b838605fc4fcec2c4d55e229a2c342fdd0" +checksum = "0f0528a7ad0bc85f826aa831434a37833aea622a5ae155f5b5dd431b25244213" dependencies = [ "cargo_metadata", "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "serde", - "strum_macros 0.24.3", + "strum_macros 0.25.1", ] [[package]] name = "test-fuzz-macro" -version = "4.0.0" +version = "4.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69247423e2d89bd51160e42200f6f45f921a23e5b44a0e5b57b888a378334037" +checksum = "646c4ee44c47ae7888e88fb8441c857bc21136fa2c7d6a0a766b0caf609b35e8" dependencies = [ - "darling 0.20.1", + "darling 0.20.3", "if_chain", "itertools 0.10.5", "lazy_static", "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "subprocess", - "syn 2.0.26", + "syn 2.0.27", "test-fuzz-internal", "toolchain_find", ] [[package]] name = "test-fuzz-runtime" -version = "4.0.0" +version = "4.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5054a92d02b1a95a0120155d20aef49b5c5673ba8a65d6f4ce667c2a6f3146c" +checksum = "491cf4aba9f04a5fa35cd2f69f73e4f390779c864d20a96c357c3be16eb7d501" dependencies = [ "bincode", "hex", @@ -7052,22 +7129,22 @@ checksum = "aac81b6fd6beb5884b0cf3321b8117e6e5d47ecb6fc89f414cfdcca8b2fe2dd8" [[package]] name = "thiserror" -version = "1.0.43" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a35fc5b8971143ca348fa6df4f024d4d55264f3468c71ad1c2f365b0a4d58c42" +checksum = "611040a08a0439f8248d1990b111c95baa9c704c805fa1f62104b39655fd7f90" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.43" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "463fe12d7993d3b327787537ce8dd4dfa058de32fc2b195ef3cde03dc4771e8f" +checksum = "090198534930841fab3a5d1bb637cde49e339654e606195f8d9c76eeb081dc96" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 2.0.26", + "quote 1.0.32", + "syn 2.0.27", ] [[package]] @@ -7082,11 +7159,13 @@ dependencies = [ [[package]] name = "time" -version = 
"0.3.20" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890" +checksum = "59e399c068f43a5d116fedaf73b203fa4f9c519f17e2b34f63221d3792f81446" dependencies = [ "itoa", + "libc", + "num_threads", "serde", "time-core", "time-macros", @@ -7094,15 +7173,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" +checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36" +checksum = "96ba15a897f3c86766b757e5ac7221554c6750054d74d5b28844fce5fb36a6c4" dependencies = [ "time-core", ] @@ -7153,11 +7232,12 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.28.2" +version = "1.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94d7b1cfd2aa4011f2de74c2c4c63665e27a71006b0a192dcd2710272e73dfa2" +checksum = "532826ff75199d5833b9d2c5fe410f29235e25704ee5f0ef599fb51c21f4a4da" dependencies = [ "autocfg", + "backtrace", "bytes", "libc", "mio", @@ -7165,7 +7245,7 @@ dependencies = [ "parking_lot 0.12.1", "pin-project-lite", "signal-hook-registry", - "socket2", + "socket2 0.4.9", "tokio-macros", "windows-sys 0.48.0", ] @@ -7177,15 +7257,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 2.0.26", + "quote 1.0.32", + "syn 2.0.27", ] [[package]] name = "tokio-rustls" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0d409377ff5b1e3ca6437aa86c1eb7d40c134bfec254e44c830defa92669db5" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ "rustls", "tokio", @@ -7193,9 +7273,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.12" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", "pin-project-lite", @@ -7217,9 +7297,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2" +checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" dependencies = [ "bytes", "futures-core", @@ -7242,9 +7322,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.7.5" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ebafdf5ad1220cb59e7d17cf4d2c72015297b75b19a10472f99b89225089240" +checksum = "c17e963a819c331dcacd7ab957d80bc2b9a9c1e71c804826d2f283dd65306542" dependencies = [ "serde", "serde_spanned", @@ -7263,9 +7343,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.19.11" +version = "0.19.14" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "266f016b7f039eec8a1a80dfe6156b633d208b9fccca5e4db1d6775b0c4e34a7" +checksum = "f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a" dependencies = [ "indexmap 2.0.0", "serde", @@ -7310,13 +7390,13 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.4.0" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d1d42a9b3f3ec46ba828e8d376aec14592ea199f70a06a548587ecd1c4ab658" +checksum = "55ae70283aba8d2a8b411c695c437fe25b8b5e44e23e780662002fc72fb47a82" dependencies = [ "async-compression", - "base64 0.20.0", - "bitflags 1.3.2", + "base64 0.21.2", + "bitflags 2.3.3", "bytes", "futures-core", "futures-util", @@ -7335,7 +7415,7 @@ dependencies = [ "tower-layer", "tower-service", "tracing", - "uuid 1.3.0", + "uuid 1.4.1", ] [[package]] @@ -7376,20 +7456,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.23" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" +checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 1.0.109", + "quote 1.0.32", + "syn 2.0.27", ] [[package]] name = "tracing-core" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", "valuable", @@ -7431,16 +7511,16 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" +checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" dependencies = [ "matchers", "nu-ansi-term", "once_cell", "regex", "sharded-slab", - "smallvec", + "smallvec 1.11.0", "thread_local", "tracing", "tracing-core", @@ -7495,7 +7575,7 @@ dependencies = [ "lazy_static", "log", "rand 0.8.5", - "smallvec", + "smallvec 1.11.0", "thiserror", "tinyvec", "tokio", @@ -7519,7 +7599,7 @@ dependencies = [ "ipnet", "lazy_static", "rand 0.8.5", - "smallvec", + "smallvec 1.11.0", "thiserror", "tinyvec", "tokio", @@ -7540,7 +7620,7 @@ dependencies = [ "lru-cache", "parking_lot 0.12.1", "resolv-conf", - "smallvec", + "smallvec 1.11.0", "thiserror", "tokio", "tracing", @@ -7555,9 +7635,9 @@ checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "trybuild" -version = "1.0.78" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "223fc354447478d08231355617eb8c37affad0e83d33aeac30a8c275786b905a" +checksum = "a84e0202ea606ba5ebee8507ab2bfbe89b98551ed9b8f0be198109275cff284b" dependencies = [ "basic-toml", "glob", @@ -7635,9 +7715,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.10" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54675592c1dbefd78cbd98db9bacd89886e1ca50692a0692baefffdeb92dd58" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" @@ -7696,12 +7776,12 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.3.1" +version = "2.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" +checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" dependencies = [ "form_urlencoded", - "idna 0.3.0", + "idna 0.4.0", "percent-encoding", ] @@ -7723,23 +7803,29 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64a8922555b9500e3d865caed19330172cd67cbf82203f1a3311d8c305cc9f33" +[[package]] +name = "utf8parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" + [[package]] name = "uuid" version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", "serde", ] [[package]] name = "uuid" -version = "1.3.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1674845326ee10d37ca60470760d4288a6f80f304007d92e5c53bab78c9cfd79" +checksum = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", ] [[package]] @@ -7750,9 +7836,9 @@ checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "vergen" -version = "8.1.3" +version = "8.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e03272e388fb78fc79481a493424f78d77be1d55f21bcd314b5a6716e195afe" +checksum = "bbc5ad0d9d26b2c49a5ab7da76c3e79d3ee37e7821799f8223fcb8f2f391a2e7" dependencies = [ "anyhow", "rustversion", @@ -7786,11 +7872,10 @@ dependencies = [ [[package]] name = "want" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ - "log", "try-lock", ] @@ -7814,9 +7899,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b" +checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -7824,24 +7909,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9" +checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 1.0.109", + "quote 1.0.32", + "syn 2.0.27", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.34" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454" +checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" dependencies = [ "cfg-if", "js-sys", @@ -7851,38 +7936,38 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.84" +version = "0.2.87" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" +checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ - "quote 1.0.31", + "quote 1.0.32", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" +checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 1.0.109", + "quote 1.0.32", + "syn 2.0.27", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" +checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" [[package]] name = "web-sys" -version = "0.3.61" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97" +checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" dependencies = [ "js-sys", "wasm-bindgen", @@ -7890,18 +7975,18 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.23.0" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa54963694b65584e170cf5dc46aeb4dcaa5584e652ff5f3952e56d66aff0125" +checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338" dependencies = [ - "rustls-webpki", + "rustls-webpki 0.100.1", ] [[package]] name = "widestring" -version = "0.5.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17882f045410753661207383517a6f62ec3dbeb6a4ed2acce01f0728238d1983" +checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" [[package]] name = "wildmatch" @@ -7949,21 +8034,6 @@ dependencies = [ "windows-targets 0.48.1", ] -[[package]] -name = "windows-sys" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" -dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", -] - [[package]] name = "windows-sys" version = "0.45.0" @@ -8098,9 +8168,9 @@ checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "winnow" -version = "0.4.7" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca0ace3845f0d96209f0375e6d367e3eb87eb65d27d445bdc9f1843a26f39448" +checksum = "25b5872fa2e10bd067ae946f927e726d7d603eaeb6e02fa6a350e0722d2b8c11" dependencies = [ "memchr", ] @@ -8114,6 +8184,16 @@ dependencies = [ "winapi", ] +[[package]] +name = "winreg" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" +dependencies = [ + "cfg-if", + "windows-sys 0.48.0", +] + [[package]] name = "write16" version = "1.0.0" @@ -8156,9 +8236,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = 
"0.8.14" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52839dc911083a8ef63efa4d039d1f58b5e409f923e44c80828f206f66e5541c" +checksum = "47430998a7b5d499ccee752b41567bc3afc57e1327dc855b1a2aa44ce29b5fa1" [[package]] name = "xmltree" @@ -8194,7 +8274,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af46c169923ed7516eef0aa32b56d2651b229f57458ebe46b49ddd6efef5b7a2" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "syn 1.0.109", "synstructure 0.12.6", ] @@ -8215,7 +8295,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4eae7c1f7d4b8eafce526bc0771449ddc2f250881ae31c50d22c032b5a1c499" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "syn 1.0.109", "synstructure 0.12.6", ] @@ -8236,8 +8316,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 2.0.26", + "quote 1.0.32", + "syn 2.0.27", ] [[package]] @@ -8258,44 +8338,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "486558732d5dde10d0f8cb2936507c1bb21bc539d924c949baf5f36a58e51bac" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.31", + "quote 1.0.32", "syn 1.0.109", "synstructure 0.12.6", ] [[package]] name = "zstd" -version = "0.11.2+zstd.1.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" -dependencies = [ - "zstd-safe 5.0.2+zstd.1.5.2", -] - -[[package]] -name = "zstd" -version = "0.12.3+zstd.1.5.2" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76eea132fb024e0e13fd9c2f5d5d595d8a967aa72382ac2f9d39fcc95afd0806" +checksum = "1a27595e173641171fc74a1232b7b1c7a7cb6e18222c11e9dfb9888fa424c53c" dependencies = [ - "zstd-safe 6.0.5+zstd.1.5.4", -] - -[[package]] -name = "zstd-safe" -version = "5.0.2+zstd.1.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db" -dependencies = [ - "libc", - "zstd-sys", + "zstd-safe", ] [[package]] name = "zstd-safe" -version = "6.0.5+zstd.1.5.4" +version = "6.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d56d9e60b4b1758206c238a10165fbcae3ca37b01744e394c463463f6529d23b" +checksum = "ee98ffd0b48ee95e6c5168188e44a54550b1564d9d530ee21d5f0eaed1069581" dependencies = [ "libc", "zstd-sys", From 73652ed3bc7be1c117301a886572d6ee3677092e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 31 Jul 2023 13:36:12 +0200 Subject: [PATCH 291/722] chore: bump jsonrpsee 0.19 (#3989) --- Cargo.lock | 59 +++++++++++-------------- Cargo.toml | 5 +++ crates/rpc/ipc/Cargo.toml | 2 +- crates/rpc/ipc/src/server/ipc.rs | 12 +++-- crates/rpc/rpc-api/Cargo.toml | 2 +- crates/rpc/rpc-builder/Cargo.toml | 2 +- crates/rpc/rpc-builder/src/auth.rs | 4 +- crates/rpc/rpc-builder/src/lib.rs | 12 ++--- crates/rpc/rpc-builder/src/metrics.rs | 9 ++-- crates/rpc/rpc-engine-api/Cargo.toml | 4 +- crates/rpc/rpc-testing-util/Cargo.toml | 2 +- crates/rpc/rpc-types/Cargo.toml | 2 +- crates/rpc/rpc/Cargo.toml | 4 +- crates/rpc/rpc/src/layers/auth_layer.rs | 2 +- 14 files changed, 63 insertions(+), 58 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ca6c689e2304..c840411054b0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ 
-2515,14 +2515,15 @@ dependencies = [ [[package]] name = "gloo-net" -version = "0.2.6" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9902a044653b26b99f7e3693a42f171312d9be8b26b5697bd1e43ad1f8a35e10" +checksum = "a66b4e3c7d9ed8d315fd6b97c8b1f74a7c6ecbbc2320e65ae7ed38b7068cc620" dependencies = [ "futures-channel", "futures-core", "futures-sink", "gloo-utils", + "http", "js-sys", "pin-project", "serde", @@ -3279,9 +3280,9 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.18.2" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1822d18e4384a5e79d94dc9e4d1239cfa9fad24e55b44d2efeff5b394c9fece4" +checksum = "e5f3783308bddc49d0218307f66a09330c106fbd792c58bac5c8dc294fdd0f98" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -3296,9 +3297,9 @@ dependencies = [ [[package]] name = "jsonrpsee-client-transport" -version = "0.18.2" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11aa5766d5c430b89cb26a99b88f3245eb91534be8126102cea9e45ee3891b22" +checksum = "abc5630e4fa0096f00ec7b44d520701fda4504170cb85e22dca603ae5d7ad0d7" dependencies = [ "futures-channel", "futures-util", @@ -3318,9 +3319,9 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.18.2" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64c6832a55f662b5a6ecc844db24b8b9c387453f923de863062c60ce33d62b81" +checksum = "5aaa4c4d5fb801dcc316d81f76422db259809037a86b3194ae538dd026b05ed7" dependencies = [ "anyhow", "async-lock", @@ -3346,9 +3347,9 @@ dependencies = [ [[package]] name = "jsonrpsee-http-client" -version = "0.18.2" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1705c65069729e3dccff6fd91ee431d5d31cabcf00ce68a62a2c6435ac713af9" +checksum = "aa7165efcbfbc951d180162ff28fe91b657ed81925e37a35e4a396ce12109f96" dependencies = [ "async-trait", "hyper", @@ -3365,9 +3366,9 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.18.2" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6027ac0b197ce9543097d02a290f550ce1d9432bf301524b013053c0b75cc94" +checksum = "21dc12b1d4f16a86e8c522823c4fab219c88c03eb7c924ec0501a64bf12e058b" dependencies = [ "heck", "proc-macro-crate", @@ -3378,9 +3379,9 @@ dependencies = [ [[package]] name = "jsonrpsee-server" -version = "0.18.2" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f06661d1a6b6e5b85469dc9c29acfbb9b3bb613797a6fd10a3ebb8a70754057" +checksum = "6e79d78cfd5abd8394da10753723093c3ff64391602941c9c4b1d80a3414fd53" dependencies = [ "futures-util", "hyper", @@ -3398,9 +3399,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.18.2" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e5bf6c75ce2a4217421154adfc65a24d2b46e77286e59bba5d9fa6544ccc8f4" +checksum = "00aa7cc87bc42e04e26c8ac3e7186142f7fd2949c763d9b6a7e64a69672d8fb2" dependencies = [ "anyhow", "beef", @@ -3412,9 +3413,9 @@ dependencies = [ [[package]] name = "jsonrpsee-wasm-client" -version = "0.18.2" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34e6ea7c6d862e60f8baebd946c037b70c6808a4e4e31e792a4029184e3ce13a" +checksum = "0fe953c2801356f214d3f4051f786b3d11134512a46763ee8c39a9e3fa2cc1c0" dependencies = [ 
"jsonrpsee-client-transport", "jsonrpsee-core", @@ -3423,9 +3424,9 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.18.2" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a64b2589680ba1ad7863f279cd2d5083c1dc0a7c0ea959d22924553050f8ab9f" +checksum = "5c71b2597ec1c958c6d5bc94bb61b44d74eb28e69dc421731ab0035706f13882" dependencies = [ "http", "jsonrpsee-client-transport", @@ -6174,7 +6175,7 @@ checksum = "79ea77c539259495ce8ca47f53e66ae0330a8819f67e23ac96ca02f50e7b7d36" dependencies = [ "log", "ring", - "rustls-webpki 0.101.2", + "rustls-webpki", "sct", ] @@ -6199,16 +6200,6 @@ dependencies = [ "base64 0.21.2", ] -[[package]] -name = "rustls-webpki" -version = "0.100.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "rustls-webpki" version = "0.101.2" @@ -7975,11 +7966,11 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.23.1" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338" +checksum = "b291546d5d9d1eab74f069c77749f2cb8504a12caa20f0f2de93ddbf6f411888" dependencies = [ - "rustls-webpki 0.100.1", + "rustls-webpki", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index ee547d2256a4..e9e9ba79cb22 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -133,5 +133,10 @@ futures = "0.3.26" pin-project = "1.0.12" futures-util = "0.3.25" +## json +jsonrpsee = { version = "0.19" } +jsonrpsee-core = { version = "0.19" } +jsonrpsee-types = { version = "0.19" } + ## crypto secp256k1 = { version = "0.27.0", default-features = false, features = ["global-context", "rand-std", "recovery"] } diff --git a/crates/rpc/ipc/Cargo.toml b/crates/rpc/ipc/Cargo.toml index e78c8f5dee3e..0dc0bebee6da 100644 --- a/crates/rpc/ipc/Cargo.toml +++ b/crates/rpc/ipc/Cargo.toml @@ -23,7 +23,7 @@ pin-project.workspace = true tower = "0.4" # misc -jsonrpsee = { version = "0.18", features = ["server", "client"] } +jsonrpsee = { workspace = true, features = ["server", "client"] } serde_json.workspace = true tracing.workspace = true bytes.workspace = true diff --git a/crates/rpc/ipc/src/server/ipc.rs b/crates/rpc/ipc/src/server/ipc.rs index c03f3ca385a1..ff04d2c5e718 100644 --- a/crates/rpc/ipc/src/server/ipc.rs +++ b/crates/rpc/ipc/src/server/ipc.rs @@ -6,7 +6,7 @@ use jsonrpsee::{ tracing::{rx_log_from_json, tx_log_from_str}, JsonRawValue, }, - helpers::batch_response_error, + helpers::{batch_response_error, MethodResponseResult}, server::{ logger, logger::{Logger, TransportProtocol}, @@ -219,14 +219,20 @@ pub(crate) async fn execute_call( }; tx_log_from_str(&response.as_response().result, max_log_length); - logger.on_result(name, response.as_response().success, request_start, TransportProtocol::Http); + logger.on_result( + name, + response.as_response().success_or_error, + request_start, + TransportProtocol::Http, + ); response } #[instrument(name = "notification", fields(method = notif.method.as_ref()), skip(notif, max_log_length), level = "TRACE")] fn execute_notification(notif: Notif<'_>, max_log_length: u32) -> MethodResponse { rx_log_from_json(¬if, max_log_length); - let response = MethodResponse { result: String::new(), success: true }; + let response = + MethodResponse { result: String::new(), success_or_error: MethodResponseResult::Success }; 
tx_log_from_str(&response.result, max_log_length); response } diff --git a/crates/rpc/rpc-api/Cargo.toml b/crates/rpc/rpc-api/Cargo.toml index 7b220ba11232..53a7676b1126 100644 --- a/crates/rpc/rpc-api/Cargo.toml +++ b/crates/rpc/rpc-api/Cargo.toml @@ -16,7 +16,7 @@ reth-primitives.workspace = true reth-rpc-types.workspace = true # misc -jsonrpsee = { version = "0.18", features = ["server", "macros"] } +jsonrpsee = { workspace = true, features = ["server", "macros"] } serde_json.workspace = true [features] diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index 7f8869e64de3..e7f7cf826d55 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -24,7 +24,7 @@ reth-transaction-pool.workspace = true reth-metrics = { workspace = true, features = ["common"] } # rpc/net -jsonrpsee = { version = "0.18", features = ["server"] } +jsonrpsee = { workspace = true, features = ["server"] } tower-http = { version = "0.4", features = ["full"] } tower = { version = "0.4", features = ["full"] } hyper = "0.14" diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index d7d14f963d1f..95e64b8fbfe2 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -115,7 +115,7 @@ where let local_addr = server.local_addr()?; - let handle = server.start(module)?; + let handle = server.start(module); Ok(AuthServerHandle { handle, local_addr, secret }) } @@ -154,7 +154,7 @@ impl AuthServerConfig { let local_addr = server.local_addr()?; - let handle = server.start(module.inner)?; + let handle = server.start(module.inner); Ok(AuthServerHandle { handle, local_addr, secret }) } } diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 44af9cb5e1e8..2ec4ea0a33c4 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -1500,7 +1500,7 @@ impl WsHttpServers { config.ensure_ws_http_identical()?; if let Some(module) = http_module.or(ws_module) { - let handle = both.start(module).await?; + let handle = both.start(module).await; http_handle = Some(handle.clone()); ws_handle = Some(handle); } @@ -1509,12 +1509,12 @@ impl WsHttpServers { if let Some((server, module)) = http.and_then(|server| http_module.map(|module| (server, module))) { - http_handle = Some(server.start(module).await?); + http_handle = Some(server.start(module).await); } if let Some((server, module)) = ws.and_then(|server| ws_module.map(|module| (server, module))) { - ws_handle = Some(server.start(module).await?); + ws_handle = Some(server.start(module).await); } } } @@ -1541,10 +1541,10 @@ enum WsHttpServerKind { impl WsHttpServerKind { /// Starts the server and returns the handle - async fn start(self, module: RpcModule<()>) -> Result { + async fn start(self, module: RpcModule<()>) -> ServerHandle { match self { - WsHttpServerKind::Plain(server) => Ok(server.start(module)?), - WsHttpServerKind::WithCors(server) => Ok(server.start(module)?), + WsHttpServerKind::Plain(server) => server.start(module), + WsHttpServerKind::WithCors(server) => server.start(module), } } diff --git a/crates/rpc/rpc-builder/src/metrics.rs b/crates/rpc/rpc-builder/src/metrics.rs index e40e2c232586..ee3ed62328a8 100644 --- a/crates/rpc/rpc-builder/src/metrics.rs +++ b/crates/rpc/rpc-builder/src/metrics.rs @@ -1,4 +1,7 @@ -use jsonrpsee::server::logger::{HttpRequest, Logger, MethodKind, Params, TransportProtocol}; +use jsonrpsee::{ + helpers::MethodResponseResult, + server::logger::{HttpRequest, 
Logger, MethodKind, Params, TransportProtocol}, +}; use reth_metrics::{ metrics::{self, Counter, Histogram}, Metrics, @@ -58,13 +61,13 @@ impl Logger for RpcServerMetrics { fn on_result( &self, _method_name: &str, - success: bool, + success: MethodResponseResult, started_at: Self::Instant, _transport: TransportProtocol, ) { // capture call duration self.call_latency.record(started_at.elapsed().as_millis() as f64); - if !success { + if success.is_error() { self.failed_calls.increment(1); } else { self.successful_calls.increment(1); diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index aa5157add0d0..8570e610f35b 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -25,8 +25,8 @@ tokio = { workspace = true, features = ["sync"] } # misc async-trait.workspace = true thiserror.workspace = true -jsonrpsee-types = "0.18" -jsonrpsee-core = "0.18" +jsonrpsee-types.workspace = true +jsonrpsee-core.workspace = true tracing.workspace = true [dev-dependencies] diff --git a/crates/rpc/rpc-testing-util/Cargo.toml b/crates/rpc/rpc-testing-util/Cargo.toml index 93415adb6816..5d0d1a2b2cf2 100644 --- a/crates/rpc/rpc-testing-util/Cargo.toml +++ b/crates/rpc/rpc-testing-util/Cargo.toml @@ -21,7 +21,7 @@ async-trait.workspace = true futures.workspace = true # misc -jsonrpsee = { version = "0.18", features = ["client", "async-client"] } +jsonrpsee = { workspace = true, features = ["client", "async-client"] } serde_json.workspace = true [dev-dependencies] diff --git a/crates/rpc/rpc-types/Cargo.toml b/crates/rpc/rpc-types/Cargo.toml index 0ebeeff0cefb..8947d325b217 100644 --- a/crates/rpc/rpc-types/Cargo.toml +++ b/crates/rpc/rpc-types/Cargo.toml @@ -22,7 +22,7 @@ thiserror.workspace = true itertools = "0.10" serde = { workspace = true, features = ["derive"] } serde_json.workspace = true -jsonrpsee-types = { version = "0.18" } +jsonrpsee-types.workspace = true [dev-dependencies] # misc diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index dfd86b271070..1791821f1ce3 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -35,7 +35,7 @@ ethers-core = { workspace = true, features = ["eip712"] } revm-primitives = { workspace = true, features = ["serde"] } # rpc -jsonrpsee = { version = "0.18" } +jsonrpsee.workspace = true http = "0.2.8" http-body = "0.4.5" hyper = "0.14.24" @@ -63,7 +63,7 @@ schnellru = "0.2" futures.workspace = true [dev-dependencies] -jsonrpsee = { version = "0.18", features = ["client"] } +jsonrpsee = { workspace = true, features = ["client"] } assert_matches = "1.5.0" tempfile = "3.5.0" reth-interfaces = { workspace = true, features = ["test-utils"] } diff --git a/crates/rpc/rpc/src/layers/auth_layer.rs b/crates/rpc/rpc/src/layers/auth_layer.rs index b3a92de804bf..0548320eb113 100644 --- a/crates/rpc/rpc/src/layers/auth_layer.rs +++ b/crates/rpc/rpc/src/layers/auth_layer.rs @@ -286,7 +286,7 @@ mod tests { let mut module = RpcModule::new(()); module.register_method("greet_melkor", |_, _| "You are the dark lord").unwrap(); - server.start(module).unwrap() + server.start(module) } fn to_u64(time: SystemTime) -> u64 { From ed3d7366a0f994df13436620788091152b61be3d Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 31 Jul 2023 12:34:14 +0100 Subject: [PATCH 292/722] docs: add additional documentation to `TxType` (#3996) --- crates/primitives/src/transaction/tx_type.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git 
a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index 64788e46568c..822965a18f11 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -17,6 +17,12 @@ pub const EIP1559_TX_TYPE_ID: u8 = 2; pub(crate) const EIP4844_TX_TYPE_ID: u8 = 3; /// Transaction Type +/// +/// Currently being used as 2-bit type when encoding it to [`Compact`] on +/// [`crate::TransactionSignedNoHash`]. Adding more transaction types will break the codec and +/// database format. +/// +/// Other required changes when adding a new type can be seen on [PR#3953](https://github.com/paradigmxyz/reth/pull/3953/files). #[derive_arbitrary(compact)] #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Serialize, Deserialize)] pub enum TxType { From e2142481c01cf6b269580e4e4d4c8f8734d4bc54 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 31 Jul 2023 13:53:24 +0200 Subject: [PATCH 293/722] chore(deps): bump itertools (#3998) --- Cargo.lock | 10 +++++----- Cargo.toml | 1 + crates/net/downloaders/Cargo.toml | 4 ++-- crates/prune/Cargo.toml | 4 ++-- crates/rpc/rpc-types/Cargo.toml | 2 +- crates/stages/Cargo.toml | 4 ++-- crates/storage/provider/Cargo.toml | 2 +- 7 files changed, 14 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c840411054b0..5ba8118176f3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5272,7 +5272,7 @@ dependencies = [ "assert_matches", "futures", "futures-util", - "itertools 0.10.5", + "itertools 0.11.0", "pin-project", "rayon", "reth-db", @@ -5613,7 +5613,7 @@ version = "0.1.0-alpha.4" dependencies = [ "auto_impl", "derive_more", - "itertools 0.10.5", + "itertools 0.11.0", "parking_lot 0.12.1", "pin-project", "reth-db", @@ -5633,7 +5633,7 @@ name = "reth-prune" version = "0.1.0-alpha.4" dependencies = [ "assert_matches", - "itertools 0.10.5", + "itertools 0.11.0", "rayon", "reth-db", "reth-interfaces", @@ -5839,7 +5839,7 @@ dependencies = [ name = "reth-rpc-types" version = "0.1.0-alpha.4" dependencies = [ - "itertools 0.10.5", + "itertools 0.11.0", "jsonrpsee-types", "rand 0.8.5", "reth-primitives", @@ -5859,7 +5859,7 @@ dependencies = [ "async-trait", "criterion", "futures-util", - "itertools 0.10.5", + "itertools 0.11.0", "num-traits", "paste", "pin-project", diff --git a/Cargo.toml b/Cargo.toml index e9e9ba79cb22..738636e6b428 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -117,6 +117,7 @@ serde = { version = "1.0", default-features = false } rand = "0.8.5" strum = "0.25" rayon = "1.7" +itertools = "0.11" ### proc-macros proc-macro2 = "1.0" diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index b5ca3d6b5b02..281dbfc6c784 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -32,7 +32,7 @@ thiserror.workspace = true # optional deps for the test-utils feature reth-rlp = { workspace = true, optional = true } tempfile = { version = "3.3", optional = true } -itertools = { version = "0.10", optional = true } +itertools = { workspace = true, optional = true } [dev-dependencies] reth-db = { path = "../../storage/db", features = ["test-utils"] } @@ -42,7 +42,7 @@ reth-tracing = { path = "../../tracing" } assert_matches = "1.5.0" tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } reth-rlp.workspace = true -itertools = "0.10" +itertools.workspace = true tempfile = "3.3" diff --git a/crates/prune/Cargo.toml b/crates/prune/Cargo.toml index 13d5ae4f9169..99a4901cb133 100644 --- 
a/crates/prune/Cargo.toml +++ b/crates/prune/Cargo.toml @@ -20,8 +20,8 @@ reth-interfaces.workspace = true # misc tracing.workspace = true thiserror.workspace = true -itertools = "0.10" -rayon = "1.6.0" +itertools.workspace = true +rayon.workspace = true [dev-dependencies] # reth diff --git a/crates/rpc/rpc-types/Cargo.toml b/crates/rpc/rpc-types/Cargo.toml index 8947d325b217..4915aa98d5a6 100644 --- a/crates/rpc/rpc-types/Cargo.toml +++ b/crates/rpc/rpc-types/Cargo.toml @@ -19,7 +19,7 @@ reth-rlp.workspace = true thiserror.workspace = true # misc -itertools = "0.10" +itertools.workspace = true serde = { workspace = true, features = ["derive"] } serde_json.workspace = true jsonrpsee-types.workspace = true diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index 9d975e28e8ca..960ca15aa572 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -40,7 +40,7 @@ serde.workspace = true # misc thiserror.workspace = true aquamarine = "0.3.0" -itertools = "0.10.5" +itertools.workspace = true rayon.workspace = true num-traits = "0.2.15" @@ -56,7 +56,7 @@ reth-rlp.workspace = true reth-revm = { path = "../revm" } reth-trie = { path = "../trie", features = ["test-utils"] } -itertools = "0.10.5" +itertools.workspace = true tokio = { workspace = true, features = ["rt", "sync", "macros"] } assert_matches = "1.5.0" rand.workspace = true diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 25b1fd3ccddd..62ceb6fa182a 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -25,7 +25,7 @@ tracing.workspace = true # misc auto_impl = "1.0" -itertools = "0.10" +itertools.workspace = true pin-project.workspace = true derive_more = "0.99" parking_lot = "0.12" From 922d35df0d28e29f05b980479da285a7bb91ad1f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 31 Jul 2023 14:35:39 +0200 Subject: [PATCH 294/722] chore(deps): make jsonrpsee types a feature (#3999) --- crates/rpc/rpc-types/Cargo.toml | 5 ++++- crates/rpc/rpc-types/src/eth/block.rs | 5 +++-- .../rpc-types/src/eth/engine/forkchoice.rs | 1 + crates/rpc/rpc-types/src/eth/filter.rs | 19 ++++++++++--------- 4 files changed, 18 insertions(+), 12 deletions(-) diff --git a/crates/rpc/rpc-types/Cargo.toml b/crates/rpc/rpc-types/Cargo.toml index 4915aa98d5a6..31a09d60b5f4 100644 --- a/crates/rpc/rpc-types/Cargo.toml +++ b/crates/rpc/rpc-types/Cargo.toml @@ -22,7 +22,10 @@ thiserror.workspace = true itertools.workspace = true serde = { workspace = true, features = ["derive"] } serde_json.workspace = true -jsonrpsee-types.workspace = true +jsonrpsee-types = { workspace = true, optional = true } + +[features] +default = ["jsonrpsee-types"] [dev-dependencies] # misc diff --git a/crates/rpc/rpc-types/src/eth/block.rs b/crates/rpc/rpc-types/src/eth/block.rs index a246baf20a7d..5b07b300878f 100644 --- a/crates/rpc/rpc-types/src/eth/block.rs +++ b/crates/rpc/rpc-types/src/eth/block.rs @@ -379,7 +379,6 @@ pub struct BlockOverrides { #[cfg(test)] mod tests { use super::*; - use jsonrpsee_types::SubscriptionResponse; #[test] fn test_full_conversion() { @@ -391,7 +390,9 @@ mod tests { } #[test] - fn serde_header() { + #[cfg(feature = "jsonrpsee-types")] + fn serde_json_header() { + use jsonrpsee_types::SubscriptionResponse; let resp = 
r#"{"jsonrpc":"2.0","method":"eth_subscribe","params":{"subscription":"0x7eef37ff35d471f8825b1c8f67a5d3c0","result":{"hash":"0x7a7ada12e140961a32395059597764416499f4178daf1917193fad7bd2cc6386","parentHash":"0xdedbd831f496e705e7f2ec3c8dcb79051040a360bf1455dbd7eb8ea6ad03b751","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","number":"0x8","gasUsed":"0x0","gasLimit":"0x1c9c380","extraData":"0x","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x642aa48f","difficulty":"0x0","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000"}}}"#; let _header: SubscriptionResponse<'_, Header> = serde_json::from_str(resp).unwrap(); diff --git a/crates/rpc/rpc-types/src/eth/engine/forkchoice.rs b/crates/rpc/rpc-types/src/eth/engine/forkchoice.rs index ea215df5c167..f1c2ca4a79c7 100644 --- a/crates/rpc/rpc-types/src/eth/engine/forkchoice.rs +++ b/crates/rpc/rpc-types/src/eth/engine/forkchoice.rs @@ -50,6 +50,7 @@ pub enum ForkchoiceUpdateError { UnknownFinalBlock, } +#[cfg(feature = "jsonrpsee-types")] impl From for jsonrpsee_types::error::ErrorObject<'static> { fn from(value: ForkchoiceUpdateError) -> Self { match value { diff --git a/crates/rpc/rpc-types/src/eth/filter.rs b/crates/rpc/rpc-types/src/eth/filter.rs index 29ca1f4124d7..bfe92fbe0703 100644 --- a/crates/rpc/rpc-types/src/eth/filter.rs +++ b/crates/rpc/rpc-types/src/eth/filter.rs @@ -1,6 +1,5 @@ use crate::Log as RpcLog; use itertools::{EitherOrBoth::*, Itertools}; -use jsonrpsee_types::SubscriptionId; use reth_primitives::{ bloom::{Bloom, Input}, keccak256, Address, BlockNumberOrTag, Log, H160, H256, U256, U64, @@ -874,7 +873,7 @@ impl<'de> Deserialize<'de> for FilterChanges { } } -/// Owned equivalent of [SubscriptionId] +/// Owned equivalent of a `SubscriptionId` #[derive(Debug, PartialEq, Clone, Hash, Eq, Deserialize, Serialize)] #[serde(deny_unknown_fields)] #[serde(untagged)] @@ -885,20 +884,22 @@ pub enum FilterId { Str(String), } -impl From for SubscriptionId<'_> { +#[cfg(feature = "jsonrpsee-types")] +impl From for jsonrpsee_types::SubscriptionId<'_> { fn from(value: FilterId) -> Self { match value { - FilterId::Num(n) => SubscriptionId::Num(n), - FilterId::Str(s) => SubscriptionId::Str(s.into()), + FilterId::Num(n) => jsonrpsee_types::SubscriptionId::Num(n), + FilterId::Str(s) => jsonrpsee_types::SubscriptionId::Str(s.into()), } } } -impl From> for FilterId { - fn from(value: SubscriptionId<'_>) -> Self { +#[cfg(feature = "jsonrpsee-types")] +impl From> for FilterId { + fn from(value: jsonrpsee_types::SubscriptionId<'_>) -> Self { match value { - SubscriptionId::Num(n) => FilterId::Num(n), - SubscriptionId::Str(s) => FilterId::Str(s.into_owned()), + 
jsonrpsee_types::SubscriptionId::Num(n) => FilterId::Num(n), + jsonrpsee_types::SubscriptionId::Str(s) => FilterId::Str(s.into_owned()), } } } From 56b2ef19b529d5099c0981ad48256a2efe289164 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Mon, 31 Jul 2023 14:46:35 +0200 Subject: [PATCH 295/722] ci: add weekly cargo update workflow (#3986) --- .github/workflows/dependencies.yml | 61 ++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 .github/workflows/dependencies.yml diff --git a/.github/workflows/dependencies.yml b/.github/workflows/dependencies.yml new file mode 100644 index 000000000000..e731a7a56923 --- /dev/null +++ b/.github/workflows/dependencies.yml @@ -0,0 +1,61 @@ +# Automatically run `cargo update` periodically + +name: Update Dependencies + +on: + schedule: + # Run weekly + - cron: "0 0 * * SUN" + workflow_dispatch: + # Needed so we can run it manually + +env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + BRANCH: cargo-update + TITLE: "chore(deps): weekly `cargo update`" + BODY: | + Automation to keep dependencies in `Cargo.lock` current. + +
<details><summary><strong>cargo update log</strong></summary>
+    <p>
+
+    ```log
+    $cargo_update_log
+    ```
+
+    </p>
+    </details>
+ +jobs: + update: + name: Update + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: dtolnay/rust-toolchain@nightly + + - name: cargo update + # Remove first line that always just says "Updating crates.io index" + run: cargo update --color never 2>&1 | sed '/crates.io index/d' | tee -a cargo_update.log + + - name: craft commit message and PR body + id: msg + run: | + export cargo_update_log="$(cat cargo_update.log)" + + echo "commit_message<> $GITHUB_OUTPUT + printf "$TITLE\n\n$cargo_update_log\n" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + + echo "body<> $GITHUB_OUTPUT + echo "$BODY" | envsubst >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + + - name: Create Pull Request + uses: peter-evans/create-pull-request@v5 + with: + add-paths: ./Cargo.lock + commit-message: ${{ steps.msg.outputs.commit_message }} + title: ${{ env.TITLE }} + body: ${{ steps.msg.outputs.body }} + branch: ${{ env.BRANCH }} From da3bc64fb423a4d02cd61b14756e23aab13498c6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 31 Jul 2023 15:01:58 +0200 Subject: [PATCH 296/722] fix: prevent child call out of bounds (#3920) --- crates/revm/revm-inspectors/src/tracing/mod.rs | 1 - crates/revm/revm-inspectors/src/tracing/types.rs | 13 +++++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/crates/revm/revm-inspectors/src/tracing/mod.rs b/crates/revm/revm-inspectors/src/tracing/mod.rs index fe6c5b8609d4..de8a3b3b24a9 100644 --- a/crates/revm/revm-inspectors/src/tracing/mod.rs +++ b/crates/revm/revm-inspectors/src/tracing/mod.rs @@ -395,7 +395,6 @@ where if self.config.record_steps { self.gas_inspector.step_end(interp, data, is_static, eval); self.fill_step_on_step_end(interp, data, eval); - return eval } InstructionResult::Continue } diff --git a/crates/revm/revm-inspectors/src/tracing/types.rs b/crates/revm/revm-inspectors/src/tracing/types.rs index a7cb9ef4aede..83c720dd2815 100644 --- a/crates/revm/revm-inspectors/src/tracing/types.rs +++ b/crates/revm/revm-inspectors/src/tracing/types.rs @@ -267,9 +267,12 @@ impl CallTraceNode { opcode::CALL | opcode::STATICCALL | opcode::CALLCODE => { - let call_id = self.children[child_id]; - item.call_child_id = Some(call_id); - child_id += 1; + // The opcode of this step is a call but it's possible that this step resulted + // in a revert or out of gas error in which case there's no actual child call executed and recorded: + if let Some(call_id) = self.children.get(child_id).copied() { + item.call_child_id = Some(call_id); + child_id += 1; + } } _ => {} } @@ -532,7 +535,9 @@ pub(crate) struct CallTraceStep { pub(crate) gas_cost: u64, /// Change of the contract state after step execution (effect of the SLOAD/SSTORE instructions) pub(crate) storage_change: Option, - /// Final status of the call + /// Final status of the step + /// + /// This is set after the step was executed. 
pub(crate) status: InstructionResult,
 }

From 3118e27d9c60351c539c71e776cd35dd363f343a Mon Sep 17 00:00:00 2001
From: pistomat
Date: Mon, 31 Jul 2023 15:29:29 +0200
Subject: [PATCH 297/722] fix(tracing): extend Parity tracing
 VmExecutedOperation (#3997)

---
 crates/revm/revm-inspectors/src/tracing/builder/parity.rs | 2 +-
 crates/rpc/rpc-types/src/eth/trace/parity.rs | 5 +++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs
index cce86c43445c..7c3720621bc8 100644
--- a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs
+++ b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs
@@ -330,7 +330,7 @@ impl ParityTraceBuilder {

         let maybe_execution = Some(VmExecutedOperation {
             used: step.gas_cost,
-            push: step.new_stack.map(|new_stack| new_stack.into()),
+            push: step.new_stack.into_iter().map(|new_stack| new_stack.into()).collect(),
             mem: maybe_memory,
             store: maybe_storage,
         });
diff --git a/crates/rpc/rpc-types/src/eth/trace/parity.rs b/crates/rpc/rpc-types/src/eth/trace/parity.rs
index f3fa4eb9b1f6..f99f97de6744 100644
--- a/crates/rpc/rpc-types/src/eth/trace/parity.rs
+++ b/crates/rpc/rpc-types/src/eth/trace/parity.rs
@@ -308,16 +308,16 @@ pub struct VmExecutedOperation {
     /// The total gas used.
     pub used: u64,
     /// The stack item placed, if any.
-    pub push: Option<H256>,
+    pub push: Vec<H256>,
     /// If altered, the memory delta.
     pub mem: Option<MemoryDelta>,
     /// The altered storage value, if any.
     pub store: Option<StorageDelta>,
 }

+/// A diff of some chunk of memory.
 #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
-/// A diff of some chunk of memory.
 pub struct MemoryDelta {
     /// Offset into memory the change begins.
     pub off: usize,
@@ -325,6 +325,7 @@ pub struct MemoryDelta {
     pub data: Bytes,
 }

+/// A diff of some storage value.
 #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct StorageDelta {

From 0b913e22656cf55cafac1f7372b1d87a44c4e950 Mon Sep 17 00:00:00 2001
From: joshieDo <93316087+joshieDo@users.noreply.github.com>
Date: Mon, 31 Jul 2023 14:31:43 +0100
Subject: [PATCH 298/722] fix(pipeline): clear `MerkleStage` checkpoints on
 invalid root (#3973)

Co-authored-by: Bjerg
Co-authored-by: Oliver Nordbjerg
---
 crates/stages/src/pipeline/mod.rs | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/crates/stages/src/pipeline/mod.rs b/crates/stages/src/pipeline/mod.rs
index a108695cfade..d15fefacf679 100644
--- a/crates/stages/src/pipeline/mod.rs
+++ b/crates/stages/src/pipeline/mod.rs
@@ -431,6 +431,19 @@ where
                     "Stage encountered a validation error: {error}"
                 );

+                // FIXME: When handling errors, we do not commit the database transaction.
+                // This leads to the Merkle stage not clearing its
+                // checkpoint, and restarting from an invalid place.
+                drop(provider_rw);
+                provider_rw = factory.provider_rw().map_err(PipelineError::Interface)?;
+                provider_rw
+                    .save_stage_checkpoint_progress(StageId::MerkleExecute, vec![])?;
+                provider_rw.save_stage_checkpoint(
+                    StageId::MerkleExecute,
+                    prev_checkpoint.unwrap_or_default(),
+                )?;
+                provider_rw.commit()?;
+
                 // We unwind because of a validation error. If the unwind itself fails,
                 // we bail entirely, otherwise we restart the execution loop from the
                 // beginning.
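For context on the fix above: a stage checkpoint written into a database transaction only survives if that transaction is committed, which is why the handler drops the stale `provider_rw`, opens a fresh one, resets the `MerkleExecute` checkpoint, and commits before unwinding. A minimal standalone sketch of that ordering, using a hypothetical `ProviderRw` stand-in rather than reth's real provider API:

```rust
use std::collections::HashMap;

/// Hypothetical stand-in for a read-write database transaction.
#[derive(Default)]
struct ProviderRw {
    checkpoints: HashMap<&'static str, u64>,
}

impl ProviderRw {
    fn save_stage_checkpoint(&mut self, stage: &'static str, block: u64) {
        self.checkpoints.insert(stage, block);
    }

    /// Consumes the transaction; writes become visible only on commit.
    fn commit(self) -> HashMap<&'static str, u64> {
        self.checkpoints
    }
}

fn main() {
    // A transaction dropped without commit loses its writes.
    let mut stale = ProviderRw::default();
    stale.save_stage_checkpoint("MerkleExecute", 42);
    drop(stale); // the uncommitted checkpoint vanishes here

    // So the fix reopens a fresh transaction, resets the checkpoint,
    // and commits it before the unwind starts.
    let mut fresh = ProviderRw::default();
    fresh.save_stage_checkpoint("MerkleExecute", 0);
    let persisted = fresh.commit();
    assert_eq!(persisted["MerkleExecute"], 0);
}
```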
From 5823255031bdd6fc7eb2cfc34c68f80eb6a0b36f Mon Sep 17 00:00:00 2001
From: Alessandro Mazza <121622391+alessandromazza98@users.noreply.github.com>
Date: Mon, 31 Jul 2023 15:40:14 +0200
Subject: [PATCH 299/722] feat: store logs in different folders based on the
 chain (#3948)

Co-authored-by: Bjerg
Co-authored-by: Oliver Nordbjerg
---
 bin/reth/src/cli.rs | 44 +++++++++++++++++++++++++++++++++++++++++++-
 bin/reth/src/dirs.rs | 5 +++++
 2 files changed, 48 insertions(+), 1 deletion(-)

diff --git a/bin/reth/src/cli.rs b/bin/reth/src/cli.rs
index ce5d4a42ca7c..9423235ebbe7 100644
--- a/bin/reth/src/cli.rs
+++ b/bin/reth/src/cli.rs
@@ -1,5 +1,6 @@
 //! CLI definition and entrypoint to executable
 use crate::{
+    args::utils::genesis_value_parser,
     chain, config, db, debug_cmd,
     dirs::{LogsDir, PlatformPath},
     node, p2p,
@@ -8,11 +9,13 @@ use crate::{
     version::{LONG_VERSION, SHORT_VERSION},
 };
 use clap::{ArgAction, Args, Parser, Subcommand};
+use reth_primitives::ChainSpec;
 use reth_tracing::{
     tracing::{metadata::LevelFilter, Level, Subscriber},
     tracing_subscriber::{filter::Directive, registry::LookupSpan, EnvFilter},
     BoxedLayer, FileWorkerGuard,
 };
+use std::sync::Arc;

 /// The main reth cli interface.
 ///
@@ -24,6 +27,25 @@ pub struct Cli {
     #[clap(subcommand)]
     command: Commands,

+    /// The chain this node is running.
+    ///
+    /// Possible values are either a built-in chain or the path to a chain specification file.
+    ///
+    /// Built-in chains:
+    /// - mainnet
+    /// - goerli
+    /// - sepolia
+    #[arg(
+        long,
+        value_name = "CHAIN_OR_PATH",
+        global = true,
+        verbatim_doc_comment,
+        default_value = "mainnet",
+        value_parser = genesis_value_parser,
+        global = true,
+    )]
+    chain: Arc<ChainSpec>,
+
     #[clap(flatten)]
     logs: Logs,

@@ -33,7 +55,10 @@ pub struct Cli {

 impl Cli {
     /// Execute the configured cli command.
-    pub fn run(self) -> eyre::Result<()> {
+    pub fn run(mut self) -> eyre::Result<()> {
+        // add network name to logs dir
+        self.logs.log_directory = self.logs.log_directory.join(self.chain.chain.to_string());
+
         let _guard = self.init_tracing()?;

         let runner = CliRunner::default();
@@ -213,4 +238,21 @@ mod tests {
             assert_eq!(err.kind(), clap::error::ErrorKind::DisplayHelp);
         }
     }
+
+    /// Tests that the log directory is parsed correctly. It's always tied to the specific chain's
+    /// name
+    #[test]
+    fn parse_logs_path() {
+        let mut reth = Cli::try_parse_from(["reth", "node", "--log.persistent"]).unwrap();
+        reth.logs.log_directory = reth.logs.log_directory.join(reth.chain.chain.to_string());
+        let log_dir = reth.logs.log_directory;
+        assert!(log_dir.as_ref().ends_with("reth/logs/mainnet"), "{:?}", log_dir);
+
+        let mut reth =
+            Cli::try_parse_from(["reth", "node", "--chain", "sepolia", "--log.persistent"])
+                .unwrap();
+        reth.logs.log_directory = reth.logs.log_directory.join(reth.chain.chain.to_string());
+        let log_dir = reth.logs.log_directory;
+        assert!(log_dir.as_ref().ends_with("reth/logs/sepolia"), "{:?}", log_dir);
+    }
 }
diff --git a/bin/reth/src/dirs.rs b/bin/reth/src/dirs.rs
index 796377b6a164..3305e8ab7138 100644
--- a/bin/reth/src/dirs.rs
+++ b/bin/reth/src/dirs.rs
@@ -171,6 +171,11 @@ impl<D> PlatformPath<D> {
         let platform_path = PlatformPath::<D>(path, std::marker::PhantomData);
         ChainPath::new(platform_path, chain)
     }
+
+    /// Map the inner path to a new type `T`.
+    pub fn map_to<T>(&self) -> PlatformPath<T> {
+        PlatformPath(self.0.clone(), std::marker::PhantomData)
+    }
 }

 /// An Optional wrapper type around [PlatformPath].
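The new test above pins down the observable behavior: each chain gets its own subdirectory under the shared logs root. A rough sketch of that path logic using only the standard library (the base path is an invented example, not reth's actual default):

```rust
use std::path::{Path, PathBuf};

/// Mirrors `log_directory.join(chain)`: one log folder per network.
fn chain_log_dir(base: &Path, chain: &str) -> PathBuf {
    base.join(chain)
}

fn main() {
    let base = PathBuf::from("/home/user/.cache/reth/logs"); // example only
    assert!(chain_log_dir(&base, "mainnet").ends_with("reth/logs/mainnet"));
    assert!(chain_log_dir(&base, "sepolia").ends_with("reth/logs/sepolia"));
}
```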
From 1ac2f15f1d55e73d238af515c7792d09ed9c3ad5 Mon Sep 17 00:00:00 2001
From: Dan Cline <6798349+Rjected@users.noreply.github.com>
Date: Mon, 31 Jul 2023 09:40:50 -0400
Subject: [PATCH 300/722] feat: reth db diff (#3917)

---
 bin/reth/src/db/diff.rs | 410 ++++++++++++++++++
 bin/reth/src/db/mod.rs | 8 +
 crates/primitives/src/prune/part.rs | 2 +-
 .../storage/db/src/tables/models/accounts.rs | 4 +-
 .../db/src/tables/models/sharded_key.rs | 12 +
 .../src/tables/models/storage_sharded_key.rs | 4 +-
 6 files changed, 437 insertions(+), 3 deletions(-)
 create mode 100644 bin/reth/src/db/diff.rs

diff --git a/bin/reth/src/db/diff.rs b/bin/reth/src/db/diff.rs
new file mode 100644
index 000000000000..b7527b0ab188
--- /dev/null
+++ b/bin/reth/src/db/diff.rs
@@ -0,0 +1,410 @@
+use std::{
+    collections::HashMap,
+    fmt::Debug,
+    fs::{self, File},
+    hash::Hash,
+    io::Write,
+    path::{Path, PathBuf},
+};
+
+use crate::{
+    args::DatabaseArgs,
+    dirs::{DataDirPath, PlatformPath},
+    utils::DbTool,
+};
+use clap::Parser;
+
+use reth_db::{
+    cursor::DbCursorRO, database::Database, open_db_read_only, table::Table, transaction::DbTx,
+    AccountChangeSet, AccountHistory, AccountsTrie, BlockBodyIndices, BlockOmmers,
+    BlockWithdrawals, Bytecodes, CanonicalHeaders, DatabaseEnvRO, HashedAccount, HashedStorage,
+    HeaderNumbers, HeaderTD, Headers, PlainAccountState, PlainStorageState, PruneCheckpoints,
+    Receipts, StorageChangeSet, StorageHistory, StoragesTrie, SyncStage, SyncStageProgress, Tables,
+    TransactionBlock, Transactions, TxHashNumber, TxSenders,
+};
+use tracing::info;
+
+#[derive(Parser, Debug)]
+/// The arguments for the `reth db diff` command
+pub struct Command {
+    /// The path to the data dir for all reth files and subdirectories.
+    #[arg(long, verbatim_doc_comment)]
+    secondary_datadir: PlatformPath<DataDirPath>,
+
+    /// Arguments for the second database
+    #[clap(flatten)]
+    second_db: DatabaseArgs,
+
+    /// The table name to diff. If not specified, all tables are diffed.
+    #[arg(long, verbatim_doc_comment)]
+    table: Option<Tables>,
+
+    /// The output directory for the diff report.
+    #[arg(long, verbatim_doc_comment)]
+    output: PlatformPath<PathBuf>,
+}
+
+impl Command {
+    /// Execute the `db diff` command.
+    ///
+    /// This first opens the `db/` folder from the secondary datadir, where the second database is
+    /// opened read-only.
+    ///
+    /// The tool will then iterate through all key-value pairs for the primary and secondary
+    /// databases. The value for each key will be compared with its corresponding value in the
+    /// other database. If the values are different, a discrepancy will be recorded in-memory. If
+    /// one key is present in one database but not the other, this will be recorded as an "extra
+    /// element" for that database.
+    ///
+    /// The discrepancies and extra elements, along with a brief summary of the diff results are
+    /// then written to a file in the output directory.
+    pub fn execute(self, tool: &DbTool<'_, DatabaseEnvRO>) -> eyre::Result<()> {
+        // open second db
+        let second_db_path: PathBuf = self.secondary_datadir.join("db").into();
+        let second_db = open_db_read_only(&second_db_path, self.second_db.log_level)?;
+
+        let tables = match self.table {
+            Some(table) => vec![table],
+            None => Tables::ALL.to_vec(),
+        };
+
+        for table in tables {
+            let primary_tx = tool.db.tx()?;
+            let secondary_tx = second_db.tx()?;
+
+            let output_dir = self.output.clone();
+            match table {
+                Tables::CanonicalHeaders => {
+                    find_diffs::<CanonicalHeaders>(primary_tx, secondary_tx, output_dir)?
+                }
+                Tables::HeaderTD => find_diffs::<HeaderTD>(primary_tx, secondary_tx, output_dir)?,
+                Tables::HeaderNumbers => {
+                    find_diffs::<HeaderNumbers>(primary_tx, secondary_tx, output_dir)?
+                }
+                Tables::Headers => find_diffs::<Headers>(primary_tx, secondary_tx, output_dir)?,
+                Tables::BlockBodyIndices => {
+                    find_diffs::<BlockBodyIndices>(primary_tx, secondary_tx, output_dir)?
+                }
+                Tables::BlockOmmers => {
+                    find_diffs::<BlockOmmers>(primary_tx, secondary_tx, output_dir)?
+                }
+                Tables::BlockWithdrawals => {
+                    find_diffs::<BlockWithdrawals>(primary_tx, secondary_tx, output_dir)?
+                }
+                Tables::TransactionBlock => {
+                    find_diffs::<TransactionBlock>(primary_tx, secondary_tx, output_dir)?
+                }
+                Tables::Transactions => {
+                    find_diffs::<Transactions>(primary_tx, secondary_tx, output_dir)?
+                }
+                Tables::TxHashNumber => {
+                    find_diffs::<TxHashNumber>(primary_tx, secondary_tx, output_dir)?
+                }
+                Tables::Receipts => find_diffs::<Receipts>(primary_tx, secondary_tx, output_dir)?,
+                Tables::PlainAccountState => {
+                    find_diffs::<PlainAccountState>(primary_tx, secondary_tx, output_dir)?
+                }
+                Tables::PlainStorageState => {
+                    find_diffs::<PlainStorageState>(primary_tx, secondary_tx, output_dir)?
+                }
+                Tables::Bytecodes => find_diffs::<Bytecodes>(primary_tx, secondary_tx, output_dir)?,
+                Tables::AccountHistory => {
+                    find_diffs::<AccountHistory>(primary_tx, secondary_tx, output_dir)?
+                }
+                Tables::StorageHistory => {
+                    find_diffs::<StorageHistory>(primary_tx, secondary_tx, output_dir)?
+                }
+                Tables::AccountChangeSet => {
+                    find_diffs::<AccountChangeSet>(primary_tx, secondary_tx, output_dir)?
+                }
+                Tables::StorageChangeSet => {
+                    find_diffs::<StorageChangeSet>(primary_tx, secondary_tx, output_dir)?
+                }
+                Tables::HashedAccount => {
+                    find_diffs::<HashedAccount>(primary_tx, secondary_tx, output_dir)?
+                }
+                Tables::HashedStorage => {
+                    find_diffs::<HashedStorage>(primary_tx, secondary_tx, output_dir)?
+                }
+                Tables::AccountsTrie => {
+                    find_diffs::<AccountsTrie>(primary_tx, secondary_tx, output_dir)?
+                }
+                Tables::StoragesTrie => {
+                    find_diffs::<StoragesTrie>(primary_tx, secondary_tx, output_dir)?
+                }
+                Tables::TxSenders => find_diffs::<TxSenders>(primary_tx, secondary_tx, output_dir)?,
+                Tables::SyncStage => find_diffs::<SyncStage>(primary_tx, secondary_tx, output_dir)?,
+                Tables::SyncStageProgress => {
+                    find_diffs::<SyncStageProgress>(primary_tx, secondary_tx, output_dir)?
+                }
+                Tables::PruneCheckpoints => {
+                    find_diffs::<PruneCheckpoints>(primary_tx, secondary_tx, output_dir)?
+ } + }; + } + + Ok(()) + } +} + +/// Find diffs for a table, then analyzing the result +fn find_diffs<'a, T: Table>( + primary_tx: impl DbTx<'a>, + secondary_tx: impl DbTx<'a>, + output_dir: impl AsRef, +) -> eyre::Result<()> +where + T::Key: Hash, + T::Value: PartialEq, +{ + let table_name = T::NAME; + + info!("Analyzing table {table_name}..."); + let result = find_diffs_advanced::(&primary_tx, &secondary_tx)?; + info!("Done analyzing table {table_name}!"); + + // Pretty info summary header: newline then header + info!(""); + info!("Diff results for {table_name}:"); + + // create directory and open file + fs::create_dir_all(output_dir.as_ref())?; + let file_name = format!("{table_name}.txt"); + let mut file = File::create(output_dir.as_ref().join(file_name.clone()))?; + + // analyze the result and print some stats + let discrepancies = result.discrepancies.len(); + let extra_elements = result.extra_elements.len(); + + // Make a pretty summary header for the table + writeln!(file, "Diff results for {table_name}")?; + + if discrepancies > 0 { + // write to file + writeln!(file, "Found {discrepancies} discrepancies in table {table_name}")?; + + // also print to info + info!("Found {discrepancies} discrepancies in table {table_name}"); + } else { + // write to file + writeln!(file, "No discrepancies found in table {table_name}")?; + + // also print to info + info!("No discrepancies found in table {table_name}"); + } + + if extra_elements > 0 { + // write to file + writeln!(file, "Found {extra_elements} extra elements in table {table_name}")?; + + // also print to info + info!("Found {extra_elements} extra elements in table {table_name}"); + } else { + writeln!(file, "No extra elements found in table {table_name}")?; + + // also print to info + info!("No extra elements found in table {table_name}"); + } + + info!("Writing diff results for {table_name} to {file_name}..."); + + if discrepancies > 0 { + writeln!(file, "Discrepancies:")?; + } + + for discrepancy in result.discrepancies.values() { + writeln!(file, "{discrepancy:?}")?; + } + + if extra_elements > 0 { + writeln!(file, "Extra elements:")?; + } + + for extra_element in result.extra_elements.values() { + writeln!(file, "{extra_element:?}")?; + } + + let full_file_name = output_dir.as_ref().join(file_name); + info!("Done writing diff results for {table_name} to {}", full_file_name.display()); + Ok(()) +} + +/// This diff algorithm is slightly different, it will walk _each_ table, cross-checking for the +/// element in the other table. +fn find_diffs_advanced<'a, T: Table>( + primary_tx: &impl DbTx<'a>, + secondary_tx: &impl DbTx<'a>, +) -> eyre::Result> +where + T::Value: PartialEq, + T::Key: Hash, +{ + // initialize the zipped walker + let mut primary_zip_cursor = + primary_tx.cursor_read::().expect("Was not able to obtain a cursor."); + let primary_walker = primary_zip_cursor.walk(None)?; + + let mut secondary_zip_cursor = + secondary_tx.cursor_read::().expect("Was not able to obtain a cursor."); + let secondary_walker = secondary_zip_cursor.walk(None)?; + let zipped_cursor = primary_walker.zip(secondary_walker); + + // initialize the cursors for seeking when we are cross checking elements + let mut primary_cursor = + primary_tx.cursor_read::().expect("Was not able to obtain a cursor."); + + let mut secondary_cursor = + secondary_tx.cursor_read::().expect("Was not able to obtain a cursor."); + + let mut result = TableDiffResult::::default(); + + // this loop will walk both tables, cross-checking for the element in the other table. 
+ // it basically just loops through both tables at the same time. if the keys are different, it + // will check each key in the other table. if the keys are the same, it will compare the + // values + for (primary_entry, secondary_entry) in zipped_cursor { + let (primary_key, primary_value) = primary_entry?; + let (secondary_key, secondary_value) = secondary_entry?; + + if primary_key != secondary_key { + // if the keys are different, we need to check if the key is in the other table + let crossed_secondary = + secondary_cursor.seek_exact(primary_key.clone())?.map(|(_, value)| value); + result.try_push_discrepancy( + primary_key.clone(), + Some(primary_value), + crossed_secondary, + ); + + // now do the same for the primary table + let crossed_primary = + primary_cursor.seek_exact(secondary_key.clone())?.map(|(_, value)| value); + result.try_push_discrepancy( + secondary_key.clone(), + crossed_primary, + Some(secondary_value), + ); + } else { + // the keys are the same, so we need to compare the values + result.try_push_discrepancy(primary_key, Some(primary_value), Some(secondary_value)); + } + } + + Ok(result) +} + +/// Includes a table element between two databases with the same key, but different values +#[derive(Debug)] +struct TableDiffElement { + /// The key for the element + key: T::Key, + + /// The element from the first table + #[allow(dead_code)] + first: T::Value, + + /// The element from the second table + #[allow(dead_code)] + second: T::Value, +} + +/// The diff result for an entire table. If the tables had the same number of elements, there will +/// be no extra elements. +struct TableDiffResult +where + T::Key: Hash, +{ + /// All elements of the database that are different + discrepancies: HashMap>, + + /// Any extra elements, and the table they are in + extra_elements: HashMap>, +} + +impl Default for TableDiffResult +where + T: Table, + T::Key: Hash, +{ + fn default() -> Self { + Self { discrepancies: HashMap::new(), extra_elements: HashMap::new() } + } +} + +impl TableDiffResult +where + T::Key: Hash, +{ + /// Push a diff result into the discrepancies set. + fn push_discrepancy(&mut self, discrepancy: TableDiffElement) { + self.discrepancies.insert(discrepancy.key.clone(), discrepancy); + } + + /// Push an extra element into the extra elements set. + fn push_extra_element(&mut self, element: ExtraTableElement) { + self.extra_elements.insert(element.key().clone(), element); + } +} + +impl TableDiffResult +where + T: Table, + T::Key: Hash, + T::Value: PartialEq, +{ + /// Try to push a diff result into the discrepancy set, only pushing if the given elements are + /// different, and the discrepancy does not exist anywhere already. 
+ fn try_push_discrepancy( + &mut self, + key: T::Key, + first: Option, + second: Option, + ) { + // do not bother comparing if the key is already in the discrepancies map + if self.discrepancies.contains_key(&key) { + return + } + + // do not bother comparing if the key is already in the extra elements map + if self.extra_elements.contains_key(&key) { + return + } + + match (first, second) { + (Some(first), Some(second)) => { + if first != second { + self.push_discrepancy(TableDiffElement { key, first, second }); + } + } + (Some(first), None) => { + self.push_extra_element(ExtraTableElement::First { key, value: first }); + } + (None, Some(second)) => { + self.push_extra_element(ExtraTableElement::Second { key, value: second }); + } + (None, None) => {} + } + } +} + +/// A single extra element from a table +#[derive(Debug)] +enum ExtraTableElement { + /// The extra element that is in the first table + #[allow(dead_code)] + First { key: T::Key, value: T::Value }, + + /// The extra element that is in the second table + #[allow(dead_code)] + Second { key: T::Key, value: T::Value }, +} + +impl ExtraTableElement { + /// Return the key for the extra element + fn key(&self) -> &T::Key { + match self { + Self::First { key, .. } => key, + Self::Second { key, .. } => key, + } + } +} diff --git a/bin/reth/src/db/mod.rs b/bin/reth/src/db/mod.rs index 04b51a15462d..a56ff9d9a1ff 100644 --- a/bin/reth/src/db/mod.rs +++ b/bin/reth/src/db/mod.rs @@ -18,6 +18,7 @@ use reth_primitives::ChainSpec; use std::sync::Arc; mod clear; +mod diff; mod get; mod list; /// DB List TUI @@ -68,6 +69,8 @@ pub enum Subcommands { Stats, /// Lists the contents of a table List(list::Command), + /// Create a diff between two database tables or two entire databases. + Diff(diff::Command), /// Gets the content of a table for the given key Get(get::Command), /// Deletes all database entries @@ -165,6 +168,11 @@ impl Command { let tool = DbTool::new(&db, self.chain.clone())?; command.execute(&tool)?; } + Subcommands::Diff(command) => { + let db = open_db_read_only(&db_path, self.db.log_level)?; + let tool = DbTool::new(&db, self.chain.clone())?; + command.execute(&tool)?; + } Subcommands::Get(command) => { let db = open_db_read_only(&db_path, self.db.log_level)?; let tool = DbTool::new(&db, self.chain.clone())?; diff --git a/crates/primitives/src/prune/part.rs b/crates/primitives/src/prune/part.rs index caa176b86a28..f47ea03d1b9f 100644 --- a/crates/primitives/src/prune/part.rs +++ b/crates/primitives/src/prune/part.rs @@ -2,7 +2,7 @@ use reth_codecs::{main_codec, Compact}; /// Part of the data that can be pruned. #[main_codec] -#[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd)] +#[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)] pub enum PrunePart { /// Prune part responsible for the `TxSenders` table. SenderRecovery, diff --git a/crates/storage/db/src/tables/models/accounts.rs b/crates/storage/db/src/tables/models/accounts.rs index c1a50e95bc11..c82b4740901c 100644 --- a/crates/storage/db/src/tables/models/accounts.rs +++ b/crates/storage/db/src/tables/models/accounts.rs @@ -64,7 +64,9 @@ impl Compact for AccountBeforeTx { /// [`StorageChangeSet`](crate::tables::StorageChangeSet) /// /// Since it's used as a key, it isn't compressed when encoding it. 
-#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Ord, PartialOrd)] +#[derive( + Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Ord, PartialOrd, Hash, +)] pub struct BlockNumberAddress(pub (BlockNumber, Address)); impl BlockNumberAddress { diff --git a/crates/storage/db/src/tables/models/sharded_key.rs b/crates/storage/db/src/tables/models/sharded_key.rs index a38c3af3a315..5dedd349eb67 100644 --- a/crates/storage/db/src/tables/models/sharded_key.rs +++ b/crates/storage/db/src/tables/models/sharded_key.rs @@ -1,5 +1,7 @@ //! Sharded key +use std::hash::Hash; + use crate::{ table::{Decode, Encode}, DatabaseError, @@ -74,3 +76,13 @@ where Ok(ShardedKey::new(key, highest_tx_number)) } } + +impl Hash for ShardedKey +where + T: Hash, +{ + fn hash(&self, state: &mut H) { + self.key.hash(state); + self.highest_block_number.hash(state); + } +} diff --git a/crates/storage/db/src/tables/models/storage_sharded_key.rs b/crates/storage/db/src/tables/models/storage_sharded_key.rs index 984933d1f172..15e6736599fd 100644 --- a/crates/storage/db/src/tables/models/storage_sharded_key.rs +++ b/crates/storage/db/src/tables/models/storage_sharded_key.rs @@ -19,7 +19,9 @@ pub const NUM_OF_INDICES_IN_SHARD: usize = 2_000; /// `Address | Storagekey | 200` -> data is from transition 0 to 200. /// /// `Address | StorageKey | 300` -> data is from transition 201 to 300. -#[derive(Debug, Default, Clone, Eq, Ord, PartialOrd, PartialEq, AsRef, Serialize, Deserialize)] +#[derive( + Debug, Default, Clone, Eq, Ord, PartialOrd, PartialEq, AsRef, Serialize, Deserialize, Hash, +)] pub struct StorageShardedKey { /// Storage account address. pub address: H160, From 134fe81efb19bca0ec56fbe8bae3eca10b5e5287 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 31 Jul 2023 15:36:03 +0100 Subject: [PATCH 301/722] feat(pruning): prune ChangeSets & History during pipeline (#3728) --- bin/reth/src/node/mod.rs | 7 +- bin/reth/src/stage/dump/merkle.rs | 25 ++- bin/reth/src/stage/run.rs | 22 ++- crates/primitives/src/lib.rs | 2 +- crates/primitives/src/prune/mod.rs | 2 +- crates/primitives/src/prune/part.rs | 12 +- crates/primitives/src/prune/target.rs | 40 ++-- crates/prune/src/error.rs | 3 + crates/prune/src/pruner.rs | 4 +- crates/stages/benches/criterion.rs | 4 +- crates/stages/src/error.rs | 3 + crates/stages/src/stages/execution.rs | 93 +-------- crates/stages/src/stages/hashing_account.rs | 26 ++- crates/stages/src/stages/hashing_storage.rs | 33 +++- .../src/stages/index_account_history.rs | 30 ++- .../src/stages/index_storage_history.rs | 30 ++- crates/stages/src/stages/merkle.rs | 56 ++++-- crates/stages/src/stages/mod.rs | 185 ++++++++++++++++++ crates/storage/provider/src/post_state/mod.rs | 21 +- 19 files changed, 434 insertions(+), 164 deletions(-) diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 98fa3b113064..ab6bcb623e26 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -760,12 +760,17 @@ impl Command { .set(AccountHashingStage::new( stage_config.account_hashing.clean_threshold, stage_config.account_hashing.commit_threshold, + config.prune.map(|prune| prune.parts).unwrap_or_default(), )) .set(StorageHashingStage::new( stage_config.storage_hashing.clean_threshold, stage_config.storage_hashing.commit_threshold, + config.prune.map(|prune| prune.parts).unwrap_or_default(), + )) + .set(MerkleStage::new_execution( + stage_config.merkle.clean_threshold, + config.prune.map(|prune| 
prune.parts).unwrap_or_default(), )) - .set(MerkleStage::new_execution(stage_config.merkle.clean_threshold)) .set(TransactionLookupStage::new(stage_config.transaction_lookup.commit_threshold)) .set(IndexAccountHistoryStage::new( stage_config.index_account_history.commit_threshold, diff --git a/bin/reth/src/stage/dump/merkle.rs b/bin/reth/src/stage/dump/merkle.rs index 69b39234bd10..b54168768787 100644 --- a/bin/reth/src/stage/dump/merkle.rs +++ b/bin/reth/src/stage/dump/merkle.rs @@ -86,14 +86,22 @@ async fn unwind_and_copy( // Bring hashes to TO - AccountHashingStage { clean_threshold: u64::MAX, commit_threshold: u64::MAX } - .execute(&provider, execute_input) - .await - .unwrap(); - StorageHashingStage { clean_threshold: u64::MAX, commit_threshold: u64::MAX } - .execute(&provider, execute_input) - .await - .unwrap(); + AccountHashingStage { + clean_threshold: u64::MAX, + commit_threshold: u64::MAX, + prune_modes: PruneModes::none(), + } + .execute(&provider, execute_input) + .await + .unwrap(); + StorageHashingStage { + clean_threshold: u64::MAX, + commit_threshold: u64::MAX, + prune_modes: PruneModes::none(), + } + .execute(&provider, execute_input) + .await + .unwrap(); let unwind_inner_tx = provider.into_tx(); @@ -124,6 +132,7 @@ async fn dry_run( clean_threshold: u64::MAX, /* Forces updating the root instead of calculating * from * scratch */ + prune_modes: Default::default(), } .execute( &provider, diff --git a/bin/reth/src/stage/run.rs b/bin/reth/src/stage/run.rs index f53412c3dbbf..9953bfc8a0e6 100644 --- a/bin/reth/src/stage/run.rs +++ b/bin/reth/src/stage/run.rs @@ -208,12 +208,22 @@ impl Command { ) } StageEnum::TxLookup => (Box::new(TransactionLookupStage::new(batch_size)), None), - StageEnum::AccountHashing => { - (Box::new(AccountHashingStage::new(1, batch_size)), None) - } - StageEnum::StorageHashing => { - (Box::new(StorageHashingStage::new(1, batch_size)), None) - } + StageEnum::AccountHashing => ( + Box::new(AccountHashingStage::new( + 1, + batch_size, + config.prune.map(|prune| prune.parts).unwrap_or_default(), + )), + None, + ), + StageEnum::StorageHashing => ( + Box::new(StorageHashingStage::new( + 1, + batch_size, + config.prune.map(|prune| prune.parts).unwrap_or_default(), + )), + None, + ), StageEnum::Merkle => ( Box::new(MerkleStage::default_execution()), Some(Box::new(MerkleStage::default_unwind())), diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 8e82bdd70730..249073804f1f 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -79,7 +79,7 @@ pub use net::{ SEPOLIA_BOOTNODES, }; pub use peer::{PeerId, WithPeerId}; -pub use prune::{PruneCheckpoint, PruneMode, PruneModes, PrunePart}; +pub use prune::{PruneCheckpoint, PruneMode, PruneModes, PrunePart, PrunePartError}; pub use receipt::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef}; pub use revm_primitives::JumpMap; pub use serde_helper::JsonU256; diff --git a/crates/primitives/src/prune/mod.rs b/crates/primitives/src/prune/mod.rs index 4dfc591bfcc0..a3bcb959627e 100644 --- a/crates/primitives/src/prune/mod.rs +++ b/crates/primitives/src/prune/mod.rs @@ -5,5 +5,5 @@ mod target; pub use checkpoint::PruneCheckpoint; pub use mode::PruneMode; -pub use part::PrunePart; +pub use part::{PrunePart, PrunePartError}; pub use target::PruneModes; diff --git a/crates/primitives/src/prune/part.rs b/crates/primitives/src/prune/part.rs index f47ea03d1b9f..db49870735a7 100644 --- a/crates/primitives/src/prune/part.rs +++ b/crates/primitives/src/prune/part.rs @@ -1,8 +1,10 @@ 
+use derive_more::Display;
 use reth_codecs::{main_codec, Compact};
+use thiserror::Error;

 /// Part of the data that can be pruned.
 #[main_codec]
-#[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)]
+#[derive(Debug, Display, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)]
 pub enum PrunePart {
     /// Prune part responsible for the `TxSenders` table.
     SenderRecovery,
@@ -16,6 +18,14 @@ pub enum PrunePart {
     StorageHistory,
 }

+/// PrunePart error type.
+#[derive(Debug, Error)]
+pub enum PrunePartError {
+    /// Invalid configuration of a prune part.
+    #[error("The configuration provided for {0} is invalid.")]
+    Configuration(PrunePart),
+}
+
 #[cfg(test)]
 impl Default for PrunePart {
     fn default() -> Self {
diff --git a/crates/primitives/src/prune/target.rs b/crates/primitives/src/prune/target.rs
index b314ae9d0382..af6781897c26 100644
--- a/crates/primitives/src/prune/target.rs
+++ b/crates/primitives/src/prune/target.rs
@@ -1,4 +1,7 @@
-use crate::{serde_helper::deserialize_opt_prune_mode_with_min_blocks, BlockNumber, PruneMode};
+use crate::{
+    prune::PrunePartError, serde_helper::deserialize_opt_prune_mode_with_min_blocks, BlockNumber,
+    PruneMode, PrunePart,
+};
 use paste::paste;
 use serde::{Deserialize, Serialize};

@@ -19,10 +22,16 @@ pub struct PruneModes {
     )]
     pub receipts: Option<PruneMode>,
     /// Account History pruning configuration.
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(
+        skip_serializing_if = "Option::is_none",
+        deserialize_with = "deserialize_opt_prune_mode_with_min_blocks::<64, _>"
+    )]
     pub account_history: Option<PruneMode>,
     /// Storage History pruning configuration.
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(
+        skip_serializing_if = "Option::is_none",
+        deserialize_with = "deserialize_opt_prune_mode_with_min_blocks::<64, _>"
+    )]
     pub storage_history: Option<PruneMode>,
 }

@@ -51,12 +60,15 @@ macro_rules! impl_prune_parts {
                     $human_part,
                     " pruning needs to be done, inclusive, according to the provided tip."
                )]
-            pub fn [<prune_target_block_ $part>](&self, tip: BlockNumber) -> Option<(BlockNumber, PruneMode)> {
-                self.$part.as_ref().and_then(|mode| {
-                    self.prune_target_block(mode, tip, $min_blocks).map(|block| {
-                        (block, *mode)
-                    })
-                })
+            pub fn [<prune_target_block_ $part>](&self, tip: BlockNumber) -> Result<Option<(BlockNumber, PruneMode)>, PrunePartError> {
+                match &self.$part {
+                    Some(mode) =>
+                        match self.prune_target_block(mode, tip, $min_blocks) {
+                            Some(block) => Ok(Some((block, *mode))),
+                            None => Err(PrunePartError::Configuration(PrunePart::[<$human_part>]))
+                        }
+                    None => Ok(None)
+                }
             }
         }
     )+
@@ -107,17 +119,17 @@ impl PruneModes {
                 Some(tip.saturating_sub(*distance))
             }
             PruneMode::Before(n) if tip.saturating_sub(*n) >= min_blocks.unwrap_or_default() => {
-                Some(*n)
+                Some(n.saturating_sub(1))
             }
             _ => None,
         }
     }

     impl_prune_parts!(
-        (sender_recovery, "Sender Recovery", None),
-        (transaction_lookup, "Transaction Lookup", None),
+        (sender_recovery, "SenderRecovery", None),
+        (transaction_lookup, "TransactionLookup", None),
         (receipts, "Receipts", Some(64)),
-        (account_history, "Account History", None),
-        (storage_history, "Storage History", None)
+        (account_history, "AccountHistory", Some(64)),
+        (storage_history, "StorageHistory", Some(64))
     );
 }
diff --git a/crates/prune/src/error.rs b/crates/prune/src/error.rs
index fdc0af4484a8..1a31a0394224 100644
--- a/crates/prune/src/error.rs
+++ b/crates/prune/src/error.rs
@@ -4,6 +4,9 @@ use thiserror::Error;

 #[derive(Error, Debug)]
 pub enum PrunerError {
+    #[error(transparent)]
+    PrunePart(#[from] reth_primitives::PrunePartError),
+
     #[error("Inconsistent data: {0}")]
     InconsistentData(&'static str),

diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs
index 96f41c9ae213..3dae8bcb04f2 100644
--- a/crates/prune/src/pruner.rs
+++ b/crates/prune/src/pruner.rs
@@ -72,13 +72,13 @@ impl Pruner {
         let provider = self.provider_factory.provider_rw()?;

         if let Some((to_block, prune_mode)) =
-            self.modes.prune_target_block_receipts(tip_block_number)
+            self.modes.prune_target_block_receipts(tip_block_number)?
         {
             self.prune_receipts(&provider, to_block, prune_mode)?;
         }

         if let Some((to_block, prune_mode)) =
-            self.modes.prune_target_block_transaction_lookup(tip_block_number)
+            self.modes.prune_target_block_transaction_lookup(tip_block_number)?
{ self.prune_transaction_lookup(&provider, to_block, prune_mode)?; } diff --git a/crates/stages/benches/criterion.rs b/crates/stages/benches/criterion.rs index 8fce2e37035e..d9b079d29847 100644 --- a/crates/stages/benches/criterion.rs +++ b/crates/stages/benches/criterion.rs @@ -95,7 +95,7 @@ fn merkle(c: &mut Criterion) { // don't need to run each stage for that many times group.sample_size(10); - let stage = MerkleStage::Both { clean_threshold: u64::MAX }; + let stage = MerkleStage::Both { clean_threshold: u64::MAX, prune_modes: Default::default() }; measure_stage( &mut group, setup::unwind_hashes, @@ -104,7 +104,7 @@ fn merkle(c: &mut Criterion) { "Merkle-incremental".to_string(), ); - let stage = MerkleStage::Both { clean_threshold: 0 }; + let stage = MerkleStage::Both { clean_threshold: 0, prune_modes: Default::default() }; measure_stage( &mut group, setup::unwind_hashes, diff --git a/crates/stages/src/error.rs b/crates/stages/src/error.rs index 20310111ca9a..b5158f3e155e 100644 --- a/crates/stages/src/error.rs +++ b/crates/stages/src/error.rs @@ -49,6 +49,9 @@ pub enum StageError { #[source] error: executor::BlockExecutionError, }, + /// Invalid pruning configuration + #[error(transparent)] + PruningConfiguration(#[from] reth_primitives::PrunePartError), /// Invalid checkpoint passed to the stage #[error("Invalid stage checkpoint: {0}")] StageCheckpoint(u64), diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 366e10e7c3c0..fc8e789a1f05 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -60,7 +60,7 @@ pub struct ExecutionStage { /// The commit thresholds of the execution stage. thresholds: ExecutionStageThresholds, /// Pruning configuration. - prune_targets: PruneModes, + prune_modes: PruneModes, } impl ExecutionStage { @@ -68,9 +68,9 @@ impl ExecutionStage { pub fn new( executor_factory: EF, thresholds: ExecutionStageThresholds, - prune_targets: PruneModes, + prune_modes: PruneModes, ) -> Self { - Self { metrics_tx: None, executor_factory, thresholds, prune_targets } + Self { metrics_tx: None, executor_factory, thresholds, prune_modes } } /// Create an execution stage with the provided executor factory. @@ -110,7 +110,7 @@ impl ExecutionStage { // Execute block range let mut state = PostState::default(); - state.add_prune_targets(self.prune_targets); + state.add_prune_modes(self.prune_modes); for block_number in start_block..=max_block { let td = provider @@ -425,8 +425,7 @@ mod tests { use reth_db::{models::AccountBeforeTx, test_utils::create_test_rw_db}; use reth_primitives::{ hex_literal::hex, keccak256, stage::StageUnitCheckpoint, Account, Bytecode, - ChainSpecBuilder, PruneMode, PruneModes, SealedBlock, StorageEntry, H160, H256, MAINNET, - U256, + ChainSpecBuilder, PruneModes, SealedBlock, StorageEntry, H160, H256, MAINNET, U256, }; use reth_provider::{AccountReader, BlockWriter, ProviderFactory, ReceiptProvider}; use reth_revm::Factory; @@ -894,86 +893,4 @@ mod tests { ] ); } - - #[tokio::test] - async fn test_prune() { - let test_tx = TestTransaction::default(); - let factory = Arc::new(ProviderFactory::new(test_tx.tx.as_ref(), MAINNET.clone())); - - let provider = factory.provider_rw().unwrap(); - let input = ExecInput { - target: Some(1), - /// The progress of this stage the last time it was executed. 
- checkpoint: None, - }; - let mut genesis_rlp = hex!("f901faf901f5a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa045571b40ae66ca7480791bbb2887286e4e4c4b1b298b191c889d6959023a32eda056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808502540be400808000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0").as_slice(); - let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); - let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); - let block = SealedBlock::decode(&mut block_rlp).unwrap(); - provider.insert_block(genesis, None).unwrap(); - provider.insert_block(block.clone(), None).unwrap(); - provider.commit().unwrap(); - - // insert pre state - let provider = factory.provider_rw().unwrap(); - let code = hex!("5a465a905090036002900360015500"); - let code_hash = keccak256(hex!("5a465a905090036002900360015500")); - provider - .tx_ref() - .put::( - H160(hex!("1000000000000000000000000000000000000000")), - Account { nonce: 0, balance: U256::ZERO, bytecode_hash: Some(code_hash) }, - ) - .unwrap(); - provider - .tx_ref() - .put::( - H160(hex!("a94f5374fce5edbc8e2a8697c15331677e6ebf0b")), - Account { - nonce: 0, - balance: U256::from(0x3635c9adc5dea00000u128), - bytecode_hash: None, - }, - ) - .unwrap(); - provider - .tx_ref() - .put::(code_hash, Bytecode::new_raw(code.to_vec().into())) - .unwrap(); - provider.commit().unwrap(); - - let check_pruning = |factory: Arc>, - prune_targets: PruneModes, - expect_num_receipts: usize| async move { - let provider = factory.provider_rw().unwrap(); - - let mut execution_stage = ExecutionStage::new( - 
Factory::new(Arc::new(ChainSpecBuilder::mainnet().berlin_activated().build())), - ExecutionStageThresholds { max_blocks: Some(100), max_changes: None }, - prune_targets, - ); - - execution_stage.execute(&provider, input).await.unwrap(); - assert_eq!( - provider.receipts_by_block(1.into()).unwrap().unwrap().len(), - expect_num_receipts - ); - }; - - let mut prune = PruneModes::none(); - - check_pruning(factory.clone(), prune, 1).await; - - prune.receipts = Some(PruneMode::Full); - check_pruning(factory.clone(), prune, 0).await; - - prune.receipts = Some(PruneMode::Before(1)); - check_pruning(factory.clone(), prune, 1).await; - - prune.receipts = Some(PruneMode::Before(2)); - check_pruning(factory.clone(), prune, 0).await; - - prune.receipts = Some(PruneMode::Distance(0)); - check_pruning(factory.clone(), prune, 1).await; - } } diff --git a/crates/stages/src/stages/hashing_account.rs b/crates/stages/src/stages/hashing_account.rs index ccb6fb960fd0..7eed29bd33db 100644 --- a/crates/stages/src/stages/hashing_account.rs +++ b/crates/stages/src/stages/hashing_account.rs @@ -15,6 +15,7 @@ use reth_primitives::{ AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, StageCheckpoint, StageId, }, + PruneModes, }; use reth_provider::{AccountExtReader, DatabaseProviderRW, HashingWriter}; use std::{ @@ -34,18 +35,25 @@ pub struct AccountHashingStage { pub clean_threshold: u64, /// The maximum number of accounts to process before committing. pub commit_threshold: u64, + /// Prune mode configuration. Required to know if we can actually make an incremental + /// update based on how many changesets exist. + pub prune_modes: PruneModes, } impl AccountHashingStage { /// Create new instance of [AccountHashingStage]. - pub fn new(clean_threshold: u64, commit_threshold: u64) -> Self { - Self { clean_threshold, commit_threshold } + pub fn new(clean_threshold: u64, commit_threshold: u64, prune_modes: PruneModes) -> Self { + Self { clean_threshold, commit_threshold, prune_modes } } } impl Default for AccountHashingStage { fn default() -> Self { - Self { clean_threshold: 500_000, commit_threshold: 100_000 } + Self { + clean_threshold: 500_000, + commit_threshold: 100_000, + prune_modes: PruneModes::default(), + } } } @@ -143,12 +151,19 @@ impl Stage for AccountHashingStage { } let (from_block, to_block) = input.next_block_range().into_inner(); + let has_enough_changesets = self + .prune_modes + .prune_target_block_account_history(to_block)? + .map(|(block_number, _)| block_number) + .unwrap_or_default() < + from_block; // if there are more blocks then threshold it is faster to go over Plain state and hash all // account otherwise take changesets aggregate the sets and apply hashing to // AccountHashing table. Also, if we start from genesis, we need to hash from scratch, as // genesis accounts are not in changeset. 
- if to_block - from_block > self.clean_threshold || from_block == 1 { + if to_block - from_block > self.clean_threshold || from_block == 1 || !has_enough_changesets + { let tx = provider.tx_ref(); let stage_checkpoint = input .checkpoint @@ -448,6 +463,7 @@ mod tests { pub(crate) tx: TestTransaction, commit_threshold: u64, clean_threshold: u64, + prune_modes: PruneModes, } impl AccountHashingTestRunner { @@ -511,6 +527,7 @@ mod tests { tx: TestTransaction::default(), commit_threshold: 1000, clean_threshold: 1000, + prune_modes: PruneModes::default(), } } } @@ -526,6 +543,7 @@ mod tests { Self::S { commit_threshold: self.commit_threshold, clean_threshold: self.clean_threshold, + prune_modes: self.prune_modes, } } } diff --git a/crates/stages/src/stages/hashing_storage.rs b/crates/stages/src/stages/hashing_storage.rs index 040b6375bd02..c6a65b3d80cb 100644 --- a/crates/stages/src/stages/hashing_storage.rs +++ b/crates/stages/src/stages/hashing_storage.rs @@ -14,7 +14,7 @@ use reth_primitives::{ CheckpointBlockRange, EntitiesCheckpoint, StageCheckpoint, StageId, StorageHashingCheckpoint, }, - StorageEntry, + PruneModes, StorageEntry, }; use reth_provider::{DatabaseProviderRW, HashingWriter, StorageReader}; use std::{collections::BTreeMap, fmt::Debug}; @@ -29,18 +29,25 @@ pub struct StorageHashingStage { pub clean_threshold: u64, /// The maximum number of slots to process before committing. pub commit_threshold: u64, + /// Prune mode configuration. Required to know if we can actually make an incremental + /// update based on how many changesets exist. + pub prune_modes: PruneModes, } impl StorageHashingStage { /// Create new instance of [StorageHashingStage]. - pub fn new(clean_threshold: u64, commit_threshold: u64) -> Self { - Self { clean_threshold, commit_threshold } + pub fn new(clean_threshold: u64, commit_threshold: u64, prune_modes: PruneModes) -> Self { + Self { clean_threshold, commit_threshold, prune_modes } } } impl Default for StorageHashingStage { fn default() -> Self { - Self { clean_threshold: 500_000, commit_threshold: 100_000 } + Self { + clean_threshold: 500_000, + commit_threshold: 100_000, + prune_modes: PruneModes::default(), + } } } @@ -63,12 +70,19 @@ impl Stage for StorageHashingStage { } let (from_block, to_block) = input.next_block_range().into_inner(); + let has_enough_changesets = self + .prune_modes + .prune_target_block_storage_history(to_block)? + .map(|(block_number, _)| block_number) + .unwrap_or_default() < + from_block; // if there are more blocks then threshold it is faster to go over Plain state and hash all // account otherwise take changesets aggregate the sets and apply hashing to // AccountHashing table. Also, if we start from genesis, we need to hash from scratch, as // genesis accounts are not in changeset, along with their storages. 
- if to_block - from_block > self.clean_threshold || from_block == 1 { + if to_block - from_block > self.clean_threshold || from_block == 1 || !has_enough_changesets + { let stage_checkpoint = input .checkpoint .and_then(|checkpoint| checkpoint.storage_hashing_stage_checkpoint()); @@ -457,11 +471,17 @@ mod tests { tx: TestTransaction, commit_threshold: u64, clean_threshold: u64, + prune_modes: PruneModes, } impl Default for StorageHashingTestRunner { fn default() -> Self { - Self { tx: TestTransaction::default(), commit_threshold: 1000, clean_threshold: 1000 } + Self { + tx: TestTransaction::default(), + commit_threshold: 1000, + clean_threshold: 1000, + prune_modes: PruneModes::default(), + } } } @@ -476,6 +496,7 @@ mod tests { Self::S { commit_threshold: self.commit_threshold, clean_threshold: self.clean_threshold, + prune_modes: self.prune_modes, } } } diff --git a/crates/stages/src/stages/index_account_history.rs b/crates/stages/src/stages/index_account_history.rs index fe0b6d3b404c..d259ec3f8152 100644 --- a/crates/stages/src/stages/index_account_history.rs +++ b/crates/stages/src/stages/index_account_history.rs @@ -1,6 +1,9 @@ use crate::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use reth_db::database::Database; -use reth_primitives::stage::{StageCheckpoint, StageId}; +use reth_primitives::{ + stage::{StageCheckpoint, StageId}, + PruneModes, +}; use reth_provider::{AccountExtReader, DatabaseProviderRW, HistoryWriter}; use std::fmt::Debug; @@ -12,18 +15,20 @@ pub struct IndexAccountHistoryStage { /// Number of blocks after which the control /// flow will be returned to the pipeline for commit. pub commit_threshold: u64, + /// Pruning configuration. + pub prune_modes: PruneModes, } impl IndexAccountHistoryStage { /// Create new instance of [IndexAccountHistoryStage]. pub fn new(commit_threshold: u64) -> Self { - Self { commit_threshold } + Self { commit_threshold, prune_modes: PruneModes::default() } } } impl Default for IndexAccountHistoryStage { fn default() -> Self { - Self { commit_threshold: 100_000 } + Self { commit_threshold: 100_000, prune_modes: PruneModes::default() } } } @@ -38,8 +43,16 @@ impl Stage for IndexAccountHistoryStage { async fn execute( &mut self, provider: &DatabaseProviderRW<'_, &DB>, - input: ExecInput, + mut input: ExecInput, ) -> Result { + if let Some((target_prunable_block, _)) = + self.prune_modes.prune_target_block_account_history(input.target())? 
+ { + if target_prunable_block > input.checkpoint().block_number { + input.checkpoint = Some(StageCheckpoint::new(target_prunable_block)); + } + } + if input.target_reached() { return Ok(ExecOutput::done(input.checkpoint())) } @@ -372,11 +385,16 @@ mod tests { struct IndexAccountHistoryTestRunner { pub(crate) tx: TestTransaction, commit_threshold: u64, + prune_modes: PruneModes, } impl Default for IndexAccountHistoryTestRunner { fn default() -> Self { - Self { tx: TestTransaction::default(), commit_threshold: 1000 } + Self { + tx: TestTransaction::default(), + commit_threshold: 1000, + prune_modes: PruneModes::default(), + } } } @@ -388,7 +406,7 @@ mod tests { } fn stage(&self) -> Self::S { - Self::S { commit_threshold: self.commit_threshold } + Self::S { commit_threshold: self.commit_threshold, prune_modes: self.prune_modes } } } diff --git a/crates/stages/src/stages/index_storage_history.rs b/crates/stages/src/stages/index_storage_history.rs index a17c5f14e7c9..4759cd82c594 100644 --- a/crates/stages/src/stages/index_storage_history.rs +++ b/crates/stages/src/stages/index_storage_history.rs @@ -1,6 +1,9 @@ use crate::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use reth_db::{database::Database, models::BlockNumberAddress}; -use reth_primitives::stage::{StageCheckpoint, StageId}; +use reth_primitives::{ + stage::{StageCheckpoint, StageId}, + PruneModes, +}; use reth_provider::{DatabaseProviderRW, HistoryWriter, StorageReader}; use std::fmt::Debug; @@ -12,18 +15,20 @@ pub struct IndexStorageHistoryStage { /// Number of blocks after which the control /// flow will be returned to the pipeline for commit. pub commit_threshold: u64, + /// Pruning configuration. + pub prune_modes: PruneModes, } impl IndexStorageHistoryStage { /// Create new instance of [IndexStorageHistoryStage]. pub fn new(commit_threshold: u64) -> Self { - Self { commit_threshold } + Self { commit_threshold, prune_modes: PruneModes::default() } } } impl Default for IndexStorageHistoryStage { fn default() -> Self { - Self { commit_threshold: 100_000 } + Self { commit_threshold: 100_000, prune_modes: PruneModes::default() } } } @@ -38,8 +43,16 @@ impl Stage for IndexStorageHistoryStage { async fn execute( &mut self, provider: &DatabaseProviderRW<'_, &DB>, - input: ExecInput, + mut input: ExecInput, ) -> Result { + if let Some((target_prunable_block, _)) = + self.prune_modes.prune_target_block_storage_history(input.target())? 
+ { + if target_prunable_block > input.checkpoint().block_number { + input.checkpoint = Some(StageCheckpoint::new(target_prunable_block)); + } + } + if input.target_reached() { return Ok(ExecOutput::done(input.checkpoint())) } @@ -386,11 +399,16 @@ mod tests { struct IndexStorageHistoryTestRunner { pub(crate) tx: TestTransaction, commit_threshold: u64, + prune_modes: PruneModes, } impl Default for IndexStorageHistoryTestRunner { fn default() -> Self { - Self { tx: TestTransaction::default(), commit_threshold: 1000 } + Self { + tx: TestTransaction::default(), + commit_threshold: 1000, + prune_modes: PruneModes::default(), + } } } @@ -402,7 +420,7 @@ mod tests { } fn stage(&self) -> Self::S { - Self::S { commit_threshold: self.commit_threshold } + Self::S { commit_threshold: self.commit_threshold, prune_modes: self.prune_modes } } } diff --git a/crates/stages/src/stages/merkle.rs b/crates/stages/src/stages/merkle.rs index 1a95341d85e7..ffcc427b58ce 100644 --- a/crates/stages/src/stages/merkle.rs +++ b/crates/stages/src/stages/merkle.rs @@ -10,7 +10,7 @@ use reth_primitives::{ hex, stage::{EntitiesCheckpoint, MerkleCheckpoint, StageCheckpoint, StageId}, trie::StoredSubNode, - BlockNumber, SealedHeader, H256, + BlockNumber, PruneModes, SealedHeader, H256, }; use reth_provider::{ DatabaseProviderRW, HeaderProvider, ProviderError, StageCheckpointReader, StageCheckpointWriter, @@ -47,6 +47,9 @@ pub enum MerkleStage { /// The threshold (in number of blocks) for switching from incremental trie building /// of changes to whole rebuild. clean_threshold: u64, + /// Prune mode configuration. Required to know if we can actually make an incremental root + /// update based on how many changesets exist. + prune_modes: PruneModes, }, /// The unwind portion of the merkle stage. Unwind, @@ -54,13 +57,13 @@ pub enum MerkleStage { /// Able to execute and unwind. Used for tests #[cfg(any(test, feature = "test-utils"))] #[allow(missing_docs)] - Both { clean_threshold: u64 }, + Both { clean_threshold: u64, prune_modes: PruneModes }, } impl MerkleStage { /// Stage default for the [MerkleStage::Execution]. pub fn default_execution() -> Self { - Self::Execution { clean_threshold: 50_000 } + Self::Execution { clean_threshold: 50_000, prune_modes: PruneModes::default() } } /// Stage default for the [MerkleStage::Unwind]. @@ -69,8 +72,8 @@ impl MerkleStage { } /// Create new instance of [MerkleStage::Execution]. - pub fn new_execution(clean_threshold: u64) -> Self { - Self::Execution { clean_threshold } + pub fn new_execution(clean_threshold: u64, prune_modes: PruneModes) -> Self { + Self::Execution { clean_threshold, prune_modes } } /// Check that the computed state root matches the root in the expected header. @@ -128,6 +131,26 @@ impl MerkleStage { } Ok(provider.save_stage_checkpoint_progress(StageId::MerkleExecute, buf)?) } + + /// When pruning is enabled for account and storage history, we might not have all changesets + /// required for an incremental state root update on a pipeline re-run. + pub fn has_enough_changesets( + &self, + prune_modes: PruneModes, + from_block: BlockNumber, + to_block: BlockNumber, + ) -> Result { + Ok(prune_modes + .prune_target_block_account_history(to_block)? + .map(|(block_number, _)| block_number) + .unwrap_or_default() < + from_block && + prune_modes + .prune_target_block_storage_history(to_block)? 
+ .map(|(block_number, _)| block_number) + .unwrap_or_default() < + from_block) + } } #[async_trait::async_trait] @@ -148,14 +171,16 @@ impl Stage for MerkleStage { provider: &DatabaseProviderRW<'_, &DB>, input: ExecInput, ) -> Result { - let threshold = match self { + let (threshold, prune_modes) = match self { MerkleStage::Unwind => { info!(target: "sync::stages::merkle::unwind", "Stage is always skipped"); return Ok(ExecOutput::done(StageCheckpoint::new(input.target()))) } - MerkleStage::Execution { clean_threshold } => *clean_threshold, + MerkleStage::Execution { clean_threshold, prune_modes } => { + (*clean_threshold, *prune_modes) + } #[cfg(any(test, feature = "test-utils"))] - MerkleStage::Both { clean_threshold } => *clean_threshold, + MerkleStage::Both { clean_threshold, prune_modes } => (*clean_threshold, *prune_modes), }; let range = input.next_block_range(); @@ -168,10 +193,12 @@ impl Stage for MerkleStage { let target_block_root = target_block.state_root; let mut checkpoint = self.get_execution_checkpoint(provider)?; - let (trie_root, entities_checkpoint) = if range.is_empty() { (target_block_root, input.checkpoint().entities_stage_checkpoint().unwrap_or_default()) - } else if to_block - from_block > threshold || from_block == 1 { + } else if to_block - from_block > threshold || + from_block == 1 || + !self.has_enough_changesets(prune_modes, from_block, to_block)? + { // if there are more blocks than threshold it is faster to rebuild the trie let mut entities_checkpoint = if let Some(checkpoint) = checkpoint.as_ref().filter(|c| c.target_block == to_block) @@ -445,11 +472,16 @@ mod tests { struct MerkleTestRunner { tx: TestTransaction, clean_threshold: u64, + prune_modes: PruneModes, } impl Default for MerkleTestRunner { fn default() -> Self { - Self { tx: TestTransaction::default(), clean_threshold: 10000 } + Self { + tx: TestTransaction::default(), + clean_threshold: 10000, + prune_modes: PruneModes::default(), + } } } @@ -461,7 +493,7 @@ mod tests { } fn stage(&self) -> Self::S { - Self::S::Both { clean_threshold: self.clean_threshold } + Self::S::Both { clean_threshold: self.clean_threshold, prune_modes: self.prune_modes } } } diff --git a/crates/stages/src/stages/mod.rs b/crates/stages/src/stages/mod.rs index 96f1ec5fecb1..3cf295abeb42 100644 --- a/crates/stages/src/stages/mod.rs +++ b/crates/stages/src/stages/mod.rs @@ -35,3 +35,188 @@ pub use merkle::*; pub use sender_recovery::*; pub use total_difficulty::*; pub use tx_lookup::*; + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + stage::Stage, + stages::{ExecutionStage, IndexAccountHistoryStage, IndexStorageHistoryStage}, + test_utils::TestTransaction, + ExecInput, + }; + use reth_db::{ + cursor::DbCursorRO, + mdbx::{cursor::Cursor, RW}, + tables, + transaction::{DbTx, DbTxMut}, + AccountHistory, DatabaseEnv, + }; + use reth_interfaces::test_utils::generators::{self, random_block}; + use reth_primitives::{ + hex_literal::hex, keccak256, Account, Bytecode, ChainSpecBuilder, PruneMode, PruneModes, + SealedBlock, H160, MAINNET, U256, + }; + use reth_provider::{ + AccountExtReader, BlockWriter, DatabaseProviderRW, ProviderFactory, ReceiptProvider, + StorageReader, + }; + use reth_revm::Factory; + use reth_rlp::Decodable; + use std::sync::Arc; + + #[tokio::test] + async fn test_prune() { + let test_tx = TestTransaction::default(); + let factory = Arc::new(ProviderFactory::new(test_tx.tx.as_ref(), MAINNET.clone())); + + let provider = factory.provider_rw().unwrap(); + let tip = 66; + let input = ExecInput { + 
target: Some(tip), + /// The progress of this stage the last time it was executed. + checkpoint: None, + }; + let mut genesis_rlp = hex!("f901faf901f5a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa045571b40ae66ca7480791bbb2887286e4e4c4b1b298b191c889d6959023a32eda056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808502540be400808000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0").as_slice(); + let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); + let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); + let block = SealedBlock::decode(&mut block_rlp).unwrap(); + provider.insert_block(genesis, None).unwrap(); + provider.insert_block(block.clone(), None).unwrap(); + + // Fill with bogus blocks to respect PruneMode distance. 
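An aside before the filler-block loop that follows: each `PruneMode` resolves to a highest prunable block relative to the tip, which is why the test needs `tip` blocks on disk before distance-based modes can trigger at all. The sketch below is illustrative only — a hand-rolled enum whose semantics are inferred from this test's expectations, not reth's actual implementation:

```rust
/// Illustrative prune-mode resolution. Assumed semantics: `Distance(d)`
/// keeps the most recent `d` blocks below the tip, `Before(n)` prunes
/// everything strictly below block `n`.
#[derive(Clone, Copy)]
enum Mode {
    Distance(u64),
    Before(u64),
}

/// Highest block that may be pruned at the given tip, if any.
fn prune_target(mode: Mode, tip: u64) -> Option<u64> {
    match mode {
        Mode::Distance(d) => tip.checked_sub(d).and_then(|n| n.checked_sub(1)),
        Mode::Before(n) => n.checked_sub(1),
    }
}

fn main() {
    // With tip = 66, Distance(64) makes block 1 prunable while
    // Distance(66) keeps it -- matching the assertions in this test.
    assert_eq!(prune_target(Mode::Distance(64), 66), Some(1));
    assert_eq!(prune_target(Mode::Distance(66), 66), None);
    assert_eq!(prune_target(Mode::Before(2), 66), Some(1));
}
```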
+ let mut head = block.hash; + let mut rng = generators::rng(); + for block_number in 2..=tip { + let nblock = random_block(&mut rng, block_number, Some(head), Some(0), Some(0)); + head = nblock.hash; + provider.insert_block(nblock, None).unwrap(); + } + provider.commit().unwrap(); + + // insert pre state + let provider = factory.provider_rw().unwrap(); + let code = hex!("5a465a905090036002900360015500"); + let code_hash = keccak256(hex!("5a465a905090036002900360015500")); + provider + .tx_ref() + .put::( + H160(hex!("1000000000000000000000000000000000000000")), + Account { nonce: 0, balance: U256::ZERO, bytecode_hash: Some(code_hash) }, + ) + .unwrap(); + provider + .tx_ref() + .put::( + H160(hex!("a94f5374fce5edbc8e2a8697c15331677e6ebf0b")), + Account { + nonce: 0, + balance: U256::from(0x3635c9adc5dea00000u128), + bytecode_hash: None, + }, + ) + .unwrap(); + provider + .tx_ref() + .put::(code_hash, Bytecode::new_raw(code.to_vec().into())) + .unwrap(); + provider.commit().unwrap(); + + let check_pruning = |factory: Arc>, + prune_modes: PruneModes, + expect_num_receipts: usize, + expect_num_acc_changesets: usize, + expect_num_storage_changesets: usize| async move { + let provider: DatabaseProviderRW<'_, &DatabaseEnv> = factory.provider_rw().unwrap(); + + // Check execution and create receipts and changesets according to the pruning + // configuration + let mut execution_stage = ExecutionStage::new( + Factory::new(Arc::new(ChainSpecBuilder::mainnet().berlin_activated().build())), + ExecutionStageThresholds { max_blocks: Some(100), max_changes: None }, + prune_modes, + ); + + execution_stage.execute(&provider, input).await.unwrap(); + assert_eq!( + provider.receipts_by_block(1.into()).unwrap().unwrap().len(), + expect_num_receipts + ); + + assert_eq!( + provider.changed_storages_and_blocks_with_range(0..=1000).unwrap().len(), + expect_num_storage_changesets + ); + + assert_eq!( + provider.changed_accounts_and_blocks_with_range(0..=1000).unwrap().len(), + expect_num_acc_changesets + ); + + // Check AccountHistory + let mut acc_indexing_stage = + IndexAccountHistoryStage { prune_modes, ..Default::default() }; + + if let Some(PruneMode::Full) = prune_modes.account_history { + // Full is not supported + assert!(acc_indexing_stage.execute(&provider, input).await.is_err()); + } else { + acc_indexing_stage.execute(&provider, input).await.unwrap(); + let mut account_history: Cursor<'_, RW, AccountHistory> = + provider.tx_ref().cursor_read::().unwrap(); + assert_eq!(account_history.walk(None).unwrap().count(), expect_num_acc_changesets); + } + + // Check StorageHistory + let mut storage_indexing_stage = + IndexStorageHistoryStage { prune_modes, ..Default::default() }; + + if let Some(PruneMode::Full) = prune_modes.storage_history { + // Full is not supported + assert!(acc_indexing_stage.execute(&provider, input).await.is_err()); + } else { + storage_indexing_stage.execute(&provider, input).await.unwrap(); + + let mut storage_history = + provider.tx_ref().cursor_read::().unwrap(); + assert_eq!( + storage_history.walk(None).unwrap().count(), + expect_num_storage_changesets + ); + } + }; + + // In an unpruned configuration there is 1 receipt, 3 changed accounts and 1 changed + // storage. 
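A note on the `check_pruning` helper defined above: stable Rust has no async closures, so the test uses an ordinary closure whose body is an `async move` block, cloning the `Arc` into each call so every returned future owns what it needs. A minimal standalone version of the pattern (assuming a tokio dev-dependency):

```rust
use std::sync::Arc;

#[tokio::main]
async fn main() {
    let factory = Arc::new(vec![1u64, 2, 3]);

    // A closure returning a future: arguments are moved into the async
    // block, so each invocation owns everything it needs.
    let check = |data: Arc<Vec<u64>>, expected: usize| async move {
        assert_eq!(data.len(), expected);
    };

    check(factory.clone(), 3).await;
    check(factory, 3).await;
}
```

The configurations exercised below then start from `PruneModes::none()` and progressively tighten.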
+        let mut prune = PruneModes::none();
+        check_pruning(factory.clone(), prune, 1, 3, 1).await;
+
+        prune.receipts = Some(PruneMode::Full);
+        prune.account_history = Some(PruneMode::Full);
+        prune.storage_history = Some(PruneMode::Full);
+        // This will result in error for account_history and storage_history, which is caught.
+        check_pruning(factory.clone(), prune, 0, 0, 0).await;
+
+        prune.receipts = Some(PruneMode::Before(1));
+        prune.account_history = Some(PruneMode::Before(1));
+        prune.storage_history = Some(PruneMode::Before(1));
+        check_pruning(factory.clone(), prune, 1, 3, 1).await;
+
+        prune.receipts = Some(PruneMode::Before(2));
+        prune.account_history = Some(PruneMode::Before(2));
+        prune.storage_history = Some(PruneMode::Before(2));
+        // The one account is the miner
+        check_pruning(factory.clone(), prune, 0, 1, 0).await;
+
+        prune.receipts = Some(PruneMode::Distance(66));
+        prune.account_history = Some(PruneMode::Distance(66));
+        prune.storage_history = Some(PruneMode::Distance(66));
+        check_pruning(factory.clone(), prune, 1, 3, 1).await;
+
+        prune.receipts = Some(PruneMode::Distance(64));
+        prune.account_history = Some(PruneMode::Distance(64));
+        prune.storage_history = Some(PruneMode::Distance(64));
+        // The one account is the miner
+        check_pruning(factory.clone(), prune, 0, 1, 0).await;
+    }
+}
diff --git a/crates/storage/provider/src/post_state/mod.rs b/crates/storage/provider/src/post_state/mod.rs
index 846012bffad5..300b7e27d6e4 100644
--- a/crates/storage/provider/src/post_state/mod.rs
+++ b/crates/storage/provider/src/post_state/mod.rs
@@ -79,7 +79,7 @@ pub struct PostState {
     /// The receipt(s) of the executed transaction(s).
     receipts: BTreeMap<BlockNumber, Vec<Receipt>>,
     /// Pruning configuration.
-    prune_targets: PruneModes,
+    prune_modes: PruneModes,
 }
 
 impl PostState {
@@ -94,8 +94,8 @@ impl PostState {
     }
 
     /// Add a pruning configuration.
-    pub fn add_prune_targets(&mut self, prune_targets: PruneModes) {
-        self.prune_targets = prune_targets;
+    pub fn add_prune_modes(&mut self, prune_modes: PruneModes) {
+        self.prune_modes = prune_modes;
    }
 
     /// Return the current size of the poststate.
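The hunk that follows threads the chain `tip` into the history write path so that changesets for blocks the prune configuration will discard are never written in the first place. In miniature, with stand-in types rather than reth's, the control flow is:

```rust
use std::collections::BTreeMap;

type BlockNumber = u64;

/// Stand-in for `should_prune_*`: everything below `prune_before` goes.
fn should_prune(prune_before: BlockNumber, block: BlockNumber) -> bool {
    block < prune_before
}

/// Drain per-block changes, skipping prunable blocks entirely.
fn write_filtered(
    changes: &mut BTreeMap<BlockNumber, Vec<&'static str>>,
    prune_before: BlockNumber,
) -> Vec<(BlockNumber, &'static str)> {
    let mut written = Vec::new();
    for (block, entries) in std::mem::take(changes) {
        if should_prune(prune_before, block) {
            continue // mirrors the `continue` guards added below
        }
        for entry in entries {
            written.push((block, entry));
        }
    }
    written
}

fn main() {
    let mut changes = BTreeMap::from([(1, vec!["account A"]), (5, vec!["account B"])]);
    // With everything below block 2 prunable, only block 5 survives.
    assert_eq!(write_filtered(&mut changes, 2), vec![(5, "account B")]);
}
```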
@@ -518,6 +518,7 @@ impl PostState { pub fn write_history_to_db<'a, TX: DbTxMut<'a> + DbTx<'a>>( &mut self, tx: &TX, + tip: BlockNumber, ) -> Result<(), DbError> { // Write storage changes tracing::trace!(target: "provider::post_state", "Writing storage changes"); @@ -560,6 +561,10 @@ impl PostState { } } + if self.prune_modes.should_prune_storage_history(block_number, tip) { + continue + } + for (slot, old_value) in storage.storage { tracing::trace!(target: "provider::post_state", ?storage_id, ?slot, ?old_value, "Storage changed"); storage_changeset_cursor.append_dup( @@ -576,6 +581,10 @@ impl PostState { for (block_number, account_changes) in std::mem::take(&mut self.account_changes).inner.into_iter() { + if self.prune_modes.should_prune_account_history(block_number, tip) { + continue + } + for (address, info) in account_changes.into_iter() { tracing::trace!(target: "provider::post_state", block_number, ?address, old = ?info, "Account changed"); account_changeset_cursor @@ -592,7 +601,7 @@ impl PostState { tx: &TX, tip: BlockNumber, ) -> Result<(), DbError> { - self.write_history_to_db(tx)?; + self.write_history_to_db(tx, tip)?; // Write new storage state tracing::trace!(target: "provider::post_state", len = self.storage.len(), "Writing new storage state"); @@ -644,12 +653,12 @@ impl PostState { // Write the receipts of the transactions if not pruned tracing::trace!(target: "provider::post_state", len = self.receipts.len(), "Writing receipts"); - if !self.receipts.is_empty() && self.prune_targets.receipts != Some(PruneMode::Full) { + if !self.receipts.is_empty() && self.prune_modes.receipts != Some(PruneMode::Full) { let mut bodies_cursor = tx.cursor_read::()?; let mut receipts_cursor = tx.cursor_write::()?; for (block, receipts) in self.receipts { - if self.prune_targets.should_prune_receipts(block, tip) { + if self.prune_modes.should_prune_receipts(block, tip) { continue } From c0544ed7e1fe804556da6aa53fc47ba9e125b171 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 31 Jul 2023 16:36:13 +0200 Subject: [PATCH 302/722] feat(p2p): Add `GetReceipts` eth handler implementation (#3959) --- crates/net/network/src/eth_requests.rs | 54 ++++++++++++++++++++++++-- 1 file changed, 51 insertions(+), 3 deletions(-) diff --git a/crates/net/network/src/eth_requests.rs b/crates/net/network/src/eth_requests.rs index 7cb4db72b37b..10b53f61701b 100644 --- a/crates/net/network/src/eth_requests.rs +++ b/crates/net/network/src/eth_requests.rs @@ -8,7 +8,7 @@ use reth_eth_wire::{ }; use reth_interfaces::p2p::error::RequestResult; use reth_primitives::{BlockBody, BlockHashOrNumber, Header, HeadersDirection, PeerId}; -use reth_provider::{BlockReader, HeaderProvider}; +use reth_provider::{BlockReader, HeaderProvider, ReceiptProvider}; use std::{ borrow::Borrow, future::Future, @@ -21,6 +21,11 @@ use tokio_stream::wrappers::ReceiverStream; // Limits: +/// Maximum number of receipts to serve. +/// +/// Used to limit lookups. +const MAX_RECEIPTS_SERVE: usize = 1024; + /// Maximum number of block headers to serve. /// /// Used to limit lookups. @@ -32,6 +37,9 @@ const MAX_HEADERS_SERVE: usize = 1024; /// SOFT_RESPONSE_LIMIT. const MAX_BODIES_SERVE: usize = 1024; +/// Estimated size in bytes of an RLP encoded receipt. +const APPROX_RECEIPT_SIZE: usize = 24 * 1024; + /// Estimated size in bytes of an RLP encoded body. 
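An aside on how these per-item size estimates get used, before the body-size constant below: the receipts handler added by this patch accumulates an approximate byte count and stops once a soft cap or the item limit is hit. A reduced sketch — the 2 MiB cap and the lookup function are assumptions here, not reth's exact code:

```rust
/// Assumed soft cap on response payloads.
const SOFT_RESPONSE_LIMIT: usize = 2 * 1024 * 1024;
const APPROX_ITEM_SIZE: usize = 24 * 1024;
const MAX_ITEMS_SERVE: usize = 1024;

/// Collect items for the requested ids until the byte budget or item
/// cap is exhausted, or a lookup misses.
fn assemble(ids: &[u64], lookup: impl Fn(u64) -> Option<String>) -> Vec<String> {
    let mut out = Vec::new();
    let mut total_bytes = APPROX_ITEM_SIZE;
    for id in ids {
        let Some(item) = lookup(*id) else { break };
        out.push(item);
        total_bytes += APPROX_ITEM_SIZE;
        if total_bytes > SOFT_RESPONSE_LIMIT || out.len() >= MAX_ITEMS_SERVE {
            break
        }
    }
    out
}

fn main() {
    let ids: Vec<u64> = (0..10_000).collect();
    let out = assemble(&ids, |id| Some(format!("receipt {id}")));
    // 2 MiB / 24 KiB means well under 100 entries fit.
    assert!(out.len() < 100);
}
```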
 // TODO: check 24kb blocksize assumption
 const APPROX_BODY_SIZE: usize = 24 * 1024;
 
@@ -70,7 +78,7 @@ impl<C> EthRequestHandler<C> {
 
 impl<C> EthRequestHandler<C>
 where
-    C: BlockReader + HeaderProvider,
+    C: BlockReader + HeaderProvider + ReceiptProvider,
 {
     /// Returns the list of requested headers
     fn get_headers_response(&self, request: GetBlockHeaders) -> Vec<Header>
{ @@ -185,6 +193,44 @@ where let _ = response.send(Ok(BlockBodies(bodies))); } + + fn on_receipts_request( + &mut self, + _peer_id: PeerId, + request: GetReceipts, + response: oneshot::Sender>, + ) { + let mut receipts = Vec::new(); + + let mut total_bytes = APPROX_RECEIPT_SIZE; + + for hash in request.0 { + if let Some(receipts_by_block) = + self.client.receipts_by_block(BlockHashOrNumber::Hash(hash)).unwrap_or_default() + { + receipts.push( + receipts_by_block + .into_iter() + .map(|receipt| receipt.with_bloom()) + .collect::>(), + ); + + total_bytes += APPROX_RECEIPT_SIZE; + + if total_bytes > SOFT_RESPONSE_LIMIT { + break + } + + if receipts.len() >= MAX_RECEIPTS_SERVE { + break + } + } else { + break + } + } + + let _ = response.send(Ok(Receipts(receipts))); + } } /// An endless future. @@ -211,7 +257,9 @@ where this.on_bodies_request(peer_id, request, response) } IncomingEthRequest::GetNodeData { .. } => {} - IncomingEthRequest::GetReceipts { .. } => {} + IncomingEthRequest::GetReceipts { peer_id, request, response } => { + this.on_receipts_request(peer_id, request, response) + } }, } } From a1e68151d3f16bcf573266228a3f466e8f457202 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 31 Jul 2023 16:55:11 +0200 Subject: [PATCH 303/722] feat: Add RethCliExt (#3983) --- bin/reth/src/args/rpc_server_args.rs | 53 +++++++++------- bin/reth/src/cli/ext.rs | 94 ++++++++++++++++++++++++++++ bin/reth/src/{cli.rs => cli/mod.rs} | 22 ++++--- bin/reth/src/node/mod.rs | 78 +++++++++++------------ crates/rpc/rpc-builder/src/lib.rs | 73 +++++++++++++++++++-- 5 files changed, 242 insertions(+), 78 deletions(-) create mode 100644 bin/reth/src/cli/ext.rs rename bin/reth/src/{cli.rs => cli/mod.rs} (93%) diff --git a/bin/reth/src/args/rpc_server_args.rs b/bin/reth/src/args/rpc_server_args.rs index 35d9e7945936..13e9f2016732 100644 --- a/bin/reth/src/args/rpc_server_args.rs +++ b/bin/reth/src/args/rpc_server_args.rs @@ -1,6 +1,9 @@ //! clap [Args](clap::Args) for RPC related arguments. -use crate::args::GasPriceOracleArgs; +use crate::{ + args::GasPriceOracleArgs, + cli::ext::{NoopArgsExt, RethRpcConfig, RethRpcServerArgsExt}, +}; use clap::{ builder::{PossibleValue, RangedU64ValueParser, TypedValueParser}, Arg, Args, Command, @@ -52,9 +55,9 @@ pub(crate) const RPC_DEFAULT_MAX_CONNECTIONS: u32 = 100; pub(crate) const RPC_DEFAULT_MAX_TRACING_REQUESTS: u32 = 25; /// Parameters for configuring the rpc more granularity via CLI -#[derive(Debug, Args, PartialEq, Eq)] +#[derive(Debug, Args)] #[command(next_help_heading = "RPC")] -pub struct RpcServerArgs { +pub struct RpcServerArgs { /// Enable the HTTP-RPC server #[arg(long, default_value_if("dev", "true", "true"))] pub http: bool, @@ -160,9 +163,13 @@ pub struct RpcServerArgs { /// Maximum number of env cache entries. #[arg(long, default_value_t = DEFAULT_ENV_CACHE_MAX_LEN)] pub env_cache_len: u32, + + /// Additional arguments for rpc. + #[clap(flatten)] + pub ext: Ext, } -impl RpcServerArgs { +impl RpcServerArgs { /// Returns the max request size in bytes. pub fn rpc_max_request_size_bytes(&self) -> u32 { self.rpc_max_request_size * 1024 * 1024 @@ -183,21 +190,6 @@ impl RpcServerArgs { ) } - /// Extracts the [EthConfig] from the args. 
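The surrounding diff makes `RpcServerArgs` generic over an extension type whose flags are flattened into the same command. The pattern in miniature, with hypothetical names (clap 4):

```rust
use clap::{Args, Parser};

/// A no-op extension contributing no extra flags.
#[derive(Debug, Clone, Copy, Default, Args)]
struct NoopExt;

#[derive(Debug, Parser)]
struct ServerArgs<Ext: Args = NoopExt> {
    /// Enable the HTTP server.
    #[arg(long)]
    http: bool,

    /// Extension flags are flattened into the same flag namespace.
    #[clap(flatten)]
    ext: Ext,
}

/// An extension adding one flag.
#[derive(Debug, Args)]
struct MyExt {
    /// Extra flag contributed by the extension.
    #[arg(long)]
    enable_ext: bool,
}

fn main() {
    let args = ServerArgs::<MyExt>::parse_from(["demo", "--http", "--enable-ext"]);
    assert!(args.http && args.ext.enable_ext);
}
```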
- pub fn eth_config(&self) -> EthConfig { - EthConfig::default() - .max_tracing_requests(self.rpc_max_tracing_requests) - .rpc_gas_cap(self.rpc_gas_cap) - .gpo_config(self.gas_price_oracle_config()) - } - - /// Convenience function that returns whether ipc is enabled - /// - /// By default IPC is enabled therefor it is enabled if the `ipcdisable` is false. - fn is_ipc_enabled(&self) -> bool { - !self.ipcdisable - } - /// The execution layer and consensus layer clients SHOULD accept a configuration parameter: /// jwt-secret, which designates a file containing the hex-encoded 256 bit secret key to be used /// for verifying/generating JWT tokens. @@ -244,7 +236,7 @@ impl RpcServerArgs { events: Events, engine_api: Engine, jwt_secret: JwtSecret, - ) -> Result<(RpcServerHandle, AuthServerHandle), RpcError> + ) -> eyre::Result<(RpcServerHandle, AuthServerHandle)> where Provider: BlockReaderIdExt + HeaderProvider @@ -266,7 +258,7 @@ impl RpcServerArgs { let module_config = self.transport_rpc_module_config(); debug!(target: "reth::cli", http=?module_config.http(), ws=?module_config.ws(), "Using RPC module config"); - let (rpc_modules, auth_module) = RpcModuleBuilder::default() + let (mut rpc_modules, auth_module, mut registry) = RpcModuleBuilder::default() .with_provider(provider) .with_pool(pool) .with_network(network) @@ -274,6 +266,9 @@ impl RpcServerArgs { .with_executor(executor) .build_with_auth_server(module_config, engine_api); + // apply configured customization + self.ext.extend_rpc_modules(self, &mut registry, &mut rpc_modules)?; + let server_config = self.rpc_server_config(); let launch_rpc = rpc_modules.start_server(server_config).map_ok(|handle| { if let Some(url) = handle.ipc_endpoint() { @@ -295,7 +290,7 @@ impl RpcServerArgs { }); // launch servers concurrently - futures::future::try_join(launch_rpc, launch_auth).await + Ok(futures::future::try_join(launch_rpc, launch_auth).await?) } /// Convenience function for starting a rpc server with configs which extracted from cli args. @@ -454,6 +449,20 @@ impl RpcServerArgs { } } +impl RethRpcConfig for RpcServerArgs { + fn is_ipc_enabled(&self) -> bool { + // By default IPC is enabled therefor it is enabled if the `ipcdisable` is false. + !self.ipcdisable + } + + fn eth_config(&self) -> EthConfig { + EthConfig::default() + .max_tracing_requests(self.rpc_max_tracing_requests) + .rpc_gas_cap(self.rpc_gas_cap) + .gpo_config(self.gas_price_oracle_config()) + } +} + /// clap value parser for [RpcModuleSelection]. #[derive(Clone, Debug, Default)] #[non_exhaustive] diff --git a/bin/reth/src/cli/ext.rs b/bin/reth/src/cli/ext.rs new file mode 100644 index 000000000000..11db2ca347d7 --- /dev/null +++ b/bin/reth/src/cli/ext.rs @@ -0,0 +1,94 @@ +//! Support for integrating customizations into the CLI. + +use clap::Args; +use reth_network_api::{NetworkInfo, Peers}; +use reth_provider::{ + BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, + StateProviderFactory, +}; +use reth_rpc_builder::{EthConfig, RethModuleRegistry, TransportRpcModules}; +use reth_tasks::TaskSpawner; +use reth_transaction_pool::TransactionPool; +use std::fmt; + +/// A trait that allows for extending parts of the CLI with additional functionality. +pub trait RethCliExt { + /// Extends the rpc arguments for the node + type RpcExt: RethRpcServerArgsExt; +} + +impl RethCliExt for () { + type RpcExt = NoopArgsExt; +} + +/// An [Args] extension that does nothing. 
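Zooming out, `RethCliExt` is the classic extension-point-as-trait shape: an associated type selects the plug-in, and `()` is the do-nothing default. Stripped to a skeleton with invented names:

```rust
use std::fmt::Debug;

/// The extension point: implementors choose concrete plug-in types.
trait CliExt {
    type RpcExt: Debug + Default;
}

/// `()` is the "no extensions" implementation used by default.
impl CliExt for () {
    type RpcExt = ();
}

/// The CLI is generic over the extension, defaulting to none.
struct Cli<Ext: CliExt = ()> {
    rpc_ext: Ext::RpcExt,
}

impl<Ext: CliExt> Cli<Ext> {
    fn new() -> Self {
        Self { rpc_ext: Ext::RpcExt::default() }
    }
}

fn main() {
    let cli: Cli = Cli::new();
    println!("{:?}", cli.rpc_ext); // ()
}
```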
+#[derive(Debug, Clone, Copy, Default, Args)] +pub struct NoopArgsExt; + +/// A trait that provides configured RPC server. +/// +/// This provides all basic config values for the RPC server and is implemented by the +/// [RpcServerArgs](crate::args::RpcServerArgs) type. +pub trait RethRpcConfig { + /// Returns whether ipc is enabled. + fn is_ipc_enabled(&self) -> bool; + + /// The configured ethereum RPC settings. + fn eth_config(&self) -> EthConfig; + + // TODO extract more functions from RpcServerArgs +} + +/// A trait that allows further customization of the RPC server via CLI. +pub trait RethRpcServerArgsExt: fmt::Debug + clap::Args { + /// Allows for registering additional RPC modules for the transports. + /// + /// This is expected to call the merge functions of [TransportRpcModules], for example + /// [TransportRpcModules::merge_configured] + fn extend_rpc_modules( + &self, + config: &Conf, + registry: &mut RethModuleRegistry, + modules: &mut TransportRpcModules<()>, + ) -> eyre::Result<()> + where + Conf: RethRpcConfig, + Provider: BlockReaderIdExt + + StateProviderFactory + + EvmEnvProvider + + ChainSpecProvider + + ChangeSetReader + + Clone + + Unpin + + 'static, + Pool: TransactionPool + Clone + 'static, + Network: NetworkInfo + Peers + Clone + 'static, + Tasks: TaskSpawner + Clone + 'static, + Events: CanonStateSubscriptions + Clone + 'static; +} + +impl RethRpcServerArgsExt for NoopArgsExt { + fn extend_rpc_modules( + &self, + _config: &Conf, + _registry: &mut RethModuleRegistry, + _modules: &mut TransportRpcModules<()>, + ) -> eyre::Result<()> + where + Conf: RethRpcConfig, + Provider: BlockReaderIdExt + + StateProviderFactory + + EvmEnvProvider + + ChainSpecProvider + + ChangeSetReader + + Clone + + Unpin + + 'static, + Pool: TransactionPool + Clone + 'static, + Network: NetworkInfo + Peers + Clone + 'static, + Tasks: TaskSpawner + Clone + 'static, + Events: CanonStateSubscriptions + Clone + 'static, + { + Ok(()) + } +} diff --git a/bin/reth/src/cli.rs b/bin/reth/src/cli/mod.rs similarity index 93% rename from bin/reth/src/cli.rs rename to bin/reth/src/cli/mod.rs index 9423235ebbe7..79a1f7b92417 100644 --- a/bin/reth/src/cli.rs +++ b/bin/reth/src/cli/mod.rs @@ -1,7 +1,9 @@ //! CLI definition and entrypoint to executable use crate::{ args::utils::genesis_value_parser, - chain, config, db, debug_cmd, + chain, + cli::ext::RethCliExt, + config, db, debug_cmd, dirs::{LogsDir, PlatformPath}, node, p2p, runner::CliRunner, @@ -17,15 +19,17 @@ use reth_tracing::{ }; use std::sync::Arc; +pub mod ext; + /// The main reth cli interface. /// /// This is the entrypoint to the executable. #[derive(Debug, Parser)] #[command(author, version = SHORT_VERSION, long_version = LONG_VERSION, about = "Reth", long_about = None)] -pub struct Cli { +pub struct Cli { /// The command to run #[clap(subcommand)] - command: Commands, + command: Commands, /// The chain this node is running. /// @@ -99,10 +103,10 @@ pub fn run() -> eyre::Result<()> { /// Commands to be executed #[derive(Debug, Subcommand)] -pub enum Commands { +pub enum Commands { /// Start the node #[command(name = "node")] - Node(node::Command), + Node(node::Command), /// Initialize the database from a genesis file. 
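Note the knock-on effect visible in the tests further down: once the CLI types gain a generic extension parameter, `Cli::try_parse_from(..)` must become `Cli::<()>::try_parse_from(..)`, because default type parameters do not drive inference at call sites. A toy demonstration:

```rust
#[derive(Debug, Default)]
struct Cli<Ext = ()> {
    ext: Ext,
}

impl<Ext: Default> Cli<Ext> {
    fn try_parse_from(_args: &[&str]) -> Result<Self, String> {
        Ok(Self { ext: Ext::default() })
    }
}

fn main() {
    // `Cli::try_parse_from(..)` alone cannot infer `Ext`; the turbofish
    // pins it to the unit (no-extension) type.
    let cli = Cli::<()>::try_parse_from(&["reth", "node"]).unwrap();
    println!("{:?}", cli.ext);
}
```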
#[command(name = "init")] Init(chain::InitCommand), @@ -225,9 +229,9 @@ mod tests { /// runtime #[test] fn test_parse_help_all_subcommands() { - let reth = Cli::command(); + let reth = Cli::<()>::command(); for sub_command in reth.get_subcommands() { - let err = Cli::try_parse_from(["reth", sub_command.get_name(), "--help"]) + let err = Cli::<()>::try_parse_from(["reth", sub_command.get_name(), "--help"]) .err() .unwrap_or_else(|| { panic!("Failed to parse help message {}", sub_command.get_name()) @@ -243,13 +247,13 @@ mod tests { /// name #[test] fn parse_logs_path() { - let mut reth = Cli::try_parse_from(["reth", "node", "--log.persistent"]).unwrap(); + let mut reth = Cli::<()>::try_parse_from(["reth", "node", "--log.persistent"]).unwrap(); reth.logs.log_directory = reth.logs.log_directory.join(reth.chain.chain.to_string()); let log_dir = reth.logs.log_directory; assert!(log_dir.as_ref().ends_with("reth/logs/mainnet"), "{:?}", log_dir); let mut reth = - Cli::try_parse_from(["reth", "node", "--chain", "sepolia", "--log.persistent"]) + Cli::<()>::try_parse_from(["reth", "node", "--chain", "sepolia", "--log.persistent"]) .unwrap(); reth.logs.log_directory = reth.logs.log_directory.join(reth.chain.chain.to_string()); let log_dir = reth.logs.log_directory; diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index ab6bcb623e26..8dc76f0fe234 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -2,9 +2,16 @@ //! //! Starts the client use crate::{ - args::{get_secret_key, DebugArgs, DevArgs, NetworkArgs, RpcServerArgs, TxPoolArgs}, - dirs::DataDirPath, + args::{ + get_secret_key, + utils::{genesis_value_parser, parse_socket_address}, + DatabaseArgs, DebugArgs, DevArgs, NetworkArgs, PayloadBuilderArgs, RpcServerArgs, + TxPoolArgs, + }, + cli::ext::RethCliExt, + dirs::{DataDirPath, MaybePlatformPath}, init::init_genesis, + node::cl_events::ConsensusLayerHealthEvents, prometheus_exporter, runner::CliContext, utils::get_single_header, @@ -32,26 +39,30 @@ use reth_interfaces::{ p2p::{ bodies::{client::BodiesClient, downloader::BodyDownloader}, either::EitherDownloader, - headers::downloader::HeaderDownloader, + headers::{client::HeadersClient, downloader::HeaderDownloader}, }, }; use reth_network::{error::NetworkError, NetworkConfig, NetworkHandle, NetworkManager}; use reth_network_api::NetworkInfo; +use reth_payload_builder::PayloadBuilderService; use reth_primitives::{ - stage::StageId, BlockHashOrNumber, BlockNumber, ChainSpec, Head, SealedHeader, H256, + stage::StageId, BlockHashOrNumber, BlockNumber, ChainSpec, DisplayHardforks, Head, + SealedHeader, H256, }; use reth_provider::{ - BlockHashReader, BlockReader, CanonStateSubscriptions, HeaderProvider, ProviderFactory, - StageCheckpointReader, + providers::BlockchainProvider, BlockHashReader, BlockReader, CanonStateSubscriptions, + HeaderProvider, ProviderFactory, StageCheckpointReader, }; +use reth_prune::BatchSizes; use reth_revm::Factory; use reth_revm_inspectors::stack::Hook; use reth_rpc_engine_api::EngineApi; use reth_stages::{ prelude::*, stages::{ - ExecutionStage, ExecutionStageThresholds, HeaderSyncMode, SenderRecoveryStage, - TotalDifficultyStage, + AccountHashingStage, ExecutionStage, ExecutionStageThresholds, HeaderSyncMode, + IndexAccountHistoryStage, IndexStorageHistoryStage, MerkleStage, SenderRecoveryStage, + StorageHashingStage, TotalDifficultyStage, TransactionLookupStage, }, MetricEventsSender, MetricsListener, }; @@ -66,30 +77,12 @@ use std::{ use tokio::sync::{mpsc::unbounded_channel, oneshot, 
watch}; use tracing::*; -use crate::{ - args::{ - utils::{genesis_value_parser, parse_socket_address}, - DatabaseArgs, PayloadBuilderArgs, - }, - dirs::MaybePlatformPath, - node::cl_events::ConsensusLayerHealthEvents, -}; -use reth_interfaces::p2p::headers::client::HeadersClient; -use reth_payload_builder::PayloadBuilderService; -use reth_primitives::DisplayHardforks; -use reth_provider::providers::BlockchainProvider; -use reth_prune::BatchSizes; -use reth_stages::stages::{ - AccountHashingStage, IndexAccountHistoryStage, IndexStorageHistoryStage, MerkleStage, - StorageHashingStage, TransactionLookupStage, -}; - pub mod cl_events; pub mod events; /// Start the node #[derive(Debug, Parser)] -pub struct Command { +pub struct Command { /// The path to the data dir for all reth files and subdirectories. /// /// Defaults to the OS-specific data directory: @@ -134,7 +127,7 @@ pub struct Command { network: NetworkArgs, #[clap(flatten)] - rpc: RpcServerArgs, + rpc: RpcServerArgs, #[clap(flatten)] txpool: TxPoolArgs, @@ -820,60 +813,61 @@ async fn run_network_until_shutdown( #[cfg(test)] mod tests { - use reth_primitives::DEV; - use super::*; + use reth_primitives::DEV; use std::{net::IpAddr, path::Path}; #[test] fn parse_help_node_command() { - let err = Command::try_parse_from(["reth", "--help"]).unwrap_err(); + let err = Command::<()>::try_parse_from(["reth", "--help"]).unwrap_err(); assert_eq!(err.kind(), clap::error::ErrorKind::DisplayHelp); } #[test] fn parse_common_node_command_chain_args() { for chain in ["mainnet", "sepolia", "goerli"] { - let args: Command = Command::parse_from(["reth", "--chain", chain]); + let args: Command = Command::<()>::parse_from(["reth", "--chain", chain]); assert_eq!(args.chain.chain, chain.parse().unwrap()); } } #[test] fn parse_discovery_port() { - let cmd = Command::try_parse_from(["reth", "--discovery.port", "300"]).unwrap(); + let cmd = Command::<()>::try_parse_from(["reth", "--discovery.port", "300"]).unwrap(); assert_eq!(cmd.network.discovery.port, Some(300)); } #[test] fn parse_port() { let cmd = - Command::try_parse_from(["reth", "--discovery.port", "300", "--port", "99"]).unwrap(); + Command::<()>::try_parse_from(["reth", "--discovery.port", "300", "--port", "99"]) + .unwrap(); assert_eq!(cmd.network.discovery.port, Some(300)); assert_eq!(cmd.network.port, Some(99)); } #[test] fn parse_metrics_port() { - let cmd = Command::try_parse_from(["reth", "--metrics", "9001"]).unwrap(); + let cmd = Command::<()>::try_parse_from(["reth", "--metrics", "9001"]).unwrap(); assert_eq!(cmd.metrics, Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001))); - let cmd = Command::try_parse_from(["reth", "--metrics", ":9001"]).unwrap(); + let cmd = Command::<()>::try_parse_from(["reth", "--metrics", ":9001"]).unwrap(); assert_eq!(cmd.metrics, Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001))); - let cmd = Command::try_parse_from(["reth", "--metrics", "localhost:9001"]).unwrap(); + let cmd = Command::<()>::try_parse_from(["reth", "--metrics", "localhost:9001"]).unwrap(); assert_eq!(cmd.metrics, Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001))); } #[test] fn parse_config_path() { - let cmd = Command::try_parse_from(["reth", "--config", "my/path/to/reth.toml"]).unwrap(); + let cmd = + Command::<()>::try_parse_from(["reth", "--config", "my/path/to/reth.toml"]).unwrap(); // always store reth.toml in the data dir, not the chain specific data dir let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain); let config_path = 
cmd.config.unwrap_or(data_dir.config_path()); assert_eq!(config_path, Path::new("my/path/to/reth.toml")); - let cmd = Command::try_parse_from(["reth"]).unwrap(); + let cmd = Command::<()>::try_parse_from(["reth"]).unwrap(); // always store reth.toml in the data dir, not the chain specific data dir let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain); @@ -883,12 +877,12 @@ mod tests { #[test] fn parse_db_path() { - let cmd = Command::try_parse_from(["reth"]).unwrap(); + let cmd = Command::<()>::try_parse_from(["reth"]).unwrap(); let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain); let db_path = data_dir.db_path(); assert!(db_path.ends_with("reth/mainnet/db"), "{:?}", cmd.config); - let cmd = Command::try_parse_from(["reth", "--datadir", "my/custom/path"]).unwrap(); + let cmd = Command::<()>::try_parse_from(["reth", "--datadir", "my/custom/path"]).unwrap(); let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain); let db_path = data_dir.db_path(); assert_eq!(db_path, Path::new("my/custom/path/db")); @@ -896,7 +890,7 @@ mod tests { #[test] fn parse_dev() { - let cmd = Command::parse_from(["reth", "--dev"]); + let cmd = Command::<()>::parse_from(["reth", "--dev"]); let chain = DEV.clone(); assert_eq!(cmd.chain.chain, chain.chain); assert_eq!(cmd.chain.genesis_hash, chain.genesis_hash); diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 2ec4ea0a33c4..c66636babb00 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -89,7 +89,7 @@ //! let builder = RpcModuleBuilder::new(provider, pool, network, TokioTaskExecutor::default(), events); //! //! // configure the server modules -//! let (modules, auth_module) = builder.build_with_auth_server(transports, engine_api); +//! let (modules, auth_module, _registry) = builder.build_with_auth_server(transports, engine_api); //! //! // start the servers //! let auth_config = AuthServerConfig::builder(JwtSecret::random()).build(); @@ -343,7 +343,11 @@ where self, module_config: TransportRpcModuleConfig, engine: EngineApi, - ) -> (TransportRpcModules<()>, AuthRpcModule) + ) -> ( + TransportRpcModules<()>, + AuthRpcModule, + RethModuleRegistry, + ) where EngineApi: EngineApiServer, { @@ -369,7 +373,7 @@ where let auth_module = registry.create_auth_module(engine); - (modules, auth_module) + (modules, auth_module, registry) } /// Configures all [RpcModule]s specific to the given [TransportRpcModuleConfig] which can be @@ -1026,12 +1030,12 @@ where } /// Returns the configured [EthHandlers] or creates it if it does not exist yet - fn eth_handlers(&mut self) -> EthHandlers { + pub fn eth_handlers(&mut self) -> EthHandlers { self.with_eth(|handlers| handlers.clone()) } /// Returns the configured [EthApi] or creates it if it does not exist yet - fn eth_api(&mut self) -> EthApi { + pub fn eth_api(&mut self) -> EthApi { self.with_eth(|handlers| handlers.api.clone()) } } @@ -1456,6 +1460,65 @@ impl TransportRpcModules<()> { &self.config } + /// Merge the given Methods in the configured http methods. + /// + /// Fails if any of the methods in other is present already. + /// + /// Returns Ok(false) if no http transport is configured. + pub fn merge_http( + &mut self, + other: impl Into, + ) -> Result { + if let Some(ref mut http) = self.http { + return http.merge(other.into()).map(|_| true) + } + Ok(false) + } + + /// Merge the given Methods in the configured ws methods. + /// + /// Fails if any of the methods in other is present already. 
+ /// + /// Returns Ok(false) if no http transport is configured. + pub fn merge_ws( + &mut self, + other: impl Into, + ) -> Result { + if let Some(ref mut ws) = self.ws { + return ws.merge(other.into()).map(|_| true) + } + Ok(false) + } + + /// Merge the given Methods in the configured ipc methods. + /// + /// Fails if any of the methods in other is present already. + /// + /// Returns Ok(false) if no ipc transport is configured. + pub fn merge_ipc( + &mut self, + other: impl Into, + ) -> Result { + if let Some(ref mut http) = self.http { + return http.merge(other.into()).map(|_| true) + } + Ok(false) + } + + /// Merge the given Methods in all configured methods. + /// + /// Fails if any of the methods in other is present already. + pub fn merge_configured( + &mut self, + other: impl Into, + ) -> Result<(), jsonrpsee::core::error::Error> { + let other = other.into(); + self.merge_http(other.clone())?; + self.merge_ws(other.clone())?; + self.merge_ipc(other.clone())?; + Ok(()) + } + /// Convenience function for starting a server pub async fn start_server(self, builder: RpcServerConfig) -> Result { builder.start(self).await From dee14c7b4cc27fdc78819820b831d981f77c222a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 31 Jul 2023 17:04:35 +0200 Subject: [PATCH 304/722] fix: serde rename revertReason (#4002) --- crates/rpc/rpc-types/src/eth/trace/geth/call.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/rpc/rpc-types/src/eth/trace/geth/call.rs b/crates/rpc/rpc-types/src/eth/trace/geth/call.rs index 99085208f144..d131f0f6a328 100644 --- a/crates/rpc/rpc-types/src/eth/trace/geth/call.rs +++ b/crates/rpc/rpc-types/src/eth/trace/geth/call.rs @@ -16,7 +16,7 @@ pub struct CallFrame { pub output: Option, #[serde(default, skip_serializing_if = "Option::is_none")] pub error: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] + #[serde(default, rename = "revertReason", skip_serializing_if = "Option::is_none")] pub revert_reason: Option, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub calls: Vec, From 9a7911b49e0b4bbb03d01db5fce645124b01ab89 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 31 Jul 2023 17:00:45 +0100 Subject: [PATCH 305/722] feat(bin): `node --full` flag (#3965) --- bin/reth/src/args/mod.rs | 4 ++++ bin/reth/src/args/pruning_args.rs | 37 +++++++++++++++++++++++++++++ bin/reth/src/init.rs | 1 + bin/reth/src/node/mod.rs | 20 ++++++++++++---- crates/primitives/src/chain/spec.rs | 13 ++++++++++ crates/primitives/src/hardfork.rs | 2 ++ crates/rpc/rpc-builder/src/lib.rs | 2 +- 7 files changed, 73 insertions(+), 6 deletions(-) create mode 100644 bin/reth/src/args/pruning_args.rs diff --git a/bin/reth/src/args/mod.rs b/bin/reth/src/args/mod.rs index dd4dd83d0b69..0710176a0d59 100644 --- a/bin/reth/src/args/mod.rs +++ b/bin/reth/src/args/mod.rs @@ -39,4 +39,8 @@ pub use txpool_args::TxPoolArgs; mod dev_args; pub use dev_args::DevArgs; +/// PruneArgs for configuring the pruning and full node +mod pruning_args; +pub use pruning_args::PruningArgs; + pub mod utils; diff --git a/bin/reth/src/args/pruning_args.rs b/bin/reth/src/args/pruning_args.rs new file mode 100644 index 000000000000..42f8d6571c44 --- /dev/null +++ b/bin/reth/src/args/pruning_args.rs @@ -0,0 +1,37 @@ +//! 
Pruning and full node arguments + +use clap::Args; +use reth_config::config::PruneConfig; +use reth_primitives::{ChainSpec, PruneMode, PruneModes}; +use std::sync::Arc; + +/// Parameters for pruning and full node +#[derive(Debug, Args, PartialEq, Default)] +#[command(next_help_heading = "Pruning")] +pub struct PruningArgs { + /// Run full node. Only the most recent 128 block states are stored. This flag takes + /// priority over pruning configuration in reth.toml. + // TODO(alexey): unhide when pruning is ready for production use + #[arg(long, hide = true, default_value_t = false)] + pub full: bool, +} + +impl PruningArgs { + /// Returns pruning configuration. + pub fn prune_config(&self, chain_spec: Arc) -> Option { + if self.full { + Some(PruneConfig { + block_interval: 5, + parts: PruneModes { + sender_recovery: Some(PruneMode::Distance(128)), + transaction_lookup: None, + receipts: chain_spec.deposit_contract_deployment_block.map(PruneMode::Before), + account_history: Some(PruneMode::Distance(128)), + storage_history: Some(PruneMode::Distance(128)), + }, + }) + } else { + None + } + } +} diff --git a/bin/reth/src/init.rs b/bin/reth/src/init.rs index cb6f885801b1..1e3cae87787f 100644 --- a/bin/reth/src/init.rs +++ b/bin/reth/src/init.rs @@ -272,6 +272,7 @@ mod tests { fork_timestamps: ForkTimestamps::default(), genesis_hash: None, paris_block_and_final_difficulty: None, + deposit_contract_deployment_block: None, }); let db = create_test_rw_db(); diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 8dc76f0fe234..76448af826e6 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -5,8 +5,8 @@ use crate::{ args::{ get_secret_key, utils::{genesis_value_parser, parse_socket_address}, - DatabaseArgs, DebugArgs, DevArgs, NetworkArgs, PayloadBuilderArgs, RpcServerArgs, - TxPoolArgs, + DatabaseArgs, DebugArgs, DevArgs, NetworkArgs, PayloadBuilderArgs, PruningArgs, + RpcServerArgs, TxPoolArgs, }, cli::ext::RethCliExt, dirs::{DataDirPath, MaybePlatformPath}, @@ -27,7 +27,7 @@ use reth_beacon_consensus::{BeaconConsensus, BeaconConsensusEngine, MIN_BLOCKS_F use reth_blockchain_tree::{ config::BlockchainTreeConfig, externals::TreeExternals, BlockchainTree, ShareableBlockchainTree, }; -use reth_config::Config; +use reth_config::{config::PruneConfig, Config}; use reth_db::{database::Database, init_db, DatabaseEnv}; use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_downloaders::{ @@ -143,6 +143,9 @@ pub struct Command { #[clap(flatten)] dev: DevArgs, + + #[clap(flatten)] + pruning: PruningArgs, } impl Command { @@ -298,6 +301,8 @@ impl Command { None }; + let prune_config = self.pruning.prune_config(Arc::clone(&self.chain)).or(config.prune); + // Configure the pipeline let (mut pipeline, client) = if self.dev.dev { info!(target: "reth::cli", "Starting Reth in dev mode"); @@ -332,6 +337,7 @@ impl Command { db.clone(), &ctx.task_executor, metrics_tx, + prune_config, max_block, ) .await?; @@ -351,6 +357,7 @@ impl Command { db.clone(), &ctx.task_executor, metrics_tx, + prune_config, max_block, ) .await?; @@ -373,7 +380,7 @@ impl Command { None }; - let pruner = config.prune.map(|prune_config| { + let pruner = prune_config.map(|prune_config| { info!(target: "reth::cli", "Pruner initialized"); reth_prune::Pruner::new( db.clone(), @@ -479,6 +486,7 @@ impl Command { db: DB, task_executor: &TaskExecutor, metrics_tx: MetricEventsSender, + prune_config: Option, max_block: Option, ) -> eyre::Result> where @@ -504,6 +512,7 @@ impl Command { max_block, self.debug.continuous, 
metrics_tx, + prune_config, ) .await?; @@ -685,6 +694,7 @@ impl Command { max_block: Option, continuous: bool, metrics_tx: MetricEventsSender, + prune_config: Option, ) -> eyre::Result> where DB: Database + Clone + 'static, @@ -746,7 +756,7 @@ impl Command { max_blocks: stage_config.execution.max_blocks, max_changes: stage_config.execution.max_changes, }, - config.prune.map(|prune| prune.parts).unwrap_or_default(), + prune_config.map(|prune| prune.parts).unwrap_or_default(), ) .with_metrics_tx(metrics_tx), ) diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index d86d375a44b3..f23839d1daea 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -54,6 +54,8 @@ pub static MAINNET: Lazy> = Lazy::new(|| { ), (Hardfork::Shanghai, ForkCondition::Timestamp(1681338455)), ]), + // https://etherscan.io/tx/0xe75fb554e433e03763a1560646ee22dcb74e5274b34c5ad644e7c0f619a7e1d0 + deposit_contract_deployment_block: Some(11052984), } .into() }); @@ -88,6 +90,8 @@ pub static GOERLI: Lazy> = Lazy::new(|| { ), (Hardfork::Shanghai, ForkCondition::Timestamp(1678832736)), ]), + // https://goerli.etherscan.io/tx/0xa3c07dc59bfdb1bfc2d50920fed2ef2c1c4e0a09fe2325dbc14e07702f965a78 + deposit_contract_deployment_block: Some(4367322), } .into() }); @@ -126,6 +130,8 @@ pub static SEPOLIA: Lazy> = Lazy::new(|| { ), (Hardfork::Shanghai, ForkCondition::Timestamp(1677557088)), ]), + // https://sepolia.etherscan.io/tx/0x025ecbf81a2f1220da6285d1701dc89fb5a956b62562ee922e1a9efd73eb4b14 + deposit_contract_deployment_block: Some(1273020), } .into() }); @@ -163,6 +169,7 @@ pub static DEV: Lazy> = Lazy::new(|| { ), (Hardfork::Shanghai, ForkCondition::Timestamp(0)), ]), + deposit_contract_deployment_block: Some(0), } .into() }); @@ -201,6 +208,10 @@ pub struct ChainSpec { /// The active hard forks and their activation conditions pub hardforks: BTreeMap, + + /// The block at which the deposit contract for PoS was deployed. 
+ #[serde(skip, default)] + pub deposit_contract_deployment_block: Option, } impl ChainSpec { @@ -433,6 +444,7 @@ impl From for ChainSpec { fork_timestamps: ForkTimestamps::from_hardforks(&hardforks), hardforks, paris_block_and_final_difficulty: None, + deposit_contract_deployment_block: None, } } } @@ -655,6 +667,7 @@ impl ChainSpecBuilder { fork_timestamps: ForkTimestamps::from_hardforks(&self.hardforks), hardforks: self.hardforks, paris_block_and_final_difficulty: None, + deposit_contract_deployment_block: None, } } } diff --git a/crates/primitives/src/hardfork.rs b/crates/primitives/src/hardfork.rs index ba87a53ef77a..724ddf93a39a 100644 --- a/crates/primitives/src/hardfork.rs +++ b/crates/primitives/src/hardfork.rs @@ -164,6 +164,7 @@ mod tests { hardforks: BTreeMap::from([(Hardfork::Frontier, ForkCondition::Never)]), fork_timestamps: Default::default(), paris_block_and_final_difficulty: None, + deposit_contract_deployment_block: None, }; assert_eq!(Hardfork::Frontier.fork_id(&spec), None); @@ -178,6 +179,7 @@ mod tests { hardforks: BTreeMap::from([(Hardfork::Shanghai, ForkCondition::Never)]), fork_timestamps: Default::default(), paris_block_and_final_difficulty: None, + deposit_contract_deployment_block: None, }; assert_eq!(Hardfork::Shanghai.fork_filter(&spec), None); diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index c66636babb00..e7f617194ec6 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -818,7 +818,7 @@ where let eth = self.eth_handlers(); self.modules.insert( RethRpcModule::Trace, - TraceApi::new(self.provider.clone(), eth.api.clone(), self.tracing_call_guard.clone()) + TraceApi::new(self.provider.clone(), eth.api, self.tracing_call_guard.clone()) .into_rpc() .into(), ); From 27c65d291fc29c6fc7e9b2a1b8971ad1fa4e1506 Mon Sep 17 00:00:00 2001 From: bemevolent <140763712+bemevolent@users.noreply.github.com> Date: Mon, 31 Jul 2023 14:11:31 -0500 Subject: [PATCH 306/722] fix: track full_transactions propagation when packet size limited (#3993) --- crates/net/network/src/transactions.rs | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/crates/net/network/src/transactions.rs b/crates/net/network/src/transactions.rs index 8c1db9a03bd5..55c434593e53 100644 --- a/crates/net/network/src/transactions.rs +++ b/crates/net/network/src/transactions.rs @@ -267,12 +267,17 @@ where // send hashes of transactions self.network.send_transactions_hashes(*peer_id, new_pooled_hashes); } else { - // send full transactions - self.network.send_transactions(*peer_id, full_transactions.build()); - - for hash in new_pooled_hashes.into_iter_hashes() { - propagated.0.entry(hash).or_default().push(PropagateKind::Full(*peer_id)); + let new_full_transactions = full_transactions.build(); + + for tx in new_full_transactions.iter() { + propagated + .0 + .entry(tx.hash()) + .or_default() + .push(PropagateKind::Full(*peer_id)); } + // send full transactions + self.network.send_transactions(*peer_id, new_full_transactions); } } } From e560b063f93b00d1b770aff81b108a12fd498b36 Mon Sep 17 00:00:00 2001 From: Plamen Hristov Date: Tue, 1 Aug 2023 00:18:59 +0200 Subject: [PATCH 307/722] Fix preState diffMode logic (#4014) --- crates/revm/revm-inspectors/src/tracing/builder/geth.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/revm/revm-inspectors/src/tracing/builder/geth.rs b/crates/revm/revm-inspectors/src/tracing/builder/geth.rs index 2f02b32cfa6f..91db8c2af2d3 100644 --- 
a/crates/revm/revm-inspectors/src/tracing/builder/geth.rs +++ b/crates/revm/revm-inspectors/src/tracing/builder/geth.rs @@ -172,7 +172,7 @@ impl GethTraceBuilder { let account_diffs: Vec<_> = state.into_iter().map(|(addr, acc)| (*addr, &acc.info)).collect(); - if prestate_config.is_diff_mode() { + if !prestate_config.is_diff_mode() { let mut prestate = PreStateMode::default(); for (addr, _) in account_diffs { let db_acc = db.basic(addr)?.unwrap_or_default(); From b28bc8de571214dc2ae889523e89f2e213c3eee5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 1 Aug 2023 01:49:19 +0200 Subject: [PATCH 308/722] chore: add example how to install additional rpc namespace (#4005) --- Cargo.lock | 13 ++ Cargo.toml | 2 + bin/reth/Cargo.toml | 2 + bin/reth/src/cli/mod.rs | 4 +- bin/reth/src/lib.rs | 40 +++++++ bin/reth/src/node/mod.rs | 2 +- crates/rpc/rpc-builder/src/lib.rs | 19 ++- .../Cargo.toml | 14 +++ .../src/main.rs | 112 ++++++++++++++++++ 9 files changed, 203 insertions(+), 5 deletions(-) create mode 100644 examples/additional-rpc-namespace-in-cli/Cargo.toml create mode 100644 examples/additional-rpc-namespace-in-cli/src/main.rs diff --git a/Cargo.lock b/Cargo.lock index 5ba8118176f3..7b0b1b89f0a1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,6 +12,17 @@ dependencies = [ "regex", ] +[[package]] +name = "additional-rpc-namespace-in-cli" +version = "0.0.0" +dependencies = [ + "clap", + "eyre", + "jsonrpsee", + "reth", + "reth-transaction-pool", +] + [[package]] name = "addr2line" version = "0.20.0" @@ -5031,8 +5042,10 @@ dependencies = [ "reth-revm-inspectors", "reth-rlp", "reth-rpc", + "reth-rpc-api", "reth-rpc-builder", "reth-rpc-engine-api", + "reth-rpc-types", "reth-stages", "reth-tasks", "reth-tracing", diff --git a/Cargo.toml b/Cargo.toml index 738636e6b428..5b1333c2fb37 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -47,6 +47,7 @@ members = [ "testing/ef-tests", "examples", + "examples/additional-rpc-namespace-in-cli", ] default-members = ["bin/reth"] @@ -84,6 +85,7 @@ revm-primitives = { git = "https://github.com/bluealloy/revm/", branch = "releas ## reth revm = { version = "3" } revm-primitives = "1.1" +reth = { path = "./bin/reth" } reth-primitives = { path = "./crates/primitives" } reth-interfaces = { path = "./crates/interfaces" } reth-provider = { path = "./crates/storage/provider" } diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 6a48f0be5a30..704955498688 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -26,6 +26,8 @@ reth-blockchain-tree = { path = "../../crates/blockchain-tree" } reth-rpc-engine-api = { path = "../../crates/rpc/rpc-engine-api" } reth-rpc-builder = { path = "../../crates/rpc/rpc-builder" } reth-rpc = { path = "../../crates/rpc/rpc" } +reth-rpc-types = { path = "../../crates/rpc/rpc-types" } +reth-rpc-api = { path = "../../crates/rpc/rpc-api" } reth-rlp.workspace = true reth-network = { path = "../../crates/net/network", features = ["serde"] } reth-network-api.workspace = true diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index 79a1f7b92417..19ffa3fb2153 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -57,7 +57,7 @@ pub struct Cli { verbosity: Verbosity, } -impl Cli { +impl Cli { /// Execute the configured cli command. pub fn run(mut self) -> eyre::Result<()> { // add network name to logs dir @@ -98,7 +98,7 @@ impl Cli { /// Convenience function for parsing CLI options, set up logging and run the chosen command. 
#[inline] pub fn run() -> eyre::Result<()> { - Cli::parse().run() + Cli::<()>::parse().run() } /// Commands to be executed diff --git a/bin/reth/src/lib.rs b/bin/reth/src/lib.rs index 60fb98df12cd..8320b6808321 100644 --- a/bin/reth/src/lib.rs +++ b/bin/reth/src/lib.rs @@ -40,5 +40,45 @@ pub mod test_vectors; pub mod utils; pub mod version; +/// Re-exported from `reth_provider`. +pub mod providers { + pub use reth_provider::*; +} + +/// Re-exported from `reth_tasks`. +pub mod tasks { + pub use reth_tasks::*; +} + +/// Re-exported from `reth_network`. +pub mod network { + pub use reth_network::*; + pub use reth_network_api::{noop, reputation, NetworkInfo, PeerKind, Peers, PeersInfo}; +} + +/// Re-exported from `reth_transaction_pool`. +pub mod transaction_pool { + pub use reth_transaction_pool::*; +} + +/// Re-export of `reth_rpc_*` crates. +pub mod rpc { + + /// Re-exported from `reth_rpc_builder`. + pub mod builder { + pub use reth_rpc_builder::*; + } + + /// Re-exported from `reth_rpc_types`. + pub mod types { + pub use reth_rpc_types::*; + } + + /// Re-exported from `reth_rpc_api`. + pub mod api { + pub use reth_rpc_api::*; + } +} + #[cfg(all(feature = "jemalloc", unix))] use jemallocator as _; diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 76448af826e6..a3fe6f711fed 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -148,7 +148,7 @@ pub struct Command { pruning: PruningArgs, } -impl Command { +impl Command { /// Execute `node` command pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> { info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index e7f617194ec6..dadccb1c7b67 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -731,6 +731,21 @@ impl } } + /// Returns a reference to the provider + pub fn pool(&self) -> &Pool { + &self.pool + } + + /// Returns a reference to the events type + pub fn events(&self) -> &Events { + &self.events + } + + /// Returns a reference to the tasks type + pub fn tasks(&self) -> &Tasks { + &self.executor + } + /// Returns all installed methods pub fn methods(&self) -> Vec { self.modules.values().cloned().collect() @@ -1499,8 +1514,8 @@ impl TransportRpcModules<()> { &mut self, other: impl Into, ) -> Result { - if let Some(ref mut http) = self.http { - return http.merge(other.into()).map(|_| true) + if let Some(ref mut ipc) = self.ipc { + return ipc.merge(other.into()).map(|_| true) } Ok(false) } diff --git a/examples/additional-rpc-namespace-in-cli/Cargo.toml b/examples/additional-rpc-namespace-in-cli/Cargo.toml new file mode 100644 index 000000000000..dadbe58587db --- /dev/null +++ b/examples/additional-rpc-namespace-in-cli/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "additional-rpc-namespace-in-cli" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + +[dependencies] +reth.workspace = true +reth-transaction-pool.workspace = true + +clap = { version = "4", features = ["derive"] } +jsonrpsee = { workspace = true, features = ["server", "macros"] } +eyre = "0.6" \ No newline at end of file diff --git a/examples/additional-rpc-namespace-in-cli/src/main.rs b/examples/additional-rpc-namespace-in-cli/src/main.rs new file mode 100644 index 000000000000..46df9a6bb2be --- /dev/null +++ b/examples/additional-rpc-namespace-in-cli/src/main.rs @@ -0,0 +1,112 @@ +//! Example of how to use additional rpc namespaces in the reth CLI +//! +//! 
Run with +//! +//! ```not_rust +//! cargo run -p additional-rpc-namespace-in-cli -- node --http --ws --enable-ext +//! ``` +//! +//! This installs an additional RPC method `txpoolExt_transactionCount` that can queried via [cast](https://github.com/foundry-rs/foundry) +//! +//! ```sh +//! cast rpc txpoolExt_transactionCount +//! ``` +use clap::Parser; +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +use reth::{ + cli::{ + ext::{RethCliExt, RethRpcConfig, RethRpcServerArgsExt}, + Cli, + }, + network::{NetworkInfo, Peers}, + providers::{ + BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, + EvmEnvProvider, StateProviderFactory, + }, + rpc::builder::{RethModuleRegistry, TransportRpcModules}, + tasks::TaskSpawner, +}; +use reth_transaction_pool::TransactionPool; + +fn main() { + Cli::::parse().run().unwrap(); +} + +/// The type that tells the reth CLI what extensions to use +struct MyRethCliExt; + +impl RethCliExt for MyRethCliExt { + /// This tells the reth CLI to install the `txpool` rpc namespace via `RethCliTxpoolExt` + type RpcExt = RethCliTxpoolExt; +} + +/// Our custom cli args extension that adds one flag to reth default CLI. +#[derive(Debug, Clone, Copy, Default, clap::Args)] +struct RethCliTxpoolExt { + /// CLI flag to enable the txpool extension namespace + #[clap(long)] + pub enable_ext: bool, +} + +impl RethRpcServerArgsExt for RethCliTxpoolExt { + // This is the entrypoint for the CLI to extend the RPC server with custom rpc namespaces. + fn extend_rpc_modules( + &self, + _config: &Conf, + registry: &mut RethModuleRegistry, + modules: &mut TransportRpcModules<()>, + ) -> eyre::Result<()> + where + Conf: RethRpcConfig, + Provider: BlockReaderIdExt + + StateProviderFactory + + EvmEnvProvider + + ChainSpecProvider + + ChangeSetReader + + Clone + + Unpin + + 'static, + Pool: TransactionPool + Clone + 'static, + Network: NetworkInfo + Peers + Clone + 'static, + Tasks: TaskSpawner + Clone + 'static, + Events: CanonStateSubscriptions + Clone + 'static, + { + if !self.enable_ext { + return Ok(()) + } + + // here we get the configured pool type from the CLI. + let pool = registry.pool().clone(); + let ext = TxpoolExt { pool }; + + // now we merge our extension namespace into all configured transports + modules.merge_configured(ext.into_rpc())?; + + println!("txpool extension enabled"); + Ok(()) + } +} + +/// trait interface for a custom rpc namespace: `txpool` +/// +/// This defines an additional namespace where all methods are configured as trait functions. +#[rpc(server, namespace = "txpoolExt")] +pub trait TxpoolExtApi { + /// Returns the number of transactions in the pool. + #[method(name = "transactionCount")] + fn transaction_count(&self) -> RpcResult; +} + +/// The type that implements the `txpool` rpc namespace trait +pub struct TxpoolExt { + pool: Pool, +} + +impl TxpoolExtApiServer for TxpoolExt +where + Pool: TransactionPool + Clone + 'static, +{ + fn transaction_count(&self) -> RpcResult { + Ok(self.pool.pool_size().total) + } +} From 335908c07e37bd7a496d96140e3853457a54b83b Mon Sep 17 00:00:00 2001 From: niko-renko <93560662+niko-renko@users.noreply.github.com> Date: Mon, 31 Jul 2023 20:40:30 -0400 Subject: [PATCH 309/722] Update lib.rs (#4017) --- crates/net/network/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/net/network/src/lib.rs b/crates/net/network/src/lib.rs index 25733cc11001..e8a7b6760a9d 100644 --- a/crates/net/network/src/lib.rs +++ b/crates/net/network/src/lib.rs @@ -17,7 +17,7 @@ //! 
From 335908c07e37bd7a496d96140e3853457a54b83b Mon Sep 17 00:00:00 2001
From: niko-renko <93560662+niko-renko@users.noreply.github.com>
Date: Mon, 31 Jul 2023 20:40:30 -0400
Subject: [PATCH 309/722] Update lib.rs (#4017)

---
 crates/net/network/src/lib.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crates/net/network/src/lib.rs b/crates/net/network/src/lib.rs
index 25733cc11001..e8a7b6760a9d 100644
--- a/crates/net/network/src/lib.rs
+++ b/crates/net/network/src/lib.rs
@@ -17,7 +17,7 @@
 //! Ethereum's networking protocol is specified in [devp2p](https://github.com/ethereum/devp2p).
 //!
 //! In order for a node to join the ethereum p2p network it needs to know what nodes are already
-//! port of that network. This includes public identities (public key) and addresses (where to reach
+//! part of that network. This includes public identities (public key) and addresses (where to reach
 //! them).
 //!
 //! ## Bird's Eye View
From bd28eedd8cfcc9d0dce5811cc5609a2c80267118 Mon Sep 17 00:00:00 2001
From: Dan Cline <6798349+Rjected@users.noreply.github.com>
Date: Tue, 1 Aug 2023 05:34:47 -0400
Subject: [PATCH 310/722] chore: add subkey docs to `StorageEntry` (#4016)

---
 crates/primitives/src/storage.rs | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/crates/primitives/src/storage.rs b/crates/primitives/src/storage.rs
index a0997e6d0d21..07434f4b6fc8 100644
--- a/crates/primitives/src/storage.rs
+++ b/crates/primitives/src/storage.rs
@@ -3,6 +3,8 @@ use reth_codecs::{derive_arbitrary, Compact};
 use serde::{Deserialize, Serialize};

 /// Account storage entry.
+///
+/// `key` is the subkey when used as a value in the `StorageChangeSet` table.
 #[derive_arbitrary(compact)]
 #[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, PartialOrd, Ord)]
 pub struct StorageEntry {
From a1c3a44cedcf00e574822853eacbf3c2823ba651 Mon Sep 17 00:00:00 2001
From: Dan Cline <6798349+Rjected@users.noreply.github.com>
Date: Tue, 1 Aug 2023 05:56:55 -0400
Subject: [PATCH 311/722] chore: fix `PrefixSetMut` doc comment (#4015)

---
 crates/trie/src/prefix_set/mod.rs | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/crates/trie/src/prefix_set/mod.rs b/crates/trie/src/prefix_set/mod.rs
index d760e236d9c9..ac25ab297c05 100644
--- a/crates/trie/src/prefix_set/mod.rs
+++ b/crates/trie/src/prefix_set/mod.rs
@@ -9,8 +9,15 @@ pub use loader::PrefixSetLoader;
 /// This data structure stores a set of `Nibbles` and provides methods to insert
 /// new elements and check whether any existing element has a given prefix.
 ///
-/// Internally, this implementation uses a `BTreeSet` to store the `Nibbles`, which
-/// ensures that they are always sorted and deduplicated.
+/// Internally, this implementation uses a `Vec` and aims to act like a `BTreeSet` in being both
+/// sorted and deduplicated. It does this by keeping a `sorted` flag. The `sorted` flag represents
+/// whether or not the `Vec` is definitely sorted. When a new element is added, it is set to
+/// `false`. The `Vec` is sorted and deduplicated when `sorted` is `false` and:
+/// * An element is being checked for inclusion (`contains`), or
+/// * The set is being converted into an immutable `PrefixSet` (`freeze`)
+///
+/// This means that a `PrefixSet` will always be sorted and deduplicated when constructed from a
+/// `PrefixSetMut`.
/// /// # Examples /// From 3a4419625a5ea176640ea6c0a3670883a60d76a2 Mon Sep 17 00:00:00 2001 From: Resende <17102689+ZePedroResende@users.noreply.github.com> Date: Tue, 1 Aug 2023 11:42:08 +0100 Subject: [PATCH 312/722] feat(rpc): ots_getBlockDetails and ots_getBlockDetailsByHash (#4007) --- crates/rpc/rpc-builder/tests/it/http.rs | 11 +++++------ crates/rpc/rpc-types/src/eth/block.rs | 1 + crates/rpc/rpc-types/src/otterscan.rs | 26 +++++++++++++++++++++++-- crates/rpc/rpc/src/otterscan.rs | 6 ++++-- 4 files changed, 34 insertions(+), 10 deletions(-) diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index 09dbc52cb254..9dbfb2136f02 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -206,12 +206,11 @@ where assert!(is_unimplemented( OtterscanClient::trace_transaction(client, tx_hash).await.err().unwrap() )); - assert!(is_unimplemented( - OtterscanClient::get_block_details(client, block_number,).await.err().unwrap() - )); - assert!(is_unimplemented( - OtterscanClient::get_block_details_by_hash(client, block_hash).await.err().unwrap() - )); + + OtterscanClient::get_block_details(client, block_number).await.unwrap(); + + OtterscanClient::get_block_details_by_hash(client, block_hash).await.unwrap(); + assert!(is_unimplemented( OtterscanClient::get_block_transactions(client, block_number, page_number, page_size,) .await diff --git a/crates/rpc/rpc-types/src/eth/block.rs b/crates/rpc/rpc-types/src/eth/block.rs index 5b07b300878f..fbb6a06b62e6 100644 --- a/crates/rpc/rpc-types/src/eth/block.rs +++ b/crates/rpc/rpc-types/src/eth/block.rs @@ -20,6 +20,7 @@ pub enum BlockTransactions { /// Special case for uncle response. Uncle, } + impl BlockTransactions { /// Check if the enum variant is /// used for an uncle response. 
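The `From<Block>` conversion added in the next hunk derives its `transaction_count` by matching every `BlockTransactions` variant, including the uncle case checked above. A self-contained sketch of that counting logic, using stand-in types so it runs outside the crate (the `Mini*` names and payload types are illustrative, not from the patch):

```rust
// Editor's sketch with stand-in types; mirrors the match performed by the
// `From<Block> for OtsBlock` impl in the following diff.
enum MiniBlockTransactions {
    Full(Vec<String>),     // stand-in for fully hydrated transaction objects
    Hashes(Vec<[u8; 32]>), // stand-in for transaction hashes
    Uncle,                 // uncle responses carry no transaction list
}

fn transaction_count(txs: &MiniBlockTransactions) -> usize {
    match txs {
        MiniBlockTransactions::Full(t) => t.len(),
        MiniBlockTransactions::Hashes(t) => t.len(),
        MiniBlockTransactions::Uncle => 0,
    }
}

fn main() {
    assert_eq!(transaction_count(&MiniBlockTransactions::Hashes(vec![[0u8; 32]; 3])), 3);
    assert_eq!(transaction_count(&MiniBlockTransactions::Uncle), 0);
}
```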
diff --git a/crates/rpc/rpc-types/src/otterscan.rs b/crates/rpc/rpc-types/src/otterscan.rs
index 57e12e8bf123..4c2c57223926 100644
--- a/crates/rpc/rpc-types/src/otterscan.rs
+++ b/crates/rpc/rpc-types/src/otterscan.rs
@@ -1,4 +1,4 @@
-use crate::{Block, Transaction, TransactionReceipt};
+use crate::{Block, BlockTransactions, Rich, Transaction, TransactionReceipt};
 use reth_primitives::{Address, Bytes, U256};
 use serde::{Deserialize, Serialize};

@@ -36,7 +36,7 @@ pub struct TraceEntry {
 }

 /// Internal issuance struct for `BlockDetails` struct
-#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
+#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, Default)]
 #[serde(rename_all = "camelCase")]
 pub struct InternalIssuance {
 block_reward: U256,
@@ -95,3 +95,25 @@ pub struct ContractCreator {
 tx: Transaction,
 creator: Address,
 }
+
+impl From<Block> for OtsBlock {
+ fn from(block: Block) -> Self {
+ let transaction_count = match &block.transactions {
+ BlockTransactions::Full(t) => t.len(),
+ BlockTransactions::Hashes(t) => t.len(),
+ BlockTransactions::Uncle => 0,
+ };
+
+ Self { block, transaction_count }
+ }
+}
+
+impl From<Rich<Block>> for BlockDetails {
+ fn from(rich_block: Rich<Block>) -> Self {
+ Self {
+ block: rich_block.inner.into(),
+ issuance: Default::default(),
+ total_fees: U256::default(),
+ }
+ }
+}
diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs
index 767b3657ad79..7afcf2d545ae 100644
--- a/crates/rpc/rpc/src/otterscan.rs
+++ b/crates/rpc/rpc/src/otterscan.rs
@@ -59,12 +59,14 @@ where
 &self,
 block_number: BlockNumberOrTag,
 ) -> RpcResult<Option<BlockDetails>> {
- Err(internal_rpc_err("unimplemented"))
+ let block = self.eth.block_by_number(block_number, true).await?;
+ Ok(block.map(Into::into))
 }

 /// Handler for `getBlockDetailsByHash`
 async fn get_block_details_by_hash(&self, block_hash: H256) -> RpcResult<Option<BlockDetails>> {
- Err(internal_rpc_err("unimplemented"))
+ let block = self.eth.block_by_hash(block_hash, true).await?;
+ Ok(block.map(Into::into))
 }

 /// Handler for `getBlockTransactions`
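The pruner commit that follows wires `sender_recovery` through a `deserialize_with` guard, `deserialize_opt_prune_mode_with_min_blocks::<64, _>`, whose body sits outside the hunk. A minimal sketch of what such a guard can look like; the helper name and the 64-block floor come from the patch, while the error messages and exact rejection rules are assumptions:

```rust
use serde::{de::Error as _, Deserialize, Deserializer};

// Editor's sketch, not the patch's actual body: reject configurations that
// would prune sender data still needed for reorg handling. `PruneMode` is the
// crate's own type and is assumed to be in scope (it derives `Deserialize`).
fn deserialize_opt_prune_mode_with_min_blocks<'de, const MIN_BLOCKS: u64, D>(
    deserializer: D,
) -> Result<Option<PruneMode>, D::Error>
where
    D: Deserializer<'de>,
{
    let prune_mode = Option::<PruneMode>::deserialize(deserializer)?;
    match prune_mode {
        // `Full` would leave no recent blocks of senders behind.
        Some(PruneMode::Full) => Err(D::Error::custom(format!(
            "prune mode `full` is not allowed, keep at least {MIN_BLOCKS} blocks"
        ))),
        // A distance below the floor is equally unsafe.
        Some(PruneMode::Distance(distance)) if distance < MIN_BLOCKS => Err(D::Error::custom(
            format!("prune distance must be at least {MIN_BLOCKS} blocks"),
        )),
        _ => Ok(prune_mode),
    }
}
```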
From 4688fd2ae0f23d86c4dcbb2d25ad4da96436e436 Mon Sep 17 00:00:00 2001
From: Alexey Shekhirin
Date: Tue, 1 Aug 2023 12:30:02 +0100
Subject: [PATCH 313/722] feat(pruner): transaction senders (#3912)

---
 crates/primitives/src/prune/target.rs | 9 +-
 crates/prune/src/pruner.rs | 139 ++++++++++++++++++--
 crates/stages/src/stages/sender_recovery.rs | 78 ++++++++++-
 crates/stages/src/test_utils/test_db.rs | 12 ++
 4 files changed, 221 insertions(+), 17 deletions(-)

diff --git a/crates/primitives/src/prune/target.rs b/crates/primitives/src/prune/target.rs
index af6781897c26..52e0806d33c3 100644
--- a/crates/primitives/src/prune/target.rs
+++ b/crates/primitives/src/prune/target.rs
@@ -10,7 +10,12 @@ use serde::{Deserialize, Serialize};
 #[serde(default)]
 pub struct PruneModes {
 /// Sender Recovery pruning configuration.
- #[serde(skip_serializing_if = "Option::is_none")]
+ // TODO(alexey): removing min blocks restriction is possible if we start calculating the senders
+ // dynamically on blockchain tree unwind.
+ #[serde(
+ skip_serializing_if = "Option::is_none",
+ deserialize_with = "deserialize_opt_prune_mode_with_min_blocks::<64, _>"
+ )]
 pub sender_recovery: Option<PruneMode>,

 /// Transaction Lookup pruning configuration.
 #[serde(skip_serializing_if = "Option::is_none")]
@@ -126,7 +131,7 @@ impl PruneModes {
 }

 impl_prune_parts!(
- (sender_recovery, "SenderRecovery", None),
+ (sender_recovery, "SenderRecovery", Some(64)),
 (transaction_lookup, "TransactionLookup", None),
 (receipts, "Receipts", Some(64)),
 (account_history, "AccountHistory", Some(64)),
diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs
index 3dae8bcb04f2..4a50faf26842 100644
--- a/crates/prune/src/pruner.rs
+++ b/crates/prune/src/pruner.rs
@@ -22,11 +22,12 @@ pub type PrunerWithResult<DB> = (Pruner<DB>, PrunerResult);
 pub struct BatchSizes {
 receipts: usize,
 transaction_lookup: usize,
+ transaction_senders: usize,
 }

 impl Default for BatchSizes {
 fn default() -> Self {
- Self { receipts: 10000, transaction_lookup: 10000 }
+ Self { receipts: 10000, transaction_lookup: 10000, transaction_senders: 10000 }
 }
 }
@@ -83,6 +84,12 @@ impl<DB: Database> Pruner<DB> {
 self.prune_transaction_lookup(&provider, to_block, prune_mode)?;
 }

+ if let Some((to_block, prune_mode)) =
+ self.modes.prune_target_block_sender_recovery(tip_block_number)?
+ {
+ self.prune_transaction_senders(&provider, to_block, prune_mode)?;
+ }
+
 provider.commit()?;
 self.last_pruned_block_number = Some(tip_block_number);
@@ -124,13 +131,16 @@
 prune_part: PrunePart,
 to_block: BlockNumber,
 ) -> reth_interfaces::Result<Option<Range<TxNumber>>> {
- let from_tx_num = provider
- .get_prune_checkpoint(prune_part)?
- .map(|checkpoint| provider.block_body_indices(checkpoint.block_number + 1))
- .transpose()?
- .flatten()
- .map(|body| body.first_tx_num)
- .unwrap_or_default();
+ let checkpoint = provider.get_prune_checkpoint(prune_part)?.unwrap_or(PruneCheckpoint {
+ block_number: 0, // No checkpoint, fresh pruning
+ prune_mode: PruneMode::Full, // Doesn't matter in this case, can be anything
+ });
+ // Get first transaction of the next block after the highest pruned one
+ let from_tx_num =
+ provider.block_body_indices(checkpoint.block_number + 1)?.map(|body| body.first_tx_num);
+ // If no block body index is found, the DB is either corrupted or we've already pruned up to
+ // the latest block, so there's nothing to prune now.
+ let Some(from_tx_num) = from_tx_num else { return Ok(None) };

 let to_tx_num = match provider.block_body_indices(to_block)? {
 Some(body) => body,
@@ -200,7 +210,7 @@
 )?
{ + Some(range) => range, + None => { + trace!(target: "pruner", "No transaction senders to prune"); + return Ok(()) + } + }; + let total = range.clone().count(); + + let mut processed = 0; + provider.prune_table_in_batches::( + range, + self.batch_sizes.transaction_senders, + |entries| { + processed += entries; + trace!( + target: "pruner", + %entries, + progress = format!("{:.1}%", 100.0 * processed as f64 / total as f64), + "Pruned transaction senders" + ); + }, + )?; + + provider.save_prune_checkpoint( + PrunePart::SenderRecovery, + PruneCheckpoint { block_number: to_block, prune_mode }, + )?; + + Ok(()) + } } #[cfg(test)] @@ -409,4 +463,71 @@ mod tests { // ended last time test_prune(20); } + + #[test] + fn prune_transaction_senders() { + let tx = TestTransaction::default(); + let mut rng = generators::rng(); + + let blocks = random_block_range(&mut rng, 0..=100, H256::zero(), 0..10); + tx.insert_blocks(blocks.iter(), None).expect("insert blocks"); + + let mut transaction_senders = Vec::new(); + for block in &blocks { + for transaction in &block.body { + transaction_senders.push(( + transaction_senders.len() as u64, + transaction.recover_signer().expect("recover signer"), + )); + } + } + tx.insert_transaction_senders(transaction_senders).expect("insert transaction senders"); + + assert_eq!( + tx.table::().unwrap().len(), + blocks.iter().map(|block| block.body.len()).sum::() + ); + assert_eq!( + tx.table::().unwrap().len(), + tx.table::().unwrap().len() + ); + + let test_prune = |to_block: BlockNumber| { + let prune_mode = PruneMode::Before(to_block); + let pruner = Pruner::new( + tx.inner_raw(), + MAINNET.clone(), + 5, + 0, + PruneModes { sender_recovery: Some(prune_mode), ..Default::default() }, + BatchSizes { + // Less than total amount of blocks to prune to test the batching logic + transaction_senders: 10, + ..Default::default() + }, + ); + + let provider = tx.inner_rw(); + assert_matches!( + pruner.prune_transaction_senders(&provider, to_block, prune_mode), + Ok(()) + ); + provider.commit().expect("commit"); + + assert_eq!( + tx.table::().unwrap().len(), + blocks[to_block as usize + 1..].iter().map(|block| block.body.len()).sum::() + ); + assert_eq!( + tx.inner().get_prune_checkpoint(PrunePart::SenderRecovery).unwrap(), + Some(PruneCheckpoint { block_number: to_block, prune_mode }) + ); + }; + + // Pruning first time ever, no previous checkpoint is present + test_prune(10); + // Prune second time, previous checkpoint is present, should continue pruning from where + // ended last time + test_prune(20); + } } diff --git a/crates/stages/src/stages/sender_recovery.rs b/crates/stages/src/stages/sender_recovery.rs index 8872c8138ffa..9f72c69d6e3b 100644 --- a/crates/stages/src/stages/sender_recovery.rs +++ b/crates/stages/src/stages/sender_recovery.rs @@ -11,9 +11,11 @@ use reth_interfaces::consensus; use reth_primitives::{ keccak256, stage::{EntitiesCheckpoint, StageCheckpoint, StageId}, - TransactionSignedNoHash, TxNumber, H160, + PrunePart, TransactionSignedNoHash, TxNumber, H160, +}; +use reth_provider::{ + BlockReader, DatabaseProviderRW, HeaderProvider, ProviderError, PruneCheckpointReader, }; -use reth_provider::{BlockReader, DatabaseProviderRW, HeaderProvider, ProviderError}; use std::fmt::Debug; use thiserror::Error; use tokio::sync::mpsc; @@ -207,9 +209,20 @@ fn recover_sender( fn stage_checkpoint( provider: &DatabaseProviderRW<'_, &DB>, -) -> Result { +) -> Result { + let pruned_entries = provider + .get_prune_checkpoint(PrunePart::SenderRecovery)? 
+ .map(|checkpoint| provider.block_body_indices(checkpoint.block_number)) + .transpose()? + .flatten() + // +1 is needed because TxNumber is 0-indexed + .map(|body| body.last_tx_num() + 1) + .unwrap_or_default(); Ok(EntitiesCheckpoint { - processed: provider.tx_ref().entries::()? as u64, + // If `TxSenders` table was pruned, we will have a number of entries in it not matching + // the actual number of processed transactions. To fix that, we add the number of pruned + // `TxSenders` entries. + processed: provider.tx_ref().entries::()? as u64 + pruned_entries, total: provider.tx_ref().entries::()? as u64, }) } @@ -239,9 +252,10 @@ mod tests { generators::{random_block, random_block_range}, }; use reth_primitives::{ - stage::StageUnitCheckpoint, BlockNumber, SealedBlock, TransactionSigned, H256, + stage::StageUnitCheckpoint, BlockNumber, PruneCheckpoint, PruneMode, SealedBlock, + TransactionSigned, H256, MAINNET, }; - use reth_provider::TransactionsProvider; + use reth_provider::{ProviderFactory, PruneCheckpointWriter, TransactionsProvider}; use super::*; use crate::test_utils::{ @@ -366,6 +380,58 @@ mod tests { assert!(runner.validate_execution(first_input, result.ok()).is_ok(), "validation failed"); } + #[test] + fn stage_checkpoint_pruned() { + let tx = TestTransaction::default(); + let mut rng = generators::rng(); + + let blocks = random_block_range(&mut rng, 0..=100, H256::zero(), 0..10); + tx.insert_blocks(blocks.iter(), None).expect("insert blocks"); + + let max_pruned_block = 30; + let max_processed_block = 70; + + let mut tx_senders = Vec::new(); + let mut tx_number = 0; + for block in &blocks[..=max_processed_block] { + for transaction in &block.body { + if block.number > max_pruned_block { + tx_senders + .push((tx_number, transaction.recover_signer().expect("recover signer"))); + } + tx_number += 1; + } + } + tx.insert_transaction_senders(tx_senders).expect("insert tx hash numbers"); + + let provider = tx.inner_rw(); + provider + .save_prune_checkpoint( + PrunePart::SenderRecovery, + PruneCheckpoint { + block_number: max_pruned_block as BlockNumber, + prune_mode: PruneMode::Full, + }, + ) + .expect("save stage checkpoint"); + provider.commit().expect("commit"); + + let db = tx.inner_raw(); + let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone()); + let provider = factory.provider_rw().expect("provider rw"); + + assert_eq!( + stage_checkpoint(&provider).expect("stage checkpoint"), + EntitiesCheckpoint { + processed: blocks[..=max_processed_block] + .iter() + .map(|block| block.body.len() as u64) + .sum::(), + total: blocks.iter().map(|block| block.body.len() as u64).sum::() + } + ); + } + struct SenderRecoveryTestRunner { tx: TestTransaction, threshold: u64, diff --git a/crates/stages/src/test_utils/test_db.rs b/crates/stages/src/test_utils/test_db.rs index 425cf1d7120c..a4df2b207f91 100644 --- a/crates/stages/src/test_utils/test_db.rs +++ b/crates/stages/src/test_utils/test_db.rs @@ -293,6 +293,18 @@ impl TestTransaction { }) } + pub fn insert_transaction_senders(&self, transaction_senders: I) -> Result<(), DbError> + where + I: IntoIterator, + { + self.commit(|tx| { + transaction_senders.into_iter().try_for_each(|(tx_num, sender)| { + // Insert into receipts table. + tx.put::(tx_num, sender) + }) + }) + } + /// Insert collection of ([Address], [Account]) into corresponding tables. 
pub fn insert_accounts_and_storages(&self, accounts: I) -> Result<(), DbError> where From a371cb8bc133bbc014cad4e43f9a26b8ba4f5522 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 1 Aug 2023 14:44:47 +0300 Subject: [PATCH 314/722] fix(txpool): clone impl for `FullTransactionEvent` (#4020) --- crates/transaction-pool/src/pool/events.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/crates/transaction-pool/src/pool/events.rs b/crates/transaction-pool/src/pool/events.rs index 03856aea27eb..3556337a6dff 100644 --- a/crates/transaction-pool/src/pool/events.rs +++ b/crates/transaction-pool/src/pool/events.rs @@ -39,10 +39,17 @@ pub enum FullTransactionEvent { impl Clone for FullTransactionEvent { fn clone(&self) -> Self { match self { + Self::Pending(hash) => Self::Pending(*hash), + Self::Queued(hash) => Self::Queued(*hash), + Self::Mined { tx_hash, block_hash } => { + Self::Mined { tx_hash: *tx_hash, block_hash: *block_hash } + } Self::Replaced { transaction, replaced_by } => { Self::Replaced { transaction: Arc::clone(transaction), replaced_by: *replaced_by } } - other => other.clone(), + Self::Discarded(hash) => Self::Discarded(*hash), + Self::Invalid(hash) => Self::Invalid(*hash), + Self::Propagated(propagated) => Self::Propagated(Arc::clone(propagated)), } } } From 72e6a1ec9fd980f0f86f5cbbd26f6c1135435b2f Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 1 Aug 2023 13:17:51 +0100 Subject: [PATCH 315/722] chore(stages): remove unnecessary prune configuration from history index stages (#4018) --- .../src/stages/index_account_history.rs | 30 +++----------- .../src/stages/index_storage_history.rs | 30 +++----------- crates/stages/src/stages/mod.rs | 39 ++++++------------- 3 files changed, 23 insertions(+), 76 deletions(-) diff --git a/crates/stages/src/stages/index_account_history.rs b/crates/stages/src/stages/index_account_history.rs index d259ec3f8152..fe0b6d3b404c 100644 --- a/crates/stages/src/stages/index_account_history.rs +++ b/crates/stages/src/stages/index_account_history.rs @@ -1,9 +1,6 @@ use crate::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use reth_db::database::Database; -use reth_primitives::{ - stage::{StageCheckpoint, StageId}, - PruneModes, -}; +use reth_primitives::stage::{StageCheckpoint, StageId}; use reth_provider::{AccountExtReader, DatabaseProviderRW, HistoryWriter}; use std::fmt::Debug; @@ -15,20 +12,18 @@ pub struct IndexAccountHistoryStage { /// Number of blocks after which the control /// flow will be returned to the pipeline for commit. pub commit_threshold: u64, - /// Pruning configuration. - pub prune_modes: PruneModes, } impl IndexAccountHistoryStage { /// Create new instance of [IndexAccountHistoryStage]. pub fn new(commit_threshold: u64) -> Self { - Self { commit_threshold, prune_modes: PruneModes::default() } + Self { commit_threshold } } } impl Default for IndexAccountHistoryStage { fn default() -> Self { - Self { commit_threshold: 100_000, prune_modes: PruneModes::default() } + Self { commit_threshold: 100_000 } } } @@ -43,16 +38,8 @@ impl Stage for IndexAccountHistoryStage { async fn execute( &mut self, provider: &DatabaseProviderRW<'_, &DB>, - mut input: ExecInput, + input: ExecInput, ) -> Result { - if let Some((target_prunable_block, _)) = - self.prune_modes.prune_target_block_account_history(input.target())? 
- { - if target_prunable_block > input.checkpoint().block_number { - input.checkpoint = Some(StageCheckpoint::new(target_prunable_block)); - } - } - if input.target_reached() { return Ok(ExecOutput::done(input.checkpoint())) } @@ -385,16 +372,11 @@ mod tests { struct IndexAccountHistoryTestRunner { pub(crate) tx: TestTransaction, commit_threshold: u64, - prune_modes: PruneModes, } impl Default for IndexAccountHistoryTestRunner { fn default() -> Self { - Self { - tx: TestTransaction::default(), - commit_threshold: 1000, - prune_modes: PruneModes::default(), - } + Self { tx: TestTransaction::default(), commit_threshold: 1000 } } } @@ -406,7 +388,7 @@ mod tests { } fn stage(&self) -> Self::S { - Self::S { commit_threshold: self.commit_threshold, prune_modes: self.prune_modes } + Self::S { commit_threshold: self.commit_threshold } } } diff --git a/crates/stages/src/stages/index_storage_history.rs b/crates/stages/src/stages/index_storage_history.rs index 4759cd82c594..a17c5f14e7c9 100644 --- a/crates/stages/src/stages/index_storage_history.rs +++ b/crates/stages/src/stages/index_storage_history.rs @@ -1,9 +1,6 @@ use crate::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use reth_db::{database::Database, models::BlockNumberAddress}; -use reth_primitives::{ - stage::{StageCheckpoint, StageId}, - PruneModes, -}; +use reth_primitives::stage::{StageCheckpoint, StageId}; use reth_provider::{DatabaseProviderRW, HistoryWriter, StorageReader}; use std::fmt::Debug; @@ -15,20 +12,18 @@ pub struct IndexStorageHistoryStage { /// Number of blocks after which the control /// flow will be returned to the pipeline for commit. pub commit_threshold: u64, - /// Pruning configuration. - pub prune_modes: PruneModes, } impl IndexStorageHistoryStage { /// Create new instance of [IndexStorageHistoryStage]. pub fn new(commit_threshold: u64) -> Self { - Self { commit_threshold, prune_modes: PruneModes::default() } + Self { commit_threshold } } } impl Default for IndexStorageHistoryStage { fn default() -> Self { - Self { commit_threshold: 100_000, prune_modes: PruneModes::default() } + Self { commit_threshold: 100_000 } } } @@ -43,16 +38,8 @@ impl Stage for IndexStorageHistoryStage { async fn execute( &mut self, provider: &DatabaseProviderRW<'_, &DB>, - mut input: ExecInput, + input: ExecInput, ) -> Result { - if let Some((target_prunable_block, _)) = - self.prune_modes.prune_target_block_storage_history(input.target())? 
- { - if target_prunable_block > input.checkpoint().block_number { - input.checkpoint = Some(StageCheckpoint::new(target_prunable_block)); - } - } - if input.target_reached() { return Ok(ExecOutput::done(input.checkpoint())) } @@ -399,16 +386,11 @@ mod tests { struct IndexStorageHistoryTestRunner { pub(crate) tx: TestTransaction, commit_threshold: u64, - prune_modes: PruneModes, } impl Default for IndexStorageHistoryTestRunner { fn default() -> Self { - Self { - tx: TestTransaction::default(), - commit_threshold: 1000, - prune_modes: PruneModes::default(), - } + Self { tx: TestTransaction::default(), commit_threshold: 1000 } } } @@ -420,7 +402,7 @@ mod tests { } fn stage(&self) -> Self::S { - Self::S { commit_threshold: self.commit_threshold, prune_modes: self.prune_modes } + Self::S { commit_threshold: self.commit_threshold } } } diff --git a/crates/stages/src/stages/mod.rs b/crates/stages/src/stages/mod.rs index 3cf295abeb42..d1086f95af30 100644 --- a/crates/stages/src/stages/mod.rs +++ b/crates/stages/src/stages/mod.rs @@ -154,36 +154,19 @@ mod tests { ); // Check AccountHistory - let mut acc_indexing_stage = - IndexAccountHistoryStage { prune_modes, ..Default::default() }; - - if let Some(PruneMode::Full) = prune_modes.account_history { - // Full is not supported - assert!(acc_indexing_stage.execute(&provider, input).await.is_err()); - } else { - acc_indexing_stage.execute(&provider, input).await.unwrap(); - let mut account_history: Cursor<'_, RW, AccountHistory> = - provider.tx_ref().cursor_read::().unwrap(); - assert_eq!(account_history.walk(None).unwrap().count(), expect_num_acc_changesets); - } + let mut acc_indexing_stage = IndexAccountHistoryStage::default(); + acc_indexing_stage.execute(&provider, input).await.unwrap(); + let mut account_history: Cursor<'_, RW, AccountHistory> = + provider.tx_ref().cursor_read::().unwrap(); + assert_eq!(account_history.walk(None).unwrap().count(), expect_num_acc_changesets); // Check StorageHistory - let mut storage_indexing_stage = - IndexStorageHistoryStage { prune_modes, ..Default::default() }; - - if let Some(PruneMode::Full) = prune_modes.storage_history { - // Full is not supported - assert!(acc_indexing_stage.execute(&provider, input).await.is_err()); - } else { - storage_indexing_stage.execute(&provider, input).await.unwrap(); - - let mut storage_history = - provider.tx_ref().cursor_read::().unwrap(); - assert_eq!( - storage_history.walk(None).unwrap().count(), - expect_num_storage_changesets - ); - } + let mut storage_indexing_stage = IndexStorageHistoryStage::default(); + storage_indexing_stage.execute(&provider, input).await.unwrap(); + + let mut storage_history = + provider.tx_ref().cursor_read::().unwrap(); + assert_eq!(storage_history.walk(None).unwrap().count(), expect_num_storage_changesets); }; // In an unpruned configuration there is 1 receipt, 3 changed accounts and 1 changed From 4b0f4ec67e06b955e5a9bb9cf38a9ae8ee9eed56 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 1 Aug 2023 14:19:08 +0200 Subject: [PATCH 316/722] fix: record selfdstructs properly (#3921) --- .../src/tracing/builder/parity.rs | 66 +++++++++++++++++-- .../revm/revm-inspectors/src/tracing/types.rs | 44 ++++++++++--- 2 files changed, 97 insertions(+), 13 deletions(-) diff --git a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs index 7c3720621bc8..fb18366424fa 100644 --- a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs +++ 
b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs @@ -217,6 +217,25 @@ impl ParityTraceBuilder { if with_traces { let trace = node.parity_transaction_trace(trace_address); traces.push(trace); + + // check if the trace node is a selfdestruct + if node.is_selfdestruct() { + // selfdestructs are not recorded as individual call traces but are derived from + // the call trace and are added as additional `TransactionTrace` objects in the + // trace array + let addr = { + let last = traces.last_mut().expect("exists"); + let mut addr = last.trace_address.clone(); + addr.push(last.subtraces); + // need to account for the additional selfdestruct trace + last.subtraces += 1; + addr + }; + + if let Some(trace) = node.parity_selfdestruct_trace(addr) { + traces.push(trace); + } + } } if with_diff { node.parity_update_state_diff(&mut diff); @@ -232,11 +251,15 @@ impl ParityTraceBuilder { /// Returns an iterator over all recorded traces for `trace_transaction` pub fn into_transaction_traces_iter(self) -> impl Iterator { let trace_addresses = self.trace_addresses(); - self.nodes - .into_iter() - .zip(trace_addresses) - .filter(|(node, _)| !node.is_precompile()) - .map(|(node, trace_address)| node.parity_transaction_trace(trace_address)) + TransactionTraceIter { + next_selfdestruct: None, + iter: self + .nodes + .into_iter() + .zip(trace_addresses) + .filter(|(node, _)| !node.is_precompile()) + .map(|(node, trace_address)| (node.parity_transaction_trace(trace_address), node)), + } } /// Returns the raw traces of the transaction @@ -344,6 +367,39 @@ impl ParityTraceBuilder { } } +/// An iterator for [TransactionTrace]s +/// +/// This iterator handles additional selfdestruct actions based on the last emitted +/// [TransactionTrace], since selfdestructs are not recorded as individual call traces but are +/// derived from recorded call +struct TransactionTraceIter { + iter: Iter, + next_selfdestruct: Option, +} + +impl Iterator for TransactionTraceIter +where + Iter: Iterator, +{ + type Item = TransactionTrace; + + fn next(&mut self) -> Option { + if let Some(selfdestruct) = self.next_selfdestruct.take() { + return Some(selfdestruct) + } + let (mut trace, node) = self.iter.next()?; + if node.is_selfdestruct() { + // since selfdestructs are emitted as additional trace, increase the trace count + let mut addr = trace.trace_address.clone(); + addr.push(trace.subtraces); + // need to account for the additional selfdestruct trace + trace.subtraces += 1; + self.next_selfdestruct = node.parity_selfdestruct_trace(addr); + } + Some(trace) + } +} + /// addresses are presorted via breadth first walk thru [CallTraceNode]s, this can be done by a /// walker in [crate::tracing::builder::walker] /// diff --git a/crates/revm/revm-inspectors/src/tracing/types.rs b/crates/revm/revm-inspectors/src/tracing/types.rs index 83c720dd2815..2739517dd480 100644 --- a/crates/revm/revm-inspectors/src/tracing/types.rs +++ b/crates/revm/revm-inspectors/src/tracing/types.rs @@ -297,6 +297,12 @@ impl CallTraceNode { self.trace.status } + /// Returns true if the call was a selfdestruct + #[inline] + pub(crate) fn is_selfdestruct(&self) -> bool { + self.status() == InstructionResult::SelfDestruct + } + /// Updates the values of the state diff pub(crate) fn parity_update_state_diff(&self, diff: &mut StateDiff) { let addr = self.trace.address; @@ -348,9 +354,7 @@ impl CallTraceNode { /// Converts this node into a parity `TransactionTrace` pub(crate) fn parity_transaction_trace(&self, trace_address: Vec) -> TransactionTrace { let action = 
self.parity_action(); - let result = if action.is_selfdestruct() || - (self.trace.is_error() && !self.trace.is_revert()) - { + let result = if self.trace.is_error() && !self.trace.is_revert() { // if the trace is a selfdestruct or an error that is not a revert, the result is None None } else { @@ -377,15 +381,39 @@ impl CallTraceNode { } } - /// Returns the `Action` for a parity trace - pub(crate) fn parity_action(&self) -> Action { - if self.status() == InstructionResult::SelfDestruct { - return Action::Selfdestruct(SelfdestructAction { + /// If the trace is a selfdestruct, returns the `Action` for a parity trace. + pub(crate) fn parity_selfdestruct_action(&self) -> Option { + if self.is_selfdestruct() { + Some(Action::Selfdestruct(SelfdestructAction { address: self.trace.address, refund_address: self.trace.selfdestruct_refund_target.unwrap_or_default(), balance: self.trace.value, - }) + })) + } else { + None } + } + + /// If the trace is a selfdestruct, returns the `TransactionTrace` for a parity trace. + pub(crate) fn parity_selfdestruct_trace( + &self, + trace_address: Vec, + ) -> Option { + let trace = self.parity_selfdestruct_action()?; + Some(TransactionTrace { + action: trace, + error: None, + result: None, + trace_address, + subtraces: 0, + }) + } + + /// Returns the `Action` for a parity trace. + /// + /// Caution: This does not include the selfdestruct action, if the trace is a selfdestruct, + /// since those are handled in addition to the call action. + pub(crate) fn parity_action(&self) -> Action { match self.kind() { CallKind::Call | CallKind::StaticCall | CallKind::CallCode | CallKind::DelegateCall => { Action::Call(CallAction { From 124960154053416c54ea146e8d87acc210d18b00 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 1 Aug 2023 15:22:24 +0300 Subject: [PATCH 317/722] test(txpool): listener it tests (#4019) --- Cargo.lock | 1 + crates/transaction-pool/Cargo.toml | 1 + crates/transaction-pool/tests/it/listeners.rs | 39 +++++++++++++++++++ crates/transaction-pool/tests/it/main.rs | 3 ++ 4 files changed, 44 insertions(+) create mode 100644 crates/transaction-pool/tests/it/listeners.rs diff --git a/Cargo.lock b/Cargo.lock index 7b0b1b89f0a1..22c9001e7d36 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5927,6 +5927,7 @@ name = "reth-transaction-pool" version = "0.1.0-alpha.4" dependencies = [ "aquamarine", + "assert_matches", "async-trait", "auto_impl", "bitflags 1.3.2", diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 0275c0313d9f..dadfedff611e 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -51,6 +51,7 @@ paste = "1.0" rand = "0.8" proptest = "1.0" criterion = "0.5" +assert_matches = "1.5" [features] default = ["serde"] diff --git a/crates/transaction-pool/tests/it/listeners.rs b/crates/transaction-pool/tests/it/listeners.rs new file mode 100644 index 000000000000..1c68d89ee045 --- /dev/null +++ b/crates/transaction-pool/tests/it/listeners.rs @@ -0,0 +1,39 @@ +use assert_matches::assert_matches; +use reth_transaction_pool::{ + test_utils::{testing_pool, MockTransactionFactory}, + FullTransactionEvent, TransactionEvent, TransactionOrigin, TransactionPool, +}; +use tokio_stream::StreamExt; + +#[tokio::test(flavor = "multi_thread")] +async fn txpool_listener_by_hash() { + let txpool = testing_pool(); + let mut mock_tx_factory = MockTransactionFactory::default(); + let transaction = mock_tx_factory.create_eip1559(); + + let result = txpool + 
.add_transaction_and_subscribe(TransactionOrigin::External, transaction.transaction.clone()) + .await; + assert_matches!(result, Ok(_)); + + let mut events = result.unwrap(); + assert_matches!(events.next().await, Some(TransactionEvent::Pending)); +} + +#[tokio::test(flavor = "multi_thread")] +async fn txpool_listener_all() { + let txpool = testing_pool(); + let mut mock_tx_factory = MockTransactionFactory::default(); + let transaction = mock_tx_factory.create_eip1559(); + + let mut all_tx_events = txpool.all_transactions_event_listener(); + + let added_result = + txpool.add_transaction(TransactionOrigin::External, transaction.transaction.clone()).await; + assert_matches!(added_result, Ok(hash) if hash == transaction.transaction.get_hash()); + + assert_matches!( + all_tx_events.next().await, + Some(FullTransactionEvent::Pending(hash)) if hash == transaction.transaction.get_hash() + ); +} diff --git a/crates/transaction-pool/tests/it/main.rs b/crates/transaction-pool/tests/it/main.rs index 1a707e615504..1b91bc6d8c70 100644 --- a/crates/transaction-pool/tests/it/main.rs +++ b/crates/transaction-pool/tests/it/main.rs @@ -1,3 +1,6 @@ //! transaction-pool integration tests +#[cfg(feature = "test-utils")] +mod listeners; + fn main() {} From 9430800d2eec08463bc55b52f578d0fccbc58b21 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 1 Aug 2023 15:30:38 +0100 Subject: [PATCH 318/722] fix(primitives): nothing to prune situations for `PruneModes` (#4021) --- crates/primitives/src/prune/target.rs | 54 ++++++++++----------------- 1 file changed, 19 insertions(+), 35 deletions(-) diff --git a/crates/primitives/src/prune/target.rs b/crates/primitives/src/prune/target.rs index 52e0806d33c3..939de0f3a227 100644 --- a/crates/primitives/src/prune/target.rs +++ b/crates/primitives/src/prune/target.rs @@ -11,7 +11,7 @@ use serde::{Deserialize, Serialize}; pub struct PruneModes { /// Sender Recovery pruning configuration. // TODO(alexey): removing min blocks restriction is possible if we start calculating the senders - // dynamically on blockchain tree unwind. + // dynamically on blockchain tree unwind. #[serde( skip_serializing_if = "Option::is_none", deserialize_with = "deserialize_opt_prune_mode_with_min_blocks::<64, _>" @@ -41,12 +41,12 @@ pub struct PruneModes { } macro_rules! impl_prune_parts { - ($(($part:ident, $human_part:expr, $min_blocks:expr)),+) => { + ($(($part:ident, $variant:ident, $min_blocks:expr)),+) => { $( paste! { #[doc = concat!( "Check if ", - $human_part, + stringify!($variant), " should be pruned at the target block according to the provided tip." )] pub fn [](&self, block: BlockNumber, tip: BlockNumber) -> bool { @@ -62,16 +62,20 @@ macro_rules! impl_prune_parts { paste! { #[doc = concat!( "Returns block up to which ", - $human_part, + stringify!($variant), " pruning needs to be done, inclusive, according to the provided tip." 
)] pub fn [](&self, tip: BlockNumber) -> Result, PrunePartError> { - match &self.$part { - Some(mode) => - match self.prune_target_block(mode, tip, $min_blocks) { - Some(block) => Ok(Some((block, *mode))), - None => Err(PrunePartError::Configuration(PrunePart::[<$human_part>])) - } + let min_blocks: u64 = $min_blocks.unwrap_or_default(); + match self.$part { + Some(mode) => Ok(match mode { + PruneMode::Full if min_blocks == 0 => Some((tip, mode)), + PruneMode::Distance(distance) if distance > tip => None, // Nothing to prune yet + PruneMode::Distance(distance) if distance >= min_blocks => Some((tip - distance, mode)), + PruneMode::Before(n) if n > tip => None, // Nothing to prune yet + PruneMode::Before(n) if tip - n >= min_blocks => Some((n - 1, mode)), + _ => return Err(PrunePartError::Configuration(PrunePart::$variant)), + }), None => Ok(None) } } @@ -110,31 +114,11 @@ impl PruneModes { } } - /// Returns block up to which pruning needs to be done, inclusive, according to the provided - /// prune mode, tip block number and minimum number of blocks allowed to be pruned. - pub fn prune_target_block( - &self, - mode: &PruneMode, - tip: BlockNumber, - min_blocks: Option, - ) -> Option { - match mode { - PruneMode::Full if min_blocks.unwrap_or_default() == 0 => Some(tip), - PruneMode::Distance(distance) if *distance >= min_blocks.unwrap_or_default() => { - Some(tip.saturating_sub(*distance)) - } - PruneMode::Before(n) if tip.saturating_sub(*n) >= min_blocks.unwrap_or_default() => { - Some(n.saturating_sub(1)) - } - _ => None, - } - } - impl_prune_parts!( - (sender_recovery, "SenderRecovery", Some(64)), - (transaction_lookup, "TransactionLookup", None), - (receipts, "Receipts", Some(64)), - (account_history, "AccountHistory", Some(64)), - (storage_history, "StorageHistory", Some(64)) + (sender_recovery, SenderRecovery, Some(64)), + (transaction_lookup, TransactionLookup, None), + (receipts, Receipts, Some(64)), + (account_history, AccountHistory, Some(64)), + (storage_history, StorageHistory, Some(64)) ); } From bfbad261ec9a7565f1e60464284d83a7cf6f9111 Mon Sep 17 00:00:00 2001 From: erik Date: Tue, 1 Aug 2023 11:25:10 -0400 Subject: [PATCH 319/722] feat(engine): payload cancun fields (#4010) --- crates/consensus/auto-seal/src/lib.rs | 2 +- crates/payload/basic/src/lib.rs | 2 +- crates/rpc/rpc-types/src/eth/block.rs | 17 ++++++++++++--- .../rpc/rpc-types/src/eth/engine/payload.rs | 21 ++++++++++++++----- crates/rpc/rpc/src/eth/api/pending_block.rs | 2 +- testing/ef-tests/src/models.rs | 8 +++++-- 6 files changed, 39 insertions(+), 13 deletions(-) diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index 29cb6f491771..abd0fada2c55 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -268,9 +268,9 @@ impl StorageInner { mix_hash: Default::default(), nonce: 0, base_fee_per_gas, - extra_data: Default::default(), blob_gas_used: None, excess_blob_gas: None, + extra_data: Default::default(), }; header.transactions_root = if transactions.is_empty() { diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 3e15444d15da..dea187a30b62 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -786,9 +786,9 @@ where gas_limit: block_gas_limit, difficulty: U256::ZERO, gas_used: 0, - extra_data: extra_data.into(), blob_gas_used: None, excess_blob_gas: None, + extra_data: extra_data.into(), }; let block = Block { header, body: vec![], ommers: vec![], withdrawals }; diff 
--git a/crates/rpc/rpc-types/src/eth/block.rs b/crates/rpc/rpc-types/src/eth/block.rs index fbb6a06b62e6..a3762aef76d1 100644 --- a/crates/rpc/rpc-types/src/eth/block.rs +++ b/crates/rpc/rpc-types/src/eth/block.rs @@ -239,6 +239,12 @@ pub struct Header { /// Withdrawals root hash added by EIP-4895 and is ignored in legacy headers. #[serde(skip_serializing_if = "Option::is_none")] pub withdrawals_root: Option, + /// Blob gas used + #[serde(rename = "blobGasUsed", skip_serializing_if = "Option::is_none")] + pub blob_gas_used: Option, + /// Excess blob gas + #[serde(rename = "excessBlobGas", skip_serializing_if = "Option::is_none")] + pub excess_blob_gas: Option, } // === impl Header === @@ -268,9 +274,8 @@ impl Header { base_fee_per_gas, extra_data, withdrawals_root, - // TODO: add header fields to the rpc header - blob_gas_used: _, - excess_blob_gas: _, + blob_gas_used, + excess_blob_gas, }, hash, } = primitive_header; @@ -294,6 +299,8 @@ impl Header { mix_hash, nonce: Some(nonce.to_be_bytes().into()), base_fee_per_gas: base_fee_per_gas.map(U256::from), + blob_gas_used: blob_gas_used.map(U64::from), + excess_blob_gas: excess_blob_gas.map(U64::from), } } } @@ -423,6 +430,8 @@ mod tests { mix_hash: H256::from_low_u64_be(14), nonce: Some(H64::from_low_u64_be(15)), base_fee_per_gas: Some(U256::from(20)), + blob_gas_used: None, + excess_blob_gas: None, }, total_difficulty: Some(U256::from(100000)), uncles: vec![H256::from_low_u64_be(17)], @@ -461,6 +470,8 @@ mod tests { mix_hash: H256::from_low_u64_be(14), nonce: Some(H64::from_low_u64_be(15)), base_fee_per_gas: Some(U256::from(20)), + blob_gas_used: None, + excess_blob_gas: None, }, total_difficulty: Some(U256::from(100000)), uncles: vec![H256::from_low_u64_be(17)], diff --git a/crates/rpc/rpc-types/src/eth/engine/payload.rs b/crates/rpc/rpc-types/src/eth/engine/payload.rs index d2b0625577a5..d63e0817e183 100644 --- a/crates/rpc/rpc-types/src/eth/engine/payload.rs +++ b/crates/rpc/rpc-types/src/eth/engine/payload.rs @@ -79,6 +79,8 @@ pub struct ExecutionPayload { pub timestamp: U64, pub extra_data: Bytes, pub base_fee_per_gas: U256, + pub blob_gas_used: Option, + pub excess_blob_gas: Option, pub block_hash: H256, pub transactions: Vec, /// Array of [`Withdrawal`] enabled with V2 @@ -111,6 +113,8 @@ impl From for ExecutionPayload { timestamp: value.timestamp.into(), extra_data: value.extra_data.clone(), base_fee_per_gas: U256::from(value.base_fee_per_gas.unwrap_or_default()), + blob_gas_used: value.blob_gas_used.map(U64::from), + excess_blob_gas: value.excess_blob_gas.map(U64::from), block_hash: value.hash(), transactions, withdrawals: value.withdrawals, @@ -167,14 +171,15 @@ impl TryFrom for SealedBlock { .uint_try_to() .map_err(|_| PayloadError::BaseFee(payload.base_fee_per_gas))?, ), + blob_gas_used: payload.blob_gas_used.map(|blob_gas_used| blob_gas_used.as_u64()), + excess_blob_gas: payload + .excess_blob_gas + .map(|excess_blob_gas| excess_blob_gas.as_u64()), extra_data: payload.extra_data, // Defaults ommers_hash: EMPTY_LIST_HASH, difficulty: Default::default(), nonce: Default::default(), - // TODO: add conversion once ExecutionPayload has 4844 fields - blob_gas_used: None, - excess_blob_gas: None, } .seal_slow(); @@ -211,6 +216,12 @@ pub enum PayloadError { /// Invalid payload base fee. #[error("Invalid payload base fee: {0}")] BaseFee(U256), + /// Invalid payload base fee. + #[error("Invalid payload blob gas used: {0}")] + BlobGasUsed(U256), + /// Invalid payload base fee. 
+ #[error("Invalid payload excess blob gas: {0}")] + ExcessBlobGas(U256), /// Invalid payload block hash. #[error("blockhash mismatch, want {consensus}, got {execution}")] BlockHash { @@ -519,7 +530,7 @@ mod tests { #[test] fn serde_roundtrip_legacy_txs_payload() { // pulled from hive tests - let s = r#"{"parentHash":"0x67ead97eb79b47a1638659942384143f36ed44275d4182799875ab5a87324055","feeRecipient":"0x0000000000000000000000000000000000000000","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x4e3c608a9f2e129fccb91a1dae7472e78013b8e654bccc8d224ce3d63ae17006","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prevRandao":"0x44bb4b98c59dbb726f96ffceb5ee028dcbe35b9bba4f9ffd56aeebf8d1e4db62","blockNumber":"0x1","gasLimit":"0x2fefd8","gasUsed":"0xa860","timestamp":"0x1235","extraData":"0x8b726574682f76302e312e30","baseFeePerGas":"0x342770c0","blockHash":"0x5655011482546f16b2312ef18e9fad03d6a52b1be95401aea884b222477f9e64","transactions":["0xf865808506fc23ac00830124f8940000000000000000000000000000000000000316018032a044b25a8b9b247d01586b3d59c71728ff49c9b84928d9e7fa3377ead3b5570b5da03ceac696601ff7ee6f5fe8864e2998db9babdf5eeba1a0cd5b4d44b3fcbd181b"]}"#; + let s = r#"{"parentHash":"0x67ead97eb79b47a1638659942384143f36ed44275d4182799875ab5a87324055","feeRecipient":"0x0000000000000000000000000000000000000000","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x4e3c608a9f2e129fccb91a1dae7472e78013b8e654bccc8d224ce3d63ae17006","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prevRandao":"0x44bb4b98c59dbb726f96ffceb5ee028dcbe35b9bba4f9ffd56aeebf8d1e4db62","blockNumber":"0x1","gasLimit":"0x2fefd8","gasUsed":"0xa860","timestamp":"0x1235","extraData":"0x8b726574682f76302e312e30","baseFeePerGas":"0x342770c0","blobGasUsed":null,"excessBlobGas":null,"blockHash":"0x5655011482546f16b2312ef18e9fad03d6a52b1be95401aea884b222477f9e64","transactions":["0xf865808506fc23ac00830124f8940000000000000000000000000000000000000316018032a044b25a8b9b247d01586b3d59c71728ff49c9b84928d9e7fa3377ead3b5570b5da03ceac696601ff7ee6f5fe8864e2998db9babdf5eeba1a0cd5b4d44b3fcbd181b"]}"#; let payload: ExecutionPayload = serde_json::from_str(s).unwrap(); assert_eq!(serde_json::to_string(&payload).unwrap(), s); } @@ -527,7 +538,7 @@ mod tests { #[test] fn serde_roundtrip_enveloped_txs_payload() { // pulled from hive tests - let s = 
r#"{"parentHash":"0x67ead97eb79b47a1638659942384143f36ed44275d4182799875ab5a87324055","feeRecipient":"0x0000000000000000000000000000000000000000","stateRoot":"0x76a03cbcb7adce07fd284c61e4fa31e5e786175cefac54a29e46ec8efa28ea41","receiptsRoot":"0x4e3c608a9f2e129fccb91a1dae7472e78013b8e654bccc8d224ce3d63ae17006","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prevRandao":"0x028111cb7d25918386a69656b3d17b2febe95fd0f11572c1a55c14f99fdfe3df","blockNumber":"0x1","gasLimit":"0x2fefd8","gasUsed":"0xa860","timestamp":"0x1235","extraData":"0x8b726574682f76302e312e30","baseFeePerGas":"0x342770c0","blockHash":"0xa6f40ed042e61e88e76125dede8fff8026751ea14454b68fb534cea99f2b2a77","transactions":["0xf865808506fc23ac00830124f8940000000000000000000000000000000000000316018032a044b25a8b9b247d01586b3d59c71728ff49c9b84928d9e7fa3377ead3b5570b5da03ceac696601ff7ee6f5fe8864e2998db9babdf5eeba1a0cd5b4d44b3fcbd181b"]}"#; + let s = r#"{"parentHash":"0x67ead97eb79b47a1638659942384143f36ed44275d4182799875ab5a87324055","feeRecipient":"0x0000000000000000000000000000000000000000","stateRoot":"0x76a03cbcb7adce07fd284c61e4fa31e5e786175cefac54a29e46ec8efa28ea41","receiptsRoot":"0x4e3c608a9f2e129fccb91a1dae7472e78013b8e654bccc8d224ce3d63ae17006","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prevRandao":"0x028111cb7d25918386a69656b3d17b2febe95fd0f11572c1a55c14f99fdfe3df","blockNumber":"0x1","gasLimit":"0x2fefd8","gasUsed":"0xa860","timestamp":"0x1235","extraData":"0x8b726574682f76302e312e30","baseFeePerGas":"0x342770c0","blobGasUsed":null,"excessBlobGas":null,"blockHash":"0xa6f40ed042e61e88e76125dede8fff8026751ea14454b68fb534cea99f2b2a77","transactions":["0xf865808506fc23ac00830124f8940000000000000000000000000000000000000316018032a044b25a8b9b247d01586b3d59c71728ff49c9b84928d9e7fa3377ead3b5570b5da03ceac696601ff7ee6f5fe8864e2998db9babdf5eeba1a0cd5b4d44b3fcbd181b"]}"#; let payload: ExecutionPayload = serde_json::from_str(s).unwrap(); assert_eq!(serde_json::to_string(&payload).unwrap(), s); } diff --git a/crates/rpc/rpc/src/eth/api/pending_block.rs b/crates/rpc/rpc/src/eth/api/pending_block.rs index c9b74a3de7df..97285309e6e4 100644 --- a/crates/rpc/rpc/src/eth/api/pending_block.rs +++ b/crates/rpc/rpc/src/eth/api/pending_block.rs @@ -142,9 +142,9 @@ impl PendingBlockEnv { gas_limit: block_gas_limit, difficulty: U256::ZERO, gas_used: cumulative_gas_used, - extra_data: Default::default(), blob_gas_used: None, excess_blob_gas: None, + extra_data: Default::default(), }; // seal the block diff --git a/testing/ef-tests/src/models.rs b/testing/ef-tests/src/models.rs index 
989a0dd9e7fa..44e9c9b3a944 100644 --- a/testing/ef-tests/src/models.rs +++ b/testing/ef-tests/src/models.rs @@ -78,6 +78,10 @@ pub struct Header { pub base_fee_per_gas: Option, /// Withdrawals root. pub withdrawals_root: Option, + /// Blob gas used. + pub blob_gas_used: Option, + /// Excess blob gas. + pub excess_blob_gas: Option, } impl From
<Header> for SealedHeader {
@@ -100,8 +104,8 @@ impl From<Header>
for SealedHeader { parent_hash: value.parent_hash, logs_bloom: value.bloom, withdrawals_root: value.withdrawals_root, - blob_gas_used: None, - excess_blob_gas: None, + blob_gas_used: value.blob_gas_used.map(|v| v.0.to::()), + excess_blob_gas: value.excess_blob_gas.map(|v| v.0.to::()), }; header.seal(value.hash) } From b46101afb5e549d40b7b2537fff9b67e05ad4448 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Altu=C4=9F=20Bakan?= Date: Tue, 1 Aug 2023 20:44:39 +0200 Subject: [PATCH 320/722] feat(cli): add max peer args (#4024) --- bin/reth/src/args/network_args.rs | 41 ++++++++++++++++++++++--- crates/net/network/src/peers/manager.rs | 18 +++++++++++ 2 files changed, 54 insertions(+), 5 deletions(-) diff --git a/bin/reth/src/args/network_args.rs b/bin/reth/src/args/network_args.rs index c945fdaf32f6..086373687124 100644 --- a/bin/reth/src/args/network_args.rs +++ b/bin/reth/src/args/network_args.rs @@ -60,6 +60,14 @@ pub struct NetworkArgs { /// Network listening port. default: 30303 #[arg(long = "port", value_name = "PORT")] pub port: Option, + + /// Maximum number of outbound requests. default: 100 + #[arg(long)] + pub max_outbound_peers: Option, + + /// Maximum number of inbound requests. default: 30 + #[arg(long)] + pub max_inbound_peers: Option, } impl NetworkArgs { @@ -78,9 +86,17 @@ impl NetworkArgs { let chain_bootnodes = chain_spec.chain.bootnodes().unwrap_or_else(mainnet_nodes); let peers_file = self.peers_file.clone().unwrap_or(default_peers_file); - // Configure basic network stack. + // Configure peer connections + let peer_config = config + .peers + .clone() + .with_max_inbound_opt(self.max_inbound_peers) + .with_max_outbound_opt(self.max_outbound_peers); + + // Configure basic network stack let mut network_config_builder = config .network_config(self.nat, self.persistent_peers_file(peers_file), secret_key) + .peer_config(peer_config) .boot_nodes(self.bootnodes.clone().unwrap_or(chain_bootnodes)) .chain_spec(chain_spec); @@ -91,11 +107,7 @@ impl NetworkArgs { self.discovery.apply_to_builder(network_config_builder) } -} -// === impl NetworkArgs === - -impl NetworkArgs { /// If `no_persist_peers` is true then this returns the path to the persistent peers file path. pub fn persistent_peers_file(&self, peers_file: PathBuf) -> Option { if self.no_persist_peers { @@ -163,4 +175,23 @@ mod tests { CommandParser::::parse_from(["reth", "--nat", "extip:0.0.0.0"]).args; assert_eq!(args.nat, NatResolver::ExternalIp("0.0.0.0".parse().unwrap())); } + + #[test] + fn parse_peer_args() { + let args = + CommandParser::::parse_from(["reth", "--max-outbound-peers", "50"]).args; + assert_eq!(args.max_outbound_peers, Some(50)); + assert_eq!(args.max_inbound_peers, None); + + let args = CommandParser::::parse_from([ + "reth", + "--max-outbound-peers", + "75", + "--max-inbound-peers", + "15", + ]) + .args; + assert_eq!(args.max_outbound_peers, Some(75)); + assert_eq!(args.max_inbound_peers, Some(15)); + } } diff --git a/crates/net/network/src/peers/manager.rs b/crates/net/network/src/peers/manager.rs index 8b192f1d79a3..1955603c7349 100644 --- a/crates/net/network/src/peers/manager.rs +++ b/crates/net/network/src/peers/manager.rs @@ -1134,11 +1134,29 @@ impl PeersConfig { self.connection_info.num_inbound = num_inbound; self } + /// Maximum allowed outbound connections. pub fn with_max_outbound(mut self, max_outbound: usize) -> Self { self.connection_info.max_outbound = max_outbound; self } + + /// Maximum allowed inbound connections with optional update. 
+ pub fn with_max_inbound_opt(mut self, max_inbound: Option) -> Self { + if let Some(max_inbound) = max_inbound { + self.connection_info.max_inbound = max_inbound; + } + self + } + + /// Maximum allowed outbound connections with optional update. + pub fn with_max_outbound_opt(mut self, max_outbound: Option) -> Self { + if let Some(max_outbound) = max_outbound { + self.connection_info.max_outbound = max_outbound; + } + self + } + /// Maximum allowed inbound connections. pub fn with_max_inbound(mut self, max_inbound: usize) -> Self { self.connection_info.max_inbound = max_inbound; From cb0dedc8a9f4f9857ad3026f91cbd01203bc10aa Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 2 Aug 2023 14:02:50 +0100 Subject: [PATCH 321/722] chore: fix clippy (#4032) --- bin/reth/src/node/mod.rs | 6 +++--- bin/reth/src/stage/dump/execution.rs | 4 ++-- bin/reth/src/stage/dump/hashing_account.rs | 4 ++-- bin/reth/src/stage/dump/hashing_storage.rs | 4 ++-- bin/reth/src/stage/dump/merkle.rs | 4 ++-- bin/reth/src/stage/dump/mod.rs | 10 +++++----- crates/revm/revm-inspectors/src/tracing/types.rs | 2 +- crates/rpc/rpc/src/reth.rs | 1 - crates/storage/libmdbx-rs/src/environment.rs | 3 ++- 9 files changed, 19 insertions(+), 19 deletions(-) diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index a3fe6f711fed..3c3aa43165e5 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -331,7 +331,7 @@ impl Command { let mut pipeline = self .build_networked_pipeline( - &mut config, + &config, client.clone(), Arc::clone(&consensus), db.clone(), @@ -351,7 +351,7 @@ impl Command { } else { let pipeline = self .build_networked_pipeline( - &mut config, + &config, network_client.clone(), Arc::clone(&consensus), db.clone(), @@ -480,7 +480,7 @@ impl Command { #[allow(clippy::too_many_arguments)] async fn build_networked_pipeline( &self, - config: &mut Config, + config: &Config, client: Client, consensus: Arc, db: DB, diff --git a/bin/reth/src/stage/dump/execution.rs b/bin/reth/src/stage/dump/execution.rs index b2d7bbed32fc..67eda8033cc7 100644 --- a/bin/reth/src/stage/dump/execution.rs +++ b/bin/reth/src/stage/dump/execution.rs @@ -13,7 +13,7 @@ use std::{path::PathBuf, sync::Arc}; use tracing::info; pub(crate) async fn dump_execution_stage( - db_tool: &mut DbTool<'_, DB>, + db_tool: &DbTool<'_, DB>, from: u64, to: u64, output_db: &PathBuf, @@ -90,7 +90,7 @@ fn import_tables_with_range( /// PlainAccountState safely. There might be some state dependency from an address /// which hasn't been changed in the given range. async fn unwind_and_copy( - db_tool: &mut DbTool<'_, DB>, + db_tool: &DbTool<'_, DB>, from: u64, tip_block_number: u64, output_db: &DatabaseEnv, diff --git a/bin/reth/src/stage/dump/hashing_account.rs b/bin/reth/src/stage/dump/hashing_account.rs index cc0e72536d31..2a947d013e63 100644 --- a/bin/reth/src/stage/dump/hashing_account.rs +++ b/bin/reth/src/stage/dump/hashing_account.rs @@ -9,7 +9,7 @@ use std::{path::PathBuf, sync::Arc}; use tracing::info; pub(crate) async fn dump_hashing_account_stage( - db_tool: &mut DbTool<'_, DB>, + db_tool: &DbTool<'_, DB>, from: BlockNumber, to: BlockNumber, output_db: &PathBuf, @@ -33,7 +33,7 @@ pub(crate) async fn dump_hashing_account_stage( /// Dry-run an unwind to FROM block and copy the necessary table data to the new database. 
async fn unwind_and_copy( - db_tool: &mut DbTool<'_, DB>, + db_tool: &DbTool<'_, DB>, from: u64, tip_block_number: u64, output_db: &DatabaseEnv, diff --git a/bin/reth/src/stage/dump/hashing_storage.rs b/bin/reth/src/stage/dump/hashing_storage.rs index 1af985281d76..0a8df0a6e44a 100644 --- a/bin/reth/src/stage/dump/hashing_storage.rs +++ b/bin/reth/src/stage/dump/hashing_storage.rs @@ -9,7 +9,7 @@ use std::{path::PathBuf, sync::Arc}; use tracing::info; pub(crate) async fn dump_hashing_storage_stage( - db_tool: &mut DbTool<'_, DB>, + db_tool: &DbTool<'_, DB>, from: u64, to: u64, output_db: &PathBuf, @@ -28,7 +28,7 @@ pub(crate) async fn dump_hashing_storage_stage( /// Dry-run an unwind to FROM block and copy the necessary table data to the new database. async fn unwind_and_copy( - db_tool: &mut DbTool<'_, DB>, + db_tool: &DbTool<'_, DB>, from: u64, tip_block_number: u64, output_db: &DatabaseEnv, diff --git a/bin/reth/src/stage/dump/merkle.rs b/bin/reth/src/stage/dump/merkle.rs index b54168768787..4339ef94e71f 100644 --- a/bin/reth/src/stage/dump/merkle.rs +++ b/bin/reth/src/stage/dump/merkle.rs @@ -15,7 +15,7 @@ use std::{path::PathBuf, sync::Arc}; use tracing::info; pub(crate) async fn dump_merkle_stage( - db_tool: &mut DbTool<'_, DB>, + db_tool: &DbTool<'_, DB>, from: BlockNumber, to: BlockNumber, output_db: &PathBuf, @@ -42,7 +42,7 @@ pub(crate) async fn dump_merkle_stage( /// Dry-run an unwind to FROM block and copy the necessary table data to the new database. async fn unwind_and_copy( - db_tool: &mut DbTool<'_, DB>, + db_tool: &DbTool<'_, DB>, range: (u64, u64), tip_block_number: u64, output_db: &DatabaseEnv, diff --git a/bin/reth/src/stage/dump/mod.rs b/bin/reth/src/stage/dump/mod.rs index 5e7206cf1a59..792777d1a672 100644 --- a/bin/reth/src/stage/dump/mod.rs +++ b/bin/reth/src/stage/dump/mod.rs @@ -104,20 +104,20 @@ impl Command { let db = Arc::new(init_db(db_path, self.db.log_level)?); info!(target: "reth::cli", "Database opened"); - let mut tool = DbTool::new(&db, self.chain.clone())?; + let tool = DbTool::new(&db, self.chain.clone())?; match &self.command { Stages::Execution(StageCommand { output_db, from, to, dry_run, .. }) => { - dump_execution_stage(&mut tool, *from, *to, output_db, *dry_run).await? + dump_execution_stage(&tool, *from, *to, output_db, *dry_run).await? } Stages::StorageHashing(StageCommand { output_db, from, to, dry_run, .. }) => { - dump_hashing_storage_stage(&mut tool, *from, *to, output_db, *dry_run).await? + dump_hashing_storage_stage(&tool, *from, *to, output_db, *dry_run).await? } Stages::AccountHashing(StageCommand { output_db, from, to, dry_run, .. }) => { - dump_hashing_account_stage(&mut tool, *from, *to, output_db, *dry_run).await? + dump_hashing_account_stage(&tool, *from, *to, output_db, *dry_run).await? } Stages::Merkle(StageCommand { output_db, from, to, dry_run, .. }) => { - dump_merkle_stage(&mut tool, *from, *to, output_db, *dry_run).await? + dump_merkle_stage(&tool, *from, *to, output_db, *dry_run).await? 
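The recurring change in this clippy pass is demoting `&mut DbTool` parameters to `&DbTool`: none of the dump stages mutate the tool, so a shared reference suffices and the same instance can serve every subcommand arm in turn. A minimal standalone sketch of that idea, using a hypothetical `DbTool`-like stand-in rather than the real reth type:

    // Hypothetical stand-in: the dump helpers only ever read from it.
    struct DbTool {
        path: String,
    }

    fn dump_execution(tool: &DbTool) {
        println!("dumping execution tables from {}", tool.path);
    }

    fn dump_merkle(tool: &DbTool) {
        println!("dumping merkle tables from {}", tool.path);
    }

    fn main() {
        let tool = DbTool { path: "reth-db".to_string() };
        // A shared `&DbTool` can be handed to each stage in turn;
        // `&mut` would demand exclusive access for no benefit.
        dump_execution(&tool);
        dump_merkle(&tool);
    }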
} } diff --git a/crates/revm/revm-inspectors/src/tracing/types.rs b/crates/revm/revm-inspectors/src/tracing/types.rs index 2739517dd480..b7d864814095 100644 --- a/crates/revm/revm-inspectors/src/tracing/types.rs +++ b/crates/revm/revm-inspectors/src/tracing/types.rs @@ -487,7 +487,7 @@ impl CallTraceNode { post_value: bool, ) { let addr = self.trace.address; - let acc_state = account_states.entry(addr).or_insert_with(AccountState::default); + let acc_state = account_states.entry(addr).or_default(); for change in self.trace.steps.iter().filter_map(|s| s.storage_change) { let StorageChange { key, value, had_value } = change; let storage_map = acc_state.storage.get_or_insert_with(BTreeMap::new); diff --git a/crates/rpc/rpc/src/reth.rs b/crates/rpc/rpc/src/reth.rs index 7fbdf25ab460..8fabf50e5f95 100644 --- a/crates/rpc/rpc/src/reth.rs +++ b/crates/rpc/rpc/src/reth.rs @@ -62,7 +62,6 @@ where } fn try_balance_changes_in_block(&self, block_id: BlockId) -> EthResult> { - let block_id = block_id; let Some(block_number) = self.provider().block_number_for_id(block_id)? else { return Err(EthApiError::UnknownBlockNumber) }; diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index fb89beecefde..4a37b941bd97 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -346,7 +346,7 @@ where } /////////////////////////////////////////////////////////////////////////////////////////////////// -//// Environment Builder +// Environment Builder /////////////////////////////////////////////////////////////////////////////////////////////////// #[derive(Clone, Debug, PartialEq, Eq)] @@ -512,6 +512,7 @@ where match rx.recv() { Ok(msg) => match msg { TxnManagerMessage::Begin { parent, flags, sender } => { + #[allow(clippy::redundant_locals)] let e = e; let mut txn: *mut ffi::MDBX_txn = ptr::null_mut(); sender From 9510a5ca7edef4b132571b7cc1c5779ac6692a3d Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 2 Aug 2023 14:04:55 +0100 Subject: [PATCH 322/722] feat(pruner): metrics (#4023) --- Cargo.lock | 1 + bin/reth/src/node/mod.rs | 1 - .../consensus/beacon/src/engine/test_utils.rs | 1 - crates/prune/Cargo.toml | 1 + crates/prune/src/lib.rs | 2 + crates/prune/src/metrics.rs | 36 ++++++++++++++++++ crates/prune/src/pruner.rs | 38 ++++++++++++------- 7 files changed, 65 insertions(+), 15 deletions(-) create mode 100644 crates/prune/src/metrics.rs diff --git a/Cargo.lock b/Cargo.lock index 22c9001e7d36..72c0bd3f7dea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5650,6 +5650,7 @@ dependencies = [ "rayon", "reth-db", "reth-interfaces", + "reth-metrics", "reth-primitives", "reth-provider", "reth-stages", diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 3c3aa43165e5..184949c4cf6e 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -386,7 +386,6 @@ impl Command { db.clone(), self.chain.clone(), prune_config.block_interval, - tree_config.max_reorg_depth(), prune_config.parts, BatchSizes::default(), ) diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 46fb5eaeb245..78b3825e6b39 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -469,7 +469,6 @@ where db.clone(), self.base_config.chain_spec.clone(), 5, - 0, PruneModes::default(), BatchSizes::default(), ); diff --git a/crates/prune/Cargo.toml b/crates/prune/Cargo.toml index 
99a4901cb133..4ef111c6cda0 100644
--- a/crates/prune/Cargo.toml
+++ b/crates/prune/Cargo.toml
@@ -16,6 +16,7 @@ reth-primitives.workspace = true
 reth-db.workspace = true
 reth-provider.workspace = true
 reth-interfaces.workspace = true
+reth-metrics.workspace = true

 # misc
 tracing.workspace = true
diff --git a/crates/prune/src/lib.rs b/crates/prune/src/lib.rs
index c7cb720cb696..b6264e47ceb3 100644
--- a/crates/prune/src/lib.rs
+++ b/crates/prune/src/lib.rs
@@ -1,5 +1,7 @@
 mod error;
+mod metrics;
 mod pruner;

 pub use error::PrunerError;
+use metrics::Metrics;
 pub use pruner::{BatchSizes, Pruner, PrunerResult, PrunerWithResult};
diff --git a/crates/prune/src/metrics.rs b/crates/prune/src/metrics.rs
new file mode 100644
index 000000000000..8c3e768f1435
--- /dev/null
+++ b/crates/prune/src/metrics.rs
@@ -0,0 +1,36 @@
+use reth_metrics::{metrics, metrics::Histogram, Metrics};
+use reth_primitives::PrunePart;
+use std::collections::HashMap;
+
+#[derive(Debug, Default)]
+pub(crate) struct Metrics {
+    pub(crate) pruner: PrunerMetrics,
+    prune_parts: HashMap<PrunePart, PrunerPartMetrics>,
+}
+
+impl Metrics {
+    /// Returns existing or initializes a new instance of [PrunerPartMetrics] for the provided
+    /// [PrunePart].
+    pub(crate) fn get_prune_part_metrics(
+        &mut self,
+        prune_part: PrunePart,
+    ) -> &mut PrunerPartMetrics {
+        self.prune_parts.entry(prune_part).or_insert_with(|| {
+            PrunerPartMetrics::new_with_labels(&[("part", prune_part.to_string())])
+        })
+    }
+}
+
+#[derive(Metrics)]
+#[metrics(scope = "pruner")]
+pub(crate) struct PrunerMetrics {
+    /// Pruning duration
+    pub(crate) duration_seconds: Histogram,
+}
+
+#[derive(Metrics)]
+#[metrics(scope = "pruner.parts")]
+pub(crate) struct PrunerPartMetrics {
+    /// Pruning duration for this part
+    pub(crate) duration_seconds: Histogram,
+}
diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs
index 4a50faf26842..3b8270bde8bf 100644
--- a/crates/prune/src/pruner.rs
+++ b/crates/prune/src/pruner.rs
@@ -1,6 +1,6 @@
 //! Support for pruning.

-use crate::PrunerError;
+use crate::{Metrics, PrunerError};
 use rayon::prelude::*;
 use reth_db::{database::Database, tables};
 use reth_primitives::{
@@ -10,7 +10,7 @@ use reth_provider::{
     BlockReader, DatabaseProviderRW, ProviderFactory, PruneCheckpointReader, PruneCheckpointWriter,
     TransactionsProvider,
 };
-use std::{ops::RangeInclusive, sync::Arc};
+use std::{ops::RangeInclusive, sync::Arc, time::Instant};
 use tracing::{debug, instrument, trace};

 /// Result of [Pruner::run] execution
@@ -33,14 +33,11 @@ impl Default for BatchSizes {

 /// Pruning routine. Main pruning logic happens in [Pruner::run].
 pub struct Pruner<DB: Database> {
+    metrics: Metrics,
     provider_factory: ProviderFactory<DB>,
     /// Minimum pruning interval measured in blocks. All prune parts are checked and, if needed,
     /// pruned, when the chain advances by the specified number of blocks.
     min_block_interval: u64,
-    /// Maximum prune depth. Used to determine the pruning target for parts that are needed during
-    /// the reorg, e.g. changesets.
-    #[allow(dead_code)]
-    max_prune_depth: u64,
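As the field comment above describes, `min_block_interval` rate-limits pruning against the advancing chain tip, which is what the `is_pruning_needed` test below this hunk exercises. A hedged, self-contained sketch of that gating (simplified standalone logic, not the reth implementation):

    // A standalone sketch, not the reth implementation.
    struct Pruner {
        min_block_interval: u64,
        last_pruned_block_number: Option<u64>,
    }

    impl Pruner {
        /// Returns true once the tip has advanced far enough past the last
        /// pruned block, mirroring the interval check described above.
        fn is_pruning_needed(&self, tip_block_number: u64) -> bool {
            match self.last_pruned_block_number {
                Some(last) => {
                    tip_block_number.saturating_sub(last) >= self.min_block_interval
                }
                // Nothing pruned yet: run at the first opportunity.
                None => true,
            }
        }
    }

    fn main() {
        let pruner = Pruner { min_block_interval: 5, last_pruned_block_number: Some(10) };
        assert!(!pruner.is_pruning_needed(12));
        assert!(pruner.is_pruning_needed(15));
    }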
     /// Last pruned block number. Used in conjunction with `min_block_interval` to determine
     /// when the pruning needs to be initiated.
     last_pruned_block_number: Option<BlockNumber>,
@@ -54,14 +51,13 @@ impl<DB: Database> Pruner<DB> {
     pub fn new(
         db: DB,
         chain_spec: Arc<ChainSpec>,
         min_block_interval: u64,
-        max_prune_depth: u64,
         modes: PruneModes,
         batch_sizes: BatchSizes,
     ) -> Self {
         Self {
+            metrics: Metrics::default(),
             provider_factory: ProviderFactory::new(db, chain_spec),
             min_block_interval,
-            max_prune_depth,
             last_pruned_block_number: None,
             modes,
             batch_sizes,
@@ -70,29 +66,48 @@ impl<DB: Database> Pruner<DB> {

     /// Run the pruner
     pub fn run(&mut self, tip_block_number: BlockNumber) -> PrunerResult {
+        let start = Instant::now();
+
         let provider = self.provider_factory.provider_rw()?;

         if let Some((to_block, prune_mode)) =
             self.modes.prune_target_block_receipts(tip_block_number)?
         {
+            let part_start = Instant::now();
             self.prune_receipts(&provider, to_block, prune_mode)?;
+            self.metrics
+                .get_prune_part_metrics(PrunePart::Receipts)
+                .duration_seconds
+                .record(part_start.elapsed())
         }

         if let Some((to_block, prune_mode)) =
             self.modes.prune_target_block_transaction_lookup(tip_block_number)?
         {
+            let part_start = Instant::now();
             self.prune_transaction_lookup(&provider, to_block, prune_mode)?;
+            self.metrics
+                .get_prune_part_metrics(PrunePart::TransactionLookup)
+                .duration_seconds
+                .record(part_start.elapsed())
         }

         if let Some((to_block, prune_mode)) =
             self.modes.prune_target_block_sender_recovery(tip_block_number)?
         {
+            let part_start = Instant::now();
             self.prune_transaction_senders(&provider, to_block, prune_mode)?;
+            self.metrics
+                .get_prune_part_metrics(PrunePart::SenderRecovery)
+                .duration_seconds
+                .record(part_start.elapsed())
         }

         provider.commit()?;
-
         self.last_pruned_block_number = Some(tip_block_number);
+
+        self.metrics.pruner.duration_seconds.record(start.elapsed());
+
         Ok(())
     }

@@ -323,7 +338,7 @@ mod tests {
     fn is_pruning_needed() {
         let db = create_test_rw_db();
         let pruner =
-            Pruner::new(db, MAINNET.clone(), 5, 0, PruneModes::default(), BatchSizes::default());
+            Pruner::new(db, MAINNET.clone(), 5, PruneModes::default(), BatchSizes::default());

         // No last pruned block number was set before
         let first_block_number = 1;
@@ -370,7 +385,6 @@ mod tests {
             tx.inner_raw(),
             MAINNET.clone(),
             5,
-            0,
             PruneModes { receipts: Some(prune_mode), ..Default::default() },
             BatchSizes {
                 // Less than total amount of blocks to prune to test the batching logic
@@ -431,7 +445,6 @@ mod tests {
             tx.inner_raw(),
             MAINNET.clone(),
             5,
-            0,
             PruneModes { transaction_lookup: Some(prune_mode), ..Default::default() },
             BatchSizes {
                 // Less than total amount of blocks to prune to test the batching logic
@@ -498,7 +511,6 @@ mod tests {
             tx.inner_raw(),
             MAINNET.clone(),
             5,
-            0,
             PruneModes { sender_recovery: Some(prune_mode), ..Default::default() },
             BatchSizes {
                 // Less than total amount of blocks to prune to test the batching logic

From 88f83fca3939826d7ad3c2612dab179e7e6d64d7 Mon Sep 17 00:00:00 2001
From: Alexey Shekhirin
Date: Wed, 2 Aug 2023 15:12:53 +0100
Subject: [PATCH 323/722] feat(pruner): account history (#4000)

---
 .../interfaces/src/test_utils/generators.rs   |  45 ++--
 crates/prune/src/pruner.rs                    | 240 +++++++++++++++++-
 crates/stages/benches/setup/mod.rs            |  12 +-
 .../src/stages/index_account_history.rs       |   6 +-
 .../src/stages/index_storage_history.rs       |   6 +-
 crates/stages/src/stages/merkle.rs            |   7 +-
 crates/stages/src/test_utils/test_db.rs       |  56 +++-
 .../src/providers/database/provider.rs        |  54 ++--
 8 files changed, 353 insertions(+), 73 deletions(-)

diff --git a/crates/interfaces/src/test_utils/generators.rs b/crates/interfaces/src/test_utils/generators.rs
index fbbdf42a2129..6b33d9973de8 100644
---
 a/crates/interfaces/src/test_utils/generators.rs
+++ b/crates/interfaces/src/test_utils/generators.rs
@@ -191,21 +191,22 @@ pub fn random_block_range(
     blocks
 }

-type Transition = Vec<(Address, Account, Vec<StorageEntry>)>;
+/// Collection of account and storage entry changes
+pub type ChangeSet = Vec<(Address, Account, Vec<StorageEntry>)>;
 type AccountState = (Account, Vec<StorageEntry>);

-/// Generate a range of transitions for given blocks and accounts.
+/// Generate a range of changesets for given blocks and accounts.
 /// Assumes all accounts start with an empty storage.
 ///
-/// Returns a Vec of account and storage changes for each transition,
+/// Returns a Vec of account and storage changes for each block,
 /// along with the final state of all accounts and storages.
-pub fn random_transition_range<'a, R: Rng, IBlk, IAcc>(
+pub fn random_changeset_range<'a, R: Rng, IBlk, IAcc>(
     rng: &mut R,
     blocks: IBlk,
     accounts: IAcc,
-    n_changes: std::ops::Range<u64>,
+    n_storage_changes: std::ops::Range<u64>,
     key_range: std::ops::Range<u64>,
-) -> (Vec<Transition>, BTreeMap<Address, AccountState>)
+) -> (Vec<ChangeSet>, BTreeMap<Address, AccountState>)
 where
     IBlk: IntoIterator<Item = &'a SealedBlock>,
     IAcc: IntoIterator<Item = (Address, (Account, Vec<StorageEntry>))>,
@@ -217,16 +218,20 @@ where

     let valid_addresses = state.keys().copied().collect();

-    let mut transitions = Vec::new();
+    let mut changesets = Vec::new();

     blocks.into_iter().for_each(|block| {
-        let mut transition = Vec::new();
-        let (from, to, mut transfer, new_entries) =
-            random_account_change(rng, &valid_addresses, n_changes.clone(), key_range.clone());
+        let mut changeset = Vec::new();
+        let (from, to, mut transfer, new_entries) = random_account_change(
+            rng,
+            &valid_addresses,
+            n_storage_changes.clone(),
+            key_range.clone(),
+        );

         // extract from sending account
         let (prev_from, _) = state.get_mut(&from).unwrap();
-        transition.push((from, *prev_from, Vec::new()));
+        changeset.push((from, *prev_from, Vec::new()));

         transfer = max(min(transfer, prev_from.balance), U256::from(1));
         prev_from.balance = prev_from.balance.wrapping_sub(transfer);
@@ -250,11 +255,11 @@ where
             })
             .collect();

-        transition.push((to, *prev_to, old_entries));
+        changeset.push((to, *prev_to, old_entries));

         prev_to.balance = prev_to.balance.wrapping_add(transfer);

-        transitions.push(transition);
+        changesets.push(changeset);
     });

     let final_state = state
@@ -263,7 +268,7 @@ where
             (addr, (acc, storage.into_iter().map(|v| v.into()).collect()))
         })
         .collect();
-    (transitions, final_state)
+    (changesets, final_state)
 }

 /// Generate a random account change.
@@ -272,7 +277,7 @@ where
 pub fn random_account_change<R: Rng>(
     rng: &mut R,
     valid_addresses: &Vec<Address>,
-    n_changes: std::ops::Range<u64>,
+    n_storage_changes: std::ops::Range<u64>,
     key_range: std::ops::Range<u64>,
 ) -> (Address, Address, U256, Vec<StorageEntry>) {
     let mut addresses = valid_addresses.choose_multiple(rng, 2).cloned();
@@ -282,9 +287,13 @@ pub fn random_account_change(

     let balance_change = U256::from(rng.gen::<u64>());

-    let storage_changes = (0..n_changes.sample_single(rng))
-        .map(|_| random_storage_entry(rng, key_range.clone()))
-        .collect();
+    let storage_changes = if n_storage_changes.is_empty() {
+        Vec::new()
+    } else {
+        (0..n_storage_changes.sample_single(rng))
+            .map(|_| random_storage_entry(rng, key_range.clone()))
+            .collect()
+    };

     (addr_from, addr_to, balance_change, storage_changes)
 }
diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs
index 3b8270bde8bf..57775390e74a 100644
--- a/crates/prune/src/pruner.rs
+++ b/crates/prune/src/pruner.rs
@@ -2,9 +2,16 @@

 use crate::{Metrics, PrunerError};
 use rayon::prelude::*;
-use reth_db::{database::Database, tables};
+use reth_db::{
+    abstraction::cursor::{DbCursorRO, DbCursorRW},
+    database::Database,
+    models::ShardedKey,
+    tables,
+    transaction::DbTxMut,
+    BlockNumberList,
+};
 use reth_primitives::{
-    BlockNumber, ChainSpec, PruneCheckpoint, PruneMode, PruneModes, PrunePart, TxNumber,
+    Address, BlockNumber, ChainSpec, PruneCheckpoint, PruneMode, PruneModes, PrunePart, TxNumber,
 };
 use reth_provider::{
     BlockReader, DatabaseProviderRW, ProviderFactory, PruneCheckpointReader, PruneCheckpointWriter,
@@ -23,11 +30,17 @@ pub struct BatchSizes {
     receipts: usize,
     transaction_lookup: usize,
     transaction_senders: usize,
+    account_history: usize,
 }

 impl Default for BatchSizes {
     fn default() -> Self {
-        Self { receipts: 10000, transaction_lookup: 10000, transaction_senders: 10000 }
+        Self {
+            receipts: 10000,
+            transaction_lookup: 10000,
+            transaction_senders: 10000,
+            account_history: 10000,
+        }
     }
 }
@@ -103,6 +116,12 @@ impl<DB: Database> Pruner<DB> {
                 .record(part_start.elapsed())
         }

+        if let Some((to_block, prune_mode)) =
+            self.modes.prune_target_block_account_history(tip_block_number)?
+        {
+            self.prune_account_history(&provider, to_block, prune_mode)?;
+        }
+
         provider.commit()?;
         self.last_pruned_block_number = Some(tip_block_number);

@@ -188,7 +207,7 @@ impl<DB: Database> Pruner<DB> {
         let total = range.clone().count();

         let mut processed = 0;
-        provider.prune_table_in_batches::<tables::Receipts>(
+        provider.prune_table_with_iterator_in_batches::<tables::Receipts>(
             range,
             self.batch_sizes.receipts,
             |entries| {
@@ -256,7 +275,7 @@ impl<DB: Database> Pruner<DB> {
         // Pre-sort hashes to prune them in order
         hashes.sort_unstable();

-        let entries = provider.prune_table::<tables::TxHashNumber>(hashes)?;
+        let entries = provider.prune_table_with_iterator::<tables::TxHashNumber>(hashes)?;
         processed += entries;
         trace!(
             target: "pruner",
@@ -296,7 +315,7 @@ impl<DB: Database> Pruner<DB> {
         let total = range.clone().count();

         let mut processed = 0;
-        provider.prune_table_in_batches::<tables::TxSenders>(
+        provider.prune_table_with_range_in_batches::<tables::TxSenders>(
             range,
             self.batch_sizes.transaction_senders,
             |entries| {
@@ -317,22 +336,135 @@ impl<DB: Database> Pruner<DB> {

         Ok(())
     }
+
+    /// Prune account history up to the provided block, inclusive.
+    #[instrument(level = "trace", skip(self, provider), target = "pruner")]
+    fn prune_account_history(
+        &self,
+        provider: &DatabaseProviderRW<'_, DB>,
+        to_block: BlockNumber,
+        prune_mode: PruneMode,
+    ) -> PrunerResult {
+        let from_block = provider
+            .get_prune_checkpoint(PrunePart::AccountHistory)?
+            .map(|checkpoint| checkpoint.block_number + 1)
+            .unwrap_or_default();
+        let range = from_block..=to_block;
+        let total = range.clone().count();
+
+        let mut processed = 0;
+        provider.prune_table_with_range_in_batches::<tables::AccountChangeSet>(
+            range,
+            self.batch_sizes.account_history,
+            |entries| {
+                processed += entries;
+                trace!(
+                    target: "pruner",
+                    %entries,
+                    progress = format!("{:.1}%", 100.0 * processed as f64 / total as f64),
+                    "Pruned account history (changesets)"
+                );
+            },
+        )?;
+
+        let mut cursor = provider.tx_ref().cursor_write::<tables::AccountHistory>()?;
+        // Prune `AccountHistory` table:
+        // 1. If the shard has `highest_block_number` less than or equal to the target block number
+        // for pruning, delete the shard completely.
+        // 2. If the shard has `highest_block_number` greater than the target block number for
+        // pruning, filter block numbers inside the shard which are less than the target
+        // block number for pruning.
+        while let Some(result) = cursor.next()? {
+            let (key, blocks): (ShardedKey<Address>
, BlockNumberList) = result; + + if key.highest_block_number <= to_block { + // If shard consists only of block numbers less than the target one, delete shard + // completely. + cursor.delete_current()?; + if key.highest_block_number == to_block { + // Shard contains only block numbers up to the target one, so we can skip to the + // next address. It is guaranteed that further shards for this address will not + // contain the target block number, as it's in this shard. + cursor.seek_exact(ShardedKey::last(key.key))?; + } + } else { + // Shard contains block numbers that are higher than the target one, so we need to + // filter it. It is guaranteed that further shards for this address will not contain + // the target block number, as it's in this shard. + let blocks = blocks + .iter(0) + .skip_while(|block| *block <= to_block as usize) + .collect::>(); + if blocks.is_empty() { + // If there are no more blocks in this shard, we need to remove it, as empty + // shards are not allowed. + if key.highest_block_number == u64::MAX { + // If current shard is the last shard for this address, replace it with the + // previous shard. + if let Some((prev_key, prev_value)) = cursor.prev()? { + if prev_key.key == key.key { + cursor.delete_current()?; + // Upsert will replace the last shard for this address with the + // previous value + cursor.upsert(key.clone(), prev_value)?; + } + } + } else { + // If current shard is not the last shard for this address, just delete it. + cursor.delete_current()?; + } + } else { + cursor.upsert(key.clone(), BlockNumberList::new_pre_sorted(blocks))?; + } + + // Jump to the next address + cursor.seek_exact(ShardedKey::last(key.key))?; + } + + processed += 1; + if processed % self.batch_sizes.account_history == 0 { + trace!( + target: "pruner", + entries = self.batch_sizes.account_history, + "Pruned account history (indices)" + ); + } + } + + if processed % self.batch_sizes.account_history != 0 { + trace!( + target: "pruner", + entries = processed % self.batch_sizes.account_history, + "Pruned account history (indices)" + ); + } + + provider.save_prune_checkpoint( + PrunePart::AccountHistory, + PruneCheckpoint { block_number: to_block, prune_mode }, + )?; + + Ok(()) + } } #[cfg(test)] mod tests { use crate::{pruner::BatchSizes, Pruner}; use assert_matches::assert_matches; - use reth_db::{tables, test_utils::create_test_rw_db}; + use reth_db::{tables, test_utils::create_test_rw_db, BlockNumberList}; use reth_interfaces::test_utils::{ generators, - generators::{random_block_range, random_receipt}, + generators::{ + random_block_range, random_changeset_range, random_eoa_account_range, random_receipt, + }, }; use reth_primitives::{ - BlockNumber, PruneCheckpoint, PruneMode, PruneModes, PrunePart, H256, MAINNET, + Address, BlockNumber, PruneCheckpoint, PruneMode, PruneModes, PrunePart, H256, MAINNET, }; use reth_provider::PruneCheckpointReader; use reth_stages::test_utils::TestTransaction; + use std::{collections::BTreeMap, ops::AddAssign}; #[test] fn is_pruning_needed() { @@ -542,4 +674,94 @@ mod tests { // ended last time test_prune(20); } + + #[test] + fn prune_account_history() { + let tx = TestTransaction::default(); + let mut rng = generators::rng(); + + let block_num = 7000; + let blocks = random_block_range(&mut rng, 0..=block_num, H256::zero(), 0..1); + tx.insert_blocks(blocks.iter(), None).expect("insert blocks"); + + let accounts = + random_eoa_account_range(&mut rng, 0..3).into_iter().collect::>(); + + let (changesets, _) = random_changeset_range( + &mut rng, + 
blocks.iter(), + accounts.clone().into_iter().map(|(addr, acc)| (addr, (acc, Vec::new()))), + 0..0, + 0..0, + ); + tx.insert_changesets(changesets.clone(), None).expect("insert changesets"); + tx.insert_history(changesets.clone(), None).expect("insert history"); + + let account_occurrences = tx.table::().unwrap().into_iter().fold( + BTreeMap::::new(), + |mut map, (key, _)| { + map.entry(key.key).or_default().add_assign(1); + map + }, + ); + assert!(account_occurrences.into_iter().any(|(_, occurrences)| occurrences > 1)); + + assert_eq!( + tx.table::().unwrap().len(), + changesets.iter().flatten().count() + ); + + let original_shards = tx.table::().unwrap(); + + let test_prune = |to_block: BlockNumber| { + let prune_mode = PruneMode::Before(to_block); + let pruner = Pruner::new( + tx.inner_raw(), + MAINNET.clone(), + 5, + PruneModes { account_history: Some(prune_mode), ..Default::default() }, + BatchSizes { + // Less than total amount of blocks to prune to test the batching logic + account_history: 10, + ..Default::default() + }, + ); + + let provider = tx.inner_rw(); + assert_matches!(pruner.prune_account_history(&provider, to_block, prune_mode), Ok(())); + provider.commit().expect("commit"); + + assert_eq!( + tx.table::().unwrap().len(), + changesets[to_block as usize + 1..].iter().flatten().count() + ); + + let actual_shards = tx.table::().unwrap(); + + let expected_shards = original_shards + .iter() + .filter(|(key, _)| key.highest_block_number > to_block) + .map(|(key, blocks)| { + let new_blocks = blocks + .iter(0) + .skip_while(|block| *block <= to_block as usize) + .collect::>(); + (key.clone(), BlockNumberList::new_pre_sorted(new_blocks)) + }) + .collect::>(); + + assert_eq!(actual_shards, expected_shards); + + assert_eq!( + tx.inner().get_prune_checkpoint(PrunePart::AccountHistory).unwrap(), + Some(PruneCheckpoint { block_number: to_block, prune_mode }) + ); + }; + + // Prune first time: no previous checkpoint is present + test_prune(3000); + // Prune second time: previous checkpoint is present, should continue pruning from where + // ended last time + test_prune(4500); + } } diff --git a/crates/stages/benches/setup/mod.rs b/crates/stages/benches/setup/mod.rs index 0c2a6dc19172..f61f7e273c92 100644 --- a/crates/stages/benches/setup/mod.rs +++ b/crates/stages/benches/setup/mod.rs @@ -8,8 +8,8 @@ use reth_db::{ use reth_interfaces::test_utils::{ generators, generators::{ - random_block_range, random_contract_account_range, random_eoa_account_range, - random_transition_range, + random_block_range, random_changeset_range, random_contract_account_range, + random_eoa_account_range, }, }; use reth_primitives::{Account, Address, SealedBlock, H256, MAINNET}; @@ -119,7 +119,7 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> PathBuf { let mut blocks = random_block_range(&mut rng, 0..=num_blocks, H256::zero(), txs_range); - let (transitions, start_state) = random_transition_range( + let (transitions, start_state) = random_changeset_range( &mut rng, blocks.iter().take(2), accounts.into_iter().map(|(addr, acc)| (addr, (acc, Vec::new()))), @@ -139,10 +139,10 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> PathBuf { let offset = transitions.len() as u64; - tx.insert_transitions(transitions, None).unwrap(); + tx.insert_changesets(transitions, None).unwrap(); tx.commit(|tx| updates.flush(tx)).unwrap(); - let (transitions, final_state) = random_transition_range( + let (transitions, final_state) = random_changeset_range( &mut rng, blocks.iter().skip(2), start_state, @@ -150,7 +150,7 @@ pub(crate) 
fn txs_testdata(num_blocks: u64) -> PathBuf { key_range, ); - tx.insert_transitions(transitions, Some(offset)).unwrap(); + tx.insert_changesets(transitions, Some(offset)).unwrap(); tx.insert_accounts_and_storages(final_state).unwrap(); diff --git a/crates/stages/src/stages/index_account_history.rs b/crates/stages/src/stages/index_account_history.rs index fe0b6d3b404c..14943d38a2c0 100644 --- a/crates/stages/src/stages/index_account_history.rs +++ b/crates/stages/src/stages/index_account_history.rs @@ -92,7 +92,7 @@ mod tests { }; use reth_interfaces::test_utils::{ generators, - generators::{random_block_range, random_contract_account_range, random_transition_range}, + generators::{random_block_range, random_changeset_range, random_contract_account_range}, }; use reth_primitives::{hex_literal::hex, Address, BlockNumber, H160, H256, MAINNET}; @@ -408,7 +408,7 @@ mod tests { let blocks = random_block_range(&mut rng, start..=end, H256::zero(), 0..3); - let (transitions, _) = random_transition_range( + let (transitions, _) = random_changeset_range( &mut rng, blocks.iter(), accounts.into_iter().map(|(addr, acc)| (addr, (acc, Vec::new()))), @@ -417,7 +417,7 @@ mod tests { ); // add block changeset from block 1. - self.tx.insert_transitions(transitions, Some(start))?; + self.tx.insert_changesets(transitions, Some(start))?; Ok(()) } diff --git a/crates/stages/src/stages/index_storage_history.rs b/crates/stages/src/stages/index_storage_history.rs index a17c5f14e7c9..4d746817e2ac 100644 --- a/crates/stages/src/stages/index_storage_history.rs +++ b/crates/stages/src/stages/index_storage_history.rs @@ -91,7 +91,7 @@ mod tests { }; use reth_interfaces::test_utils::{ generators, - generators::{random_block_range, random_contract_account_range, random_transition_range}, + generators::{random_block_range, random_changeset_range, random_contract_account_range}, }; use reth_primitives::{ hex_literal::hex, Address, BlockNumber, StorageEntry, H160, H256, MAINNET, U256, @@ -422,7 +422,7 @@ mod tests { let blocks = random_block_range(&mut rng, start..=end, H256::zero(), 0..3); - let (transitions, _) = random_transition_range( + let (transitions, _) = random_changeset_range( &mut rng, blocks.iter(), accounts.into_iter().map(|(addr, acc)| (addr, (acc, Vec::new()))), @@ -431,7 +431,7 @@ mod tests { ); // add block changeset from block 1. - self.tx.insert_transitions(transitions, Some(start))?; + self.tx.insert_changesets(transitions, Some(start))?; Ok(()) } diff --git a/crates/stages/src/stages/merkle.rs b/crates/stages/src/stages/merkle.rs index ffcc427b58ce..64d27426c551 100644 --- a/crates/stages/src/stages/merkle.rs +++ b/crates/stages/src/stages/merkle.rs @@ -376,8 +376,7 @@ mod tests { use reth_interfaces::test_utils::{ generators, generators::{ - random_block, random_block_range, random_contract_account_range, - random_transition_range, + random_block, random_block_range, random_changeset_range, random_contract_account_range, }, }; use reth_primitives::{ @@ -533,7 +532,7 @@ mod tests { blocks.extend(random_block_range(&mut rng, start..=end, head_hash, 0..3)); self.tx.insert_blocks(blocks.iter(), None)?; - let (transitions, final_state) = random_transition_range( + let (transitions, final_state) = random_changeset_range( &mut rng, blocks.iter(), accounts.into_iter().map(|(addr, acc)| (addr, (acc, Vec::new()))), @@ -541,7 +540,7 @@ mod tests { 0..256, ); // add block changeset from block 1. 
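These fixtures all build on the `ChangeSet` alias the generators now export, as shown in the sketch after this hunk. A hedged, self-contained illustration of the shape involved, using simplified stand-in types rather than the real reth primitives:

    // Simplified stand-ins for the reth primitive types used by the alias.
    type Address = [u8; 20];

    #[derive(Clone, Copy)]
    struct Account {
        nonce: u64,
        balance: u128,
    }

    struct StorageEntry {
        key: [u8; 32],
        value: [u8; 32],
    }

    /// One block's worth of changes: (address, pre-state account, changed slots).
    type ChangeSet = Vec<(Address, Account, Vec<StorageEntry>)>;

    fn main() {
        let addr: Address = [0u8; 20];
        let acc = Account { nonce: 0, balance: 1 };
        let entry = StorageEntry { key: [0u8; 32], value: [1u8; 32] };
        // One ChangeSet per block, kept in ascending block order, which is
        // what insert_changesets/insert_history iterate over with an
        // optional block offset.
        let changesets: Vec<ChangeSet> = vec![vec![(addr, acc, vec![entry])]];
        assert_eq!(changesets.len(), 1);
    }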
- self.tx.insert_transitions(transitions, Some(start))?; + self.tx.insert_changesets(transitions, Some(start))?; self.tx.insert_accounts_and_storages(final_state)?; // Calculate state root diff --git a/crates/stages/src/test_utils/test_db.rs b/crates/stages/src/test_utils/test_db.rs index a4df2b207f91..630ba97f7c06 100644 --- a/crates/stages/src/test_utils/test_db.rs +++ b/crates/stages/src/test_utils/test_db.rs @@ -9,11 +9,12 @@ use reth_db::{ transaction::{DbTx, DbTxGAT, DbTxMut, DbTxMutGAT}, DatabaseEnv, DatabaseError as DbError, }; +use reth_interfaces::test_utils::generators::ChangeSet; use reth_primitives::{ keccak256, Account, Address, BlockNumber, Receipt, SealedBlock, SealedHeader, StorageEntry, TxHash, TxNumber, H256, MAINNET, U256, }; -use reth_provider::{DatabaseProviderRO, DatabaseProviderRW, ProviderFactory}; +use reth_provider::{DatabaseProviderRO, DatabaseProviderRW, HistoryWriter, ProviderFactory}; use std::{ borrow::Borrow, collections::BTreeMap, @@ -347,35 +348,62 @@ impl TestTransaction { }) } - /// Insert collection of Vec<([Address], [Account], Vec<[StorageEntry]>)> into - /// corresponding tables. - pub fn insert_transitions( + /// Insert collection of [ChangeSet] into corresponding tables. + pub fn insert_changesets( &self, - transitions: I, - transition_offset: Option, + changesets: I, + block_offset: Option, ) -> Result<(), DbError> where - I: IntoIterator)>>, + I: IntoIterator, { - let offset = transition_offset.unwrap_or_default(); + let offset = block_offset.unwrap_or_default(); self.commit(|tx| { - transitions.into_iter().enumerate().try_for_each(|(transition_id, changes)| { - changes.into_iter().try_for_each(|(address, old_account, old_storage)| { - let tid = offset + transition_id as u64; + changesets.into_iter().enumerate().try_for_each(|(block, changeset)| { + changeset.into_iter().try_for_each(|(address, old_account, old_storage)| { + let block = offset + block as u64; // Insert into account changeset. tx.put::( - tid, + block, AccountBeforeTx { address, info: Some(old_account) }, )?; - let tid_address = (tid, address).into(); + let block_address = (block, address).into(); // Insert into storage changeset. 
old_storage.into_iter().try_for_each(|entry| { - tx.put::(tid_address, entry) + tx.put::(block_address, entry) }) }) }) }) } + + pub fn insert_history( + &self, + changesets: I, + block_offset: Option, + ) -> reth_interfaces::Result<()> + where + I: IntoIterator, + { + let mut accounts = BTreeMap::>::new(); + let mut storages = BTreeMap::<(Address, H256), Vec>::new(); + + for (block, changeset) in changesets.into_iter().enumerate() { + for (address, _, storage_entries) in changeset { + accounts.entry(address).or_default().push(block as u64); + for storage_entry in storage_entries { + storages.entry((address, storage_entry.key)).or_default().push(block as u64); + } + } + } + + let provider = self.factory.provider_rw()?; + provider.insert_account_history_index(accounts)?; + provider.insert_storage_history_index(storages)?; + provider.commit()?; + + Ok(()) + } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 446065457c3c..19bab6e50791 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -17,7 +17,7 @@ use reth_db::{ sharded_key, storage_sharded_key::StorageShardedKey, AccountBeforeTx, BlockNumberAddress, ShardedKey, StoredBlockBodyIndices, StoredBlockOmmers, StoredBlockWithdrawals, }, - table::{Key, Table}, + table::Table, tables, transaction::{DbTx, DbTxMut}, BlockNumberList, DatabaseError, @@ -621,31 +621,23 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> DatabaseProvider<'this, TX> { /// Prune the table for the specified pre-sorted key iterator. /// Returns number of rows pruned. - pub fn prune_table( + pub fn prune_table_with_iterator( &self, - keys: impl IntoIterator, - ) -> std::result::Result - where - T: Table, - K: Key, - { - self.prune_table_in_batches::(keys, usize::MAX, |_| {}) + keys: impl IntoIterator, + ) -> std::result::Result { + self.prune_table_with_iterator_in_batches::(keys, usize::MAX, |_| {}) } /// Prune the table for the specified pre-sorted key iterator, calling `chunk_callback` after /// every `batch_size` pruned rows. /// /// Returns number of rows pruned. - pub fn prune_table_in_batches( + pub fn prune_table_with_iterator_in_batches( &self, - keys: impl IntoIterator, + keys: impl IntoIterator, batch_size: usize, mut batch_callback: impl FnMut(usize), - ) -> std::result::Result - where - T: Table, - K: Key, - { + ) -> std::result::Result { let mut cursor = self.tx.cursor_write::()?; let mut deleted = 0; @@ -667,6 +659,36 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> DatabaseProvider<'this, TX> { Ok(deleted) } + /// Prune the table for the specified key range, calling `chunk_callback` after every + /// `batch_size` pruned rows. + /// + /// Returns number of rows pruned. + pub fn prune_table_with_range_in_batches( + &self, + keys: impl RangeBounds, + batch_size: usize, + mut batch_callback: impl FnMut(usize), + ) -> std::result::Result { + let mut cursor = self.tx.cursor_write::()?; + let mut walker = cursor.walk_range(keys)?; + let mut deleted = 0; + + while walker.next().transpose()?.is_some() { + walker.delete_current()?; + deleted += 1; + + if deleted % batch_size == 0 { + batch_callback(batch_size); + } + } + + if deleted % batch_size != 0 { + batch_callback(deleted % batch_size); + } + + Ok(deleted) + } + /// Load shard and remove it. If list is empty, last shard was full or /// there are no shards at all. 
fn take_shard(&self, key: T::Key) -> Result> From 8f1bc8a799d6f9fe81db42f5948b3507cd2cd2cd Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 2 Aug 2023 16:06:57 +0100 Subject: [PATCH 324/722] feat(pruner): storage history (#4006) --- crates/prune/src/pruner.rs | 222 +++++++++++++++++++++++++++++- crates/rpc/rpc-builder/src/lib.rs | 2 +- 2 files changed, 220 insertions(+), 4 deletions(-) diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs index 57775390e74a..cc71421bf1fc 100644 --- a/crates/prune/src/pruner.rs +++ b/crates/prune/src/pruner.rs @@ -5,7 +5,7 @@ use rayon::prelude::*; use reth_db::{ abstraction::cursor::{DbCursorRO, DbCursorRW}, database::Database, - models::ShardedKey, + models::{storage_sharded_key::StorageShardedKey, BlockNumberAddress, ShardedKey}, tables, transaction::DbTxMut, BlockNumberList, @@ -31,6 +31,7 @@ pub struct BatchSizes { transaction_lookup: usize, transaction_senders: usize, account_history: usize, + storage_history: usize, } impl Default for BatchSizes { @@ -40,6 +41,7 @@ impl Default for BatchSizes { transaction_lookup: 10000, transaction_senders: 10000, account_history: 10000, + storage_history: 10000, } } } @@ -122,6 +124,12 @@ impl Pruner { self.prune_account_history(&provider, to_block, prune_mode)?; } + if let Some((to_block, prune_mode)) = + self.modes.prune_target_block_storage_history(tip_block_number)? + { + self.prune_storage_history(&provider, to_block, prune_mode)?; + } + provider.commit()?; self.last_pruned_block_number = Some(tip_block_number); @@ -446,6 +454,120 @@ impl Pruner { Ok(()) } + + /// Prune storage history up to the provided block, inclusive. + #[instrument(level = "trace", skip(self, provider), target = "pruner")] + fn prune_storage_history( + &self, + provider: &DatabaseProviderRW<'_, DB>, + to_block: BlockNumber, + prune_mode: PruneMode, + ) -> PrunerResult { + let from_block = provider + .get_prune_checkpoint(PrunePart::StorageHistory)? + .map(|checkpoint| checkpoint.block_number + 1) + .unwrap_or_default(); + let block_range = from_block..=to_block; + let total = block_range.clone().count(); + let range = BlockNumberAddress::range(block_range); + + let mut processed = 0; + provider.prune_table_with_range_in_batches::( + range, + self.batch_sizes.storage_history, + |entries| { + processed += entries; + trace!( + target: "pruner", + %entries, + progress = format!("{:.1}%", 100.0 * processed as f64 / total as f64), + "Pruned storage history (changesets)" + ); + }, + )?; + + let mut cursor = provider.tx_ref().cursor_write::()?; + // Prune `StorageHistory` table: + // 1. If the shard has `highest_block_number` less than or equal to the target block number + // for pruning, delete the shard completely. + // 2. If the shard has `highest_block_number` greater than the target block number for + // pruning, filter block numbers inside the shard which are less than the target + // block number for pruning. + while let Some(result) = cursor.next()? { + let (key, blocks): (StorageShardedKey, BlockNumberList) = result; + + if key.sharded_key.highest_block_number <= to_block { + // If shard consists only of block numbers less than the target one, delete shard + // completely. + cursor.delete_current()?; + if key.sharded_key.highest_block_number == to_block { + // Shard contains only block numbers up to the target one, so we can skip to the + // next storage slot for this address. 
It is guaranteed that further shards for + // this address and storage slot will not contain the target block number, as + // it's in this shard. + cursor.seek_exact(StorageShardedKey::last(key.address, key.sharded_key.key))?; + } + } else { + // Shard contains block numbers that are higher than the target one, so we need to + // filter it. It is guaranteed that further shards for this address and storage slot + // will not contain the target block number, as it's in this shard. + let blocks = blocks + .iter(0) + .skip_while(|block| *block <= to_block as usize) + .collect::>(); + if blocks.is_empty() { + // If there are no more blocks in this shard, we need to remove it, as empty + // shards are not allowed. + if key.sharded_key.highest_block_number == u64::MAX { + // If current shard is the last shard for this address and storage slot, + // replace it with the previous shard. + if let Some((prev_key, prev_value)) = cursor.prev()? { + if prev_key.address == key.address && + prev_key.sharded_key.key == key.sharded_key.key + { + cursor.delete_current()?; + // Upsert will replace the last shard for this address and storage + // slot with the previous value + cursor.upsert(key.clone(), prev_value)?; + } + } + } else { + // If current shard is not the last shard for this address, just delete it. + cursor.delete_current()?; + } + } else { + cursor.upsert(key.clone(), BlockNumberList::new_pre_sorted(blocks))?; + } + + // Jump to the next address + cursor.seek_exact(StorageShardedKey::last(key.address, key.sharded_key.key))?; + } + + processed += 1; + if processed % self.batch_sizes.storage_history == 0 { + trace!( + target: "pruner", + entries = self.batch_sizes.storage_history, + "Pruned storage history (indices)" + ); + } + } + + if processed % self.batch_sizes.storage_history != 0 { + trace!( + target: "pruner", + entries = processed % self.batch_sizes.storage_history, + "Pruned storage history (indices)" + ); + } + + provider.save_prune_checkpoint( + PrunePart::StorageHistory, + PruneCheckpoint { block_number: to_block, prune_mode }, + )?; + + Ok(()) + } } #[cfg(test)] @@ -690,7 +812,7 @@ mod tests { let (changesets, _) = random_changeset_range( &mut rng, blocks.iter(), - accounts.clone().into_iter().map(|(addr, acc)| (addr, (acc, Vec::new()))), + accounts.into_iter().map(|(addr, acc)| (addr, (acc, Vec::new()))), 0..0, 0..0, ); @@ -698,7 +820,7 @@ mod tests { tx.insert_history(changesets.clone(), None).expect("insert history"); let account_occurrences = tx.table::().unwrap().into_iter().fold( - BTreeMap::::new(), + BTreeMap::<_, usize>::new(), |mut map, (key, _)| { map.entry(key.key).or_default().add_assign(1); map @@ -764,4 +886,98 @@ mod tests { // ended last time test_prune(4500); } + + #[test] + fn prune_storage_history() { + let tx = TestTransaction::default(); + let mut rng = generators::rng(); + + let block_num = 7000; + let blocks = random_block_range(&mut rng, 0..=block_num, H256::zero(), 0..1); + tx.insert_blocks(blocks.iter(), None).expect("insert blocks"); + + let accounts = + random_eoa_account_range(&mut rng, 0..3).into_iter().collect::>(); + + let (changesets, _) = random_changeset_range( + &mut rng, + blocks.iter(), + accounts.into_iter().map(|(addr, acc)| (addr, (acc, Vec::new()))), + 1..2, + 1..2, + ); + tx.insert_changesets(changesets.clone(), None).expect("insert changesets"); + tx.insert_history(changesets.clone(), None).expect("insert history"); + + let storage_occurences = tx.table::().unwrap().into_iter().fold( + BTreeMap::<_, usize>::new(), + |mut map, (key, _)| { + 
map.entry((key.address, key.sharded_key.key)).or_default().add_assign(1); + map + }, + ); + assert!(storage_occurences.into_iter().any(|(_, occurrences)| occurrences > 1)); + + assert_eq!( + tx.table::().unwrap().len(), + changesets.iter().flatten().flat_map(|(_, _, entries)| entries).count() + ); + + let original_shards = tx.table::().unwrap(); + + let test_prune = |to_block: BlockNumber| { + let prune_mode = PruneMode::Before(to_block); + let pruner = Pruner::new( + tx.inner_raw(), + MAINNET.clone(), + 5, + PruneModes { storage_history: Some(prune_mode), ..Default::default() }, + BatchSizes { + // Less than total amount of blocks to prune to test the batching logic + storage_history: 10, + ..Default::default() + }, + ); + + let provider = tx.inner_rw(); + assert_matches!(pruner.prune_storage_history(&provider, to_block, prune_mode), Ok(())); + provider.commit().expect("commit"); + + assert_eq!( + tx.table::().unwrap().len(), + changesets[to_block as usize + 1..] + .iter() + .flatten() + .flat_map(|(_, _, entries)| entries) + .count() + ); + + let actual_shards = tx.table::().unwrap(); + + let expected_shards = original_shards + .iter() + .filter(|(key, _)| key.sharded_key.highest_block_number > to_block) + .map(|(key, blocks)| { + let new_blocks = blocks + .iter(0) + .skip_while(|block| *block <= to_block as usize) + .collect::>(); + (key.clone(), BlockNumberList::new_pre_sorted(new_blocks)) + }) + .collect::>(); + + assert_eq!(actual_shards, expected_shards); + + assert_eq!( + tx.inner().get_prune_checkpoint(PrunePart::StorageHistory).unwrap(), + Some(PruneCheckpoint { block_number: to_block, prune_mode }) + ); + }; + + // Prune first time: no previous checkpoint is present + test_prune(3000); + // Prune second time: previous checkpoint is present, should continue pruning from where + // ended last time + test_prune(4500); + } } diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index dadccb1c7b67..6b2f7461caae 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -1530,7 +1530,7 @@ impl TransportRpcModules<()> { let other = other.into(); self.merge_http(other.clone())?; self.merge_ws(other.clone())?; - self.merge_ipc(other.clone())?; + self.merge_ipc(other)?; Ok(()) } From bddb60b3fcf4d340aab75908cf3e6ad68dbe22ce Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 2 Aug 2023 16:08:38 +0100 Subject: [PATCH 325/722] feat(engine): require `VALID` latest FCU status before pruning (#3954) --- crates/consensus/beacon/src/engine/mod.rs | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 7c74bc395751..e7531d421740 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1710,10 +1710,15 @@ where // we're pending if both engine messages and sync events are pending (fully drained) let is_pending = engine_messages_pending && sync_pending; - // check prune events if pipeline is idle AND (pruning is running and we need to - // prioritize checking its events OR no engine and sync messages are pending and we may - // start pruning) - if this.sync.is_pipeline_idle() && (this.is_prune_active() || is_pending) { + // Poll prune controller if all conditions are met: + // 1. Pipeline is idle + // 2. Pruning is running and we need to prioritize checking its events OR no engine and + // sync messages are pending and we may start pruning + // 3. 
Latest FCU status is VALID + if this.sync.is_pipeline_idle() && + (this.is_prune_active() || is_pending) && + this.forkchoice_state_tracker.is_latest_valid() + { if let Some(ref mut prune) = this.prune { match prune.poll(cx, this.blockchain.canonical_tip().number) { Poll::Ready(prune_event) => { From 86ccf2f724c4781943e466bd65b8a79a011070a9 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 2 Aug 2023 16:36:01 +0100 Subject: [PATCH 326/722] feat(pruner): account and storage history metrics (#4035) --- crates/prune/src/pruner.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs index cc71421bf1fc..f34e800110df 100644 --- a/crates/prune/src/pruner.rs +++ b/crates/prune/src/pruner.rs @@ -121,13 +121,23 @@ impl Pruner { if let Some((to_block, prune_mode)) = self.modes.prune_target_block_account_history(tip_block_number)? { + let part_start = Instant::now(); self.prune_account_history(&provider, to_block, prune_mode)?; + self.metrics + .get_prune_part_metrics(PrunePart::AccountHistory) + .duration_seconds + .record(part_start.elapsed()) } if let Some((to_block, prune_mode)) = self.modes.prune_target_block_storage_history(tip_block_number)? { + let part_start = Instant::now(); self.prune_storage_history(&provider, to_block, prune_mode)?; + self.metrics + .get_prune_part_metrics(PrunePart::StorageHistory) + .duration_seconds + .record(part_start.elapsed()) } provider.commit()?; From d595834d2032d878326e738e806232bd46969002 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 2 Aug 2023 17:58:22 +0200 Subject: [PATCH 327/722] chore: make ipcpath arg default (#4036) --- bin/reth/src/args/rpc_server_args.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/bin/reth/src/args/rpc_server_args.rs b/bin/reth/src/args/rpc_server_args.rs index 13e9f2016732..16315b17aebe 100644 --- a/bin/reth/src/args/rpc_server_args.rs +++ b/bin/reth/src/args/rpc_server_args.rs @@ -103,8 +103,8 @@ pub struct RpcServerArgs { pub ipcdisable: bool, /// Filename for IPC socket/pipe within the datadir - #[arg(long)] - pub ipcpath: Option, + #[arg(long, default_value_t = constants::DEFAULT_IPC_ENDPOINT.to_string())] + pub ipcpath: String, /// Auth server address to listen on #[arg(long = "authrpc.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))] @@ -433,9 +433,8 @@ impl RpcServerArgs { } if self.is_ipc_enabled() { - config = config.with_ipc(self.ipc_server_builder()).with_ipc_endpoint( - self.ipcpath.as_ref().unwrap_or(&constants::DEFAULT_IPC_ENDPOINT.to_string()), - ); + config = + config.with_ipc(self.ipc_server_builder()).with_ipc_endpoint(self.ipcpath.clone()); } config From 94dfeb3adeb48f95e8b3f34f229bba6682b34eda Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 2 Aug 2023 18:36:48 +0200 Subject: [PATCH 328/722] fix: validate headers in full block downloader (#4034) --- crates/consensus/beacon/src/engine/mod.rs | 5 +- crates/consensus/beacon/src/engine/sync.rs | 38 +++++-- crates/interfaces/src/consensus.rs | 22 ++++ crates/interfaces/src/p2p/full_block.rs | 119 ++++++++++++--------- 4 files changed, 122 insertions(+), 62 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index e7531d421740..f1c7c12b05ab 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -26,7 +26,7 @@ use reth_primitives::{ Head, Header, SealedBlock, SealedHeader, H256, U256, }; use reth_provider::{ - BlockIdReader, 
BlockReader, BlockSource, CanonChainTracker, ProviderError, + BlockIdReader, BlockReader, BlockSource, CanonChainTracker, ChainSpecProvider, ProviderError, StageCheckpointReader, }; use reth_prune::Pruner; @@ -208,6 +208,7 @@ where + BlockIdReader + CanonChainTracker + StageCheckpointReader + + ChainSpecProvider + 'static, Client: HeadersClient + BodiesClient + Clone + Unpin + 'static, { @@ -279,6 +280,7 @@ where task_spawner.clone(), run_pipeline_continuously, max_block, + blockchain.chain_spec(), ); let prune = pruner.map(|pruner| EnginePruneController::new(pruner, task_spawner)); let mut this = Self { @@ -1651,6 +1653,7 @@ where + BlockIdReader + CanonChainTracker + StageCheckpointReader + + ChainSpecProvider + Unpin + 'static, { diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index ba988f1be87f..24b4d04a3b4c 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -1,6 +1,6 @@ //! Sync management for the engine implementation. -use crate::engine::metrics::EngineSyncMetrics; +use crate::{engine::metrics::EngineSyncMetrics, BeaconConsensus}; use futures::FutureExt; use reth_db::database::Database; use reth_interfaces::p2p::{ @@ -8,12 +8,13 @@ use reth_interfaces::p2p::{ full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, headers::client::HeadersClient, }; -use reth_primitives::{BlockNumber, SealedBlock, H256}; +use reth_primitives::{BlockNumber, ChainSpec, SealedBlock, H256}; use reth_stages::{ControlFlow, Pipeline, PipelineError, PipelineWithResult}; use reth_tasks::TaskSpawner; use std::{ cmp::{Ordering, Reverse}, collections::{binary_heap::PeekMut, BinaryHeap}, + sync::Arc, task::{ready, Context, Poll}, }; use tokio::sync::oneshot; @@ -68,9 +69,13 @@ where pipeline_task_spawner: Box, run_pipeline_continuously: bool, max_block: Option, + chain_spec: Arc, ) -> Self { Self { - full_block_client: FullBlockClient::new(client), + full_block_client: FullBlockClient::new( + client, + Arc::new(BeaconConsensus::new(chain_spec)), + ), pipeline_task_spawner, pipeline_state: PipelineState::Idle(Some(pipeline)), pending_pipeline_target: None, @@ -394,7 +399,8 @@ mod tests { }; use reth_interfaces::{p2p::either::EitherDownloader, test_utils::TestFullBlockClient}; use reth_primitives::{ - stage::StageCheckpoint, BlockBody, ChainSpec, ChainSpecBuilder, SealedHeader, MAINNET, + constants::ETHEREUM_BLOCK_GAS_LIMIT, stage::StageCheckpoint, BlockBody, ChainSpec, + ChainSpecBuilder, Header, SealedHeader, MAINNET, }; use reth_provider::{test_utils::TestExecutorFactory, PostState}; use reth_stages::{test_utils::TestStages, ExecOutput, StageError}; @@ -491,6 +497,7 @@ mod tests { fn build( self, pipeline: Pipeline, + chain_spec: Arc, ) -> EngineSyncController> where DB: Database + 'static, @@ -508,6 +515,7 @@ mod tests { // run_pipeline_continuously: false here until we want to test this false, self.max_block, + chain_spec, ) } } @@ -539,10 +547,11 @@ mod tests { checkpoint: StageCheckpoint::new(5), done: true, })])) - .build(chain_spec); + .build(chain_spec.clone()); - let mut sync_controller = - TestSyncControllerBuilder::new().with_client(client.clone()).build(pipeline); + let mut sync_controller = TestSyncControllerBuilder::new() + .with_client(client.clone()) + .build(pipeline, chain_spec); let tip = client.highest_block().expect("there should be blocks here"); sync_controller.set_pipeline_sync_target(tip.hash); @@ -577,20 +586,27 @@ mod tests { ); let client = 
TestFullBlockClient::default(); - let mut header = SealedHeader::default(); + let mut header = Header { + base_fee_per_gas: Some(7), + gas_limit: ETHEREUM_BLOCK_GAS_LIMIT, + ..Default::default() + } + .seal_slow(); let body = BlockBody::default(); for _ in 0..10 { header.parent_hash = header.hash_slow(); header.number += 1; + header.timestamp += 1; header = header.header.seal_slow(); client.insert(header.clone(), body.clone()); } // set up a pipeline - let pipeline = TestPipelineBuilder::new().build(chain_spec); + let pipeline = TestPipelineBuilder::new().build(chain_spec.clone()); - let mut sync_controller = - TestSyncControllerBuilder::new().with_client(client.clone()).build(pipeline); + let mut sync_controller = TestSyncControllerBuilder::new() + .with_client(client.clone()) + .build(pipeline, chain_spec); let tip = client.highest_block().expect("there should be blocks here"); diff --git a/crates/interfaces/src/consensus.rs b/crates/interfaces/src/consensus.rs index 76c2175c9af7..9176bd06d7f2 100644 --- a/crates/interfaces/src/consensus.rs +++ b/crates/interfaces/src/consensus.rs @@ -31,6 +31,28 @@ pub trait Consensus: Debug + Send + Sync { parent: &SealedHeader, ) -> Result<(), ConsensusError>; + /// Validates the given headers + /// + /// This ensures that the first header is valid on its own and all subsequent headers are valid + /// on its own and valid against its parent. + /// + /// Note: this expects that the headers are in natural order (ascending block number) + fn validate_header_range(&self, headers: &[SealedHeader]) -> Result<(), ConsensusError> { + if headers.is_empty() { + return Ok(()) + } + let first = headers.first().expect("checked empty"); + self.validate_header(first)?; + let mut parent = first; + for child in headers.iter().skip(1) { + self.validate_header(child)?; + self.validate_header_against_parent(child, parent)?; + parent = child; + } + + Ok(()) + } + /// Validate if the header is correct and follows the consensus specification, including /// computed properties (like total difficulty). /// diff --git a/crates/interfaces/src/p2p/full_block.rs b/crates/interfaces/src/p2p/full_block.rs index 6cc4e4b61bbb..2d3f0482f581 100644 --- a/crates/interfaces/src/p2p/full_block.rs +++ b/crates/interfaces/src/p2p/full_block.rs @@ -1,5 +1,6 @@ +use super::headers::client::HeadersRequest; use crate::{ - consensus::ConsensusError, + consensus::{Consensus, ConsensusError}, p2p::{ bodies::client::{BodiesClient, SingleBodyRequest}, error::PeerRequestResult, @@ -16,22 +17,28 @@ use std::{ fmt::Debug, future::Future, pin::Pin, + sync::Arc, task::{ready, Context, Poll}, }; use tracing::debug; -use super::headers::client::HeadersRequest; - /// A Client that can fetch full blocks from the network. #[derive(Debug, Clone)] pub struct FullBlockClient { client: Client, + consensus: Arc, } impl FullBlockClient { /// Creates a new instance of `FullBlockClient`. 
-    pub fn new(client: Client) -> Self {
-        Self { client }
+    pub fn new(client: Client, consensus: Arc<dyn Consensus>) -> Self {
+        Self { client, consensus }
+    }
+
+    /// Returns a client with Test consensus
+    #[cfg(feature = "test-utils")]
+    pub fn test_client(client: Client) -> Self {
+        Self::new(client, Arc::new(crate::test_utils::TestConsensus::default()))
     }
 }
@@ -95,6 +102,7 @@ where
             headers: None,
             pending_headers: VecDeque::new(),
             bodies: HashMap::new(),
+            consensus: Arc::clone(&self.consensus),
         }
     }
 }
@@ -186,7 +194,7 @@ where
             if let Some(header) = maybe_header {
                 if header.hash() != this.hash {
                     debug!(target: "downloaders", expected=?this.hash, received=?header.hash, "Received wrong header");
-                    // received bad header
+                    // received a different header than requested
                     this.client.report_bad_message(peer)
                 } else {
                     this.header = Some(header);
@@ -352,6 +360,8 @@ where
 {
     /// The client used to fetch headers and bodies.
     client: Client,
+    /// The consensus instance used to validate the blocks.
+    consensus: Arc<dyn Consensus>,
     /// The block hash to start fetching from (inclusive).
     start_hash: H256,
     /// How many blocks to fetch: `len([start_hash, ..]) == count`
@@ -381,6 +391,8 @@ where
     }

     /// Inserts a block body, matching it with the `next_header`.
+    ///
+    /// Note: this assumes the response matches the next header in the queue.
     fn insert_body(&mut self, body_response: BodyResponse) {
         if let Some(header) = self.pending_headers.pop_front() {
             self.bodies.insert(header, body_response);
@@ -388,8 +400,8 @@ where
     }

     /// Inserts multiple block bodies.
-    fn insert_bodies(&mut self, bodies: Vec<BodyResponse>) {
-        for body in bodies {
+    fn insert_bodies(&mut self, bodies: impl IntoIterator<Item = BodyResponse>) {
+        for body in bodies.into_iter() {
             self.insert_body(body);
         }
     }
@@ -461,6 +473,46 @@ where
         Some(response)
     }

+    fn on_headers_response(&mut self, headers: WithPeerId<Vec<Header>>) {
+        let (peer, mut headers_falling) =
+            headers.map(|h| h.into_iter().map(|h| h.seal_slow()).collect::<Vec<_>>()).split();
+
+        // fill in the response if it's the correct length
+        if headers_falling.len() == self.count as usize {
+            // sort headers from highest to lowest block number
+            headers_falling.sort_unstable_by_key(|h| Reverse(h.number));
+
+            // check the starting hash
+            if headers_falling[0].hash() != self.start_hash {
+                // received a different header than requested
+                self.client.report_bad_message(peer);
+            } else {
+                let headers_rising = headers_falling.iter().rev().cloned().collect::<Vec<_>>();
+                // ensure the downloaded headers are valid
+                if let Err(err) = self.consensus.validate_header_range(&headers_rising) {
+                    debug!(target: "downloaders", %err, ?self.start_hash, "Received bad header response");
+                    self.client.report_bad_message(peer);
+                    return
+                }
+
+                // get the bodies request so it can be polled later
+                let hashes = headers_falling.iter().map(|h| h.hash()).collect::<Vec<_>>();
+
+                // populate the pending headers
+                self.pending_headers = headers_falling.clone().into();
+
+                // set the actual request if it hasn't been started yet
+                if !self.has_bodies_request_started() {
+                    // request the bodies for the downloaded headers
+                    self.request.bodies = Some(self.client.get_block_bodies(hashes));
+                }
+
+                // set the headers response
+                self.headers = Some(headers_falling);
+            }
+        }
+    }
+
     /// Returns whether or not a bodies request has been started, returning false if there is no
     /// pending request.
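The range download above now rejects peers whose batches fail `validate_header_range`, which walks the headers in ascending order and checks each one against its parent. A hedged standalone sketch of that walk, using a minimal hypothetical header type (the real check also validates each header on its own and compares parent hashes, not just block numbers):

    // A standalone sketch with a minimal header type instead of reth's
    // SealedHeader and ConsensusError.
    struct Header {
        number: u64,
        parent_number: u64,
    }

    fn validate_header_range(headers: &[Header]) -> Result<(), String> {
        let Some(first) = headers.first() else { return Ok(()) };
        let mut parent = first;
        for child in headers.iter().skip(1) {
            // Each header must build on the one before it.
            if child.parent_number != parent.number {
                return Err(format!(
                    "header {} does not attach to parent {}",
                    child.number, parent.number
                ));
            }
            parent = child;
        }
        Ok(())
    }

    fn main() {
        let chain = vec![
            Header { number: 1, parent_number: 0 },
            Header { number: 2, parent_number: 1 },
        ];
        assert!(validate_header_range(&chain).is_ok());

        let broken = vec![
            Header { number: 1, parent_number: 0 },
            Header { number: 3, parent_number: 2 },
        ];
        assert!(validate_header_range(&broken).is_err());
    }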
fn has_bodies_request_started(&self) -> bool { @@ -500,39 +552,7 @@ where RangeResponseResult::Header(res) => { match res { Ok(headers) => { - let (peer, mut headers) = headers - .map(|h| { - h.iter().map(|h| h.clone().seal_slow()).collect::>() - }) - .split(); - - // fill in the response if it's the correct length - if headers.len() == this.count as usize { - // sort headers from highest to lowest block number - headers.sort_unstable_by_key(|h| Reverse(h.number)); - - // check the starting hash - if headers[0].hash() != this.start_hash { - // received bad response - this.client.report_bad_message(peer); - } else { - // get the bodies request so it can be polled later - let hashes = - headers.iter().map(|h| h.hash()).collect::>(); - - // populate the pending headers - this.pending_headers = headers.clone().into(); - - // set the actual request if it hasn't been started yet - if !this.has_bodies_request_started() { - this.request.bodies = - Some(this.client.get_block_bodies(hashes)); - } - - // set the headers response - this.headers = Some(headers); - } - } + this.on_headers_response(headers); } Err(err) => { debug!(target: "downloaders", %err, ?this.start_hash, "Header range download failed"); @@ -561,10 +581,9 @@ where // first insert the received bodies this.insert_bodies( new_bodies - .iter() - .map(|resp| WithPeerId::new(peer, resp.clone())) - .map(BodyResponse::PendingValidation) - .collect::>(), + .into_iter() + .map(|resp| WithPeerId::new(peer, resp)) + .map(BodyResponse::PendingValidation), ); if !this.is_bodies_complete() { @@ -723,7 +742,7 @@ mod tests { let header = SealedHeader::default(); let body = BlockBody::default(); client.insert(header.clone(), body.clone()); - let client = FullBlockClient::new(client); + let client = FullBlockClient::test_client(client); let received = client.get_full_block(header.hash()).await; assert_eq!(received, SealedBlock::new(header, body)); @@ -735,7 +754,7 @@ mod tests { let header = SealedHeader::default(); let body = BlockBody::default(); client.insert(header.clone(), body.clone()); - let client = FullBlockClient::new(client); + let client = FullBlockClient::test_client(client); let received = client.get_full_block_range(header.hash(), 1).await; let received = received.first().expect("response should include a block"); @@ -754,7 +773,7 @@ mod tests { header = header.header.seal_slow(); client.insert(header.clone(), body.clone()); } - let client = FullBlockClient::new(client); + let client = FullBlockClient::test_client(client); let received = client.get_full_block_range(header.hash(), 1).await; let received = received.first().expect("response should include a block"); @@ -780,7 +799,7 @@ mod tests { header = header.header.seal_slow(); client.insert(header.clone(), body.clone()); } - let client = FullBlockClient::new(client); + let client = FullBlockClient::test_client(client); let future = client.get_full_block_range(header.hash(), 1); let mut stream = FullBlockRangeStream::from(future); @@ -826,7 +845,7 @@ mod tests { header = header.header.seal_slow(); client.insert(header.clone(), body.clone()); } - let client = FullBlockClient::new(client); + let client = FullBlockClient::test_client(client); let received = client.get_full_block_range(header.hash(), 1).await; let received = received.first().expect("response should include a block"); From 2e1ef4dfa549b345ac03217eec4091fda57a1e38 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Wed, 2 Aug 2023 18:06:17 +0100 Subject: [PATCH 329/722] feat(prune): 
don't prune changesets if taking an incremental hashing/trie route (#4025) eth tests unrelated, new blob txs --- bin/reth/src/chain/import.rs | 6 ++ bin/reth/src/debug_cmd/execution.rs | 5 ++ bin/reth/src/debug_cmd/merkle.rs | 3 +- bin/reth/src/node/mod.rs | 12 ++-- bin/reth/src/stage/dump/merkle.rs | 28 ++++----- bin/reth/src/stage/run.rs | 23 +++----- crates/stages/benches/criterion.rs | 4 +- crates/stages/src/stages/execution.rs | 59 +++++++++++++++++-- crates/stages/src/stages/hashing_account.rs | 26 ++------ crates/stages/src/stages/hashing_storage.rs | 33 ++--------- crates/stages/src/stages/merkle.rs | 59 +++++-------------- crates/stages/src/stages/mod.rs | 1 + crates/storage/provider/src/post_state/mod.rs | 8 +-- 13 files changed, 122 insertions(+), 145 deletions(-) diff --git a/bin/reth/src/chain/import.rs b/bin/reth/src/chain/import.rs index e08f7164b585..429e5c796194 100644 --- a/bin/reth/src/chain/import.rs +++ b/bin/reth/src/chain/import.rs @@ -185,6 +185,12 @@ impl ImportCommand { max_blocks: config.stages.execution.max_blocks, max_changes: config.stages.execution.max_changes, }, + config + .stages + .merkle + .clean_threshold + .max(config.stages.account_hashing.clean_threshold) + .max(config.stages.storage_hashing.clean_threshold), config.prune.map(|prune| prune.parts).unwrap_or_default(), )), ) diff --git a/bin/reth/src/debug_cmd/execution.rs b/bin/reth/src/debug_cmd/execution.rs index 82de9e055f41..4cbe6848c2fb 100644 --- a/bin/reth/src/debug_cmd/execution.rs +++ b/bin/reth/src/debug_cmd/execution.rs @@ -142,6 +142,11 @@ impl Command { .set(ExecutionStage::new( factory, ExecutionStageThresholds { max_blocks: None, max_changes: None }, + stage_conf + .merkle + .clean_threshold + .max(stage_conf.account_hashing.clean_threshold) + .max(stage_conf.storage_hashing.clean_threshold), config.prune.map(|prune| prune.parts).unwrap_or_default(), )), ) diff --git a/bin/reth/src/debug_cmd/merkle.rs b/bin/reth/src/debug_cmd/merkle.rs index 894d0d3fb209..1de670e3a831 100644 --- a/bin/reth/src/debug_cmd/merkle.rs +++ b/bin/reth/src/debug_cmd/merkle.rs @@ -14,7 +14,7 @@ use reth_provider::{ProviderFactory, StageCheckpointReader}; use reth_stages::{ stages::{ AccountHashingStage, ExecutionStage, ExecutionStageThresholds, MerkleStage, - StorageHashingStage, + StorageHashingStage, MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, }, ExecInput, PipelineError, Stage, }; @@ -96,6 +96,7 @@ impl Command { let mut execution_stage = ExecutionStage::new( factory, ExecutionStageThresholds { max_blocks: Some(1), max_changes: None }, + MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, PruneModes::all(), ); diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 184949c4cf6e..ed3beefba42d 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -755,6 +755,11 @@ impl Command { max_blocks: stage_config.execution.max_blocks, max_changes: stage_config.execution.max_changes, }, + stage_config + .merkle + .clean_threshold + .max(stage_config.account_hashing.clean_threshold) + .max(stage_config.storage_hashing.clean_threshold), prune_config.map(|prune| prune.parts).unwrap_or_default(), ) .with_metrics_tx(metrics_tx), @@ -762,17 +767,12 @@ impl Command { .set(AccountHashingStage::new( stage_config.account_hashing.clean_threshold, stage_config.account_hashing.commit_threshold, - config.prune.map(|prune| prune.parts).unwrap_or_default(), )) .set(StorageHashingStage::new( stage_config.storage_hashing.clean_threshold, stage_config.storage_hashing.commit_threshold, - config.prune.map(|prune| 
prune.parts).unwrap_or_default(), - )) - .set(MerkleStage::new_execution( - stage_config.merkle.clean_threshold, - config.prune.map(|prune| prune.parts).unwrap_or_default(), )) + .set(MerkleStage::new_execution(stage_config.merkle.clean_threshold)) .set(TransactionLookupStage::new(stage_config.transaction_lookup.commit_threshold)) .set(IndexAccountHistoryStage::new( stage_config.index_account_history.commit_threshold, diff --git a/bin/reth/src/stage/dump/merkle.rs b/bin/reth/src/stage/dump/merkle.rs index 4339ef94e71f..69ca457fd2dc 100644 --- a/bin/reth/src/stage/dump/merkle.rs +++ b/bin/reth/src/stage/dump/merkle.rs @@ -7,7 +7,7 @@ use reth_provider::ProviderFactory; use reth_stages::{ stages::{ AccountHashingStage, ExecutionStage, ExecutionStageThresholds, MerkleStage, - StorageHashingStage, + StorageHashingStage, MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, }, Stage, UnwindInput, }; @@ -70,6 +70,7 @@ async fn unwind_and_copy( let mut exec_stage = ExecutionStage::new( reth_revm::Factory::new(db_tool.chain.clone()), ExecutionStageThresholds { max_blocks: Some(u64::MAX), max_changes: None }, + MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, PruneModes::all(), ); @@ -86,22 +87,14 @@ async fn unwind_and_copy( // Bring hashes to TO - AccountHashingStage { - clean_threshold: u64::MAX, - commit_threshold: u64::MAX, - prune_modes: PruneModes::none(), - } - .execute(&provider, execute_input) - .await - .unwrap(); - StorageHashingStage { - clean_threshold: u64::MAX, - commit_threshold: u64::MAX, - prune_modes: PruneModes::none(), - } - .execute(&provider, execute_input) - .await - .unwrap(); + AccountHashingStage { clean_threshold: u64::MAX, commit_threshold: u64::MAX } + .execute(&provider, execute_input) + .await + .unwrap(); + StorageHashingStage { clean_threshold: u64::MAX, commit_threshold: u64::MAX } + .execute(&provider, execute_input) + .await + .unwrap(); let unwind_inner_tx = provider.into_tx(); @@ -132,7 +125,6 @@ async fn dry_run( clean_threshold: u64::MAX, /* Forces updating the root instead of calculating * from * scratch */ - prune_modes: Default::default(), } .execute( &provider, diff --git a/bin/reth/src/stage/run.rs b/bin/reth/src/stage/run.rs index 9953bfc8a0e6..53cb8ec5f484 100644 --- a/bin/reth/src/stage/run.rs +++ b/bin/reth/src/stage/run.rs @@ -202,28 +202,19 @@ impl Command { max_blocks: Some(batch_size), max_changes: None, }, + config.stages.merkle.clean_threshold, config.prune.map(|prune| prune.parts).unwrap_or_default(), )), None, ) } StageEnum::TxLookup => (Box::new(TransactionLookupStage::new(batch_size)), None), - StageEnum::AccountHashing => ( - Box::new(AccountHashingStage::new( - 1, - batch_size, - config.prune.map(|prune| prune.parts).unwrap_or_default(), - )), - None, - ), - StageEnum::StorageHashing => ( - Box::new(StorageHashingStage::new( - 1, - batch_size, - config.prune.map(|prune| prune.parts).unwrap_or_default(), - )), - None, - ), + StageEnum::AccountHashing => { + (Box::new(AccountHashingStage::new(1, batch_size)), None) + } + StageEnum::StorageHashing => { + (Box::new(StorageHashingStage::new(1, batch_size)), None) + } StageEnum::Merkle => ( Box::new(MerkleStage::default_execution()), Some(Box::new(MerkleStage::default_unwind())), diff --git a/crates/stages/benches/criterion.rs b/crates/stages/benches/criterion.rs index d9b079d29847..8fce2e37035e 100644 --- a/crates/stages/benches/criterion.rs +++ b/crates/stages/benches/criterion.rs @@ -95,7 +95,7 @@ fn merkle(c: &mut Criterion) { // don't need to run each stage for that many times group.sample_size(10); - let stage = 
MerkleStage::Both { clean_threshold: u64::MAX, prune_modes: Default::default() }; + let stage = MerkleStage::Both { clean_threshold: u64::MAX }; measure_stage( &mut group, setup::unwind_hashes, @@ -104,7 +104,7 @@ fn merkle(c: &mut Criterion) { "Merkle-incremental".to_string(), ); - let stage = MerkleStage::Both { clean_threshold: 0, prune_modes: Default::default() }; + let stage = MerkleStage::Both { clean_threshold: 0 }; measure_stage( &mut group, setup::unwind_hashes, diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index fc8e789a1f05..2d0332a2ade0 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -1,7 +1,8 @@ use crate::{ - ExecInput, ExecOutput, MetricEvent, MetricEventsSender, Stage, StageError, UnwindInput, - UnwindOutput, + stages::MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, ExecInput, ExecOutput, MetricEvent, + MetricEventsSender, Stage, StageError, UnwindInput, UnwindOutput, }; +use num_traits::Zero; use reth_db::{ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, database::Database, @@ -59,6 +60,11 @@ pub struct ExecutionStage { executor_factory: EF, /// The commit thresholds of the execution stage. thresholds: ExecutionStageThresholds, + /// The highest threshold (in number of blocks) for switching between incremental + /// and full calculations across [`super::MerkleStage`], [`super::AccountHashingStage`] and + /// [`super::StorageHashingStage`]. This is required to figure out whether or not + /// changesets can be pruned on subsequent pipeline runs. + external_clean_threshold: u64, /// Pruning configuration. prune_modes: PruneModes, } @@ -68,16 +74,28 @@ impl ExecutionStage { pub fn new( executor_factory: EF, thresholds: ExecutionStageThresholds, + external_clean_threshold: u64, prune_modes: PruneModes, ) -> Self { - Self { metrics_tx: None, executor_factory, thresholds, prune_modes } + Self { + metrics_tx: None, + external_clean_threshold, + executor_factory, + thresholds, + prune_modes, + } } /// Create an execution stage with the provided executor factory. /// /// The commit threshold will be set to 10_000. pub fn new_with_factory(executor_factory: EF) -> Self { - Self::new(executor_factory, ExecutionStageThresholds::default(), PruneModes::default()) + Self::new( + executor_factory, + ExecutionStageThresholds::default(), + MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, + PruneModes::default(), + ) } /// Set the metric events sender. @@ -98,6 +116,7 @@ impl ExecutionStage { let start_block = input.next_block(); let max_block = input.target(); + let prune_modes = self.adjust_prune_modes(provider, start_block, max_block)?; // Build executor let mut executor = @@ -110,7 +129,7 @@ impl ExecutionStage { // Execute block range let mut state = PostState::default(); - state.add_prune_modes(self.prune_modes); + state.add_prune_modes(prune_modes); for block_number in start_block..=max_block { let td = provider @@ -163,6 +182,35 @@ impl ExecutionStage { done, }) } + + /// Adjusts the prune modes related to changesets. + /// + /// This function verifies whether the [`super::MerkleStage`] or Hashing stages will run from + /// scratch. If at least one stage isn't starting anew, it implies that pruning of + /// changesets cannot occur. This is determined by checking the highest clean threshold + /// (`self.external_clean_threshold`) across the stages.
+ /// + /// Given that `start_block` changes with each checkpoint, it's necessary to inspect + /// [`tables::AccountsTrie`] to ensure that [`super::MerkleStage`] hasn't + /// been previously executed. + fn adjust_prune_modes( + &self, + provider: &DatabaseProviderRW<'_, &DB>, + start_block: u64, + max_block: u64, + ) -> Result { + let mut prune_modes = self.prune_modes; + + // If we're not executing MerkleStage from scratch (by threshold or first-sync), then erase + // changeset related pruning configurations + if !(max_block - start_block > self.external_clean_threshold || + provider.tx_ref().entries::()?.is_zero()) + { + prune_modes.account_history = None; + prune_modes.storage_history = None; + } + Ok(prune_modes) + } } fn execution_checkpoint( @@ -438,6 +486,7 @@ mod tests { ExecutionStage::new( factory, ExecutionStageThresholds { max_blocks: Some(100), max_changes: None }, + MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, PruneModes::none(), ) } diff --git a/crates/stages/src/stages/hashing_account.rs b/crates/stages/src/stages/hashing_account.rs index 7eed29bd33db..ccb6fb960fd0 100644 --- a/crates/stages/src/stages/hashing_account.rs +++ b/crates/stages/src/stages/hashing_account.rs @@ -15,7 +15,6 @@ use reth_primitives::{ AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, StageCheckpoint, StageId, }, - PruneModes, }; use reth_provider::{AccountExtReader, DatabaseProviderRW, HashingWriter}; use std::{ @@ -35,25 +34,18 @@ pub struct AccountHashingStage { pub clean_threshold: u64, /// The maximum number of accounts to process before committing. pub commit_threshold: u64, - /// Prune mode configuration. Required to know if we can actually make an incremental - /// update based on how many changesets exist. - pub prune_modes: PruneModes, } impl AccountHashingStage { /// Create new instance of [AccountHashingStage]. - pub fn new(clean_threshold: u64, commit_threshold: u64, prune_modes: PruneModes) -> Self { - Self { clean_threshold, commit_threshold, prune_modes } + pub fn new(clean_threshold: u64, commit_threshold: u64) -> Self { + Self { clean_threshold, commit_threshold } } } impl Default for AccountHashingStage { fn default() -> Self { - Self { - clean_threshold: 500_000, - commit_threshold: 100_000, - prune_modes: PruneModes::default(), - } + Self { clean_threshold: 500_000, commit_threshold: 100_000 } } } @@ -151,19 +143,12 @@ impl Stage for AccountHashingStage { } let (from_block, to_block) = input.next_block_range().into_inner(); - let has_enough_changesets = self - .prune_modes - .prune_target_block_account_history(to_block)? - .map(|(block_number, _)| block_number) - .unwrap_or_default() < - from_block; // if there are more blocks then threshold it is faster to go over Plain state and hash all // account otherwise take changesets aggregate the sets and apply hashing to // AccountHashing table. Also, if we start from genesis, we need to hash from scratch, as // genesis accounts are not in changeset. 
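A reduced model of the gating in `adjust_prune_modes` above (hypothetical helper, names assumed, not in the patch); changeset pruning stays enabled only when the next run is guaranteed to rebuild from scratch anyway:

// `range_len` = max_block - start_block; `trie_entries` = number of rows in
// tables::AccountsTrie. If neither a full rebuild by threshold nor a first
// sync is guaranteed, history prune modes are erased so changesets remain
// available for incremental updates.
fn can_prune_changesets(range_len: u64, external_clean_threshold: u64, trie_entries: usize) -> bool {
    range_len > external_clean_threshold || trie_entries == 0
}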
- if to_block - from_block > self.clean_threshold || from_block == 1 || !has_enough_changesets - { + if to_block - from_block > self.clean_threshold || from_block == 1 { let tx = provider.tx_ref(); let stage_checkpoint = input .checkpoint @@ -463,7 +448,6 @@ mod tests { pub(crate) tx: TestTransaction, commit_threshold: u64, clean_threshold: u64, - prune_modes: PruneModes, } impl AccountHashingTestRunner { @@ -527,7 +511,6 @@ mod tests { tx: TestTransaction::default(), commit_threshold: 1000, clean_threshold: 1000, - prune_modes: PruneModes::default(), } } } @@ -543,7 +526,6 @@ mod tests { Self::S { commit_threshold: self.commit_threshold, clean_threshold: self.clean_threshold, - prune_modes: self.prune_modes, } } } diff --git a/crates/stages/src/stages/hashing_storage.rs b/crates/stages/src/stages/hashing_storage.rs index c6a65b3d80cb..040b6375bd02 100644 --- a/crates/stages/src/stages/hashing_storage.rs +++ b/crates/stages/src/stages/hashing_storage.rs @@ -14,7 +14,7 @@ use reth_primitives::{ CheckpointBlockRange, EntitiesCheckpoint, StageCheckpoint, StageId, StorageHashingCheckpoint, }, - PruneModes, StorageEntry, + StorageEntry, }; use reth_provider::{DatabaseProviderRW, HashingWriter, StorageReader}; use std::{collections::BTreeMap, fmt::Debug}; @@ -29,25 +29,18 @@ pub struct StorageHashingStage { pub clean_threshold: u64, /// The maximum number of slots to process before committing. pub commit_threshold: u64, - /// Prune mode configuration. Required to know if we can actually make an incremental - /// update based on how many changesets exist. - pub prune_modes: PruneModes, } impl StorageHashingStage { /// Create new instance of [StorageHashingStage]. - pub fn new(clean_threshold: u64, commit_threshold: u64, prune_modes: PruneModes) -> Self { - Self { clean_threshold, commit_threshold, prune_modes } + pub fn new(clean_threshold: u64, commit_threshold: u64) -> Self { + Self { clean_threshold, commit_threshold } } } impl Default for StorageHashingStage { fn default() -> Self { - Self { - clean_threshold: 500_000, - commit_threshold: 100_000, - prune_modes: PruneModes::default(), - } + Self { clean_threshold: 500_000, commit_threshold: 100_000 } } } @@ -70,19 +63,12 @@ impl Stage for StorageHashingStage { } let (from_block, to_block) = input.next_block_range().into_inner(); - let has_enough_changesets = self - .prune_modes - .prune_target_block_storage_history(to_block)? - .map(|(block_number, _)| block_number) - .unwrap_or_default() < - from_block; // if there are more blocks then threshold it is faster to go over Plain state and hash all // account otherwise take changesets aggregate the sets and apply hashing to // AccountHashing table. Also, if we start from genesis, we need to hash from scratch, as // genesis accounts are not in changeset, along with their storages. 
- if to_block - from_block > self.clean_threshold || from_block == 1 || !has_enough_changesets - { + if to_block - from_block > self.clean_threshold || from_block == 1 { let stage_checkpoint = input .checkpoint .and_then(|checkpoint| checkpoint.storage_hashing_stage_checkpoint()); @@ -471,17 +457,11 @@ mod tests { tx: TestTransaction, commit_threshold: u64, clean_threshold: u64, - prune_modes: PruneModes, } impl Default for StorageHashingTestRunner { fn default() -> Self { - Self { - tx: TestTransaction::default(), - commit_threshold: 1000, - clean_threshold: 1000, - prune_modes: PruneModes::default(), - } + Self { tx: TestTransaction::default(), commit_threshold: 1000, clean_threshold: 1000 } } } @@ -496,7 +476,6 @@ mod tests { Self::S { commit_threshold: self.commit_threshold, clean_threshold: self.clean_threshold, - prune_modes: self.prune_modes, } } } diff --git a/crates/stages/src/stages/merkle.rs b/crates/stages/src/stages/merkle.rs index 64d27426c551..d5e510db5c93 100644 --- a/crates/stages/src/stages/merkle.rs +++ b/crates/stages/src/stages/merkle.rs @@ -10,7 +10,7 @@ use reth_primitives::{ hex, stage::{EntitiesCheckpoint, MerkleCheckpoint, StageCheckpoint, StageId}, trie::StoredSubNode, - BlockNumber, PruneModes, SealedHeader, H256, + BlockNumber, SealedHeader, H256, }; use reth_provider::{ DatabaseProviderRW, HeaderProvider, ProviderError, StageCheckpointReader, StageCheckpointWriter, @@ -19,6 +19,10 @@ use reth_trie::{IntermediateStateRootState, StateRoot, StateRootProgress}; use std::fmt::Debug; use tracing::*; +/// The default threshold (in number of blocks) for switching from incremental trie building +/// of changes to whole rebuild. +pub const MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD: u64 = 50_000; + /// The merkle hashing stage uses input from /// [`AccountHashingStage`][crate::stages::AccountHashingStage] and /// [`StorageHashingStage`][crate::stages::AccountHashingStage] to calculate intermediate hashes @@ -47,9 +51,6 @@ pub enum MerkleStage { /// The threshold (in number of blocks) for switching from incremental trie building /// of changes to whole rebuild. clean_threshold: u64, - /// Prune mode configuration. Required to know if we can actually make an incremental root - /// update based on how many changesets exist. - prune_modes: PruneModes, }, /// The unwind portion of the merkle stage. Unwind, @@ -57,13 +58,13 @@ pub enum MerkleStage { /// Able to execute and unwind. Used for tests #[cfg(any(test, feature = "test-utils"))] #[allow(missing_docs)] - Both { clean_threshold: u64, prune_modes: PruneModes }, + Both { clean_threshold: u64 }, } impl MerkleStage { /// Stage default for the [MerkleStage::Execution]. pub fn default_execution() -> Self { - Self::Execution { clean_threshold: 50_000, prune_modes: PruneModes::default() } + Self::Execution { clean_threshold: MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD } } /// Stage default for the [MerkleStage::Unwind]. @@ -72,8 +73,8 @@ impl MerkleStage { } /// Create new instance of [MerkleStage::Execution]. - pub fn new_execution(clean_threshold: u64, prune_modes: PruneModes) -> Self { - Self::Execution { clean_threshold, prune_modes } + pub fn new_execution(clean_threshold: u64) -> Self { + Self::Execution { clean_threshold } } /// Check that the computed state root matches the root in the expected header. @@ -131,26 +132,6 @@ impl MerkleStage { } Ok(provider.save_stage_checkpoint_progress(StageId::MerkleExecute, buf)?) 
} - - /// When pruning is enabled for account and storage history, we might not have all changesets - /// required for an incremental state root update on a pipeline re-run. - pub fn has_enough_changesets( - &self, - prune_modes: PruneModes, - from_block: BlockNumber, - to_block: BlockNumber, - ) -> Result { - Ok(prune_modes - .prune_target_block_account_history(to_block)? - .map(|(block_number, _)| block_number) - .unwrap_or_default() < - from_block && - prune_modes - .prune_target_block_storage_history(to_block)? - .map(|(block_number, _)| block_number) - .unwrap_or_default() < - from_block) - } } #[async_trait::async_trait] @@ -171,16 +152,14 @@ impl Stage for MerkleStage { provider: &DatabaseProviderRW<'_, &DB>, input: ExecInput, ) -> Result { - let (threshold, prune_modes) = match self { + let threshold = match self { MerkleStage::Unwind => { info!(target: "sync::stages::merkle::unwind", "Stage is always skipped"); return Ok(ExecOutput::done(StageCheckpoint::new(input.target()))) } - MerkleStage::Execution { clean_threshold, prune_modes } => { - (*clean_threshold, *prune_modes) - } + MerkleStage::Execution { clean_threshold } => *clean_threshold, #[cfg(any(test, feature = "test-utils"))] - MerkleStage::Both { clean_threshold, prune_modes } => (*clean_threshold, *prune_modes), + MerkleStage::Both { clean_threshold } => *clean_threshold, }; let range = input.next_block_range(); @@ -195,10 +174,7 @@ impl Stage for MerkleStage { let mut checkpoint = self.get_execution_checkpoint(provider)?; let (trie_root, entities_checkpoint) = if range.is_empty() { (target_block_root, input.checkpoint().entities_stage_checkpoint().unwrap_or_default()) - } else if to_block - from_block > threshold || - from_block == 1 || - !self.has_enough_changesets(prune_modes, from_block, to_block)? 
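The decision rule shared by the merkle and hashing stages, extracted here as a hypothetical helper for clarity (the real code inlines this condition):

// Rebuild from scratch when the range exceeds the clean threshold or when
// syncing from genesis (block 1); otherwise update incrementally from
// changesets.
fn use_full_rebuild(from_block: u64, to_block: u64, clean_threshold: u64) -> bool {
    to_block - from_block > clean_threshold || from_block == 1
}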
- { + } else if to_block - from_block > threshold || from_block == 1 { // if there are more blocks than threshold it is faster to rebuild the trie let mut entities_checkpoint = if let Some(checkpoint) = checkpoint.as_ref().filter(|c| c.target_block == to_block) @@ -471,16 +447,11 @@ mod tests { struct MerkleTestRunner { tx: TestTransaction, clean_threshold: u64, - prune_modes: PruneModes, } impl Default for MerkleTestRunner { fn default() -> Self { - Self { - tx: TestTransaction::default(), - clean_threshold: 10000, - prune_modes: PruneModes::default(), - } + Self { tx: TestTransaction::default(), clean_threshold: 10000 } } } @@ -492,7 +463,7 @@ mod tests { } fn stage(&self) -> Self::S { - Self::S::Both { clean_threshold: self.clean_threshold, prune_modes: self.prune_modes } + Self::S::Both { clean_threshold: self.clean_threshold } } } diff --git a/crates/stages/src/stages/mod.rs b/crates/stages/src/stages/mod.rs index d1086f95af30..777041fbcac1 100644 --- a/crates/stages/src/stages/mod.rs +++ b/crates/stages/src/stages/mod.rs @@ -134,6 +134,7 @@ mod tests { let mut execution_stage = ExecutionStage::new( Factory::new(Arc::new(ChainSpecBuilder::mainnet().berlin_activated().build())), ExecutionStageThresholds { max_blocks: Some(100), max_changes: None }, + MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, prune_modes, ); diff --git a/crates/storage/provider/src/post_state/mod.rs b/crates/storage/provider/src/post_state/mod.rs index 300b7e27d6e4..1aed0e666934 100644 --- a/crates/storage/provider/src/post_state/mod.rs +++ b/crates/storage/provider/src/post_state/mod.rs @@ -528,6 +528,10 @@ impl PostState { std::mem::take(&mut self.storage_changes).inner.into_iter() { for (address, mut storage) in storage_changes.into_iter() { + if self.prune_modes.should_prune_storage_history(block_number, tip) { + continue + } + let storage_id = BlockNumberAddress((block_number, address)); // If the account was created and wiped at the same block, skip all storage changes @@ -561,10 +565,6 @@ impl PostState { } } - if self.prune_modes.should_prune_storage_history(block_number, tip) { - continue - } - for (slot, old_value) in storage.storage { tracing::trace!(target: "provider::post_state", ?storage_id, ?slot, ?old_value, "Storage changed"); storage_changeset_cursor.append_dup( From 248faa4ee8d0a2610f602a46d9fa1d4d0d1fdb82 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 2 Aug 2023 20:34:10 +0200 Subject: [PATCH 330/722] chore: ignore ef blobTx test (#4038) --- testing/ef-tests/src/cases/blockchain_test.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs index 1bb4b37a06f6..e2f7fc8ac65d 100644 --- a/testing/ef-tests/src/cases/blockchain_test.rs +++ b/testing/ef-tests/src/cases/blockchain_test.rs @@ -182,5 +182,14 @@ pub fn should_skip(path: &Path) -> bool { return true } + if path.file_name() == Some(OsStr::new("ValueOverflow.json")) { + return true + } + + // TODO: re-enable when blobtx are supported + if path.file_name() == Some(OsStr::new("blobtxExample.json")) { + return true + } + false } From 8c70524fc6031dcc268fd771797f35d6229848e7 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 2 Aug 2023 20:39:09 +0200 Subject: [PATCH 331/722] chore: dont penalize on dropped connections (#4031) --- crates/net/network/src/transactions.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/crates/net/network/src/transactions.rs b/crates/net/network/src/transactions.rs index 
55c434593e53..d05c74172724 100644 --- a/crates/net/network/src/transactions.rs +++ b/crates/net/network/src/transactions.rs @@ -491,7 +491,8 @@ where RequestError::UnsupportedCapability => ReputationChangeKind::BadProtocol, RequestError::Timeout => ReputationChangeKind::Timeout, RequestError::ChannelClosed | RequestError::ConnectionDropped => { - ReputationChangeKind::Dropped + // peer is already disconnected + return } RequestError::BadResponse => ReputationChangeKind::BadTransactions, }; @@ -561,7 +562,8 @@ where this.on_request_error(req.peer_id, req_err); } Poll::Ready(Err(_)) => { - this.on_request_error(req.peer_id, RequestError::ConnectionDropped) + // request channel closed/dropped + this.on_request_error(req.peer_id, RequestError::ChannelClosed) } } } From ce6e24770e7f3ce6d3fa8356f95d5a4a6cb9a5b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=BB=A4=E7=8B=90=E4=B8=80=E5=86=B2?= <43949039+anonymousGiga@users.noreply.github.com> Date: Thu, 3 Aug 2023 17:42:00 +0800 Subject: [PATCH 332/722] Fix: successful execution of 'reth stage --commit' did not write results to the database (#4027) --- bin/reth/src/stage/run.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/bin/reth/src/stage/run.rs b/bin/reth/src/stage/run.rs index 53cb8ec5f484..0ec5d7d4fccc 100644 --- a/bin/reth/src/stage/run.rs +++ b/bin/reth/src/stage/run.rs @@ -265,6 +265,10 @@ impl Command { } } + if self.commit { + provider_rw.commit()?; + } + Ok(()) } } From 704c0987dfbbfb3cd271fe06d95a6b3ebae2da42 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Thu, 3 Aug 2023 13:07:37 +0100 Subject: [PATCH 333/722] feat: use `DepositContract` on `ChainSpec` (#4041) Co-authored-by: Matthias Seitz --- bin/reth/src/args/pruning_args.rs | 5 ++- bin/reth/src/init.rs | 2 +- crates/primitives/src/chain/spec.rs | 49 +++++++++++++++++++++++------ crates/primitives/src/hardfork.rs | 4 +-- crates/prune/src/pruner.rs | 2 +- 5 files changed, 47 insertions(+), 15 deletions(-) diff --git a/bin/reth/src/args/pruning_args.rs b/bin/reth/src/args/pruning_args.rs index 42f8d6571c44..cb71dce15f0e 100644 --- a/bin/reth/src/args/pruning_args.rs +++ b/bin/reth/src/args/pruning_args.rs @@ -25,7 +25,10 @@ impl PruningArgs { parts: PruneModes { sender_recovery: Some(PruneMode::Distance(128)), transaction_lookup: None, - receipts: chain_spec.deposit_contract_deployment_block.map(PruneMode::Before), + receipts: chain_spec + .deposit_contract + .as_ref() + .map(|contract| PruneMode::Before(contract.block)), account_history: Some(PruneMode::Distance(128)), storage_history: Some(PruneMode::Distance(128)), }, diff --git a/bin/reth/src/init.rs b/bin/reth/src/init.rs index 1e3cae87787f..693940a93a03 100644 --- a/bin/reth/src/init.rs +++ b/bin/reth/src/init.rs @@ -272,7 +272,7 @@ mod tests { fork_timestamps: ForkTimestamps::default(), genesis_hash: None, paris_block_and_final_difficulty: None, - deposit_contract_deployment_block: None, + deposit_contract: None, }); let db = create_test_rw_db(); diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index f23839d1daea..ca514a628808 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -3,8 +3,8 @@ use crate::{ forkid::ForkFilterKey, header::Head, proofs::genesis_state_root, - BlockNumber, Chain, ForkFilter, ForkHash, ForkId, Genesis, Hardfork, Header, SealedHeader, - H256, U256, + Address, BlockNumber, Chain, ForkFilter, ForkHash, ForkId, Genesis, Hardfork, Header, + SealedHeader, H160, H256, U256, }; use 
hex_literal::hex; use once_cell::sync::Lazy; @@ -55,7 +55,11 @@ pub static MAINNET: Lazy> = Lazy::new(|| { (Hardfork::Shanghai, ForkCondition::Timestamp(1681338455)), ]), // https://etherscan.io/tx/0xe75fb554e433e03763a1560646ee22dcb74e5274b34c5ad644e7c0f619a7e1d0 - deposit_contract_deployment_block: Some(11052984), + deposit_contract: Some(DepositContract::new( + H160(hex!("00000000219ab540356cbb839cbe05303d7705fa")), + 11052984, + H256(hex!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5")), + )), } .into() }); @@ -91,7 +95,11 @@ pub static GOERLI: Lazy> = Lazy::new(|| { (Hardfork::Shanghai, ForkCondition::Timestamp(1678832736)), ]), // https://goerli.etherscan.io/tx/0xa3c07dc59bfdb1bfc2d50920fed2ef2c1c4e0a09fe2325dbc14e07702f965a78 - deposit_contract_deployment_block: Some(4367322), + deposit_contract: Some(DepositContract::new( + H160(hex!("ff50ed3d0ec03ac01d4c79aad74928bff48a7b2b")), + 4367322, + H256(hex!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5")), + )), } .into() }); @@ -131,7 +139,11 @@ pub static SEPOLIA: Lazy> = Lazy::new(|| { (Hardfork::Shanghai, ForkCondition::Timestamp(1677557088)), ]), // https://sepolia.etherscan.io/tx/0x025ecbf81a2f1220da6285d1701dc89fb5a956b62562ee922e1a9efd73eb4b14 - deposit_contract_deployment_block: Some(1273020), + deposit_contract: Some(DepositContract::new( + H160(hex!("7f02c3e3c98b133055b8b348b2ac625669ed295d")), + 1273020, + H256(hex!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5")), + )), } .into() }); @@ -169,7 +181,7 @@ pub static DEV: Lazy> = Lazy::new(|| { ), (Hardfork::Shanghai, ForkCondition::Timestamp(0)), ]), - deposit_contract_deployment_block: Some(0), + deposit_contract: None, // TODO: do we even have? } .into() }); @@ -209,9 +221,9 @@ pub struct ChainSpec { /// The active hard forks and their activation conditions pub hardforks: BTreeMap, - /// The block at which the deposit contract for PoS was deployed. + /// The deposit contract deployed for PoS. #[serde(skip, default)] - pub deposit_contract_deployment_block: Option, + pub deposit_contract: Option, } impl ChainSpec { @@ -444,7 +456,7 @@ impl From for ChainSpec { fork_timestamps: ForkTimestamps::from_hardforks(&hardforks), hardforks, paris_block_and_final_difficulty: None, - deposit_contract_deployment_block: None, + deposit_contract: None, } } } @@ -667,7 +679,7 @@ impl ChainSpecBuilder { fork_timestamps: ForkTimestamps::from_hardforks(&self.hardforks), hardforks: self.hardforks, paris_block_and_final_difficulty: None, - deposit_contract_deployment_block: None, + deposit_contract: None, } } } @@ -960,6 +972,23 @@ where } } +/// PoS deposit contract details. 
+#[derive(Debug, Clone, PartialEq, Eq)] +pub struct DepositContract { + /// Deposit Contract Address + pub address: Address, + /// Deployment Block + pub block: BlockNumber, + /// `DepositEvent` event signature + pub topic: H256, +} + +impl DepositContract { + fn new(address: Address, block: BlockNumber, topic: H256) -> Self { + DepositContract { address, block, topic } + } +} + #[cfg(test)] mod tests { use crate::{ diff --git a/crates/primitives/src/hardfork.rs b/crates/primitives/src/hardfork.rs index 724ddf93a39a..f82310d68de9 100644 --- a/crates/primitives/src/hardfork.rs +++ b/crates/primitives/src/hardfork.rs @@ -164,7 +164,7 @@ mod tests { hardforks: BTreeMap::from([(Hardfork::Frontier, ForkCondition::Never)]), fork_timestamps: Default::default(), paris_block_and_final_difficulty: None, - deposit_contract_deployment_block: None, + deposit_contract: None, }; assert_eq!(Hardfork::Frontier.fork_id(&spec), None); @@ -179,7 +179,7 @@ mod tests { hardforks: BTreeMap::from([(Hardfork::Shanghai, ForkCondition::Never)]), fork_timestamps: Default::default(), paris_block_and_final_difficulty: None, - deposit_contract_deployment_block: None, + deposit_contract: None, }; assert_eq!(Hardfork::Shanghai.fork_filter(&spec), None); diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs index f34e800110df..5f19431c9e05 100644 --- a/crates/prune/src/pruner.rs +++ b/crates/prune/src/pruner.rs @@ -592,7 +592,7 @@ mod tests { }, }; use reth_primitives::{ - Address, BlockNumber, PruneCheckpoint, PruneMode, PruneModes, PrunePart, H256, MAINNET, + BlockNumber, PruneCheckpoint, PruneMode, PruneModes, PrunePart, H256, MAINNET, }; use reth_provider::PruneCheckpointReader; use reth_stages::test_utils::TestTransaction; From 3f9c00ea4042f166620de836c6b28b90b320e054 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 3 Aug 2023 14:36:22 +0200 Subject: [PATCH 334/722] perf: spawn ipc requests (#4039) --- crates/rpc/ipc/src/server/mod.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index 7911f037d991..eb376d30da3d 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -279,7 +279,15 @@ impl Service for TowerService { method_sink: self.inner.method_sink.clone(), id_provider: self.inner.id_provider.clone(), }; - Box::pin(ipc::handle_request(request, data).map(Ok)) + + // an ipc connection needs to handle read+write concurrently + // even if the underlying rpc handler spawns the actual work or does a lot of async work, any + // additional overhead performed by `handle_request` can result in I/O latencies; for + // example, tracing calls are relatively CPU expensive on serde::serialize alone. Moving this + // work to a separate task takes the pressure off the connection so all concurrent responses + // are also serialized concurrently and the connection can focus on read+write + let f = tokio::task::spawn(async move { ipc::handle_request(request, data).await }); + Box::pin(async move { f.await.map_err(|err| err.into()) }) } } From e9cb4148964bf555cb486d66e972fd89026c4a4c Mon Sep 17 00:00:00 2001 From: 0x6020c0 <140005734+0x6020c0@users.noreply.github.com> Date: Thu, 3 Aug 2023 14:43:09 +0200 Subject: [PATCH 335/722] feat: add Erigon's debugTraceCallMany (#3878) Co-authored-by: Matthias Seitz --- crates/rpc/rpc-api/src/debug.rs | 23 +++++- crates/rpc/rpc-types/src/eth/call.rs | 94 +++++++++++++++++++++- crates/rpc/rpc-types/src/eth/mod.rs | 2 +- crates/rpc/rpc/src/debug.rs | 116
++++++++++++++++++++++++++++++++++++++++++++++++++- 4 files changed, 228 insertions(+), 7 deletions(-) diff --git a/crates/rpc/rpc-api/src/debug.rs b/crates/rpc/rpc-api/src/debug.rs index 0d29042b5efd..6fba56c83d78 100644 --- a/crates/rpc/rpc-api/src/debug.rs +++ b/crates/rpc/rpc-api/src/debug.rs @@ -1,11 +1,12 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use reth_primitives::{BlockId, BlockNumberOrTag, Bytes, H256}; use reth_rpc_types::{ + state::StateOverride, trace::geth::{ BlockTraceResult, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, TraceResult, }, - CallRequest, RichBlock, + Bundle, CallRequest, RichBlock, StateContext, }; /// Debug rpc interface. @@ -102,4 +103,24 @@ pub trait DebugApi { block_number: Option, opts: Option, ) -> RpcResult; + + /// The `debug_traceCallMany` method lets you run an `eth_callMany` within the context of the + /// given block execution, using the final state of the parent block as the base, followed by n + /// transactions. + /// + /// The first argument is a list of bundles. Each bundle can overwrite the block headers. This + /// will affect all transactions in that bundle. + /// BlockNumber and transaction_index are optional. Transaction_index + /// specifies the number of tx in the block to replay and -1 means all transactions should be + /// replayed. + /// The trace can be configured similar to `debug_traceTransaction`. + /// State overrides apply to all bundles. + #[method(name = "traceCallMany")] + async fn debug_trace_call_many( + &self, + bundles: Vec, + state_context: Option, + opts: Option, + state_override: Option, + ) -> RpcResult>; } diff --git a/crates/rpc/rpc-types/src/eth/call.rs b/crates/rpc/rpc-types/src/eth/call.rs index 9aec714c1266..231eb3473c83 100644 --- a/crates/rpc/rpc-types/src/eth/call.rs +++ b/crates/rpc/rpc-types/src/eth/call.rs @@ -1,5 +1,80 @@ -use reth_primitives::{AccessList, Address, Bytes, U256, U64, U8}; -use serde::{Deserialize, Serialize}; +use reth_primitives::{AccessList, Address, BlockId, Bytes, U256, U64, U8}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; + +use crate::BlockOverrides; + +/// Bundle of transactions +#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize)] +#[serde(default, rename_all = "camelCase")] +pub struct Bundle { + /// Transactions + pub transactions: Vec, + /// Block overrides + pub block_override: Option, +} + +/// State context for callMany +#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize)] +#[serde(default, rename_all = "camelCase")] +pub struct StateContext { + /// Block Number + pub block_number: Option, + /// Inclusive number of tx to replay in block.
-1 means replay all + pub transaction_index: Option, +} + +/// Represents a transaction index where -1 means all transactions +#[derive(Debug, Copy, Clone, Eq, PartialEq, Default)] +pub enum TransactionIndex { + /// -1 means all transactions + #[default] + All, + /// Transaction index + Index(usize), +} + +impl TransactionIndex { + /// Returns true if this is the all variant + pub fn is_all(&self) -> bool { + matches!(self, TransactionIndex::All) + } + + /// Returns the index if this is the index variant + pub fn index(&self) -> Option { + match self { + TransactionIndex::All => None, + TransactionIndex::Index(idx) => Some(*idx), + } + } +} + +impl Serialize for TransactionIndex { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match self { + TransactionIndex::All => serializer.serialize_i8(-1), + TransactionIndex::Index(idx) => idx.serialize(serializer), + } + } +} + +impl<'de> Deserialize<'de> for TransactionIndex { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + match isize::deserialize(deserializer)? { + -1 => Ok(TransactionIndex::All), + idx if idx < -1 => Err(serde::de::Error::custom(format!( + "Invalid transaction index, expected -1 or positive integer, got {}", + idx + ))), + idx => Ok(TransactionIndex::Index(idx as usize)), + } + } +} /// Call request #[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize)] @@ -111,6 +186,21 @@ pub struct CallInputError; mod tests { use super::*; + #[test] + fn transaction_index() { + let s = "-1"; + let idx = serde_json::from_str::(s).unwrap(); + assert_eq!(idx, TransactionIndex::All); + + let s = "5"; + let idx = serde_json::from_str::(s).unwrap(); + assert_eq!(idx, TransactionIndex::Index(5)); + + let s = "-2"; + let res = serde_json::from_str::(s); + assert!(res.is_err()); + } + #[test] fn serde_call_request() { let s = r#"{"accessList":[],"data":"0x0902f1ac","to":"0xa478c2975ab1ea89e8196811f51a7b7ade33eb11","type":"0x02"}"#; diff --git a/crates/rpc/rpc-types/src/eth/mod.rs b/crates/rpc/rpc-types/src/eth/mod.rs index 446e4192f615..528e4cffc769 100644 --- a/crates/rpc/rpc-types/src/eth/mod.rs +++ b/crates/rpc/rpc-types/src/eth/mod.rs @@ -19,7 +19,7 @@ mod work; pub use account::*; pub use block::*; -pub use call::{CallInput, CallInputError, CallRequest}; +pub use call::{Bundle, CallInput, CallInputError, CallRequest, StateContext}; pub use fee::{FeeHistory, TxGasAndReward}; pub use filter::*; pub use index::Index; diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 3fd5bdef06bd..cf7a98b74549 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -2,8 +2,8 @@ use crate::{ eth::{ error::{EthApiError, EthResult}, revm_utils::{ - clone_into_empty_db, inspect, inspect_and_return_db, replay_transactions_until, - result_output, EvmOverrides, + clone_into_empty_db, inspect, inspect_and_return_db, prepare_call_env, + replay_transactions_until, result_output, transact, EvmOverrides, }, EthTransactions, TransactionSource, }, @@ -25,11 +25,12 @@ use reth_revm::{ use reth_rlp::{Decodable, Encodable}; use reth_rpc_api::DebugApiServer; use reth_rpc_types::{ + state::StateOverride, trace::geth::{ BlockTraceResult, FourByteFrame, GethDebugBuiltInTracerType, GethDebugTracerType, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, NoopFrame, TraceResult, }, - BlockError, CallRequest, RichBlock, + BlockError, Bundle, CallRequest, RichBlock, StateContext, }; use reth_tasks::TaskSpawner; use revm::{ @@ -333,6 +334,103 @@ where 
Ok(frame.into()) } + /// The debug_traceCallMany method lets you run an `eth_callMany` within the context of the + /// given block execution using the first n transactions in the given block as base + pub async fn debug_trace_call_many( + &self, + bundles: Vec, + state_context: Option, + opts: Option, + state_override: Option, + ) -> EthResult> { + if bundles.is_empty() { + return Err(EthApiError::InvalidParams(String::from("bundles are empty."))) + } + + let StateContext { transaction_index, block_number } = state_context.unwrap_or_default(); + let transaction_index = transaction_index.unwrap_or_default(); + + let target_block = block_number.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)); + let ((cfg, block_env, _), block) = futures::try_join!( + self.inner.eth_api.evm_env_at(target_block), + self.inner.eth_api.block_by_id(target_block), + )?; + + let block = block.ok_or_else(|| EthApiError::UnknownBlockNumber)?; + let tracing_options = opts.unwrap_or_default(); + let gas_limit = self.inner.eth_api.call_gas_limit(); + + // we're essentially replaying the transactions in the block here, hence we need the state + // that points to the beginning of the block, which is the state at the parent block + let mut at = block.parent_hash; + let mut replay_block_txs = true; + + // but if all transactions are to be replayed, we can use the state at the block itself + let num_txs = transaction_index.index().unwrap_or(block.body.len()); + if num_txs == block.body.len() { + at = block.hash; + replay_block_txs = false; + } + + let this = self.clone(); + self.inner + .eth_api + .spawn_with_state_at_block(at.into(), move |state| { + let mut results = Vec::with_capacity(bundles.len()); + let mut db = SubState::new(State::new(state)); + + if replay_block_txs { + // only need to replay the transactions in the block if not all transactions are + // to be replayed + let transactions = block.body.into_iter().take(num_txs); + + // Execute all transactions until index + for tx in transactions { + let tx = tx.into_ecrecovered().ok_or(BlockError::InvalidSignature)?; + let tx = tx_env_with_recovered(&tx); + let env = Env { cfg: cfg.clone(), block: block_env.clone(), tx }; + let (res, _) = transact(&mut db, env)?; + db.commit(res.state); + } + } + + // Trace all bundles + let mut bundles = bundles.into_iter().peekable(); + while let Some(bundle) = bundles.next() { + //let mut result = Vec::with_capacity(bundle.len()); + let Bundle { transactions, block_override } = bundle; + let overrides = + EvmOverrides::new(state_override.clone(), block_override.map(Box::new)); + + let mut transactions = transactions.into_iter().peekable(); + while let Some(tx) = transactions.next() { + let env = prepare_call_env( + cfg.clone(), + block_env.clone(), + tx, + gas_limit, + &mut db, + overrides.clone(), + )?; + + let (trace, state) = this.trace_transaction( + tracing_options.clone(), + env, + target_block, + &mut db, + )?; + + if bundles.peek().is_none() && transactions.peek().is_none() { + db.commit(state); + } + results.push(trace); + } + } + Ok(results) + }) + .await + } + /// Executes the configured transaction with the environment on the given database. /// /// Returns the trace frame and the state that got updated after executing the transaction. @@ -651,6 +749,18 @@ where Ok(DebugApi::debug_trace_call(self, request, block_number, opts.unwrap_or_default()) .await?) 
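A hedged usage sketch of the new endpoint, built from the types in the diff above (`debug_api`, `call_request` and the surrounding error handling are assumed for illustration):

// Trace one bundle on top of `latest`, replaying every transaction in the
// context block first (TransactionIndex::All serializes as -1).
let bundle = Bundle { transactions: vec![call_request], block_override: None };
let context = StateContext {
    block_number: Some(BlockId::Number(BlockNumberOrTag::Latest)),
    transaction_index: Some(TransactionIndex::All),
};
let traces = debug_api
    .debug_trace_call_many(vec![bundle], Some(context), None, None)
    .await?;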
} + + async fn debug_trace_call_many( + &self, + bundles: Vec, + state_context: Option, + opts: Option, + state_override: Option, + ) -> RpcResult> { + let _permit = self.acquire_trace_permit().await; + Ok(DebugApi::debug_trace_call_many(self, bundles, state_context, opts, state_override) + .await?) + } } impl std::fmt::Debug for DebugApi { From 76a6c92cb3badb01cd43bf0ccf73d9aa2a09806c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 3 Aug 2023 15:04:00 +0200 Subject: [PATCH 336/722] perf: improve ipc poll logic (#4037) --- crates/rpc/ipc/src/server/connection.rs | 87 ++++++++++++++++++------- crates/rpc/ipc/src/server/mod.rs | 2 +- 2 files changed, 65 insertions(+), 24 deletions(-) diff --git a/crates/rpc/ipc/src/server/connection.rs b/crates/rpc/ipc/src/server/connection.rs index e502a27de7ac..bd4be7c3e7d4 100644 --- a/crates/rpc/ipc/src/server/connection.rs +++ b/crates/rpc/ipc/src/server/connection.rs @@ -1,7 +1,7 @@ //! A IPC connection. use crate::stream_codec::StreamCodec; -use futures::{ready, stream::FuturesUnordered, Sink, Stream, StreamExt}; +use futures::{ready, stream::FuturesUnordered, FutureExt, Sink, Stream, StreamExt}; use std::{ collections::VecDeque, future::Future, @@ -129,6 +129,7 @@ pub(crate) struct IpcConnDriver { #[pin] pub(crate) conn: IpcConn>, pub(crate) service: S, + /// rpc requests in progress #[pin] pub(crate) pending_calls: FuturesUnordered, pub(crate) items: VecDeque, @@ -145,7 +146,7 @@ impl Future for IpcConnDriver where S: Service> + Send + 'static, S::Error: Into>, - S::Future: Send, + S::Future: Send + Unpin, T: AsyncRead + AsyncWrite + Unpin + Send + 'static, { type Output = (); @@ -153,20 +154,21 @@ where fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let mut this = self.project(); - loop { - // process calls - if !this.pending_calls.is_empty() { - while let Poll::Ready(Some(res)) = this.pending_calls.as_mut().poll_next(cx) { - let item = match res { - Ok(Some(resp)) => resp, - Ok(None) => continue, - Err(err) => err.into().to_string(), - }; - this.items.push_back(item); - } + // items are also pushed from external + // this will act as a manual yield point to reduce latencies of the polling future that may + // submit items from an additional source (subscription) + let mut budget = 5; + + // ensure we still have enough budget for another iteration + 'outer: loop { + budget -= 1; + if budget == 0 { + // make sure we're woken up again + cx.waker().wake_by_ref(); + return Poll::Pending } - // write to the sink + // write all responses to the sink while this.conn.as_mut().poll_ready(cx).is_ready() { if let Some(item) = this.items.pop_front() { if let Err(err) = this.conn.as_mut().start_send(item) { @@ -178,17 +180,56 @@ where } } - // read from the stream - match ready!(this.conn.as_mut().poll_next(cx)) { - Some(Ok(item)) => { - let call = this.service.call(item); - this.pending_calls.push(call); + 'inner: loop { + let mut drained = false; + // drain all calls that are ready and put them in the output item queue + if !this.pending_calls.is_empty() { + if let Poll::Ready(Some(res)) = this.pending_calls.as_mut().poll_next(cx) { + let item = match res { + Ok(Some(resp)) => resp, + Ok(None) => continue 'inner, + Err(err) => err.into().to_string(), + }; + this.items.push_back(item); + continue 'outer + } else { + drained = true; + } } - Some(Err(err)) => { - tracing::warn!("IPC request failed: {:?}", err); - return Poll::Ready(()) + + // read from the stream + match this.conn.as_mut().poll_next(cx) { + Poll::Ready(res) => match res { 
+ Some(Ok(item)) => { + let mut call = this.service.call(item); + match call.poll_unpin(cx) { + Poll::Ready(res) => { + let item = match res { + Ok(Some(resp)) => resp, + Ok(None) => continue 'inner, + Err(err) => err.into().to_string(), + }; + this.items.push_back(item); + continue 'outer + } + Poll::Pending => { + this.pending_calls.push(call); + } + } + } + Some(Err(err)) => { + tracing::warn!("IPC request failed: {:?}", err); + return Poll::Ready(()) + } + None => return Poll::Ready(()), + }, + Poll::Pending => { + if drained || this.pending_calls.is_empty() { + // at this point all things are pending + return Poll::Pending + } + } } - None => return Poll::Ready(()), } } } diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index eb376d30da3d..71d66af922a2 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -300,7 +300,7 @@ async fn spawn_connection( ) where S: Service> + Send + 'static, S::Error: Into>, - S::Future: Send, + S::Future: Send + Unpin, T: AsyncRead + AsyncWrite + Unpin + Send + 'static, { let task = tokio::task::spawn(async move { From 8d0aa64ab837d904548b11cd3d4f592fc253643d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 3 Aug 2023 16:09:39 +0200 Subject: [PATCH 337/722] docs: rm some tracing todos + docs --- crates/revm/revm-inspectors/src/tracing/js/bindings.rs | 1 - crates/revm/revm-inspectors/src/tracing/js/builtins.rs | 5 +++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/revm/revm-inspectors/src/tracing/js/bindings.rs b/crates/revm/revm-inspectors/src/tracing/js/bindings.rs index a71720ef8294..09f8b28fbde1 100644 --- a/crates/revm/revm-inspectors/src/tracing/js/bindings.rs +++ b/crates/revm/revm-inspectors/src/tracing/js/bindings.rs @@ -491,7 +491,6 @@ pub(crate) struct EvmContext { pub(crate) output: Bytes, /// Number, block number pub(crate) time: String, - // TODO more fields pub(crate) block_hash: Option, pub(crate) tx_index: Option, pub(crate) tx_hash: Option, diff --git a/crates/revm/revm-inspectors/src/tracing/js/builtins.rs b/crates/revm/revm-inspectors/src/tracing/js/builtins.rs index bdba6945a64f..88bfc2f9b170 100644 --- a/crates/revm/revm-inspectors/src/tracing/js/builtins.rs +++ b/crates/revm/revm-inspectors/src/tracing/js/builtins.rs @@ -16,6 +16,9 @@ use std::collections::HashSet; pub(crate) const BIG_INT_JS: &str = include_str!("bigint.js"); /// Registers all the builtin functions and global bigint property +/// +/// Note: this does not register the `isPrecompiled` builtin, as this requires the precompile +/// addresses, see [PrecompileList::register_callable]. 
pub(crate) fn register_builtins(ctx: &mut Context<'_>) -> JsResult<()> { let big_int = ctx.eval(Source::from_bytes(BIG_INT_JS.as_bytes()))?; ctx.register_global_property("bigint", big_int, Attribute::all())?; @@ -25,8 +28,6 @@ pub(crate) fn register_builtins(ctx: &mut Context<'_>) -> JsResult<()> { ctx.register_global_callable("toContract", 2, NativeFunction::from_fn_ptr(to_contract))?; ctx.register_global_callable("toContract2", 3, NativeFunction::from_fn_ptr(to_contract2))?; - // TODO: isPrecompiled slice - Ok(()) } From f917d49fb4bffee89811607743e4c67b61130804 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 3 Aug 2023 15:54:16 +0100 Subject: [PATCH 338/722] fix(engine, pruner): prune poll logic, history indices (#4043) --- .../consensus/beacon/src/engine/forkchoice.rs | 16 ++ crates/consensus/beacon/src/engine/mod.rs | 11 +- crates/prune/src/pruner.rs | 255 +++++++++--------- .../src/providers/database/provider.rs | 29 +- 4 files changed, 161 insertions(+), 150 deletions(-) diff --git a/crates/consensus/beacon/src/engine/forkchoice.rs b/crates/consensus/beacon/src/engine/forkchoice.rs index 51c67f7eef33..f2858d941731 100644 --- a/crates/consensus/beacon/src/engine/forkchoice.rs +++ b/crates/consensus/beacon/src/engine/forkchoice.rs @@ -51,6 +51,18 @@ impl ForkchoiceStateTracker { self.latest_status().map(|s| s.is_valid()).unwrap_or(false) } + /// Returns whether the latest received FCU is syncing: [ForkchoiceStatus::Syncing] + #[allow(unused)] + pub(crate) fn is_latest_syncing(&self) -> bool { + self.latest_status().map(|s| s.is_syncing()).unwrap_or(false) + } + + /// Returns whether the latest received FCU is syncing: [ForkchoiceStatus::Invalid] + #[allow(unused)] + pub(crate) fn is_latest_invalid(&self) -> bool { + self.latest_status().map(|s| s.is_invalid()).unwrap_or(false) + } + /// Returns the last valid head hash. #[allow(unused)] pub(crate) fn last_valid_head(&self) -> Option { @@ -98,6 +110,10 @@ impl ForkchoiceStatus { matches!(self, ForkchoiceStatus::Valid) } + pub(crate) fn is_invalid(&self) -> bool { + matches!(self, ForkchoiceStatus::Invalid) + } + pub(crate) fn is_syncing(&self) -> bool { matches!(self, ForkchoiceStatus::Syncing) } diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index f1c7c12b05ab..cd6481779253 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1715,12 +1715,13 @@ where // Poll prune controller if all conditions are met: // 1. Pipeline is idle - // 2. Pruning is running and we need to prioritize checking its events OR no engine and - // sync messages are pending and we may start pruning - // 3. Latest FCU status is VALID + // 2. Either of two: + // 1. Pruning is running and we need to prioritize checking its events + // 2. 
Both engine and sync messages are pending AND latest FCU status is not INVALID, + // so we may start pruning if this.sync.is_pipeline_idle() && - (this.is_prune_active() || is_pending) && - this.forkchoice_state_tracker.is_latest_valid() + (this.is_prune_active() || + is_pending && !this.forkchoice_state_tracker.is_latest_invalid()) { if let Some(ref mut prune) = this.prune { match prune.poll(cx, this.blockchain.canonical_tip().number) { diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs index 5f19431c9e05..8c1ab001af83 100644 --- a/crates/prune/src/pruner.rs +++ b/crates/prune/src/pruner.rs @@ -6,12 +6,13 @@ use reth_db::{ abstraction::cursor::{DbCursorRO, DbCursorRW}, database::Database, models::{storage_sharded_key::StorageShardedKey, BlockNumberAddress, ShardedKey}, + table::Table, tables, transaction::DbTxMut, BlockNumberList, }; use reth_primitives::{ - Address, BlockNumber, ChainSpec, PruneCheckpoint, PruneMode, PruneModes, PrunePart, TxNumber, + BlockNumber, ChainSpec, PruneCheckpoint, PruneMode, PruneModes, PrunePart, TxNumber, }; use reth_provider::{ BlockReader, DatabaseProviderRW, ProviderFactory, PruneCheckpointReader, PruneCheckpointWriter, @@ -81,6 +82,11 @@ impl Pruner { /// Run the pruner pub fn run(&mut self, tip_block_number: BlockNumber) -> PrunerResult { + trace!( + target: "pruner", + %tip_block_number, + "Pruner started" + ); let start = Instant::now(); let provider = self.provider_factory.provider_rw()?; @@ -143,8 +149,15 @@ impl Pruner { provider.commit()?; self.last_pruned_block_number = Some(tip_block_number); - self.metrics.pruner.duration_seconds.record(start.elapsed()); + let elapsed = start.elapsed(); + self.metrics.pruner.duration_seconds.record(elapsed); + trace!( + target: "pruner", + %tip_block_number, + ?elapsed, + "Pruner finished" + ); Ok(()) } @@ -228,11 +241,11 @@ impl Pruner { provider.prune_table_with_iterator_in_batches::( range, self.batch_sizes.receipts, - |entries| { - processed += entries; + |rows| { + processed += rows; trace!( target: "pruner", - %entries, + %rows, progress = format!("{:.1}%", 100.0 * processed as f64 / total as f64), "Pruned receipts" ); @@ -293,11 +306,11 @@ impl Pruner { // Pre-sort hashes to prune them in order hashes.sort_unstable(); - let entries = provider.prune_table_with_iterator::(hashes)?; - processed += entries; + let rows = provider.prune_table_with_iterator::(hashes)?; + processed += rows; trace!( target: "pruner", - %entries, + %rows, progress = format!("{:.1}%", 100.0 * processed as f64 / total as f64), "Pruned transaction lookup" ); @@ -336,11 +349,11 @@ impl Pruner { provider.prune_table_with_range_in_batches::( range, self.batch_sizes.transaction_senders, - |entries| { - processed += entries; + |rows, _| { + processed += rows; trace!( target: "pruner", - %entries, + %rows, progress = format!("{:.1}%", 100.0 * processed as f64 / total as f64), "Pruned transaction senders" ); @@ -370,92 +383,34 @@ impl Pruner { let range = from_block..=to_block; let total = range.clone().count(); - let mut processed = 0; provider.prune_table_with_range_in_batches::( range, self.batch_sizes.account_history, - |entries| { - processed += entries; + |keys, rows| { trace!( target: "pruner", - %entries, - progress = format!("{:.1}%", 100.0 * processed as f64 / total as f64), + %keys, + %rows, + progress = format!("{:.1}%", 100.0 * keys as f64 / total as f64), "Pruned account history (changesets)" ); }, )?; - let mut cursor = provider.tx_ref().cursor_write::()?; - // Prune `AccountHistory` table: - // 1. 
If the shard has `highest_block_number` less than or equal to the target block number - // for pruning, delete the shard completely. - // 2. If the shard has `highest_block_number` greater than the target block number for - // pruning, filter block numbers inside the shard which are less than the target - // block number for pruning. - while let Some(result) = cursor.next()? { - let (key, blocks): (ShardedKey<Address>
, BlockNumberList) = result; - - if key.highest_block_number <= to_block { - // If shard consists only of block numbers less than the target one, delete shard - // completely. - cursor.delete_current()?; - if key.highest_block_number == to_block { - // Shard contains only block numbers up to the target one, so we can skip to the - // next address. It is guaranteed that further shards for this address will not - // contain the target block number, as it's in this shard. - cursor.seek_exact(ShardedKey::last(key.key))?; - } - } else { - // Shard contains block numbers that are higher than the target one, so we need to - // filter it. It is guaranteed that further shards for this address will not contain - // the target block number, as it's in this shard. - let blocks = blocks - .iter(0) - .skip_while(|block| *block <= to_block as usize) - .collect::>(); - if blocks.is_empty() { - // If there are no more blocks in this shard, we need to remove it, as empty - // shards are not allowed. - if key.highest_block_number == u64::MAX { - // If current shard is the last shard for this address, replace it with the - // previous shard. - if let Some((prev_key, prev_value)) = cursor.prev()? { - if prev_key.key == key.key { - cursor.delete_current()?; - // Upsert will replace the last shard for this address with the - // previous value - cursor.upsert(key.clone(), prev_value)?; - } - } - } else { - // If current shard is not the last shard for this address, just delete it. - cursor.delete_current()?; - } - } else { - cursor.upsert(key.clone(), BlockNumberList::new_pre_sorted(blocks))?; - } - - // Jump to the next address - cursor.seek_exact(ShardedKey::last(key.key))?; - } - - processed += 1; - if processed % self.batch_sizes.account_history == 0 { + self.prune_history_indices::( + provider, + to_block, + |a, b| a.key == b.key, + |key| ShardedKey::last(key.key), + self.batch_sizes.account_history, + |rows| { trace!( target: "pruner", - entries = self.batch_sizes.account_history, + rows, "Pruned account history (indices)" ); - } - } - - if processed % self.batch_sizes.account_history != 0 { - trace!( - target: "pruner", - entries = processed % self.batch_sizes.account_history, - "Pruned account history (indices)" - ); - } + }, + )?; provider.save_prune_checkpoint( PrunePart::AccountHistory, @@ -478,104 +433,136 @@ impl Pruner { .map(|checkpoint| checkpoint.block_number + 1) .unwrap_or_default(); let block_range = from_block..=to_block; - let total = block_range.clone().count(); let range = BlockNumberAddress::range(block_range); - let mut processed = 0; provider.prune_table_with_range_in_batches::( range, self.batch_sizes.storage_history, - |entries| { - processed += entries; + |keys, rows| { trace!( target: "pruner", - %entries, - progress = format!("{:.1}%", 100.0 * processed as f64 / total as f64), + %keys, + %rows, "Pruned storage history (changesets)" ); }, )?; - let mut cursor = provider.tx_ref().cursor_write::()?; - // Prune `StorageHistory` table: + self.prune_history_indices::( + provider, + to_block, + |a, b| a.address == b.address && a.sharded_key.key == b.sharded_key.key, + |key| StorageShardedKey::last(key.address, key.sharded_key.key), + self.batch_sizes.storage_history, + |rows| { + trace!( + target: "pruner", + rows, + "Pruned storage history (indices)" + ); + }, + )?; + + provider.save_prune_checkpoint( + PrunePart::StorageHistory, + PruneCheckpoint { block_number: to_block, prune_mode }, + )?; + + Ok(()) + } + + /// Prune history indices up to the provided block, inclusive. 
+ fn prune_history_indices( + &self, + provider: &DatabaseProviderRW<'_, DB>, + to_block: BlockNumber, + key_matches: impl Fn(&T::Key, &T::Key) -> bool, + last_key: impl Fn(&T::Key) -> T::Key, + batch_size: usize, + batch_callback: impl Fn(usize), + ) -> PrunerResult + where + T: Table, + T::Key: AsRef>, + { + let mut processed = 0; + let mut cursor = provider.tx_ref().cursor_write::()?; + // Prune history table: // 1. If the shard has `highest_block_number` less than or equal to the target block number // for pruning, delete the shard completely. // 2. If the shard has `highest_block_number` greater than the target block number for // pruning, filter block numbers inside the shard which are less than the target // block number for pruning. while let Some(result) = cursor.next()? { - let (key, blocks): (StorageShardedKey, BlockNumberList) = result; + let (key, blocks): (T::Key, BlockNumberList) = result; - if key.sharded_key.highest_block_number <= to_block { + if key.as_ref().highest_block_number <= to_block { // If shard consists only of block numbers less than the target one, delete shard // completely. cursor.delete_current()?; - if key.sharded_key.highest_block_number == to_block { + if key.as_ref().highest_block_number == to_block { // Shard contains only block numbers up to the target one, so we can skip to the - // next storage slot for this address. It is guaranteed that further shards for - // this address and storage slot will not contain the target block number, as - // it's in this shard. - cursor.seek_exact(StorageShardedKey::last(key.address, key.sharded_key.key))?; + // next sharded key. It is guaranteed that further shards for this sharded key + // will not contain the target block number, as it's in this shard. + cursor.seek_exact(last_key(&key))?; } } else { // Shard contains block numbers that are higher than the target one, so we need to - // filter it. It is guaranteed that further shards for this address and storage slot - // will not contain the target block number, as it's in this shard. - let blocks = blocks + // filter it. It is guaranteed that further shards for this sharded key will not + // contain the target block number, as it's in this shard. + let new_blocks = blocks .iter(0) .skip_while(|block| *block <= to_block as usize) .collect::>(); - if blocks.is_empty() { - // If there are no more blocks in this shard, we need to remove it, as empty - // shards are not allowed. - if key.sharded_key.highest_block_number == u64::MAX { - // If current shard is the last shard for this address and storage slot, - // replace it with the previous shard. - if let Some((prev_key, prev_value)) = cursor.prev()? { - if prev_key.address == key.address && - prev_key.sharded_key.key == key.sharded_key.key + + if blocks.len() != new_blocks.len() { + // If there were blocks less than or equal to the target one + // (so the shard has changed), update the shard. + if new_blocks.is_empty() { + // If there are no more blocks in this shard, we need to remove it, as empty + // shards are not allowed. + if key.as_ref().highest_block_number == u64::MAX { + // If current shard is the last shard for this sharded key, replace it + // with the previous shard. + if let Some(prev_value) = cursor + .prev()? 
+ .filter(|(prev_key, _)| key_matches(prev_key, &key)) + .map(|(_, prev_value)| prev_value) { cursor.delete_current()?; - // Upsert will replace the last shard for this address and storage - // slot with the previous value + // Upsert will replace the last shard for this sharded key with the + // previous value cursor.upsert(key.clone(), prev_value)?; + } else { + // If there's no previous shard for this sharded key, + // just delete last shard completely. + cursor.delete_current()?; } + } else { + // If current shard is not the last shard for this sharded key, + // just delete it. + cursor.delete_current()?; } } else { - // If current shard is not the last shard for this address, just delete it. - cursor.delete_current()?; + cursor.upsert(key.clone(), BlockNumberList::new_pre_sorted(new_blocks))?; } - } else { - cursor.upsert(key.clone(), BlockNumberList::new_pre_sorted(blocks))?; } // Jump to the next address - cursor.seek_exact(StorageShardedKey::last(key.address, key.sharded_key.key))?; + cursor.seek_exact(last_key(&key))?; } processed += 1; - if processed % self.batch_sizes.storage_history == 0 { - trace!( - target: "pruner", - entries = self.batch_sizes.storage_history, - "Pruned storage history (indices)" - ); + + if processed % batch_size == 0 { + batch_callback(batch_size); } } - if processed % self.batch_sizes.storage_history != 0 { - trace!( - target: "pruner", - entries = processed % self.batch_sizes.storage_history, - "Pruned storage history (indices)" - ); + if processed % batch_size != 0 { + batch_callback(processed % batch_size); } - provider.save_prune_checkpoint( - PrunePart::StorageHistory, - PruneCheckpoint { block_number: to_block, prune_mode }, - )?; - Ok(()) } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 19bab6e50791..7a82cac2a371 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -660,33 +660,40 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> DatabaseProvider<'this, TX> { } /// Prune the table for the specified key range, calling `chunk_callback` after every - /// `batch_size` pruned rows. + /// `batch_size` pruned rows with number of total unique keys and total rows pruned. For dupsort + /// tables, these numbers will be different as one key can correspond to multiple rows. /// /// Returns number of rows pruned. pub fn prune_table_with_range_in_batches( &self, keys: impl RangeBounds, batch_size: usize, - mut batch_callback: impl FnMut(usize), - ) -> std::result::Result { + mut batch_callback: impl FnMut(usize, usize), + ) -> std::result::Result<(), DatabaseError> { let mut cursor = self.tx.cursor_write::()?; let mut walker = cursor.walk_range(keys)?; - let mut deleted = 0; + let mut deleted_keys = 0; + let mut deleted_rows = 0; + let mut previous_key = None; - while walker.next().transpose()?.is_some() { + while let Some((key, _)) = walker.next().transpose()? 
{ walker.delete_current()?; - deleted += 1; + deleted_rows += 1; + if previous_key.as_ref().map(|previous_key| previous_key != &key).unwrap_or(true) { + deleted_keys += 1; + previous_key = Some(key); + } - if deleted % batch_size == 0 { - batch_callback(batch_size); + if deleted_rows % batch_size == 0 { + batch_callback(deleted_keys, deleted_rows); } } - if deleted % batch_size != 0 { - batch_callback(deleted % batch_size); + if deleted_rows % batch_size != 0 { + batch_callback(deleted_keys, deleted_rows); } - Ok(deleted) + Ok(()) } /// Load shard and remove it. If list is empty, last shard was full or From 77b7d778195702e2a55bfa425e0833ab3a89bbbb Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 3 Aug 2023 17:00:56 +0100 Subject: [PATCH 339/722] fix(engine): poll prune first (#4048) --- crates/consensus/beacon/src/engine/mod.rs | 44 +++++++++++++++-------- 1 file changed, 29 insertions(+), 15 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index cd6481779253..f5025e049e44 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1635,6 +1635,21 @@ where fn is_prune_active(&self) -> bool { !self.is_prune_idle() } + + /// Polls the prune controller, if it exists, and processes the event [`EnginePruneEvent`] + /// emitted by it. + /// + /// Returns [`Option::Some`] if prune controller emitted an event which resulted in the error + /// (see [`Self::on_prune_event`] for error handling) + fn poll_prune( + &mut self, + cx: &mut Context<'_>, + ) -> Option> { + match self.prune.as_mut()?.poll(cx, self.blockchain.canonical_tip().number) { + Poll::Ready(prune_event) => self.on_prune_event(prune_event), + Poll::Pending => None, + } + } } /// On initialization, the consensus engine will poll the message receiver and return @@ -1665,6 +1680,14 @@ where // Process all incoming messages from the CL, these can affect the state of the // SyncController, hence they are polled first, and they're also time sensitive. loop { + // Poll prune controller first if it's active, as we will not be able to process any + // engine messages until it's finished. + if this.is_prune_active() { + if let Some(res) = this.poll_prune(cx) { + return Poll::Ready(res) + } + } + let mut engine_messages_pending = false; let mut sync_pending = false; @@ -1715,23 +1738,14 @@ where // Poll prune controller if all conditions are met: // 1. Pipeline is idle - // 2. Either of two: - // 1. Pruning is running and we need to prioritize checking its events - // 2. Both engine and sync messages are pending AND latest FCU status is not INVALID, - // so we may start pruning + // 2. No engine and sync messages are pending + // 3. 
Latest FCU status is not INVALID if this.sync.is_pipeline_idle() && - (this.is_prune_active() || - is_pending && !this.forkchoice_state_tracker.is_latest_invalid()) + is_pending && + !this.forkchoice_state_tracker.is_latest_invalid() { - if let Some(ref mut prune) = this.prune { - match prune.poll(cx, this.blockchain.canonical_tip().number) { - Poll::Ready(prune_event) => { - if let Some(res) = this.on_prune_event(prune_event) { - return Poll::Ready(res) - } - } - Poll::Pending => {} - } + if let Some(res) = this.poll_prune(cx) { + return Poll::Ready(res) } } From 0d33585426f52e7a098d0f0892efc618d1a99822 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 3 Aug 2023 18:30:14 +0200 Subject: [PATCH 340/722] chore: move call op match to fn (#4047) --- .../src/tracing/builder/parity.rs | 15 +++----- .../revm/revm-inspectors/src/tracing/types.rs | 35 +++++++++++-------- 2 files changed, 25 insertions(+), 25 deletions(-) diff --git a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs index fb18366424fa..f1eb1b6060d6 100644 --- a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs +++ b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs @@ -7,7 +7,6 @@ use reth_primitives::{Address, U64}; use reth_rpc_types::{trace::parity::*, TransactionInfo}; use revm::{ db::DatabaseRef, - interpreter::opcode, primitives::{AccountInfo, ExecutionResult, ResultAndState, KECCAK_EMPTY}, }; use std::collections::{HashSet, VecDeque}; @@ -301,16 +300,10 @@ impl ParityTraceBuilder { Vec::with_capacity(current.trace.steps.len()); for step in ¤t.trace.steps { - let maybe_sub = match step.op.u8() { - opcode::CALL | - opcode::CALLCODE | - opcode::DELEGATECALL | - opcode::STATICCALL | - opcode::CREATE | - opcode::CREATE2 => { - sub_stack.pop_front().expect("there should be a sub trace") - } - _ => None, + let maybe_sub = if step.is_calllike_op() { + sub_stack.pop_front().expect("there should be a sub trace") + } else { + None }; instructions.push(Self::make_instruction(step, maybe_sub)); diff --git a/crates/revm/revm-inspectors/src/tracing/types.rs b/crates/revm/revm-inspectors/src/tracing/types.rs index b7d864814095..0aee8430533b 100644 --- a/crates/revm/revm-inspectors/src/tracing/types.rs +++ b/crates/revm/revm-inspectors/src/tracing/types.rs @@ -260,21 +260,13 @@ impl CallTraceNode { let mut item = CallTraceStepStackItem { trace_node: self, step, call_child_id: None }; // If the opcode is a call, put the child trace on the stack - match step.op.u8() { - opcode::CREATE | - opcode::CREATE2 | - opcode::DELEGATECALL | - opcode::CALL | - opcode::STATICCALL | - opcode::CALLCODE => { - // The opcode of this step is a call but it's possible that this step resulted - // in a revert or out of gas error in which case there's no actual child call executed and recorded: - if let Some(call_id) = self.children.get(child_id).copied() { - item.call_child_id = Some(call_id); - child_id += 1; - } + if step.is_calllike_op() { + // The opcode of this step is a call but it's possible that this step resulted + // in a revert or out of gas error in which case there's no actual child call executed and recorded: + if let Some(call_id) = self.children.get(child_id).copied() { + item.call_child_id = Some(call_id); + child_id += 1; } - _ => {} } stack.push(item); } @@ -607,6 +599,21 @@ impl CallTraceStep { log } + /// Returns true if the step is a call operation, any of + /// CALL, CALLCODE, DELEGATECALL, STATICCALL, CREATE, CREATE2 + #[inline] + pub(crate) fn 
is_calllike_op(&self) -> bool { + matches!( + self.op.u8(), + opcode::CALL | + opcode::DELEGATECALL | + opcode::STATICCALL | + opcode::CREATE | + opcode::CALLCODE | + opcode::CREATE2 + ) + } + // Returns true if the status code is an error or revert, See [InstructionResult::Revert] pub(crate) fn is_error(&self) -> bool { self.status as u8 >= InstructionResult::Revert as u8 From 6ceaad6af9cab4998b5615ffb8881e2bb6dc33c4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 3 Aug 2023 20:30:23 +0200 Subject: [PATCH 341/722] feat: populate gas cost for vm instructions (#4046) --- .../src/tracing/builder/parity.rs | 31 ++++++++++++------- .../revm/revm-inspectors/src/tracing/mod.rs | 12 ++++++- 2 files changed, 31 insertions(+), 12 deletions(-) diff --git a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs index f1eb1b6060d6..cf2a50dec205 100644 --- a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs +++ b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs @@ -7,7 +7,8 @@ use reth_primitives::{Address, U64}; use reth_rpc_types::{trace::parity::*, TransactionInfo}; use revm::{ db::DatabaseRef, - primitives::{AccountInfo, ExecutionResult, ResultAndState, KECCAK_EMPTY}, + interpreter::opcode::spec_opcode_gas, + primitives::{AccountInfo, ExecutionResult, ResultAndState, SpecId, KECCAK_EMPTY}, }; use std::collections::{HashSet, VecDeque}; @@ -18,6 +19,8 @@ use std::collections::{HashSet, VecDeque}; pub struct ParityTraceBuilder { /// Recorded trace nodes nodes: Vec, + /// The spec id of the EVM. + spec_id: Option, /// How the traces were recorded _config: TracingInspectorConfig, @@ -25,8 +28,12 @@ pub struct ParityTraceBuilder { impl ParityTraceBuilder { /// Returns a new instance of the builder - pub(crate) fn new(nodes: Vec, _config: TracingInspectorConfig) -> Self { - Self { nodes, _config } + pub(crate) fn new( + nodes: Vec, + spec_id: Option, + _config: TracingInspectorConfig, + ) -> Self { + Self { nodes, spec_id, _config } } /// Returns a list of all addresses that appeared as callers. 
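Editor's note: both hunks above lean on the same ordering invariant — sub-traces are recorded in the order the call-like opcodes executed, so walking a frame's steps and popping one queued sub-trace per call-like step pairs them up exactly. A minimal, self-contained Rust sketch of that pairing logic; `Step` and `SubTrace` are hypothetical stand-ins, not reth types:

use std::collections::VecDeque;

struct Step { is_call_like: bool } // stand-in for CallTraceStep + is_calllike_op()
struct SubTrace; // stand-in for a recorded child VmTrace

/// Pair each call-like step with the next queued sub-trace; other steps get None.
fn pair_steps(steps: &[Step], mut subs: VecDeque<SubTrace>) -> Vec<Option<SubTrace>> {
    steps
        .iter()
        .map(|step| {
            if step.is_call_like {
                // mirrors `sub_stack.pop_front().expect("there should be a sub trace")`
                Some(subs.pop_front().expect("there should be a sub trace"))
            } else {
                None
            }
        })
        .collect()
}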
@@ -306,7 +313,7 @@ impl ParityTraceBuilder { None }; - instructions.push(Self::make_instruction(step, maybe_sub)); + instructions.push(self.make_instruction(step, maybe_sub)); } match current.parent { @@ -331,7 +338,7 @@ impl ParityTraceBuilder { /// Creates a VM instruction from a [CallTraceStep] and a [VmTrace] for the subcall if there is /// one - fn make_instruction(step: &CallTraceStep, maybe_sub: Option) -> VmInstruction { + fn make_instruction(&self, step: &CallTraceStep, maybe_sub: Option) -> VmInstruction { let maybe_storage = step.storage_change.map(|storage_change| StorageDelta { key: storage_change.key, val: storage_change.value, @@ -351,12 +358,14 @@ impl ParityTraceBuilder { store: maybe_storage, }); - VmInstruction { - pc: step.pc, - cost: 0, // TODO: use op gas cost - ex: maybe_execution, - sub: maybe_sub, - } + let cost = self + .spec_id + .and_then(|spec_id| { + spec_opcode_gas(spec_id).get(step.op.u8() as usize).map(|op| op.get_gas()) + }) + .unwrap_or_default(); + + VmInstruction { pc: step.pc, cost: cost as u64, ex: maybe_execution, sub: maybe_sub } } } diff --git a/crates/revm/revm-inspectors/src/tracing/mod.rs b/crates/revm/revm-inspectors/src/tracing/mod.rs index de8a3b3b24a9..be6af4b53e6c 100644 --- a/crates/revm/revm-inspectors/src/tracing/mod.rs +++ b/crates/revm/revm-inspectors/src/tracing/mod.rs @@ -10,6 +10,7 @@ use revm::{ opcode, return_ok, CallInputs, CallScheme, CreateInputs, Gas, InstructionResult, Interpreter, OpCode, }, + primitives::SpecId, Database, EVMData, Inspector, JournalEntry, }; use types::{CallTrace, CallTraceStep}; @@ -59,6 +60,10 @@ pub struct TracingInspector { last_call_return_data: Option, /// The gas inspector used to track remaining gas. gas_inspector: GasInspector, + /// The spec id of the EVM. + /// + /// This is filled during execution. + spec_id: Option, } // === impl TracingInspector === @@ -73,12 +78,13 @@ impl TracingInspector { step_stack: vec![], last_call_return_data: None, gas_inspector: Default::default(), + spec_id: None, } } /// Consumes the Inspector and returns a [ParityTraceBuilder]. pub fn into_parity_builder(self) -> ParityTraceBuilder { - ParityTraceBuilder::new(self.traces.arena, self.config) + ParityTraceBuilder::new(self.traces.arena, self.spec_id, self.config) } /// Consumes the Inspector and returns a [GethTraceBuilder]. 
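Editor's note: the cost lookup introduced in this patch is deliberately lossy — if the spec id was never recorded (the inspector has not run yet) or the opcode has no entry in the static gas table, the instruction cost falls back to zero instead of failing. A condensed sketch of that shape, using a plain 256-entry array as an assumed stand-in for what revm's `spec_opcode_gas` returns:

/// Hedged sketch: both lookups are fallible, so the cost defaults to 0.
fn instruction_cost(static_gas_table: Option<&[u32; 256]>, opcode: u8) -> u64 {
    static_gas_table
        .and_then(|table| table.get(opcode as usize).copied())
        .unwrap_or_default() as u64
}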
@@ -170,6 +176,10 @@ impl TracingInspector { // this is the root call which should get the original gas limit of the transaction, // because initialization costs are already subtracted from gas_limit gas_limit = data.env.tx.gas_limit; + + // we set the spec id here because we only need to do this once and this condition is + // hit exactly once + self.spec_id = Some(data.env.cfg.spec_id); } self.trace_stack.push(self.traces.push_trace( From 3f63a0887a2f52b2fc7541a3e4903ace94c2da91 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 3 Aug 2023 22:31:59 +0200 Subject: [PATCH 342/722] fix: only propagate txs that are allowed to be propagated (#4050) --- crates/net/network/src/transactions.rs | 3 +- crates/transaction-pool/src/lib.rs | 12 ++- crates/transaction-pool/src/noop.rs | 37 +++++--- crates/transaction-pool/src/pool/mod.rs | 86 ++++++++++++------- crates/transaction-pool/src/test_utils/mod.rs | 12 ++- crates/transaction-pool/src/traits.rs | 41 ++++++++- crates/transaction-pool/src/validate/mod.rs | 2 +- crates/transaction-pool/tests/it/listeners.rs | 31 ++++++- 8 files changed, 169 insertions(+), 55 deletions(-) diff --git a/crates/net/network/src/transactions.rs b/crates/net/network/src/transactions.rs index d05c74172724..7cdf51adbfa9 100644 --- a/crates/net/network/src/transactions.rs +++ b/crates/net/network/src/transactions.rs @@ -130,7 +130,8 @@ impl TransactionsManager { let network_events = network.event_listener(); let (command_tx, command_rx) = mpsc::unbounded_channel(); - // install a listener for new transactions + // install a listener for new pending transactions that are allowed to be propagated over + // the network let pending = pool.pending_transactions_listener(); Self { diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 8c4ccc502db2..63e1936a76f7 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -165,8 +165,9 @@ pub use crate::{ }, traits::{ AllPoolTransactions, BestTransactions, BlockInfo, CanonicalStateUpdate, ChangedAccount, - NewTransactionEvent, PoolSize, PoolTransaction, PooledTransaction, PropagateKind, - PropagatedTransactions, TransactionOrigin, TransactionPool, TransactionPoolExt, + NewTransactionEvent, PendingTransactionListenerKind, PoolSize, PoolTransaction, + PooledTransaction, PropagateKind, PropagatedTransactions, TransactionOrigin, + TransactionPool, TransactionPoolExt, }, validate::{ EthTransactionValidator, TransactionValidationOutcome, TransactionValidator, @@ -343,8 +344,11 @@ where self.pool.add_all_transactions_event_listener() } - fn pending_transactions_listener(&self) -> Receiver { - self.pool.add_pending_listener() + fn pending_transactions_listener_for( + &self, + kind: PendingTransactionListenerKind, + ) -> Receiver { + self.pool.add_pending_listener(kind) } fn new_transactions_listener(&self) -> Receiver> { diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index d935e62c1b2a..37005019da4c 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -4,10 +4,11 @@ //! to be generic over it. 
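// Editor's aside (illustration only, not part of this diff): the split above
// yields two pending-transaction listener flavors. A hedged usage sketch using
// only APIs this patch introduces — networking keeps the default
// propagate-only stream, while e.g. a local indexer opts into everything:
//
//     let mut network_rx = pool.pending_transactions_listener();
//     let mut all_rx = pool
//         .pending_transactions_listener_for(PendingTransactionListenerKind::All);
//     while let Some(tx_hash) = all_rx.recv().await {
//         // also sees hashes that are not allowed to propagate and that
//         // `network_rx` will therefore never yield
//     }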
use crate::{ - error::PoolError, AllPoolTransactions, AllTransactionsEvents, BestTransactions, BlockInfo, - NewTransactionEvent, PoolResult, PoolSize, PoolTransaction, PooledTransaction, - PropagatedTransactions, TransactionEvents, TransactionOrigin, TransactionPool, - TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, + error::PoolError, traits::PendingTransactionListenerKind, AllPoolTransactions, + AllTransactionsEvents, BestTransactions, BlockInfo, NewTransactionEvent, PoolResult, PoolSize, + PoolTransaction, PooledTransaction, PropagatedTransactions, TransactionEvents, + TransactionOrigin, TransactionPool, TransactionValidationOutcome, TransactionValidator, + ValidPoolTransaction, }; use reth_primitives::{Address, TxHash}; use std::{collections::HashSet, marker::PhantomData, sync::Arc}; @@ -77,7 +78,10 @@ impl TransactionPool for NoopTransactionPool { AllTransactionsEvents { events: mpsc::channel(1).1 } } - fn pending_transactions_listener(&self) -> Receiver { + fn pending_transactions_listener_for( + &self, + _kind: PendingTransactionListenerKind, + ) -> Receiver { mpsc::channel(1).1 } @@ -166,29 +170,40 @@ impl TransactionPool for NoopTransactionPool { /// A [`TransactionValidator`] that does nothing. #[derive(Debug, Clone)] #[non_exhaustive] -pub struct NoopTransactionValidator(PhantomData); +pub struct MockTransactionValidator { + propagate_local: bool, + _marker: PhantomData, +} #[async_trait::async_trait] -impl TransactionValidator for NoopTransactionValidator { +impl TransactionValidator for MockTransactionValidator { type Transaction = T; async fn validate_transaction( &self, - _origin: TransactionOrigin, + origin: TransactionOrigin, transaction: Self::Transaction, ) -> TransactionValidationOutcome { TransactionValidationOutcome::Valid { balance: Default::default(), state_nonce: 0, transaction, - propagate: true, + propagate: if origin.is_local() { self.propagate_local } else { true }, } } } -impl Default for NoopTransactionValidator { +impl MockTransactionValidator { + /// Creates a new [`MockTransactionValidator`] that does not allow local transactions to be + /// propagated. + pub fn no_propagate_local() -> Self { + Self { propagate_local: false, _marker: Default::default() } + } +} + +impl Default for MockTransactionValidator { fn default() -> Self { - NoopTransactionValidator(PhantomData) + MockTransactionValidator { propagate_local: true, _marker: Default::default() } } } diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index d15596baa2f9..a07565b7cae4 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -96,7 +96,7 @@ mod events; pub use events::{FullTransactionEvent, TransactionEvent}; mod listener; -use crate::pool::txpool::UpdateOutcome; +use crate::{pool::txpool::UpdateOutcome, traits::PendingTransactionListenerKind}; pub use listener::{AllTransactionsEvents, TransactionEvents}; mod best; @@ -119,8 +119,8 @@ pub struct PoolInner { config: PoolConfig, /// Manages listeners for transaction state change events. event_listener: RwLock>, - /// Listeners for new ready transactions. - pending_transaction_listener: Mutex>>, + /// Listeners for new pending transactions. + pending_transaction_listener: Mutex>, /// Listeners for new transactions added to the pool. transaction_listener: Mutex>>>, } @@ -196,15 +196,19 @@ where } /// Adds a new transaction listener to the pool that gets notified about every new _pending_ - /// transaction. 
- pub fn add_pending_listener(&self) -> mpsc::Receiver { + /// transaction inserted into the pool + pub fn add_pending_listener( + &self, + kind: PendingTransactionListenerKind, + ) -> mpsc::Receiver { const TX_LISTENER_BUFFER_SIZE: usize = 2048; - let (tx, rx) = mpsc::channel(TX_LISTENER_BUFFER_SIZE); - self.pending_transaction_listener.lock().push(tx); + let (sender, rx) = mpsc::channel(TX_LISTENER_BUFFER_SIZE); + let listener = PendingTransactionListener { sender, kind }; + self.pending_transaction_listener.lock().push(listener); rx } - /// Adds a new transaction listener to the pool that gets notified about every new transaction + /// Adds a new transaction listener to the pool that gets notified about every new transaction. pub fn add_new_transaction_listener( &self, ) -> mpsc::Receiver> { @@ -318,8 +322,8 @@ where let hash = *added.hash(); // Notify about new pending transactions - if let Some(pending_hash) = added.as_pending() { - self.on_new_pending_transaction(pending_hash); + if added.is_pending() { + self.on_new_pending_transaction(&added); } // Notify tx event listeners @@ -387,20 +391,31 @@ where } /// Notify all listeners about a new pending transaction. - fn on_new_pending_transaction(&self, ready: &TxHash) { + fn on_new_pending_transaction(&self, pending: &AddedTransaction) { + let tx_hash = *pending.hash(); + let propagate_allowed = pending.is_propagate_allowed(); + let mut transaction_listeners = self.pending_transaction_listener.lock(); - transaction_listeners.retain_mut(|listener| match listener.try_send(*ready) { - Ok(()) => true, - Err(err) => { - if matches!(err, mpsc::error::TrySendError::Full(_)) { - debug!( - target: "txpool", - "[{:?}] failed to send pending tx; channel full", - ready, - ); - true - } else { - false + transaction_listeners.retain_mut(|listener| { + if listener.kind.is_propagate_only() && !propagate_allowed { + // only emit this hash to listeners that are only allowed to receive propagate only + // transactions, such as network + return !listener.sender.is_closed() + } + + match listener.sender.try_send(tx_hash) { + Ok(()) => true, + Err(err) => { + if matches!(err, mpsc::error::TrySendError::Full(_)) { + debug!( + target: "txpool", + "[{:?}] failed to send pending tx; channel full", + tx_hash, + ); + true + } else { + false + } } } }); @@ -568,6 +583,14 @@ impl fmt::Debug for PoolInner, + /// Whether to include transactions that should not be propagated over the network. + kind: PendingTransactionListenerKind, +} + /// Tracks an added transaction and all graph changes caused by adding it. #[derive(Debug, Clone)] pub struct AddedPendingTransaction { @@ -599,13 +622,9 @@ pub enum AddedTransaction { } impl AddedTransaction { - /// Returns the hash of the transaction if it's pending - pub(crate) fn as_pending(&self) -> Option<&TxHash> { - if let AddedTransaction::Pending(tx) = self { - Some(tx.transaction.hash()) - } else { - None - } + /// Returns whether the transaction is pending + pub(crate) fn is_pending(&self) -> bool { + matches!(self, AddedTransaction::Pending(_)) } /// Returns the hash of the transaction @@ -615,6 +634,13 @@ impl AddedTransaction { AddedTransaction::Parked { transaction, .. } => transaction.hash(), } } + /// Returns if the transaction should be propagated. + pub(crate) fn is_propagate_allowed(&self) -> bool { + match self { + AddedTransaction::Pending(transaction) => transaction.transaction.propagate, + AddedTransaction::Parked { transaction, .. 
} => transaction.propagate, + } + } /// Converts this type into the event type for listeners pub(crate) fn into_new_transaction_event(self) -> NewTransactionEvent { diff --git a/crates/transaction-pool/src/test_utils/mod.rs b/crates/transaction-pool/src/test_utils/mod.rs index 1a77eb763c4a..e93aabdb6cef 100644 --- a/crates/transaction-pool/src/test_utils/mod.rs +++ b/crates/transaction-pool/src/test_utils/mod.rs @@ -5,7 +5,7 @@ mod mock; mod pool; use crate::{ - noop::NoopTransactionValidator, Pool, PoolTransaction, TransactionOrigin, + noop::MockTransactionValidator, Pool, PoolTransaction, TransactionOrigin, TransactionValidationOutcome, TransactionValidator, }; use async_trait::async_trait; @@ -13,9 +13,15 @@ pub use mock::*; use std::{marker::PhantomData, sync::Arc}; /// A [Pool] used for testing -pub type TestPool = Pool, MockOrdering>; +pub type TestPool = Pool, MockOrdering>; /// Returns a new [Pool] used for testing purposes pub fn testing_pool() -> TestPool { - Pool::new(NoopTransactionValidator::default(), MockOrdering::default(), Default::default()) + testing_pool_with_validator(MockTransactionValidator::default()) +} +/// Returns a new [Pool] used for testing purposes +pub fn testing_pool_with_validator( + validator: MockTransactionValidator, +) -> TestPool { + Pool::new(validator, MockOrdering::default(), Default::default()) } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 2947cf38fbca..fe5dcddc1535 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -105,10 +105,23 @@ pub trait TransactionPool: Send + Sync + Clone { /// Returns a new transaction change event stream for _all_ transactions in the pool. fn all_transactions_event_listener(&self) -> AllTransactionsEvents; - /// Returns a new Stream that yields transactions hashes for new ready transactions. + /// Returns a new Stream that yields transactions hashes for new __pending__ transactions + /// inserted into the pool that are allowed to be propagated. /// - /// Consumer: RPC - fn pending_transactions_listener(&self) -> Receiver; + /// Note: This is intended for networking and will __only__ yield transactions that are allowed + /// to be propagated over the network. + /// + /// Consumer: RPC/P2P + fn pending_transactions_listener(&self) -> Receiver { + self.pending_transactions_listener_for(PendingTransactionListenerKind::PropagateOnly) + } + + /// Returns a new Stream that yields transactions hashes for new __pending__ transactions + /// inserted into the pool depending on the given [PendingTransactionListenerKind] argument. + fn pending_transactions_listener_for( + &self, + kind: PendingTransactionListenerKind, + ) -> Receiver; /// Returns a new stream that yields new valid transactions added to the pool. fn new_transactions_listener(&self) -> Receiver>; @@ -273,6 +286,28 @@ pub trait TransactionPoolExt: TransactionPool { fn update_accounts(&self, accounts: Vec); } +/// Determines what kind of new pending transactions should be emitted by a stream of pending +/// transactions. +/// +/// This gives control whether to include transactions that are allowed to be propagated. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum PendingTransactionListenerKind { + /// Any new pending transactions + All, + /// Only transactions that are allowed to be propagated. 
+ /// + /// See also [ValidPoolTransaction] + PropagateOnly, +} + +impl PendingTransactionListenerKind { + /// Returns true if we're only interested in transactions that are allowed to be propagated. + #[inline] + pub fn is_propagate_only(&self) -> bool { + matches!(self, Self::PropagateOnly) + } +} + /// A Helper type that bundles all transactions in the pool. #[derive(Debug, Clone)] pub struct AllPoolTransactions { diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index aabe58845ccf..3c758bd749f2 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -117,7 +117,7 @@ pub struct ValidPoolTransaction { pub transaction: T, /// The identifier for this transaction. pub transaction_id: TransactionId, - /// Whether to propagate the transaction. + /// Whether it is allowed to propagate the transaction. pub propagate: bool, /// Timestamp when this was added to the pool. pub timestamp: Instant, diff --git a/crates/transaction-pool/tests/it/listeners.rs b/crates/transaction-pool/tests/it/listeners.rs index 1c68d89ee045..6503dedc2bf3 100644 --- a/crates/transaction-pool/tests/it/listeners.rs +++ b/crates/transaction-pool/tests/it/listeners.rs @@ -1,8 +1,11 @@ use assert_matches::assert_matches; use reth_transaction_pool::{ - test_utils::{testing_pool, MockTransactionFactory}, - FullTransactionEvent, TransactionEvent, TransactionOrigin, TransactionPool, + noop::MockTransactionValidator, + test_utils::{testing_pool, testing_pool_with_validator, MockTransactionFactory}, + FullTransactionEvent, PendingTransactionListenerKind, TransactionEvent, TransactionOrigin, + TransactionPool, }; +use std::{future::poll_fn, task::Poll}; use tokio_stream::StreamExt; #[tokio::test(flavor = "multi_thread")] @@ -37,3 +40,27 @@ async fn txpool_listener_all() { Some(FullTransactionEvent::Pending(hash)) if hash == transaction.transaction.get_hash() ); } + +#[tokio::test(flavor = "multi_thread")] +async fn txpool_listener_propagate_only() { + let txpool = testing_pool_with_validator(MockTransactionValidator::no_propagate_local()); + let mut mock_tx_factory = MockTransactionFactory::default(); + let transaction = mock_tx_factory.create_eip1559(); + let expected = *transaction.hash(); + let mut listener_network = txpool.pending_transactions_listener(); + let mut listener_all = + txpool.pending_transactions_listener_for(PendingTransactionListenerKind::All); + let result = + txpool.add_transaction(TransactionOrigin::Local, transaction.transaction.clone()).await; + assert!(result.is_ok()); + + let inserted = listener_all.recv().await.unwrap(); + assert_eq!(inserted, expected); + + poll_fn(|cx| { + // no propagation + assert!(listener_network.poll_recv(cx).is_pending()); + Poll::Ready(()) + }) + .await; +} From 82a2a6f41616076ba82ef383fcfddd175f2e5772 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 4 Aug 2023 14:39:07 +0200 Subject: [PATCH 343/722] feat: extend RethCliExt with payload builder (#4022) --- bin/reth/src/args/payload_builder_args.rs | 35 ++++-- bin/reth/src/args/rpc_server_args.rs | 18 ++- bin/reth/src/cli/config.rs | 48 ++++++++ bin/reth/src/cli/ext.rs | 105 ++++++++++-------- bin/reth/src/cli/mod.rs | 5 +- bin/reth/src/node/mod.rs | 29 +++-- .../src/main.rs | 7 +- 7 files changed, 158 insertions(+), 89 deletions(-) create mode 100644 bin/reth/src/cli/config.rs diff --git a/bin/reth/src/args/payload_builder_args.rs b/bin/reth/src/args/payload_builder_args.rs index f63e59257bbc..2f3837db7457 100644 --- 
a/bin/reth/src/args/payload_builder_args.rs +++ b/bin/reth/src/args/payload_builder_args.rs @@ -1,11 +1,13 @@ -use crate::{args::utils::parse_duration_from_secs, version::default_extradata}; +use crate::{ + args::utils::parse_duration_from_secs, cli::config::PayloadBuilderConfig, + version::default_extradata, +}; use clap::{ builder::{RangedU64ValueParser, TypedValueParser}, Arg, Args, Command, }; -use reth_primitives::{bytes::BytesMut, constants::MAXIMUM_EXTRA_DATA_SIZE}; -use reth_rlp::Encodable; -use std::{ffi::OsStr, time::Duration}; +use reth_primitives::constants::MAXIMUM_EXTRA_DATA_SIZE; +use std::{borrow::Cow, ffi::OsStr, time::Duration}; /// Parameters for configuring the Payload Builder #[derive(Debug, Args, PartialEq, Default)] @@ -36,12 +38,25 @@ pub struct PayloadBuilderArgs { pub max_payload_tasks: usize, } -impl PayloadBuilderArgs { - /// Returns the rlp-encoded extradata bytes. - pub fn extradata_bytes(&self) -> reth_primitives::bytes::Bytes { - let mut extradata = BytesMut::new(); - self.extradata.as_bytes().encode(&mut extradata); - extradata.freeze() +impl PayloadBuilderConfig for PayloadBuilderArgs { + fn extradata(&self) -> Cow<'_, str> { + self.extradata.as_str().into() + } + + fn interval(&self) -> Duration { + self.interval + } + + fn deadline(&self) -> Duration { + self.deadline + } + + fn max_gas_limit(&self) -> u64 { + self.max_gas_limit + } + + fn max_payload_tasks(&self) -> usize { + self.max_payload_tasks } } diff --git a/bin/reth/src/args/rpc_server_args.rs b/bin/reth/src/args/rpc_server_args.rs index 16315b17aebe..842d6b97e635 100644 --- a/bin/reth/src/args/rpc_server_args.rs +++ b/bin/reth/src/args/rpc_server_args.rs @@ -2,7 +2,7 @@ use crate::{ args::GasPriceOracleArgs, - cli::ext::{NoopArgsExt, RethRpcConfig, RethRpcServerArgsExt}, + cli::{config::RethRpcConfig, ext::RethNodeCommandExt}, }; use clap::{ builder::{PossibleValue, RangedU64ValueParser, TypedValueParser}, @@ -57,7 +57,7 @@ pub(crate) const RPC_DEFAULT_MAX_TRACING_REQUESTS: u32 = 25; /// Parameters for configuring the rpc more granularity via CLI #[derive(Debug, Args)] #[command(next_help_heading = "RPC")] -pub struct RpcServerArgs { +pub struct RpcServerArgs { /// Enable the HTTP-RPC server #[arg(long, default_value_if("dev", "true", "true"))] pub http: bool, @@ -163,13 +163,9 @@ pub struct RpcServerArgs { /// Maximum number of env cache entries. #[arg(long, default_value_t = DEFAULT_ENV_CACHE_MAX_LEN)] pub env_cache_len: u32, - - /// Additional arguments for rpc. - #[clap(flatten)] - pub ext: Ext, } -impl RpcServerArgs { +impl RpcServerArgs { /// Returns the max request size in bytes. pub fn rpc_max_request_size_bytes(&self) -> u32 { self.rpc_max_request_size * 1024 * 1024 @@ -227,7 +223,7 @@ impl RpcServerArgs { /// for the auth server that handles the `engine_` API that's accessed by the consensus /// layer. 
#[allow(clippy::too_many_arguments)] - pub async fn start_servers( + pub async fn start_servers( &self, provider: Provider, pool: Pool, @@ -236,6 +232,7 @@ impl RpcServerArgs { events: Events, engine_api: Engine, jwt_secret: JwtSecret, + ext: &Ext, ) -> eyre::Result<(RpcServerHandle, AuthServerHandle)> where Provider: BlockReaderIdExt @@ -252,6 +249,7 @@ impl RpcServerArgs { Tasks: TaskSpawner + Clone + 'static, Events: CanonStateSubscriptions + Clone + 'static, Engine: EngineApiServer, + Ext: RethNodeCommandExt, { let auth_config = self.auth_server_config(jwt_secret)?; @@ -267,7 +265,7 @@ impl RpcServerArgs { .build_with_auth_server(module_config, engine_api); // apply configured customization - self.ext.extend_rpc_modules(self, &mut registry, &mut rpc_modules)?; + ext.extend_rpc_modules(self, &mut registry, &mut rpc_modules)?; let server_config = self.rpc_server_config(); let launch_rpc = rpc_modules.start_server(server_config).map_ok(|handle| { @@ -448,7 +446,7 @@ impl RpcServerArgs { } } -impl RethRpcConfig for RpcServerArgs { +impl RethRpcConfig for RpcServerArgs { fn is_ipc_enabled(&self) -> bool { // By default IPC is enabled therefor it is enabled if the `ipcdisable` is false. !self.ipcdisable diff --git a/bin/reth/src/cli/config.rs b/bin/reth/src/cli/config.rs new file mode 100644 index 000000000000..a698b3a5d472 --- /dev/null +++ b/bin/reth/src/cli/config.rs @@ -0,0 +1,48 @@ +//! Config traits for various node components. + +use reth_revm::primitives::bytes::BytesMut; +use reth_rlp::Encodable; +use reth_rpc_builder::EthConfig; +use std::{borrow::Cow, time::Duration}; + +/// A trait that provides configured RPC server. +/// +/// This provides all basic config values for the RPC server and is implemented by the +/// [RpcServerArgs](crate::args::RpcServerArgs) type. +pub trait RethRpcConfig { + /// Returns whether ipc is enabled. + fn is_ipc_enabled(&self) -> bool; + + /// The configured ethereum RPC settings. + fn eth_config(&self) -> EthConfig; + + // TODO extract more functions from RpcServerArgs +} + +/// A trait that provides payload builder settings. +/// +/// This provides all basic payload builder settings and is implemented by the +/// [PayloadBuilderArgs](crate::args::PayloadBuilderArgs) type. +pub trait PayloadBuilderConfig { + /// Block extra data set by the payload builder. + fn extradata(&self) -> Cow<'_, str>; + + /// Returns the rlp-encoded extradata bytes. + fn extradata_rlp_bytes(&self) -> reth_primitives::bytes::Bytes { + let mut extradata = BytesMut::new(); + self.extradata().as_bytes().encode(&mut extradata); + extradata.freeze() + } + + /// The interval at which the job should build a new payload after the last. + fn interval(&self) -> Duration; + + /// The deadline for when the payload builder job should resolve. + fn deadline(&self) -> Duration; + + /// Target gas ceiling for built blocks. + fn max_gas_limit(&self) -> u64; + + /// Maximum number of tasks to spawn for building a payload. + fn max_payload_tasks(&self) -> usize; +} diff --git a/bin/reth/src/cli/ext.rs b/bin/reth/src/cli/ext.rs index 11db2ca347d7..bd9f6eb81018 100644 --- a/bin/reth/src/cli/ext.rs +++ b/bin/reth/src/cli/ext.rs @@ -1,73 +1,36 @@ //! Support for integrating customizations into the CLI. 
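// Editor's aside (illustration only, not part of this diff): in the new
// `PayloadBuilderConfig` trait above, `extradata_rlp_bytes` is a provided
// method, so an implementor only supplies the raw string. A hedged sketch;
// `MyBuilderConfig` is hypothetical:
//
//     impl PayloadBuilderConfig for MyBuilderConfig {
//         fn extradata(&self) -> Cow<'_, str> { "my-node/v1".into() }
//         // interval, deadline, max_gas_limit, max_payload_tasks elided
//     }
//
//     // RLP-encodes the string and returns frozen bytes destined for the
//     // extra-data field of every block this node builds:
//     let extradata = my_builder_config.extradata_rlp_bytes();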
+use crate::cli::config::{PayloadBuilderConfig, RethRpcConfig}; use clap::Args; +use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; use reth_network_api::{NetworkInfo, Peers}; +use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; +use reth_primitives::ChainSpec; use reth_provider::{ BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, StateProviderFactory, }; -use reth_rpc_builder::{EthConfig, RethModuleRegistry, TransportRpcModules}; +use reth_rpc_builder::{RethModuleRegistry, TransportRpcModules}; use reth_tasks::TaskSpawner; use reth_transaction_pool::TransactionPool; -use std::fmt; +use std::{fmt, sync::Arc}; /// A trait that allows for extending parts of the CLI with additional functionality. pub trait RethCliExt { - /// Extends the rpc arguments for the node - type RpcExt: RethRpcServerArgsExt; + /// Provides additional configuration for the node command. + type Node: RethNodeCommandExt; } impl RethCliExt for () { - type RpcExt = NoopArgsExt; + type Node = DefaultRethNodeCommandConfig; } -/// An [Args] extension that does nothing. -#[derive(Debug, Clone, Copy, Default, Args)] -pub struct NoopArgsExt; - -/// A trait that provides configured RPC server. -/// -/// This provides all basic config values for the RPC server and is implemented by the -/// [RpcServerArgs](crate::args::RpcServerArgs) type. -pub trait RethRpcConfig { - /// Returns whether ipc is enabled. - fn is_ipc_enabled(&self) -> bool; - - /// The configured ethereum RPC settings. - fn eth_config(&self) -> EthConfig; - - // TODO extract more functions from RpcServerArgs -} - -/// A trait that allows further customization of the RPC server via CLI. -pub trait RethRpcServerArgsExt: fmt::Debug + clap::Args { +/// A trait that allows for extending parts of the CLI with additional functionality. +pub trait RethNodeCommandExt: fmt::Debug + clap::Args { /// Allows for registering additional RPC modules for the transports. /// /// This is expected to call the merge functions of [TransportRpcModules], for example /// [TransportRpcModules::merge_configured] - fn extend_rpc_modules( - &self, - config: &Conf, - registry: &mut RethModuleRegistry, - modules: &mut TransportRpcModules<()>, - ) -> eyre::Result<()> - where - Conf: RethRpcConfig, - Provider: BlockReaderIdExt - + StateProviderFactory - + EvmEnvProvider - + ChainSpecProvider - + ChangeSetReader - + Clone - + Unpin - + 'static, - Pool: TransactionPool + Clone + 'static, - Network: NetworkInfo + Peers + Clone + 'static, - Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static; -} - -impl RethRpcServerArgsExt for NoopArgsExt { fn extend_rpc_modules( &self, _config: &Conf, @@ -91,4 +54,50 @@ impl RethRpcServerArgsExt for NoopArgsExt { { Ok(()) } + + /// Configures the [PayloadBuilderService] for the node, spawns it and returns the + /// [PayloadBuilderHandle]. + /// + /// By default this spawns a [BasicPayloadJobGenerator] with the default configuration + /// [BasicPayloadJobGeneratorConfig]. 
+ fn spawn_payload_builder_service( + &self, + conf: &Conf, + provider: Provider, + pool: Pool, + executor: Tasks, + chain_spec: Arc, + ) -> eyre::Result + where + Conf: PayloadBuilderConfig, + Provider: StateProviderFactory + BlockReaderIdExt + Clone + Unpin + 'static, + Pool: TransactionPool + Unpin + 'static, + Tasks: TaskSpawner + Clone + Unpin + 'static, + { + let payload_generator = BasicPayloadJobGenerator::new( + provider, + pool, + executor.clone(), + BasicPayloadJobGeneratorConfig::default() + .interval(conf.interval()) + .deadline(conf.deadline()) + .max_payload_tasks(conf.max_payload_tasks()) + .extradata(conf.extradata_rlp_bytes()) + .max_gas_limit(conf.max_gas_limit()), + chain_spec, + ); + let (payload_service, payload_builder) = PayloadBuilderService::new(payload_generator); + + executor.spawn_critical("payload builder service", Box::pin(payload_service)); + + Ok(payload_builder) + } + + // TODO move network related functions here } + +/// The default configuration for the reth node command [Command](crate::node::Command). +#[derive(Debug, Clone, Copy, Default, Args)] +pub struct DefaultRethNodeCommandConfig; + +impl RethNodeCommandExt for DefaultRethNodeCommandConfig {} diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index 19ffa3fb2153..3939ad145790 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -3,7 +3,7 @@ use crate::{ args::utils::genesis_value_parser, chain, cli::ext::RethCliExt, - config, db, debug_cmd, + db, debug_cmd, dirs::{LogsDir, PlatformPath}, node, p2p, runner::CliRunner, @@ -19,6 +19,7 @@ use reth_tracing::{ }; use std::sync::Arc; +pub mod config; pub mod ext; /// The main reth cli interface. @@ -127,7 +128,7 @@ pub enum Commands { TestVectors(test_vectors::Command), /// Write config to stdout #[command(name = "config")] - Config(config::Command), + Config(crate::config::Command), /// Various debug routines #[command(name = "debug")] Debug(debug_cmd::Command), diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index ed3beefba42d..6117d39cf44f 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -8,7 +8,7 @@ use crate::{ DatabaseArgs, DebugArgs, DevArgs, NetworkArgs, PayloadBuilderArgs, PruningArgs, RpcServerArgs, TxPoolArgs, }, - cli::ext::RethCliExt, + cli::ext::{RethCliExt, RethNodeCommandExt}, dirs::{DataDirPath, MaybePlatformPath}, init::init_genesis, node::cl_events::ConsensusLayerHealthEvents, @@ -22,7 +22,7 @@ use eyre::Context; use fdlimit::raise_fd_limit; use futures::{future::Either, pin_mut, stream, stream_select, StreamExt}; use reth_auto_seal_consensus::{AutoSealBuilder, AutoSealConsensus, MiningMode}; -use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; + use reth_beacon_consensus::{BeaconConsensus, BeaconConsensusEngine, MIN_BLOCKS_FOR_PIPELINE_RUN}; use reth_blockchain_tree::{ config::BlockchainTreeConfig, externals::TreeExternals, BlockchainTree, ShareableBlockchainTree, @@ -44,7 +44,7 @@ use reth_interfaces::{ }; use reth_network::{error::NetworkError, NetworkConfig, NetworkHandle, NetworkManager}; use reth_network_api::NetworkInfo; -use reth_payload_builder::PayloadBuilderService; + use reth_primitives::{ stage::StageId, BlockHashOrNumber, BlockNumber, ChainSpec, DisplayHardforks, Head, SealedHeader, H256, @@ -127,7 +127,7 @@ pub struct Command { network: NetworkArgs, #[clap(flatten)] - rpc: RpcServerArgs, + rpc: RpcServerArgs, #[clap(flatten)] txpool: TxPoolArgs, @@ -146,6 +146,10 @@ pub struct Command { #[clap(flatten)] pruning: 
PruningArgs, + + /// Additional cli arguments + #[clap(flatten)] + pub ext: Ext::Node, } impl Command { @@ -276,22 +280,14 @@ impl Command { let (consensus_engine_tx, consensus_engine_rx) = unbounded_channel(); - let payload_generator = BasicPayloadJobGenerator::new( + debug!(target: "reth::cli", "Spawning payload builder service"); + let payload_builder = self.ext.spawn_payload_builder_service( + &self.builder, blockchain_db.clone(), transaction_pool.clone(), ctx.task_executor.clone(), - BasicPayloadJobGeneratorConfig::default() - .interval(self.builder.interval) - .deadline(self.builder.deadline) - .max_payload_tasks(self.builder.max_payload_tasks) - .extradata(self.builder.extradata_bytes()) - .max_gas_limit(self.builder.max_gas_limit), Arc::clone(&self.chain), - ); - let (payload_service, payload_builder) = PayloadBuilderService::new(payload_generator); - - debug!(target: "reth::cli", "Spawning payload builder service"); - ctx.task_executor.spawn_critical("payload builder service", payload_service); + )?; let max_block = if let Some(block) = self.debug.max_block { Some(block) @@ -451,6 +447,7 @@ impl Command { blockchain_tree, engine_api, jwt_secret, + &self.ext, ) .await?; diff --git a/examples/additional-rpc-namespace-in-cli/src/main.rs b/examples/additional-rpc-namespace-in-cli/src/main.rs index 46df9a6bb2be..ea15042164e4 100644 --- a/examples/additional-rpc-namespace-in-cli/src/main.rs +++ b/examples/additional-rpc-namespace-in-cli/src/main.rs @@ -15,7 +15,8 @@ use clap::Parser; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use reth::{ cli::{ - ext::{RethCliExt, RethRpcConfig, RethRpcServerArgsExt}, + config::RethRpcConfig, + ext::{RethCliExt, RethNodeCommandExt}, Cli, }, network::{NetworkInfo, Peers}, @@ -37,7 +38,7 @@ struct MyRethCliExt; impl RethCliExt for MyRethCliExt { /// This tells the reth CLI to install the `txpool` rpc namespace via `RethCliTxpoolExt` - type RpcExt = RethCliTxpoolExt; + type Node = RethCliTxpoolExt; } /// Our custom cli args extension that adds one flag to reth default CLI. @@ -48,7 +49,7 @@ struct RethCliTxpoolExt { pub enable_ext: bool, } -impl RethRpcServerArgsExt for RethCliTxpoolExt { +impl RethNodeCommandExt for RethCliTxpoolExt { // This is the entrypoint for the CLI to extend the RPC server with custom rpc namespaces. fn extend_rpc_modules( &self, From 689b9d635823022470b30e4112b722a48fce144d Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Fri, 4 Aug 2023 14:26:23 +0100 Subject: [PATCH 344/722] fix(pruner): tx number range with genesis (#4061) --- crates/prune/src/pruner.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs index 8c1ab001af83..913aede6780f 100644 --- a/crates/prune/src/pruner.rs +++ b/crates/prune/src/pruner.rs @@ -196,13 +196,16 @@ impl Pruner { prune_part: PrunePart, to_block: BlockNumber, ) -> reth_interfaces::Result>> { - let checkpoint = provider.get_prune_checkpoint(prune_part)?.unwrap_or(PruneCheckpoint { - block_number: 0, // No checkpoint, fresh pruning - prune_mode: PruneMode::Full, // Doesn't matter in this case, can be anything - }); - // Get first transaction of the next block after the highest pruned one + let from_block_number = provider + .get_prune_checkpoint(prune_part)? 
+            // Checkpoint exists, prune from the next block after the highest pruned one
+            .map(|checkpoint| checkpoint.block_number + 1)
+            // No checkpoint exists, prune from genesis
+            .unwrap_or(0);
+
+        // Get first transaction
         let from_tx_num =
-            provider.block_body_indices(checkpoint.block_number + 1)?.map(|body| body.first_tx_num);
+            provider.block_body_indices(from_block_number)?.map(|body| body.first_tx_num);
         // If no block body index is found, the DB is either corrupted or we've already pruned up to
         // the latest block, so there's nothing to prune now.
         let Some(from_tx_num) = from_tx_num else { return Ok(None) };

From ff1ef294ccdd334e0b1a9184575beb4c643593c1 Mon Sep 17 00:00:00 2001
From: "Supernovahs.eth" <91280922+supernovahs@users.noreply.github.com>
Date: Fri, 4 Aug 2023 19:27:14 +0530
Subject: [PATCH 345/722] (feat):add private variant in tx origin (#4059)

Co-authored-by: Matthias Seitz
---
 crates/transaction-pool/src/noop.rs         | 6 +++++-
 crates/transaction-pool/src/traits.rs       | 5 +++++
 crates/transaction-pool/src/validate/eth.rs | 7 +++++--
 3 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs
index 37005019da4c..920a98b0d682 100644
--- a/crates/transaction-pool/src/noop.rs
+++ b/crates/transaction-pool/src/noop.rs
@@ -188,7 +188,11 @@ impl TransactionValidator for MockTransactionValidator {
             balance: Default::default(),
             state_nonce: 0,
             transaction,
-            propagate: if origin.is_local() { self.propagate_local } else { true },
+            propagate: match origin {
+                TransactionOrigin::External => true,
+                TransactionOrigin::Local => self.propagate_local,
+                TransactionOrigin::Private => false,
+            },
         }
     }
 }
diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs
index fe5dcddc1535..9bcda6ec40f9 100644
--- a/crates/transaction-pool/src/traits.rs
+++ b/crates/transaction-pool/src/traits.rs
@@ -404,6 +404,11 @@ pub enum TransactionOrigin {
     /// This is usually considered an "untrusted" source, for example received from another in the
     /// network.
     External,
+    /// Transaction originated locally and is intended to remain private.
+    ///
+    /// This type of transaction should not be propagated to the network. It's meant for
+    /// private usage within the local node only.
+    Private,
 }

 // === impl TransactionOrigin ===
diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs
index bfcf04028ad0..168546a71ae0 100644
--- a/crates/transaction-pool/src/validate/eth.rs
+++ b/crates/transaction-pool/src/validate/eth.rs
@@ -461,8 +461,11 @@ where
             state_nonce: account.nonce,
             transaction,
             // by this point assume all external transactions should be propagated
-            propagate: matches!(origin, TransactionOrigin::External) ||
-                self.propagate_local_transactions,
+            propagate: match origin {
+                TransactionOrigin::External => true,
+                TransactionOrigin::Local => self.propagate_local_transactions,
+                TransactionOrigin::Private => false,
+            },
         }
     }
 }

From b673b6c15856896c8c12c0f0a39f59fbce343381 Mon Sep 17 00:00:00 2001
From: Alexey Shekhirin
Date: Fri, 4 Aug 2023 16:39:15 +0100
Subject: [PATCH 346/722] feat(engine): set `eth_syncing = true` if pruner is active (#4063)

---
 crates/consensus/beacon/src/engine/mod.rs | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs
index f5025e049e44..93167e8a9817 100644
--- a/crates/consensus/beacon/src/engine/mod.rs
+++ b/crates/consensus/beacon/src/engine/mod.rs
@@ -1599,6 +1599,10 @@ where
             EnginePruneEvent::Started(tip_block_number) => {
                 trace!(target: "consensus::engine", %tip_block_number, "Pruner started");
                 self.metrics.pruner_runs.increment(1);
+                // The engine can't process any FCU/payload messages from the CL while pruning, as
+                // the pruner needs exclusive write access to the database. To prevent the CL from
+                // sending us unneeded updates, we respond `true` to `eth_syncing` requests.
+                self.sync_state_updater.update_sync_state(SyncState::Syncing);
             }
             EnginePruneEvent::TaskDropped => {
                 error!(target: "consensus::engine", "Failed to receive spawned pruner");
@@ -1606,6 +1610,7 @@ where
             }
             EnginePruneEvent::Finished { result } => {
                 trace!(target: "consensus::engine", ?result, "Pruner finished");
+                self.sync_state_updater.update_sync_state(SyncState::Idle);
                 match result {
                     Ok(_) => {
                         // Update the state and hashes of the blockchain tree if possible.
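
As a reading aid for the two hunks above: the contract this patch introduces is a simple toggle around the pruner's lifetime. While the pruner holds exclusive database access, the node advertises itself as syncing; once it finishes, normal operation resumes. A minimal, self-contained sketch of that hand-off follows. The `SyncState`/`update_sync_state` names mirror the patch, but the `SyncStateUpdater` trait and `Engine` struct here are simplified stand-ins, not reth's actual definitions.

#[derive(Debug)]
enum SyncState {
    Idle,
    Syncing,
}

// Simplified stand-in for reth's sync-state updater.
trait SyncStateUpdater {
    fn update_sync_state(&self, state: SyncState);
}

struct Engine<U: SyncStateUpdater> {
    sync_state_updater: U,
}

impl<U: SyncStateUpdater> Engine<U> {
    fn on_pruner_started(&self) {
        // Pruning takes exclusive write access to the database, so advertise
        // `eth_syncing = true`; the CL then stops sending FCU/payload messages
        // the engine could not process anyway.
        self.sync_state_updater.update_sync_state(SyncState::Syncing);
    }

    fn on_pruner_finished(&self) {
        // Database access is released; resume reporting `eth_syncing = false`.
        self.sync_state_updater.update_sync_state(SyncState::Idle);
    }
}

struct LogUpdater;

impl SyncStateUpdater for LogUpdater {
    fn update_sync_state(&self, state: SyncState) {
        println!("sync state is now: {state:?}");
    }
}

fn main() {
    let engine = Engine { sync_state_updater: LogUpdater };
    engine.on_pruner_started();
    engine.on_pruner_finished();
}
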
From 64188a398714277be7ef157407413cd02650e11e Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Fri, 4 Aug 2023 16:40:37 +0100 Subject: [PATCH 347/722] chore(engine): downgrade new payload buffering log to debug (#4068) --- crates/consensus/beacon/src/engine/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 93167e8a9817..faab247c422d 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1062,7 +1062,7 @@ where self.try_insert_new_payload(block) } else { if self.is_prune_active() { - warn!(target: "consensus::engine", "Pruning is in progress, buffering new payload."); + debug!(target: "consensus::engine", "Pruning is in progress, buffering new payload."); } self.try_buffer_payload(block) }; From 443383b3070bcf9a49aeed2640376b8b7f9707a8 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 4 Aug 2023 20:14:40 +0200 Subject: [PATCH 348/722] chore: make txpool cargo test compile (#4058) --- Cargo.toml | 4 ++++ bin/reth/Cargo.toml | 2 +- crates/interfaces/Cargo.toml | 4 ++-- crates/net/eth-wire/Cargo.toml | 8 ++++---- crates/primitives/Cargo.toml | 8 ++++---- crates/storage/codecs/Cargo.toml | 8 ++++---- crates/storage/db/Cargo.toml | 8 ++++---- crates/transaction-pool/Cargo.toml | 6 ++++-- crates/trie/Cargo.toml | 2 +- 9 files changed, 28 insertions(+), 22 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 5b1333c2fb37..602c36e327f4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -143,3 +143,7 @@ jsonrpsee-types = { version = "0.19" } ## crypto secp256k1 = { version = "0.27.0", default-features = false, features = ["global-context", "rand-std", "recovery"] } + +### misc-testing +proptest = "1.0" +arbitrary = "1.1" \ No newline at end of file diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 704955498688..6f636cbb5143 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -63,7 +63,7 @@ metrics-util = "0.14.0" metrics-process = "1.0.9" # test vectors generation -proptest = "1.0" +proptest.workspace = true # tui comfy-table = "7.0" diff --git a/crates/interfaces/Cargo.toml b/crates/interfaces/Cargo.toml index c0525a778412..9705f00bbff1 100644 --- a/crates/interfaces/Cargo.toml +++ b/crates/interfaces/Cargo.toml @@ -30,7 +30,7 @@ auto_impl = "1.0" thiserror.workspace = true tracing.workspace = true rand.workspace = true -arbitrary = { version = "1.1.7", features = ["derive"], optional = true } +arbitrary = { workspace = true, features = ["derive"], optional = true } secp256k1 = { workspace = true, default-features = false, features = [ "alloc", "recovery", @@ -44,7 +44,7 @@ clap = { version = "4", features = ["derive"], optional = true } reth-db = { path = "../storage/db", features = ["test-utils"] } tokio = { workspace = true, features = ["full"] } tokio-stream = { workspace = true, features = ["sync"] } -arbitrary = { version = "1.1.7", features = ["derive"] } +arbitrary = { workspace = true, features = ["derive"] } hex-literal = "0.3" secp256k1 = { workspace = true, features = [ "alloc", diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index 19a78317c8aa..d3042c94feee 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -40,8 +40,8 @@ smol_str = "0.2" async-trait.workspace = true # arbitrary utils -arbitrary = { version = "1.1.7", features = ["derive"], optional = true } -proptest = { version = "1.0", optional = true } +arbitrary = { workspace = 
true, features = ["derive"], optional = true } +proptest = { workspace = true, optional = true } proptest-derive = { version = "0.3", optional = true } [dev-dependencies] @@ -56,8 +56,8 @@ hex = "0.4" rand.workspace = true secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } -arbitrary = { version = "1.1.7", features = ["derive"] } -proptest = { version = "1.0" } +arbitrary = { workspace = true, features = ["derive"] } +proptest.workspace = true proptest-derive = "0.3" [features] diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index d181a462f1f3..0931d4fe4376 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -65,8 +65,8 @@ plain_hasher = "0.2" hash-db = "0.15" # arbitrary utils -arbitrary = { version = "1.1.7", features = ["derive"], optional = true } -proptest = { version = "1.0", optional = true } +arbitrary = { workspace = true, features = ["derive"], optional = true } +proptest = { workspace = true, optional = true } proptest-derive = { version = "0.3", optional = true } strum = { workspace = true, features = ["derive"] } @@ -76,8 +76,8 @@ hex-literal = "0.3" test-fuzz = "4" rand.workspace = true revm-primitives = { workspace = true, features = ["arbitrary"] } -arbitrary = { version = "1.1.7", features = ["derive"] } -proptest = { version = "1.0" } +arbitrary = { workspace = true, features = ["derive"] } +proptest.workspace = true proptest-derive = "0.3" assert_matches = "1.5.0" toml = "0.7.4" diff --git a/crates/storage/codecs/Cargo.toml b/crates/storage/codecs/Cargo.toml index 3b6719ae0ad8..458ef3063659 100644 --- a/crates/storage/codecs/Cargo.toml +++ b/crates/storage/codecs/Cargo.toml @@ -21,8 +21,8 @@ codecs-derive = { path = "./derive", default-features = false } revm-primitives = { workspace = true, features = ["serde"] } # arbitrary utils -arbitrary = { version = "1.1.7", features = ["derive"], optional = true } -proptest = { version = "1.0", optional = true } +arbitrary = { workspace = true, features = ["derive"], optional = true } +proptest = { workspace = true, optional = true } proptest-derive = { version = "0.3", optional = true } [dev-dependencies] @@ -32,6 +32,6 @@ serde = "1.0" modular-bitfield = "0.11.2" test-fuzz = "4" -arbitrary = { version = "1.1.7", features = ["derive"] } -proptest = { version = "1.0" } +arbitrary = { workspace = true, features = ["derive"] } +proptest.workspace = true proptest-derive = "0.3" diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index 9e2d405b1973..e980a8e71e0f 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -41,8 +41,8 @@ derive_more = "0.99" eyre = "0.6.8" # arbitrary utils -arbitrary = { version = "1.1.7", features = ["derive"], optional = true } -proptest = { version = "1.0", optional = true } +arbitrary = { workspace = true, features = ["derive"], optional = true } +proptest = { workspace = true, optional = true } proptest-derive = { version = "0.3", optional = true } [dev-dependencies] @@ -65,8 +65,8 @@ secp256k1.workspace = true async-trait.workspace = true -arbitrary = { version = "1.1.7", features = ["derive"] } -proptest = { version = "1.0" } +arbitrary = { workspace = true, features = ["derive"] } +proptest.workspace = true proptest-derive = "0.3" serde_json.workspace = true diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index dadfedff611e..5cf13b257542 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ 
-44,12 +44,13 @@ auto_impl = "1.0"

 # testing
 rand = { workspace = true, optional = true }
 paste = { version = "1.0", optional = true }
-proptest = { version = "1.0", optional = true }
+proptest = { workspace = true, optional = true }

 [dev-dependencies]
+reth-primitives = { workspace = true, features = ["arbitrary"] }
 paste = "1.0"
 rand = "0.8"
-proptest = "1.0"
+proptest.workspace = true
 criterion = "0.5"
 assert_matches = "1.5"
@@ -61,4 +62,5 @@ arbitrary = ["proptest", "reth-primitives/arbitrary"]

 [[bench]]
 name = "reorder"
+required-features = ["test-utils"]
 harness = false
diff --git a/crates/trie/Cargo.toml b/crates/trie/Cargo.toml
index 428561838694..3c37d4295e8e 100644
--- a/crates/trie/Cargo.toml
+++ b/crates/trie/Cargo.toml
@@ -41,7 +41,7 @@ reth-provider.workspace = true
 triehash = "0.8"

 # misc
-proptest = "1.0"
+proptest.workspace = true
 tokio = { workspace = true, default-features = false, features = ["sync", "rt", "macros"] }
 tokio-stream.workspace = true
 criterion = "0.5"

From df94dba14b039edc0d666eb881191e692893ba20 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Fri, 4 Aug 2023 22:00:41 +0200
Subject: [PATCH 349/722] chore: explicitly set max allowed connections for auth server (#4067)

---
 crates/rpc/rpc-builder/src/auth.rs | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs
index 95e64b8fbfe2..f30b29ee35a2 100644
--- a/crates/rpc/rpc-builder/src/auth.rs
+++ b/crates/rpc/rpc-builder/src/auth.rs
@@ -215,6 +215,11 @@ impl AuthServerConfigBuilder {
             // payload bodies limit for `engine_getPayloadBodiesByRangeV`
             // ~750MB per response should be enough
             .max_response_body_size(750 * 1024 * 1024)
+            // Connections to this server are always authenticated, hence this only affects
+            // connections from the CL or any other client that uses JWT. This should be
+            // more than enough so that the CL (or multiple CL nodes) will never get rate
+            // limited.
+            .max_connections(500)
             // bump the default request size slightly, there aren't any methods exposed with
             // dynamic request params that can exceed this
             .max_request_body_size(25 * 1024 * 1024)

From 544c51cc9ff15d0626cdb63666ad3d195c806be0 Mon Sep 17 00:00:00 2001
From: prames <134806363+0xprames@users.noreply.github.com>
Date: Fri, 4 Aug 2023 17:13:17 -0400
Subject: [PATCH 350/722] feat(txpool) feed new pending transactions to BestTxns iterator (#4053)

---
 crates/transaction-pool/src/pool/best.rs    | 38 +++++++++++++++++++++
 crates/transaction-pool/src/pool/pending.rs | 12 +++++++
 crates/transaction-pool/src/traits.rs       |  4 +++
 crates/transaction-pool/tests/it/main.rs    |  2 ++
 crates/transaction-pool/tests/it/pending.rs | 25 ++++++++++++++
 5 files changed, 81 insertions(+)
 create mode 100644 crates/transaction-pool/tests/it/pending.rs

diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs
index c5190327f09c..61b47d44f2e4 100644
--- a/crates/transaction-pool/src/pool/best.rs
+++ b/crates/transaction-pool/src/pool/best.rs
@@ -7,6 +7,7 @@ use std::{
     collections::{BTreeMap, BTreeSet, HashSet},
     sync::Arc,
 };
+use tokio::sync::broadcast::Receiver;
 use tracing::debug;

 /// An iterator that returns transactions that can be executed on the current state (*best*
@@ -61,6 +62,12 @@ pub(crate) struct BestTransactions {
     pub(crate) independent: BTreeSet>,
     /// There might be the case where a yielded transaction is invalid; this will track it.
 pub(crate) invalid: HashSet,
+    /// Used to receive any new pending transactions that have been added to the pool after this
+    /// iterator was snapshotted
+    ///
+    /// These new pending transactions are inserted into this iterator's pool before yielding the
+    /// next value
+    pub(crate) new_transaction_receiver: Receiver>,
 }

 impl BestTransactions {
@@ -76,6 +83,36 @@ impl BestTransactions {
     pub(crate) fn ancestor(&self, id: &TransactionId) -> Option<&PendingTransaction> {
         self.all.get(&id.unchecked_ancestor()?)
     }
+
+    /// Non-blocking read on the new pending transactions subscription channel
+    fn try_recv(&mut self) -> Option> {
+        match self.new_transaction_receiver.try_recv() {
+            Ok(tx) => Some(tx),
+            // note TryRecvError::Lagged can be returned here, which is an error that attempts to
+            // correct itself on consecutive try_recv() attempts
+
+            // the cost of ignoring this error is allowing old transactions to get
+            // overwritten after the channel buffer size is met
+
+            // this case is still better than the existing iterator behavior where no new
+            // pending txs are surfaced to consumers
+            Err(_) => None,
+        }
+    }
+
+    /// Checks for new transactions that have come into the PendingPool after this iterator was
+    /// created and inserts them
+    fn add_new_transactions(&mut self) {
+        while let Some(pending_tx) = self.try_recv() {
+            let tx = pending_tx.transaction.clone();
+            // same logic as PendingPool::add_transaction/PendingPool::best_with_unlocked
+            let tx_id = *tx.id();
+            if self.ancestor(&tx_id).is_none() {
+                self.independent.insert(pending_tx.clone());
+            }
+            self.all.insert(tx_id, pending_tx);
+        }
+    }
 }

 impl crate::traits::BestTransactions for BestTransactions {
@@ -89,6 +126,7 @@ impl Iterator for BestTransactions {
     fn next(&mut self) -> Option {
         loop {
+            self.add_new_transactions();
             // Remove the next independent tx with the highest priority
             let best = self.independent.pop_last()?;
             let hash = best.transaction.hash();
diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs
index 0ec5be69b56e..3455332090d7 100644
--- a/crates/transaction-pool/src/pool/pending.rs
+++ b/crates/transaction-pool/src/pool/pending.rs
@@ -10,6 +10,7 @@ use std::{
     collections::{BTreeMap, BTreeSet},
     sync::Arc,
 };
+use tokio::sync::broadcast;

 /// A pool of validated and gapless transactions that are ready to be executed on the current state
 /// and are waiting to be included in a block.
@@ -42,6 +43,9 @@ pub(crate) struct PendingPool {
     ///
     /// See also [`PoolTransaction::size`](crate::traits::PoolTransaction::size).
     size_of: SizeTracker,
+    /// Used to broadcast new transactions that have been added to the PendingPool to existing
+    /// snapshots of this pool.
+    new_transaction_notifier: broadcast::Sender>,
 }

 // === impl PendingPool ===

 impl PendingPool {
     /// Create a new pool instance.
 pub(crate) fn new(ordering: T) -> Self {
+        let (new_transaction_notifier, _) = broadcast::channel(200);
         Self {
             ordering,
             submission_id: 0,
@@ -56,6 +61,7 @@ impl PendingPool {
             all: Default::default(),
             independent_transactions: Default::default(),
             size_of: Default::default(),
+            new_transaction_notifier,
         }
     }

@@ -82,6 +88,7 @@ impl PendingPool {
             all: self.by_id.clone(),
             independent: self.independent_transactions.clone(),
             invalid: Default::default(),
+            new_transaction_receiver: self.new_transaction_notifier.subscribe(),
         }
     }

@@ -223,6 +230,11 @@ impl PendingPool {
         }

         self.all.insert(tx.clone());

+        // send the new transaction to any existing PendingPool snapshot iterators
+        if self.new_transaction_notifier.receiver_count() > 0 {
+            let _ = self.new_transaction_notifier.send(tx.clone());
+        }
+
         self.by_id.insert(tx_id, tx);
     }

diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs
index 9bcda6ec40f9..c002d0a79832 100644
--- a/crates/transaction-pool/src/traits.rs
+++ b/crates/transaction-pool/src/traits.rs
@@ -477,6 +477,10 @@ impl ChangedAccount {
 ///
 /// This makes no assumptions about the order of the transactions, but expects that _all_
 /// transactions are valid (no nonce gaps.) for the tracked state of the pool.
+///
+/// Note: this iterator will always return the best transaction that it currently knows.
+/// There is no guarantee transactions will be returned sequentially in decreasing
+/// priority order.
 pub trait BestTransactions: Iterator + Send {
     /// Mark the transaction as invalid.
     ///
diff --git a/crates/transaction-pool/tests/it/main.rs b/crates/transaction-pool/tests/it/main.rs
index 1b91bc6d8c70..409be67792d4 100644
--- a/crates/transaction-pool/tests/it/main.rs
+++ b/crates/transaction-pool/tests/it/main.rs
@@ -2,5 +2,7 @@

 #[cfg(feature = "test-utils")]
 mod listeners;
+#[cfg(feature = "test-utils")]
+mod pending;

 fn main() {}
diff --git a/crates/transaction-pool/tests/it/pending.rs b/crates/transaction-pool/tests/it/pending.rs
new file mode 100644
index 000000000000..99742ea71506
--- /dev/null
+++ b/crates/transaction-pool/tests/it/pending.rs
@@ -0,0 +1,25 @@
+use assert_matches::assert_matches;
+use reth_transaction_pool::{
+    test_utils::{testing_pool, MockTransactionFactory},
+    TransactionOrigin, TransactionPool,
+};
+
+#[tokio::test(flavor = "multi_thread")]
+async fn txpool_new_pending_txs() {
+    let txpool = testing_pool();
+    let mut mock_tx_factory = MockTransactionFactory::default();
+    let transaction = mock_tx_factory.create_eip1559();
+
+    let added_result =
+        txpool.add_transaction(TransactionOrigin::External, transaction.transaction.clone()).await;
+    assert_matches!(added_result, Ok(hash) if hash == transaction.transaction.get_hash());
+
+    let mut best_txns = txpool.best_transactions();
+    assert_matches!(best_txns.next(), Some(tx) if tx.transaction.get_hash() == transaction.transaction.get_hash());
+    assert_matches!(best_txns.next(), None);
+    let transaction = mock_tx_factory.create_eip1559();
+    let added_result =
+        txpool.add_transaction(TransactionOrigin::External, transaction.transaction.clone()).await;
+    assert_matches!(added_result, Ok(hash) if hash == transaction.transaction.get_hash());
+    assert_matches!(best_txns.next(), Some(tx) if tx.transaction.get_hash() == transaction.transaction.get_hash());
+}

From cac4049a0828e869a35a9fe58f67c1b0ff1450d5 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Sat, 5 Aug 2023 16:00:05 +0200
Subject: [PATCH 351/722] chore: relax network blockreader trait bounds (#4079)

---
crates/net/network/src/manager.rs | 4 ++-- crates/net/network/src/state.rs | 5 ++--- crates/net/network/src/swarm.rs | 4 ++-- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index 78ec6d332111..d52fa7824ffd 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -42,7 +42,7 @@ use reth_metrics::common::mpsc::UnboundedMeteredSender; use reth_net_common::bandwidth_meter::BandwidthMeter; use reth_network_api::ReputationChangeKind; use reth_primitives::{listener::EventListeners, ForkId, NodeRecord, PeerId, H256}; -use reth_provider::BlockReader; +use reth_provider::{BlockNumReader, BlockReader}; use reth_rpc_types::{EthProtocolInfo, NetworkStatus}; use std::{ net::SocketAddr, @@ -156,7 +156,7 @@ impl NetworkManager { impl NetworkManager where - C: BlockReader, + C: BlockNumReader, { /// Creates the manager of a new network. /// diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index 446a67962927..bf8ba0a0cd4a 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -12,13 +12,12 @@ use crate::{ peers::{PeerAction, PeersManager}, FetchClient, }; - use reth_eth_wire::{ capability::Capabilities, BlockHashNumber, DisconnectReason, NewBlockHashes, Status, }; use reth_network_api::PeerKind; use reth_primitives::{ForkId, PeerId, H256}; -use reth_provider::BlockReader; +use reth_provider::BlockNumReader; use std::{ collections::{HashMap, VecDeque}, net::{IpAddr, SocketAddr}, @@ -70,7 +69,7 @@ pub struct NetworkState { impl NetworkState where - C: BlockReader, + C: BlockNumReader, { /// Create a new state instance with the given params pub(crate) fn new( diff --git a/crates/net/network/src/swarm.rs b/crates/net/network/src/swarm.rs index f296ebe277bf..b3e7a0363478 100644 --- a/crates/net/network/src/swarm.rs +++ b/crates/net/network/src/swarm.rs @@ -12,7 +12,7 @@ use reth_eth_wire::{ DisconnectReason, EthVersion, Status, }; use reth_primitives::PeerId; -use reth_provider::BlockReader; +use reth_provider::{BlockNumReader, BlockReader}; use std::{ io, net::SocketAddr, @@ -77,7 +77,7 @@ pub(crate) struct Swarm { impl Swarm where - C: BlockReader, + C: BlockNumReader, { /// Configures a new swarm instance. pub(crate) fn new( From c0abfcedff3bee08d61c6d219dc8a365424b0a0f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 5 Aug 2023 16:57:12 +0200 Subject: [PATCH 352/722] chore: rename command to NodeCommand and make fields pub (#4080) --- bin/reth/src/cli/ext.rs | 2 +- bin/reth/src/cli/mod.rs | 2 +- bin/reth/src/node/mod.rs | 66 ++++++++++++++++++++++------------------ 3 files changed, 39 insertions(+), 31 deletions(-) diff --git a/bin/reth/src/cli/ext.rs b/bin/reth/src/cli/ext.rs index bd9f6eb81018..1d6a6a4ad321 100644 --- a/bin/reth/src/cli/ext.rs +++ b/bin/reth/src/cli/ext.rs @@ -96,7 +96,7 @@ pub trait RethNodeCommandExt: fmt::Debug + clap::Args { // TODO move network related functions here } -/// The default configuration for the reth node command [Command](crate::node::Command). +/// The default configuration for the reth node command [Command](crate::node::NodeCommand). 
#[derive(Debug, Clone, Copy, Default, Args)] pub struct DefaultRethNodeCommandConfig; diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index 3939ad145790..50acdf8c0540 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -107,7 +107,7 @@ pub fn run() -> eyre::Result<()> { pub enum Commands { /// Start the node #[command(name = "node")] - Node(node::Command), + Node(node::NodeCommand), /// Initialize the database from a genesis file. #[command(name = "init")] Init(chain::InitCommand), diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 6117d39cf44f..d1a612c91464 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -22,7 +22,6 @@ use eyre::Context; use fdlimit::raise_fd_limit; use futures::{future::Either, pin_mut, stream, stream_select, StreamExt}; use reth_auto_seal_consensus::{AutoSealBuilder, AutoSealConsensus, MiningMode}; - use reth_beacon_consensus::{BeaconConsensus, BeaconConsensusEngine, MIN_BLOCKS_FOR_PIPELINE_RUN}; use reth_blockchain_tree::{ config::BlockchainTreeConfig, externals::TreeExternals, BlockchainTree, ShareableBlockchainTree, @@ -44,7 +43,6 @@ use reth_interfaces::{ }; use reth_network::{error::NetworkError, NetworkConfig, NetworkHandle, NetworkManager}; use reth_network_api::NetworkInfo; - use reth_primitives::{ stage::StageId, BlockHashOrNumber, BlockNumber, ChainSpec, DisplayHardforks, Head, SealedHeader, H256, @@ -82,7 +80,7 @@ pub mod events; /// Start the node #[derive(Debug, Parser)] -pub struct Command { +pub struct NodeCommand { /// The path to the data dir for all reth files and subdirectories. /// /// Defaults to the OS-specific data directory: @@ -91,11 +89,11 @@ pub struct Command { /// - Windows: `{FOLDERID_RoamingAppData}/reth/` /// - macOS: `$HOME/Library/Application Support/reth/` #[arg(long, value_name = "DATA_DIR", verbatim_doc_comment, default_value_t)] - datadir: MaybePlatformPath, + pub datadir: MaybePlatformPath, /// The path to the configuration file to use. #[arg(long, value_name = "FILE", verbatim_doc_comment)] - config: Option, + pub config: Option, /// The chain this node is running. /// @@ -115,44 +113,52 @@ pub struct Command { value_parser = genesis_value_parser, required = false, )] - chain: Arc, + pub chain: Arc, /// Enable Prometheus metrics. /// /// The metrics will be served at the given interface and port. 
#[arg(long, value_name = "SOCKET", value_parser = parse_socket_address, help_heading = "Metrics")] - metrics: Option, + pub metrics: Option, + /// All networking related arguments #[clap(flatten)] - network: NetworkArgs, + pub network: NetworkArgs, + /// All rpc related arguments #[clap(flatten)] - rpc: RpcServerArgs, + pub rpc: RpcServerArgs, + /// All txpool related arguments with --txpool prefix #[clap(flatten)] - txpool: TxPoolArgs, + pub txpool: TxPoolArgs, + /// All payload builder related arguments #[clap(flatten)] - builder: PayloadBuilderArgs, + pub builder: PayloadBuilderArgs, + /// All debug related arguments with --debug prefix #[clap(flatten)] - debug: DebugArgs, + pub debug: DebugArgs, + /// All database related arguments #[clap(flatten)] - db: DatabaseArgs, + pub db: DatabaseArgs, + /// All dev related arguments with --dev prefix #[clap(flatten)] - dev: DevArgs, + pub dev: DevArgs, + /// All pruning related arguments #[clap(flatten)] - pruning: PruningArgs, + pub pruning: PruningArgs, /// Additional cli arguments #[clap(flatten)] pub ext: Ext::Node, } -impl Command { +impl NodeCommand { /// Execute `node` command pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> { info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); @@ -825,28 +831,28 @@ mod tests { #[test] fn parse_help_node_command() { - let err = Command::<()>::try_parse_from(["reth", "--help"]).unwrap_err(); + let err = NodeCommand::<()>::try_parse_from(["reth", "--help"]).unwrap_err(); assert_eq!(err.kind(), clap::error::ErrorKind::DisplayHelp); } #[test] fn parse_common_node_command_chain_args() { for chain in ["mainnet", "sepolia", "goerli"] { - let args: Command = Command::<()>::parse_from(["reth", "--chain", chain]); + let args: NodeCommand = NodeCommand::<()>::parse_from(["reth", "--chain", chain]); assert_eq!(args.chain.chain, chain.parse().unwrap()); } } #[test] fn parse_discovery_port() { - let cmd = Command::<()>::try_parse_from(["reth", "--discovery.port", "300"]).unwrap(); + let cmd = NodeCommand::<()>::try_parse_from(["reth", "--discovery.port", "300"]).unwrap(); assert_eq!(cmd.network.discovery.port, Some(300)); } #[test] fn parse_port() { let cmd = - Command::<()>::try_parse_from(["reth", "--discovery.port", "300", "--port", "99"]) + NodeCommand::<()>::try_parse_from(["reth", "--discovery.port", "300", "--port", "99"]) .unwrap(); assert_eq!(cmd.network.discovery.port, Some(300)); assert_eq!(cmd.network.port, Some(99)); @@ -854,26 +860,27 @@ mod tests { #[test] fn parse_metrics_port() { - let cmd = Command::<()>::try_parse_from(["reth", "--metrics", "9001"]).unwrap(); + let cmd = NodeCommand::<()>::try_parse_from(["reth", "--metrics", "9001"]).unwrap(); assert_eq!(cmd.metrics, Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001))); - let cmd = Command::<()>::try_parse_from(["reth", "--metrics", ":9001"]).unwrap(); + let cmd = NodeCommand::<()>::try_parse_from(["reth", "--metrics", ":9001"]).unwrap(); assert_eq!(cmd.metrics, Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001))); - let cmd = Command::<()>::try_parse_from(["reth", "--metrics", "localhost:9001"]).unwrap(); + let cmd = + NodeCommand::<()>::try_parse_from(["reth", "--metrics", "localhost:9001"]).unwrap(); assert_eq!(cmd.metrics, Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001))); } #[test] fn parse_config_path() { - let cmd = - Command::<()>::try_parse_from(["reth", "--config", "my/path/to/reth.toml"]).unwrap(); + let cmd = NodeCommand::<()>::try_parse_from(["reth", "--config", 
"my/path/to/reth.toml"]) + .unwrap(); // always store reth.toml in the data dir, not the chain specific data dir let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain); let config_path = cmd.config.unwrap_or(data_dir.config_path()); assert_eq!(config_path, Path::new("my/path/to/reth.toml")); - let cmd = Command::<()>::try_parse_from(["reth"]).unwrap(); + let cmd = NodeCommand::<()>::try_parse_from(["reth"]).unwrap(); // always store reth.toml in the data dir, not the chain specific data dir let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain); @@ -883,12 +890,13 @@ mod tests { #[test] fn parse_db_path() { - let cmd = Command::<()>::try_parse_from(["reth"]).unwrap(); + let cmd = NodeCommand::<()>::try_parse_from(["reth"]).unwrap(); let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain); let db_path = data_dir.db_path(); assert!(db_path.ends_with("reth/mainnet/db"), "{:?}", cmd.config); - let cmd = Command::<()>::try_parse_from(["reth", "--datadir", "my/custom/path"]).unwrap(); + let cmd = + NodeCommand::<()>::try_parse_from(["reth", "--datadir", "my/custom/path"]).unwrap(); let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain); let db_path = data_dir.db_path(); assert_eq!(db_path, Path::new("my/custom/path/db")); @@ -896,7 +904,7 @@ mod tests { #[test] fn parse_dev() { - let cmd = Command::<()>::parse_from(["reth", "--dev"]); + let cmd = NodeCommand::<()>::parse_from(["reth", "--dev"]); let chain = DEV.clone(); assert_eq!(cmd.chain.chain, chain.chain); assert_eq!(cmd.chain.genesis_hash, chain.genesis_hash); From 3aff8de235b767daea4701dec8cf1ea889d26a12 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 5 Aug 2023 17:18:15 +0200 Subject: [PATCH 353/722] chore: make ext fns mut (#4081) --- bin/reth/src/args/rpc_server_args.rs | 2 +- bin/reth/src/cli/ext.rs | 12 +++++++++--- bin/reth/src/node/mod.rs | 4 ++-- examples/additional-rpc-namespace-in-cli/src/main.rs | 2 +- 4 files changed, 13 insertions(+), 7 deletions(-) diff --git a/bin/reth/src/args/rpc_server_args.rs b/bin/reth/src/args/rpc_server_args.rs index 842d6b97e635..67bd39cac552 100644 --- a/bin/reth/src/args/rpc_server_args.rs +++ b/bin/reth/src/args/rpc_server_args.rs @@ -232,7 +232,7 @@ impl RpcServerArgs { events: Events, engine_api: Engine, jwt_secret: JwtSecret, - ext: &Ext, + ext: &mut Ext, ) -> eyre::Result<(RpcServerHandle, AuthServerHandle)> where Provider: BlockReaderIdExt diff --git a/bin/reth/src/cli/ext.rs b/bin/reth/src/cli/ext.rs index 1d6a6a4ad321..0eaa6a95dcbe 100644 --- a/bin/reth/src/cli/ext.rs +++ b/bin/reth/src/cli/ext.rs @@ -16,11 +16,17 @@ use reth_transaction_pool::TransactionPool; use std::{fmt, sync::Arc}; /// A trait that allows for extending parts of the CLI with additional functionality. +/// +/// This is intended as a way to allow to _extend_ the node command. For example, to register +/// additional RPC namespaces. pub trait RethCliExt { - /// Provides additional configuration for the node command. + /// Provides additional configuration for the node CLI command. + /// + /// This supports additional CLI arguments that can be used to modify the node configuration. type Node: RethNodeCommandExt; } +/// The default CLI extension. 
impl RethCliExt for () { type Node = DefaultRethNodeCommandConfig; } @@ -32,7 +38,7 @@ pub trait RethNodeCommandExt: fmt::Debug + clap::Args { /// This is expected to call the merge functions of [TransportRpcModules], for example /// [TransportRpcModules::merge_configured] fn extend_rpc_modules( - &self, + &mut self, _config: &Conf, _registry: &mut RethModuleRegistry, _modules: &mut TransportRpcModules<()>, @@ -61,7 +67,7 @@ pub trait RethNodeCommandExt: fmt::Debug + clap::Args { /// By default this spawns a [BasicPayloadJobGenerator] with the default configuration /// [BasicPayloadJobGeneratorConfig]. fn spawn_payload_builder_service( - &self, + &mut self, conf: &Conf, provider: Provider, pool: Pool, diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index d1a612c91464..397fce91f142 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -160,7 +160,7 @@ pub struct NodeCommand { impl NodeCommand { /// Execute `node` command - pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> { + pub async fn execute(mut self, ctx: CliContext) -> eyre::Result<()> { info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); // Raise the fd limit of the process. @@ -453,7 +453,7 @@ impl NodeCommand { blockchain_tree, engine_api, jwt_secret, - &self.ext, + &mut self.ext, ) .await?; diff --git a/examples/additional-rpc-namespace-in-cli/src/main.rs b/examples/additional-rpc-namespace-in-cli/src/main.rs index ea15042164e4..7eeadcb5c044 100644 --- a/examples/additional-rpc-namespace-in-cli/src/main.rs +++ b/examples/additional-rpc-namespace-in-cli/src/main.rs @@ -52,7 +52,7 @@ struct RethCliTxpoolExt { impl RethNodeCommandExt for RethCliTxpoolExt { // This is the entrypoint for the CLI to extend the RPC server with custom rpc namespaces. 
 fn extend_rpc_modules(
-        &self,
+        &mut self,
         _config: &Conf,
         registry: &mut RethModuleRegistry,
         modules: &mut TransportRpcModules<()>,
From 5298868d88ed1e8a919ae27173153656d2763662 Mon Sep 17 00:00:00 2001
From: libevm <95674753+libevm@users.noreply.github.com>
Date: Sat, 5 Aug 2023 17:50:08 +0200
Subject: [PATCH 354/722] RFC: Add rpc method eth_callMany (#4070)

Co-authored-by: Matthias Seitz
---
 crates/rpc/rpc-api/src/eth.rs        | 15 ++++-
 crates/rpc/rpc-types/src/eth/call.rs | 12 ++++
 crates/rpc/rpc-types/src/eth/mod.rs  |  2 +-
 crates/rpc/rpc/src/eth/api/call.rs   | 97 +++++++++++++++++++++++++++-
 crates/rpc/rpc/src/eth/api/server.rs | 16 ++++-
 5 files changed, 135 insertions(+), 7 deletions(-)

diff --git a/crates/rpc/rpc-api/src/eth.rs b/crates/rpc/rpc-api/src/eth.rs
index 6ca403dd7d11..385442f5f3f1 100644
--- a/crates/rpc/rpc-api/src/eth.rs
+++ b/crates/rpc/rpc-api/src/eth.rs
@@ -4,8 +4,9 @@ use reth_primitives::{
     AccessListWithGasUsed, Address, BlockId, BlockNumberOrTag, Bytes, H256, H64, U256, U64,
 };
 use reth_rpc_types::{
-    state::StateOverride, BlockOverrides, CallRequest, EIP1186AccountProofResponse, FeeHistory,
-    Index, RichBlock, SyncStatus, Transaction, TransactionReceipt, TransactionRequest, Work,
+    state::StateOverride, BlockOverrides, Bundle, CallRequest, EIP1186AccountProofResponse,
+    EthCallResponse, FeeHistory, Index, RichBlock, StateContext, SyncStatus, Transaction,
+    TransactionReceipt, TransactionRequest, Work,
 };

 /// Eth rpc interface:
@@ -153,6 +154,16 @@ pub trait EthApi {
         block_overrides: Option>,
     ) -> RpcResult;

+    /// Simulates an arbitrary number of transactions at an arbitrary blockchain index, with
+    /// optional state overrides
+    #[method(name = "callMany")]
+    async fn call_many(
+        &self,
+        bundle: Bundle,
+        state_context: Option,
+        state_override: Option,
+    ) -> RpcResult>;
+
     /// Generates an access list for a transaction.
     ///
     /// This method creates an [EIP2930](https://eips.ethereum.org/EIPS/eip-2930) type accessList based on a given Transaction.
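
Before the type and implementation changes below, it is worth pinning down the execution semantics `eth_callMany` promises: the calls in the bundle run sequentially, each on top of the state changes committed by its predecessors, and a failing call produces a per-call error rather than aborting the whole bundle. The following is a toy, self-contained model of that rule; every type in it is a hypothetical stand-in, not reth's.

use std::collections::HashMap;

/// Toy state: account key -> balance-like counter.
type State = HashMap<&'static str, u64>;

/// Toy call: add `amount` to `key`, failing on overflow.
struct Call {
    key: &'static str,
    amount: u64,
}

fn execute(state: &State, call: &Call) -> Result<u64, String> {
    let current = state.get(call.key).copied().unwrap_or_default();
    current.checked_add(call.amount).ok_or_else(|| "overflow".to_string())
}

fn call_many(mut state: State, bundle: Vec<Call>) -> Vec<Result<u64, String>> {
    let mut results = Vec::with_capacity(bundle.len());
    for call in &bundle {
        match execute(&state, call) {
            Ok(new_value) => {
                // Commit this call's effect so the next call in the bundle sees it.
                state.insert(call.key, new_value);
                results.push(Ok(new_value));
            }
            // A failing call is reported in place; the rest of the bundle still runs.
            Err(err) => results.push(Err(err)),
        }
    }
    results
}

fn main() {
    let results = call_many(
        State::new(),
        vec![
            Call { key: "a", amount: 1 },
            Call { key: "a", amount: u64::MAX }, // overflows on top of the committed 1
            Call { key: "a", amount: 2 },        // still runs, and sees the committed 1
        ],
    );
    assert_eq!(results, vec![Ok(1), Err("overflow".to_string()), Ok(3)]);
}
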
diff --git a/crates/rpc/rpc-types/src/eth/call.rs b/crates/rpc/rpc-types/src/eth/call.rs
index 231eb3473c83..19db85b77e75 100644
--- a/crates/rpc/rpc-types/src/eth/call.rs
+++ b/crates/rpc/rpc-types/src/eth/call.rs
@@ -23,6 +23,18 @@ pub struct StateContext {
     pub transaction_index: Option,
 }

+/// CallResponse for eth_callMany
+#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize)]
+#[serde(default, rename_all = "camelCase")]
+pub struct EthCallResponse {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    /// eth_call output (if no error)
+    pub output: Option,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    /// eth_call output (if error)
+    pub error: Option,
+}
+
 /// Represents a transaction index where -1 means all transactions
 #[derive(Debug, Copy, Clone, Eq, PartialEq, Default)]
 pub enum TransactionIndex {
diff --git a/crates/rpc/rpc-types/src/eth/mod.rs b/crates/rpc/rpc-types/src/eth/mod.rs
index 528e4cffc769..ae249e197280 100644
--- a/crates/rpc/rpc-types/src/eth/mod.rs
+++ b/crates/rpc/rpc-types/src/eth/mod.rs
@@ -19,7 +19,7 @@ mod work;

 pub use account::*;
 pub use block::*;
-pub use call::{Bundle, CallInput, CallInputError, CallRequest, StateContext};
+pub use call::{Bundle, CallInput, CallInputError, CallRequest, EthCallResponse, StateContext};
 pub use fee::{FeeHistory, TxGasAndReward};
 pub use filter::*;
 pub use index::Index;
diff --git a/crates/rpc/rpc/src/eth/api/call.rs b/crates/rpc/rpc/src/eth/api/call.rs
index 29cbdcfc4460..d7c0a065325c 100644
--- a/crates/rpc/rpc/src/eth/api/call.rs
+++ b/crates/rpc/rpc/src/eth/api/call.rs
@@ -5,7 +5,7 @@ use crate::{
     error::{ensure_success, EthApiError, EthResult, RevertError, RpcInvalidTransactionError},
     revm_utils::{
         build_call_evm_env, caller_gas_allowance, cap_tx_gas_limit_with_caller_allowance,
-        get_precompiles, inspect, transact, EvmOverrides,
+        get_precompiles, inspect, prepare_call_env, transact, EvmOverrides,
     },
     EthTransactions,
 },
@@ -18,12 +18,16 @@ use reth_provider::{BlockReaderIdExt, EvmEnvProvider, StateProvider, StateProvid
 use reth_revm::{
     access_list::AccessListInspector,
     database::{State, SubState},
+    env::tx_env_with_recovered,
+};
+use reth_rpc_types::{
+    state::StateOverride, BlockError, Bundle, CallRequest, EthCallResponse, StateContext,
 };
-use reth_rpc_types::CallRequest;
 use reth_transaction_pool::TransactionPool;
 use revm::{
     db::{CacheDB, DatabaseRef},
     primitives::{BlockEnv, CfgEnv, Env, ExecutionResult, Halt, TransactTo},
+    DatabaseCommit,
 };
 use tracing::trace;

@@ -62,6 +66,95 @@ where
         ensure_success(res.result)
     }

+    /// Simulates an arbitrary number of transactions at an arbitrary blockchain index, with
+    /// optional state overrides
+    pub async fn call_many(
+        &self,
+        bundle: Bundle,
+        state_context: Option,
+        state_override: Option,
+    ) -> EthResult> {
+        let Bundle { transactions, block_override } = bundle;
+        if transactions.is_empty() {
+            return Err(EthApiError::InvalidParams(String::from("transactions are empty.")))
+        }
+
+        let StateContext { transaction_index, block_number } = state_context.unwrap_or_default();
+        let transaction_index = transaction_index.unwrap_or_default();
+
+        let target_block = block_number.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest));
+        let ((cfg, block_env, _), block) =
+            futures::try_join!(self.evm_env_at(target_block), self.block_by_id(target_block))?;
+
+        let block = block.ok_or_else(|| EthApiError::UnknownBlockNumber)?;
+        let gas_limit = self.inner.gas_cap;
+
+        // we're essentially replaying the transactions in the block here, hence we need 
the state + // that points to the beginning of the block, which is the state at the parent block + let mut at = block.parent_hash; + let mut replay_block_txs = true; + + // but if all transactions are to be replayed, we can use the state at the block itself + let num_txs = transaction_index.index().unwrap_or(block.body.len()); + if num_txs == block.body.len() { + at = block.hash; + replay_block_txs = false; + } + + self.spawn_with_state_at_block(at.into(), move |state| { + let mut results = Vec::with_capacity(transactions.len()); + let mut db = SubState::new(State::new(state)); + + if replay_block_txs { + // only need to replay the transactions in the block if not all transactions are + // to be replayed + let transactions = block.body.into_iter().take(num_txs); + + // Execute all transactions until index + for tx in transactions { + let tx = tx.into_ecrecovered().ok_or(BlockError::InvalidSignature)?; + let tx = tx_env_with_recovered(&tx); + let env = Env { cfg: cfg.clone(), block: block_env.clone(), tx }; + let (res, _) = transact(&mut db, env)?; + db.commit(res.state); + } + } + + let overrides = EvmOverrides::new(state_override.clone(), block_override.map(Box::new)); + + let mut transactions = transactions.into_iter().peekable(); + while let Some(tx) = transactions.next() { + let env = prepare_call_env( + cfg.clone(), + block_env.clone(), + tx, + gas_limit, + &mut db, + overrides.clone(), + )?; + let (res, _) = transact(&mut db, env)?; + + match ensure_success(res.result) { + Ok(output) => { + results.push(EthCallResponse { output: Some(output), error: None }); + } + Err(err) => { + results + .push(EthCallResponse { output: None, error: Some(err.to_string()) }); + } + } + + if transactions.peek().is_some() { + // need to apply the state changes of this call before executing the next call + db.commit(res.state); + } + } + + Ok(results) + }) + .await + } + /// Estimates the gas usage of the `request` with the state. /// /// This will execute the [CallRequest] and find the best gas limit via binary search diff --git a/crates/rpc/rpc/src/eth/api/server.rs b/crates/rpc/rpc/src/eth/api/server.rs index 663308cd895c..32c985cf9831 100644 --- a/crates/rpc/rpc/src/eth/api/server.rs +++ b/crates/rpc/rpc/src/eth/api/server.rs @@ -21,8 +21,9 @@ use reth_provider::{ }; use reth_rpc_api::EthApiServer; use reth_rpc_types::{ - state::StateOverride, BlockOverrides, CallRequest, EIP1186AccountProofResponse, FeeHistory, - Index, RichBlock, SyncStatus, TransactionReceipt, TransactionRequest, Work, + state::StateOverride, BlockOverrides, Bundle, CallRequest, EIP1186AccountProofResponse, + EthCallResponse, FeeHistory, Index, RichBlock, StateContext, SyncStatus, TransactionReceipt, + TransactionRequest, Work, }; use reth_transaction_pool::TransactionPool; use serde_json::Value; @@ -245,6 +246,17 @@ where .await?) } + /// Handler for: `eth_callMany` + async fn call_many( + &self, + bundle: Bundle, + state_context: Option, + state_override: Option, + ) -> Result> { + trace!(target: "rpc::eth", ?bundle, ?state_context, ?state_override, "Serving eth_callMany"); + Ok(EthApi::call_many(self, bundle, state_context, state_override).await?) 
+ } + /// Handler for: `eth_createAccessList` async fn create_access_list( &self, From a519641e7db569fe307f31612c9bf8f97a25a783 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 5 Aug 2023 18:34:05 +0200 Subject: [PATCH 355/722] chore: use remaining (#4082) --- crates/revm/revm-inspectors/src/tracing/builder/parity.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs index cf2a50dec205..09eba3520930 100644 --- a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs +++ b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs @@ -352,7 +352,7 @@ impl ParityTraceBuilder { }; let maybe_execution = Some(VmExecutedOperation { - used: step.gas_cost, + used: step.gas_remaining, push: step.new_stack.into_iter().map(|new_stack| new_stack.into()).collect(), mem: maybe_memory, store: maybe_storage, From e3457b8866de12c4c1c2cf23c829787d8c7c2fc0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 5 Aug 2023 18:59:36 +0200 Subject: [PATCH 356/722] chore: add missing op and idx fields (#4076) --- .../revm/revm-inspectors/src/tracing/builder/parity.rs | 9 ++++++++- crates/rpc/rpc-types/src/eth/trace/parity.rs | 10 ++++++++-- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs index 09eba3520930..b70173a8dbcb 100644 --- a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs +++ b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs @@ -365,7 +365,14 @@ impl ParityTraceBuilder { }) .unwrap_or_default(); - VmInstruction { pc: step.pc, cost: cost as u64, ex: maybe_execution, sub: maybe_sub } + VmInstruction { + pc: step.pc, + cost: cost as u64, + ex: maybe_execution, + sub: maybe_sub, + op: Some(step.op.to_string()), + idx: None, + } } } diff --git a/crates/rpc/rpc-types/src/eth/trace/parity.rs b/crates/rpc/rpc-types/src/eth/trace/parity.rs index f99f97de6744..60de0b5d15ee 100644 --- a/crates/rpc/rpc-types/src/eth/trace/parity.rs +++ b/crates/rpc/rpc-types/src/eth/trace/parity.rs @@ -291,14 +291,20 @@ pub struct VmTrace { #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct VmInstruction { - /// The program counter. - pub pc: usize, /// The gas cost for this instruction. pub cost: u64, /// Information concerning the execution of the operation. pub ex: Option, + /// The program counter. + pub pc: usize, /// Subordinate trace of the CALL/CREATE if applicable. + #[serde(skip_serializing_if = "Option::is_none")] pub sub: Option, + /// Stringified opcode. + #[serde(skip_serializing_if = "Option::is_none")] + pub op: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub idx: Option, } /// A record of an executed VM operation. 
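
A side effect of the `rpc-types` change above worth spelling out: the new `Option`-typed `sub`, `op`, and `idx` fields are annotated with `skip_serializing_if = "Option::is_none"`, so existing consumers of `vmTrace` responses never see the new keys unless they are populated. A small self-contained check of that behavior follows; the struct is a pared-down stand-in for `VmInstruction`, and the only dependencies assumed are `serde` (with the `derive` feature) and `serde_json`.

use serde::Serialize;

// Pared-down stand-in for `VmInstruction`; only the serialization shape matters here.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct Instruction {
    cost: u64,
    pc: usize,
    #[serde(skip_serializing_if = "Option::is_none")]
    op: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    idx: Option<String>,
}

fn main() {
    let ins = Instruction { cost: 3, pc: 0, op: Some("PUSH1".to_string()), idx: None };
    // `idx` is omitted entirely rather than serialized as `null`.
    assert_eq!(serde_json::to_string(&ins).unwrap(), r#"{"cost":3,"pc":0,"op":"PUSH1"}"#);
}
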
From d8b9660e82d2a999f95ea3b91952b957d4922580 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Sun, 6 Aug 2023 10:27:38 +0300 Subject: [PATCH 357/722] perf(trie): use available pre-loaded prefixsets (#4062) --- .../src/providers/database/provider.rs | 171 +++++++++++++----- crates/storage/provider/src/traits/hashing.rs | 36 +++- 2 files changed, 154 insertions(+), 53 deletions(-) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 7a82cac2a371..0acc9d6872d5 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -26,6 +26,7 @@ use reth_interfaces::Result; use reth_primitives::{ keccak256, stage::{StageCheckpoint, StageId}, + trie::Nibbles, Account, Address, Block, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, ChainInfo, ChainSpec, Hardfork, Head, Header, PruneCheckpoint, PrunePart, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionMeta, TransactionSigned, @@ -37,9 +38,9 @@ use reth_revm_primitives::{ env::{fill_block_env, fill_cfg_and_block_env, fill_cfg_env}, primitives::{BlockEnv, CfgEnv, SpecId}, }; -use reth_trie::StateRoot; +use reth_trie::{prefix_set::PrefixSetMut, StateRoot}; use std::{ - collections::{btree_map::Entry, BTreeMap, BTreeSet}, + collections::{btree_map::Entry, BTreeMap, BTreeSet, HashMap}, fmt::Debug, ops::{Deref, DerefMut, Range, RangeBounds, RangeInclusive}, sync::Arc, @@ -1407,25 +1408,47 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> HashingWriter for DatabaseProvider end_block_hash: H256, expected_state_root: H256, ) -> Result<()> { + // Initialize prefix sets. + let mut account_prefix_set = PrefixSetMut::default(); + let mut storage_prefix_set: HashMap = HashMap::default(); + // storage hashing stage { let lists = self.changed_storages_with_range(range.clone())?; let storages = self.plainstate_storages(lists)?; - self.insert_storage_for_hashing(storages)?; + let storage_entries = self.insert_storage_for_hashing(storages)?; + for (hashed_address, hashed_slots) in storage_entries { + account_prefix_set.insert(Nibbles::unpack(hashed_address)); + for slot in hashed_slots { + storage_prefix_set + .entry(hashed_address) + .or_default() + .insert(Nibbles::unpack(slot)); + } + } } // account hashing stage { let lists = self.changed_accounts_with_range(range.clone())?; let accounts = self.basic_accounts(lists)?; - self.insert_account_for_hashing(accounts)?; + let hashed_addresses = self.insert_account_for_hashing(accounts)?; + for hashed_address in hashed_addresses { + account_prefix_set.insert(Nibbles::unpack(hashed_address)); + } } // merkle tree { - let (state_root, trie_updates) = - StateRoot::incremental_root_with_updates(&self.tx, range.clone()) - .map_err(Into::::into)?; + // This is the same as `StateRoot::incremental_root_with_updates`, only the prefix sets + // are pre-loaded. 
+ let (state_root, trie_updates) = StateRoot::new(&self.tx) + .with_changed_account_prefixes(account_prefix_set.freeze()) + .with_changed_storage_prefixes( + storage_prefix_set.into_iter().map(|(k, v)| (k, v.freeze())).collect(), + ) + .root_with_updates() + .map_err(Into::::into)?; if state_root != expected_state_root { return Err(ProviderError::StateRootMismatch { got: state_root, @@ -1440,11 +1463,15 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> HashingWriter for DatabaseProvider Ok(()) } - fn unwind_storage_hashing(&self, range: Range) -> Result<()> { + fn unwind_storage_hashing( + &self, + range: Range, + ) -> Result>> { let mut hashed_storage = self.tx.cursor_dup_write::()?; // Aggregate all block changesets and make list of accounts that have been changed. - self.tx + let hashed_storages = self + .tx .cursor_read::()? .walk_range(range)? .collect::, _>>()? @@ -1463,7 +1490,14 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> HashingWriter for DatabaseProvider // hash addresses and collect it inside sorted BTreeMap. // We are doing keccak only once per address. .map(|((address, key), value)| ((keccak256(address), keccak256(key)), value)) - .collect::>() + .collect::>(); + + let mut hashed_storage_keys: HashMap> = HashMap::default(); + for (hashed_address, hashed_slot) in hashed_storages.keys() { + hashed_storage_keys.entry(*hashed_address).or_default().insert(*hashed_slot); + } + + hashed_storages .into_iter() // Apply values to HashedStorage (if Value is zero just remove it); .try_for_each(|((hashed_address, key), value)| -> Result<()> { @@ -1481,50 +1515,58 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> HashingWriter for DatabaseProvider Ok(()) })?; - Ok(()) + Ok(hashed_storage_keys) } fn insert_storage_for_hashing( &self, storages: impl IntoIterator)>, - ) -> Result<()> { + ) -> Result>> { // hash values - let hashed = storages.into_iter().fold(BTreeMap::new(), |mut map, (address, storage)| { - let storage = storage.into_iter().fold(BTreeMap::new(), |mut map, entry| { - map.insert(keccak256(entry.key), entry.value); + let hashed_storages = + storages.into_iter().fold(BTreeMap::new(), |mut map, (address, storage)| { + let storage = storage.into_iter().fold(BTreeMap::new(), |mut map, entry| { + map.insert(keccak256(entry.key), entry.value); + map + }); + map.insert(keccak256(address), storage); map }); - map.insert(keccak256(address), storage); - map - }); - let mut hashed_storage = self.tx.cursor_dup_write::()?; + let hashed_storage_keys = + HashMap::from_iter(hashed_storages.iter().map(|(hashed_address, entries)| { + (*hashed_address, BTreeSet::from_iter(entries.keys().copied())) + })); + + let mut hashed_storage_cursor = self.tx.cursor_dup_write::()?; // Hash the address and key and apply them to HashedStorage (if Storage is None // just remove it); - hashed.into_iter().try_for_each(|(hashed_address, storage)| { + hashed_storages.into_iter().try_for_each(|(hashed_address, storage)| { storage.into_iter().try_for_each(|(key, value)| -> Result<()> { - if hashed_storage + if hashed_storage_cursor .seek_by_key_subkey(hashed_address, key)? 
.filter(|entry| entry.key == key) .is_some() { - hashed_storage.delete_current()?; + hashed_storage_cursor.delete_current()?; } if value != U256::ZERO { - hashed_storage.upsert(hashed_address, StorageEntry { key, value })?; + hashed_storage_cursor.upsert(hashed_address, StorageEntry { key, value })?; } Ok(()) }) })?; - Ok(()) + + Ok(hashed_storage_keys) } - fn unwind_account_hashing(&self, range: RangeInclusive) -> Result<()> { - let mut hashed_accounts = self.tx.cursor_write::()?; + fn unwind_account_hashing(&self, range: RangeInclusive) -> Result> { + let mut hashed_accounts_cursor = self.tx.cursor_write::()?; // Aggregate all block changesets and make a list of accounts that have been changed. - self.tx + let hashed_accounts = self + .tx .cursor_read::()? .walk_range(range)? .collect::, _>>()? @@ -1542,28 +1584,32 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> HashingWriter for DatabaseProvider // hash addresses and collect it inside sorted BTreeMap. // We are doing keccak only once per address. .map(|(address, account)| (keccak256(address), account)) - .collect::>() + .collect::>(); + + let hashed_account_keys = BTreeSet::from_iter(hashed_accounts.keys().copied()); + + hashed_accounts .into_iter() // Apply values to HashedState (if Account is None remove it); .try_for_each(|(hashed_address, account)| -> Result<()> { if let Some(account) = account { - hashed_accounts.upsert(hashed_address, account)?; - } else if hashed_accounts.seek_exact(hashed_address)?.is_some() { - hashed_accounts.delete_current()?; + hashed_accounts_cursor.upsert(hashed_address, account)?; + } else if hashed_accounts_cursor.seek_exact(hashed_address)?.is_some() { + hashed_accounts_cursor.delete_current()?; } Ok(()) })?; - Ok(()) + Ok(hashed_account_keys) } fn insert_account_for_hashing( &self, accounts: impl IntoIterator)>, - ) -> Result<()> { - let mut hashed_accounts = self.tx.cursor_write::()?; + ) -> Result> { + let mut hashed_accounts_cursor = self.tx.cursor_write::()?; - let hashes_accounts = accounts.into_iter().fold( + let hashed_accounts = accounts.into_iter().fold( BTreeMap::new(), |mut map: BTreeMap>, (address, account)| { map.insert(keccak256(address), account); @@ -1571,15 +1617,18 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> HashingWriter for DatabaseProvider }, ); - hashes_accounts.into_iter().try_for_each(|(hashed_address, account)| -> Result<()> { + let hashed_addresses = BTreeSet::from_iter(hashed_accounts.keys().copied()); + + hashed_accounts.into_iter().try_for_each(|(hashed_address, account)| -> Result<()> { if let Some(account) = account { - hashed_accounts.upsert(hashed_address, account)? - } else if hashed_accounts.seek_exact(hashed_address)?.is_some() { - hashed_accounts.delete_current()?; + hashed_accounts_cursor.upsert(hashed_address, account)? + } else if hashed_accounts_cursor.seek_exact(hashed_address)?.is_some() { + hashed_accounts_cursor.delete_current()?; } Ok(()) })?; - Ok(()) + + Ok(hashed_addresses) } } @@ -1718,15 +1767,45 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> BlockExecutionWriter for DatabaseP if TAKE { let storage_range = BlockNumberAddress::range(range.clone()); - self.unwind_account_hashing(range.clone())?; + // Initialize prefix sets. + let mut account_prefix_set = PrefixSetMut::default(); + let mut storage_prefix_set: HashMap = HashMap::default(); + + // Unwind account hashes. Add changed accounts to account prefix set. 
+ let hashed_addresses = self.unwind_account_hashing(range.clone())?; + for hashed_address in hashed_addresses { + account_prefix_set.insert(Nibbles::unpack(hashed_address)); + } + + // Unwind account history indices. self.unwind_account_history_indices(range.clone())?; - self.unwind_storage_hashing(storage_range.clone())?; + + // Unwind storage hashes. Add changed account and storage keys to corresponding prefix + // sets. + let storage_entries = self.unwind_storage_hashing(storage_range.clone())?; + for (hashed_address, hashed_slots) in storage_entries { + account_prefix_set.insert(Nibbles::unpack(hashed_address)); + for slot in hashed_slots { + storage_prefix_set + .entry(hashed_address) + .or_default() + .insert(Nibbles::unpack(slot)); + } + } + + // Unwind storage history indices. self.unwind_storage_history_indices(storage_range)?; - // merkle tree - let (new_state_root, trie_updates) = - StateRoot::incremental_root_with_updates(&self.tx, range.clone()) - .map_err(Into::::into)?; + // Calculate the reverted merkle root. + // This is the same as `StateRoot::incremental_root_with_updates`, only the prefix sets + // are pre-loaded. + let (new_state_root, trie_updates) = StateRoot::new(&self.tx) + .with_changed_account_prefixes(account_prefix_set.freeze()) + .with_changed_storage_prefixes( + storage_prefix_set.into_iter().map(|(k, v)| (k, v.freeze())).collect(), + ) + .root_with_updates() + .map_err(Into::::into)?; let parent_number = range.start().saturating_sub(1); let parent_state_root = self diff --git a/crates/storage/provider/src/traits/hashing.rs b/crates/storage/provider/src/traits/hashing.rs index dc327ca09cca..5d882553b310 100644 --- a/crates/storage/provider/src/traits/hashing.rs +++ b/crates/storage/provider/src/traits/hashing.rs @@ -2,28 +2,50 @@ use auto_impl::auto_impl; use reth_db::models::BlockNumberAddress; use reth_interfaces::Result; use reth_primitives::{Account, Address, BlockNumber, StorageEntry, H256}; -use std::ops::{Range, RangeInclusive}; +use std::{ + collections::{BTreeSet, HashMap}, + ops::{Range, RangeInclusive}, +}; /// Hashing Writer #[auto_impl(&, Arc, Box)] pub trait HashingWriter: Send + Sync { - /// Unwind and clear account hashing - fn unwind_account_hashing(&self, range: RangeInclusive) -> Result<()>; + /// Unwind and clear account hashing. + /// + /// # Returns + /// + /// Set of hashed keys of updated accounts. + fn unwind_account_hashing(&self, range: RangeInclusive) -> Result>; /// Inserts all accounts into [reth_db::tables::AccountHistory] table. + /// + /// # Returns + /// + /// Set of hashed keys of updated accounts. fn insert_account_for_hashing( &self, accounts: impl IntoIterator)>, - ) -> Result<()>; + ) -> Result>; /// Unwind and clear storage hashing - fn unwind_storage_hashing(&self, range: Range) -> Result<()>; + /// + /// # Returns + /// + /// Mapping of hashed keys of updated accounts to their respective updated hashed slots. + fn unwind_storage_hashing( + &self, + range: Range, + ) -> Result>>; - /// iterate over storages and insert them to hashing table + /// Iterates over storages and inserts them to hashing table. + /// + /// # Returns + /// + /// Mapping of hashed keys of updated accounts to their respective updated hashed slots. fn insert_storage_for_hashing( &self, storages: impl IntoIterator)>, - ) -> Result<()>; + ) -> Result>>; /// Calculate the hashes of all changed accounts and storages, and finally calculate the state /// root. 
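
Condensing the provider changes above into one place may help: the hashing writers now return the hashed keys they touched, and the caller feeds those keys into the trie computation as pre-built prefix sets, so `StateRoot` no longer has to re-walk the changesets to discover what changed. The fragment below is assembled from the hunks in this patch; parameterization, storage prefixes, and error handling are elided, so treat it as an illustration rather than a compilable excerpt.

// `provider` is the database provider and `tx` its transaction, as in the hunks above.
let mut account_prefix_set = PrefixSetMut::default();

// The write path already knows exactly which hashed accounts it updated ...
let hashed_addresses = provider.insert_account_for_hashing(accounts)?;
for hashed_address in hashed_addresses {
    account_prefix_set.insert(Nibbles::unpack(hashed_address));
}

// ... so the root computation can walk only the touched subtries, instead of
// re-deriving the set from changesets as `incremental_root_with_updates` would.
let (state_root, trie_updates) = StateRoot::new(&tx)
    .with_changed_account_prefixes(account_prefix_set.freeze())
    .root_with_updates()?;
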
From c423514321c2d2e67a156681d5602cb1707852e3 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 6 Aug 2023 10:09:49 +0000 Subject: [PATCH 358/722] chore(deps): weekly `cargo update` (#4086) Co-authored-by: github-merge-queue Co-authored-by: Matthias Seitz --- Cargo.lock | 411 +++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 321 insertions(+), 90 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 72c0bd3f7dea..d19b7c5a5536 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -141,6 +141,23 @@ dependencies = [ "alloc-no-stdlib", ] +[[package]] +name = "allocator-api2" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" + +[[package]] +name = "alloy-rlp" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f938f00332d63a5b0ac687bd6f46d03884638948921d9f8b50c59563d421ae25" +dependencies = [ + "arrayvec", + "bytes", + "smol_str", +] + [[package]] name = "android-tzdata" version = "0.1.1" @@ -240,6 +257,130 @@ dependencies = [ "derive_arbitrary", ] +[[package]] +name = "ark-ff" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" +dependencies = [ + "ark-ff-asm 0.3.0", + "ark-ff-macros 0.3.0", + "ark-serialize 0.3.0", + "ark-std 0.3.0", + "derivative", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.3.3", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm 0.4.2", + "ark-ff-macros 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "digest 0.10.7", + "itertools 0.10.5", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.4.0", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" +dependencies = [ + "quote 1.0.32", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote 1.0.32", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" +dependencies = [ + "num-bigint", + "num-traits", + "quote 1.0.32", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2 1.0.66", + "quote 1.0.32", + "syn 1.0.109", +] + +[[package]] +name = "ark-serialize" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" +dependencies = [ + "ark-std 0.3.0", + "digest 0.9.0", +] + +[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-std 0.4.0", + "digest 0.10.7", + "num-bigint", +] + +[[package]] +name = "ark-std" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "ark-std" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + [[package]] name = "array-init" version = "0.0.4" @@ -294,7 +435,7 @@ checksum = "cc6dde6e4ed435a4c1ee4e73592f5ba9da2151af10076cc04858746af9352d09" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", - "syn 2.0.27", + "syn 2.0.28", ] [[package]] @@ -305,7 +446,7 @@ checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" dependencies = [ "futures", "pharos", - "rustc_version", + "rustc_version 0.4.0", ] [[package]] @@ -478,7 +619,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.27", + "syn 2.0.28", ] [[package]] @@ -561,7 +702,7 @@ dependencies = [ [[package]] name = "boa_ast" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#d459ff1b444ac119fe58e46c4e32e874a8135ce3" +source = "git+https://github.com/boa-dev/boa#c2df31b781c115d6bdb5de64979d247af329ffcd" dependencies = [ "bitflags 2.3.3", "boa_interner", @@ -574,7 +715,7 @@ dependencies = [ [[package]] name = "boa_engine" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#d459ff1b444ac119fe58e46c4e32e874a8135ce3" +source = "git+https://github.com/boa-dev/boa#c2df31b781c115d6bdb5de64979d247af329ffcd" dependencies = [ "bitflags 2.3.3", "boa_ast", @@ -612,17 +753,18 @@ dependencies = [ [[package]] name = "boa_gc" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#d459ff1b444ac119fe58e46c4e32e874a8135ce3" +source = "git+https://github.com/boa-dev/boa#c2df31b781c115d6bdb5de64979d247af329ffcd" dependencies = [ "boa_macros", "boa_profiler", + "hashbrown 0.14.0", "thin-vec", ] [[package]] name = "boa_icu_provider" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#d459ff1b444ac119fe58e46c4e32e874a8135ce3" +source = "git+https://github.com/boa-dev/boa#c2df31b781c115d6bdb5de64979d247af329ffcd" dependencies = [ "icu_collections", "icu_normalizer", @@ -635,7 +777,7 @@ dependencies = [ [[package]] name = "boa_interner" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#d459ff1b444ac119fe58e46c4e32e874a8135ce3" +source = "git+https://github.com/boa-dev/boa#c2df31b781c115d6bdb5de64979d247af329ffcd" dependencies = [ "boa_gc", "boa_macros", @@ -650,18 +792,18 @@ dependencies = [ [[package]] name = "boa_macros" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#d459ff1b444ac119fe58e46c4e32e874a8135ce3" +source = "git+https://github.com/boa-dev/boa#c2df31b781c115d6bdb5de64979d247af329ffcd" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", - "syn 2.0.27", + "syn 2.0.28", "synstructure 0.13.0", ] [[package]] name = "boa_parser" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#d459ff1b444ac119fe58e46c4e32e874a8135ce3" +source = "git+https://github.com/boa-dev/boa#c2df31b781c115d6bdb5de64979d247af329ffcd" dependencies = [ "bitflags 2.3.3", "boa_ast", @@ -681,7 +823,7 @@ dependencies = [ [[package]] name = "boa_profiler" version = "0.17.0" -source = 
"git+https://github.com/boa-dev/boa#d459ff1b444ac119fe58e46c4e32e874a8135ce3" +source = "git+https://github.com/boa-dev/boa#c2df31b781c115d6bdb5de64979d247af329ffcd" [[package]] name = "brotli" @@ -793,7 +935,7 @@ checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a" dependencies = [ "camino", "cargo-platform", - "semver", + "semver 1.0.18", "serde", "serde_json", "thiserror", @@ -813,11 +955,12 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.79" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" +checksum = "6c6b2562119bf28c3439f7f02db99faf0aa1a8cdfe5772a2ee155d32227239f0" dependencies = [ "jobserver", + "libc", ] [[package]] @@ -937,7 +1080,7 @@ dependencies = [ "heck", "proc-macro2 1.0.66", "quote 1.0.32", - "syn 2.0.27", + "syn 2.0.28", ] [[package]] @@ -961,7 +1104,7 @@ dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", "serde", - "syn 2.0.27", + "syn 2.0.28", ] [[package]] @@ -1332,7 +1475,7 @@ dependencies = [ "digest 0.10.7", "fiat-crypto", "platforms", - "rustc_version", + "rustc_version 0.4.0", "subtle", "zeroize", ] @@ -1345,7 +1488,7 @@ checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", - "syn 2.0.27", + "syn 2.0.28", ] [[package]] @@ -1393,7 +1536,7 @@ dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", "strsim 0.10.0", - "syn 2.0.27", + "syn 2.0.28", ] [[package]] @@ -1415,7 +1558,7 @@ checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ "darling_core 0.20.3", "quote 1.0.32", - "syn 2.0.27", + "syn 2.0.28", ] [[package]] @@ -1466,6 +1609,26 @@ dependencies = [ "zeroize", ] +[[package]] +name = "deranged" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7684a49fb1af197853ef7b2ee694bc1f5b4179556f1e5710e1760c5db6f5e929" +dependencies = [ + "serde", +] + +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2 1.0.66", + "quote 1.0.32", + "syn 1.0.109", +] + [[package]] name = "derive_arbitrary" version = "1.3.1" @@ -1474,7 +1637,7 @@ checksum = "53e0efad4403bfc52dc201159c4b842a246a14b98c64b55dfd0f2d89729dfeb8" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", - "syn 2.0.27", + "syn 2.0.28", ] [[package]] @@ -1511,7 +1674,7 @@ dependencies = [ "convert_case 0.4.0", "proc-macro2 1.0.66", "quote 1.0.32", - "rustc_version", + "rustc_version 0.4.0", "syn 1.0.109", ] @@ -1648,7 +1811,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", - "syn 2.0.27", + "syn 2.0.28", ] [[package]] @@ -1868,7 +2031,7 @@ dependencies = [ "num-traits", "proc-macro2 1.0.66", "quote 1.0.32", - "syn 2.0.27", + "syn 2.0.28", ] [[package]] @@ -1879,7 +2042,7 @@ checksum = "b893c4eb2dc092c811165f84dc7447fae16fb66521717968c34c509b39b1a5c5" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", - "syn 2.0.27", + "syn 2.0.28", ] [[package]] @@ -1901,9 +2064,9 @@ dependencies = [ [[package]] name = "errno" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" +checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f" dependencies = [ "errno-dragonfly", "libc", @@ -2027,7 +2190,7 @@ dependencies = [ "regex", "serde", "serde_json", - "syn 2.0.27", + "syn 2.0.28", "toml 0.7.6", "walkdir", ] @@ -2045,7 +2208,7 @@ dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", "serde_json", - "syn 2.0.27", + "syn 2.0.28", ] [[package]] @@ -2071,7 +2234,7 @@ dependencies = [ "serde", "serde_json", "strum 0.25.0", - "syn 2.0.27", + "syn 2.0.28", "tempfile", "thiserror", "tiny-keccak", @@ -2086,7 +2249,7 @@ checksum = "22b3a8269d3df0ed6364bc05b4735b95f4bf830ce3aef87d5e760fb0e93e5b91" dependencies = [ "ethers-core", "reqwest", - "semver", + "semver 1.0.18", "serde", "serde_json", "thiserror", @@ -2241,6 +2404,17 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" +[[package]] +name = "fastrlp" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + [[package]] name = "fdlimit" version = "0.2.1" @@ -2402,7 +2576,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", - "syn 2.0.27", + "syn 2.0.28", ] [[package]] @@ -2513,9 +2687,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "globset" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aca8bbd8e0707c1887a8bbb7e6b40e228f251ff5d62c8220a4a7a53c73aff006" +checksum = "759c97c1e17c55525b57192c06a267cda0ac5210b222d6b82189a2338fa1c13d" dependencies = [ "aho-corasick 1.0.2", "bstr 1.6.0", @@ -2654,6 +2828,10 @@ name = "hashbrown" version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" +dependencies = [ + "ahash 0.8.3", + "allocator-api2", +] [[package]] name = "hashers" @@ -2691,7 +2869,7 @@ checksum = "db04bc24a18b9ea980628ecf00e6c0264f3c1426dac36c00cb49b6fbad8b0743" dependencies = [ "atomic-polyfill 0.1.11", "hash32", - "rustc_version", + "rustc_version 0.4.0", "serde", "spin 0.9.8", "stable_deref_trait", @@ -3212,7 +3390,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi", - "rustix 0.38.4", + "rustix 0.38.7", "windows-sys 0.48.0", ] @@ -3562,9 +3740,9 @@ checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" [[package]] name = "linux-raw-sys" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0" +checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503" [[package]] name = "litemap" @@ -3744,7 +3922,7 @@ checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", - "syn 2.0.27", + "syn 2.0.28", ] [[package]] @@ -4049,7 +4227,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2 1.0.66", "quote 1.0.32", - "syn 2.0.27", + "syn 2.0.28", ] [[package]] @@ -4289,6 +4467,16 @@ version = "2.3.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +[[package]] +name = "pest" +version = "2.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1acb4a4365a13f749a93f1a094a7805e5cfa0955373a9de860d962eaa3a5fe5a" +dependencies = [ + "thiserror", + "ucd-trie", +] + [[package]] name = "pharos" version = "0.5.3" @@ -4296,7 +4484,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" dependencies = [ "futures", - "rustc_version", + "rustc_version 0.4.0", ] [[package]] @@ -4329,7 +4517,7 @@ dependencies = [ "phf_shared", "proc-macro2 1.0.66", "quote 1.0.32", - "syn 2.0.27", + "syn 2.0.28", ] [[package]] @@ -4358,7 +4546,7 @@ checksum = "ec2e072ecce94ec471b13398d5402c188e76ac03cf74dd1a975161b23a3f6d9c" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", - "syn 2.0.27", + "syn 2.0.28", ] [[package]] @@ -4551,7 +4739,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62" dependencies = [ "proc-macro2 1.0.66", - "syn 2.0.27", + "syn 2.0.28", ] [[package]] @@ -4898,13 +5086,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.1" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575" +checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" dependencies = [ "aho-corasick 1.0.2", "memchr", - "regex-automata 0.3.4", + "regex-automata 0.3.6", "regex-syntax 0.7.4", ] @@ -4919,9 +5107,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.4" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7b6d6190b7594385f61bd3911cd1be99dfddcfc365a4160cc2ab5bff4aed294" +checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" dependencies = [ "aho-corasick 1.0.2", "memchr", @@ -5463,7 +5651,7 @@ dependencies = [ "quote 1.0.32", "regex", "serial_test 0.10.0", - "syn 2.0.27", + "syn 2.0.28", "trybuild", ] @@ -5722,7 +5910,7 @@ version = "0.1.0-alpha.4" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", - "syn 2.0.27", + "syn 2.0.28", ] [[package]] @@ -6109,17 +6297,26 @@ dependencies = [ [[package]] name = "ruint" -version = "1.9.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77e1574d439643c8962edf612a888e7cc5581bcdf36cb64e6bc88466b03b2daa" +checksum = "95294d6e3a6192f3aabf91c38f56505a625aa495533442744185a36d75a790c4" dependencies = [ + "alloy-rlp", "arbitrary", + "ark-ff 0.3.0", + "ark-ff 0.4.2", + "bytes", + "fastrlp", + "num-bigint", + "parity-scale-codec", "primitive-types", "proptest", + "rand 0.8.5", "rlp", "ruint-macro", "serde", - "thiserror", + "valuable", + "zeroize", ] [[package]] @@ -6146,13 +6343,22 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" +[[package]] +name = "rustc_version" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +dependencies = [ + "semver 0.11.0", +] + [[package]] name = "rustc_version" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver", + "semver 1.0.18", ] [[package]] @@ -6162,7 +6368,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c37f1bd5ef1b5422177b7646cba67430579cfe2ace80f284fee876bca52ad941" dependencies = [ "bitflags 1.3.2", - "errno 0.3.1", + "errno 0.3.2", "io-lifetimes", "libc", "linux-raw-sys 0.1.4", @@ -6171,22 +6377,22 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.4" +version = "0.38.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a962918ea88d644592894bc6dc55acc6c0956488adcebbfb6e273506b7fd6e5" +checksum = "172891ebdceb05aa0005f533a6cbfca599ddd7d966f6f5d4d9b2e70478e70399" dependencies = [ "bitflags 2.3.3", - "errno 0.3.1", + "errno 0.3.2", "libc", - "linux-raw-sys 0.4.3", + "linux-raw-sys 0.4.5", "windows-sys 0.48.0", ] [[package]] name = "rustls" -version = "0.21.5" +version = "0.21.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79ea77c539259495ce8ca47f53e66ae0330a8819f67e23ac96ca02f50e7b7d36" +checksum = "1d1feddffcfcc0b33f5c6ce9a29e341e4cd59c3f78e7ee45f4a40c038b1d6cbb" dependencies = [ "log", "ring", @@ -6402,6 +6608,15 @@ dependencies = [ "libc", ] +[[package]] +name = "semver" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +dependencies = [ + "semver-parser", +] + [[package]] name = "semver" version = "1.0.18" @@ -6411,6 +6626,15 @@ dependencies = [ "serde", ] +[[package]] +name = "semver-parser" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" +dependencies = [ + "pest", +] + [[package]] name = "send_wrapper" version = "0.4.0" @@ -6425,9 +6649,9 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "serde" -version = "1.0.178" +version = "1.0.181" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60363bdd39a7be0266a520dab25fdc9241d2f987b08a01e01f0ec6d06a981348" +checksum = "6d3e73c93c3240c0bda063c239298e633114c69a888c3e37ca8bb33f343e9890" dependencies = [ "serde_derive", ] @@ -6445,13 +6669,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.178" +version = "1.0.181" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f28482318d6641454cb273da158647922d1be6b5a2fcc6165cd89ebdd7ed576b" +checksum = "be02f6cb0cd3a5ec20bbcfbcbd749f57daddb1a0882dc2e46a6c236c90b977ed" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", - "syn 2.0.27", + "syn 2.0.28", ] [[package]] @@ -6511,7 +6735,7 @@ dependencies = [ "darling 0.20.3", "proc-macro2 1.0.66", "quote 1.0.32", - "syn 2.0.27", + "syn 2.0.28", ] [[package]] @@ -6561,7 +6785,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", - "syn 2.0.27", + "syn 2.0.28", ] [[package]] @@ -6915,7 +7139,7 @@ dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", "rustversion", - "syn 2.0.27", + "syn 2.0.28", ] [[package]] @@ -7003,9 +7227,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.27" +version = "2.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b60f673f44a8255b9c8c657daf66a596d435f2da81a555b06dc644d080ba45e0" +checksum = 
"04361975b3f5e348b2189d8dc55bc942f278b2d482a6a0365de5bdd62d351567" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", @@ -7032,7 +7256,7 @@ checksum = "285ba80e733fac80aa4270fbcdf83772a79b80aa35c97075320abfee4a915b06" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", - "syn 2.0.27", + "syn 2.0.28", "unicode-xid 0.2.4", ] @@ -7051,7 +7275,7 @@ dependencies = [ "cfg-if", "fastrand 2.0.0", "redox_syscall 0.3.5", - "rustix 0.38.4", + "rustix 0.38.7", "windows-sys 0.48.0", ] @@ -7108,7 +7332,7 @@ dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", "subprocess", - "syn 2.0.27", + "syn 2.0.28", "test-fuzz-internal", "toolchain_find", ] @@ -7150,7 +7374,7 @@ checksum = "090198534930841fab3a5d1bb637cde49e339654e606195f8d9c76eeb081dc96" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", - "syn 2.0.27", + "syn 2.0.28", ] [[package]] @@ -7165,10 +7389,11 @@ dependencies = [ [[package]] name = "time" -version = "0.3.23" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59e399c068f43a5d116fedaf73b203fa4f9c519f17e2b34f63221d3792f81446" +checksum = "b0fdd63d58b18d663fbdf70e049f00a22c8e42be082203be7f26589213cd75ea" dependencies = [ + "deranged", "itoa", "libc", "num_threads", @@ -7185,9 +7410,9 @@ checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96ba15a897f3c86766b757e5ac7221554c6750054d74d5b28844fce5fb36a6c4" +checksum = "eb71511c991639bb078fd5bf97757e03914361c48100d52878b8e52b46fb92cd" dependencies = [ "time-core", ] @@ -7264,7 +7489,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", - "syn 2.0.27", + "syn 2.0.28", ] [[package]] @@ -7369,7 +7594,7 @@ dependencies = [ "home", "once_cell", "regex", - "semver", + "semver 1.0.18", "walkdir", ] @@ -7468,7 +7693,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", - "syn 2.0.27", + "syn 2.0.28", ] [[package]] @@ -7692,6 +7917,12 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" +[[package]] +name = "ucd-trie" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" + [[package]] name = "uint" version = "0.9.5" @@ -7924,7 +8155,7 @@ dependencies = [ "once_cell", "proc-macro2 1.0.66", "quote 1.0.32", - "syn 2.0.27", + "syn 2.0.28", "wasm-bindgen-shared", ] @@ -7958,7 +8189,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", - "syn 2.0.27", + "syn 2.0.28", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8174,9 +8405,9 @@ checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "winnow" -version = "0.5.1" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25b5872fa2e10bd067ae946f927e726d7d603eaeb6e02fa6a350e0722d2b8c11" +checksum = "acaaa1190073b2b101e15083c38ee8ec891b5e05cbee516521e94ec008f61e64" dependencies = [ "memchr", ] @@ -8223,7 +8454,7 @@ dependencies = [ "js-sys", "log", "pharos", - "rustc_version", + "rustc_version 0.4.0", "send_wrapper 0.6.0", 
"thiserror", "wasm-bindgen", @@ -8323,7 +8554,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", - "syn 2.0.27", + "syn 2.0.28", ] [[package]] From aaf2d2cf194cc5b2c6e8ec106001ee44db9512c9 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 6 Aug 2023 14:14:26 +0200 Subject: [PATCH 359/722] chore: add with ext function (#4087) --- bin/reth/src/node/mod.rs | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 397fce91f142..65657b71ff68 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -159,6 +159,40 @@ pub struct NodeCommand { } impl NodeCommand { + /// Replaces the extension of the node command + pub fn with_ext(self, ext: E::Node) -> NodeCommand { + let Self { + datadir, + config, + chain, + metrics, + network, + rpc, + txpool, + builder, + debug, + db, + dev, + pruning, + .. + } = self; + NodeCommand { + datadir, + config, + chain, + metrics, + network, + rpc, + txpool, + builder, + debug, + db, + dev, + pruning, + ext, + } + } + /// Execute `node` command pub async fn execute(mut self, ctx: CliContext) -> eyre::Result<()> { info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); From 9569debbb5f5b2ed9b2bda93ea008f0385dcb516 Mon Sep 17 00:00:00 2001 From: Roberto Bayardo Date: Mon, 7 Aug 2023 08:52:27 -0700 Subject: [PATCH 360/722] feat: make base fee computation parameters configurable via chain spec (#3992) --- bin/reth/src/args/rpc_server_args.rs | 3 +- bin/reth/src/init.rs | 1 + crates/consensus/auto-seal/src/lib.rs | 15 ++++-- crates/consensus/auto-seal/src/task.rs | 5 +- crates/consensus/common/src/validation.rs | 6 ++- crates/payload/builder/src/payload.rs | 4 +- crates/primitives/src/basefee.rs | 23 +++++---- crates/primitives/src/chain/mod.rs | 2 +- crates/primitives/src/chain/spec.rs | 48 ++++++++++++++++++- crates/primitives/src/constants/mod.rs | 16 ++++--- crates/primitives/src/hardfork.rs | 4 +- crates/primitives/src/header.rs | 12 +++-- crates/primitives/src/lib.rs | 4 +- crates/rpc/rpc-builder/src/auth.rs | 11 +++-- crates/rpc/rpc/src/eth/api/block.rs | 5 +- crates/rpc/rpc/src/eth/api/call.rs | 7 ++- crates/rpc/rpc/src/eth/api/fees.rs | 7 ++- crates/rpc/rpc/src/eth/api/mod.rs | 18 ++++--- crates/rpc/rpc/src/eth/api/server.rs | 14 ++++-- crates/rpc/rpc/src/eth/api/state.rs | 6 ++- crates/rpc/rpc/src/eth/api/transactions.rs | 13 +++-- .../storage/provider/src/test_utils/mock.rs | 33 ++++++++++--- crates/transaction-pool/src/lib.rs | 8 ++-- crates/transaction-pool/src/maintain.rs | 22 ++++++--- 24 files changed, 210 insertions(+), 77 deletions(-) diff --git a/bin/reth/src/args/rpc_server_args.rs b/bin/reth/src/args/rpc_server_args.rs index 67bd39cac552..0d2cd9bd6620 100644 --- a/bin/reth/src/args/rpc_server_args.rs +++ b/bin/reth/src/args/rpc_server_args.rs @@ -339,9 +339,10 @@ impl RpcServerArgs { ) -> Result where Provider: BlockReaderIdExt + + ChainSpecProvider + + EvmEnvProvider + HeaderProvider + StateProviderFactory - + EvmEnvProvider + Clone + Unpin + 'static, diff --git a/bin/reth/src/init.rs b/bin/reth/src/init.rs index 693940a93a03..5296c249762c 100644 --- a/bin/reth/src/init.rs +++ b/bin/reth/src/init.rs @@ -273,6 +273,7 @@ mod tests { genesis_hash: None, paris_block_and_final_difficulty: None, deposit_contract: None, + ..Default::default() }); let db = create_test_rw_db(); diff --git a/crates/consensus/auto-seal/src/lib.rs 
b/crates/consensus/auto-seal/src/lib.rs index abd0fada2c55..7b14f544a483 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -246,10 +246,16 @@ impl StorageInner { /// Fills in pre-execution header fields based on the current best block and given /// transactions. - pub(crate) fn build_header_template(&self, transactions: &Vec) -> Header { + pub(crate) fn build_header_template( + &self, + transactions: &Vec, + chain_spec: Arc, + ) -> Header { // check previous block for base fee - let base_fee_per_gas = - self.headers.get(&self.best_block).and_then(|parent| parent.next_block_base_fee()); + let base_fee_per_gas = self + .headers + .get(&self.best_block) + .and_then(|parent| parent.next_block_base_fee(chain_spec.base_fee_params)); let mut header = Header { parent_hash: self.best_hash, @@ -337,8 +343,9 @@ impl StorageInner { &mut self, transactions: Vec, executor: &mut Executor, + chain_spec: Arc, ) -> Result<(SealedHeader, PostState), BlockExecutionError> { - let header = self.build_header_template(&transactions); + let header = self.build_header_template(&transactions, chain_spec); let block = Block { header, body: transactions, ommers: vec![], withdrawals: None }; diff --git a/crates/consensus/auto-seal/src/task.rs b/crates/consensus/auto-seal/src/task.rs index 6d77784b1add..ddb450acea62 100644 --- a/crates/consensus/auto-seal/src/task.rs +++ b/crates/consensus/auto-seal/src/task.rs @@ -129,9 +129,10 @@ where // execute the new block let substate = SubState::new(State::new(client.latest().unwrap())); - let mut executor = Executor::new(chain_spec, substate); + let mut executor = Executor::new(Arc::clone(&chain_spec), substate); - match storage.build_and_execute(transactions.clone(), &mut executor) { + match storage.build_and_execute(transactions.clone(), &mut executor, chain_spec) + { Ok((new_header, post_state)) => { // clear all transactions from pool pool.remove_transactions(transactions.iter().map(|tx| tx.hash())); diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index fd7c6140d16c..baf7d5edc104 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -271,7 +271,7 @@ pub fn validate_header_regarding_parent( // By consensus, gas_limit is multiplied by elasticity (*2) on // on exact block that hardfork happens. if chain_spec.fork(Hardfork::London).transitions_at_block(child.number) { - parent_gas_limit = parent.gas_limit * constants::EIP1559_ELASTICITY_MULTIPLIER; + parent_gas_limit = parent.gas_limit * chain_spec.base_fee_params.elasticity_multiplier; } // Check gas limit, max diff between child/parent gas_limit should be max_diff=parent_gas/1024 @@ -298,7 +298,9 @@ pub fn validate_header_regarding_parent( constants::EIP1559_INITIAL_BASE_FEE } else { // This BaseFeeMissing will not happen as previous blocks are checked to have them. - parent.next_block_base_fee().ok_or(ConsensusError::BaseFeeMissing)? + parent + .next_block_base_fee(chain_spec.base_fee_params) + .ok_or(ConsensusError::BaseFeeMissing)? 
}; if expected_base_fee != base_fee { return Err(ConsensusError::BaseFeeDiff { expected: expected_base_fee, got: base_fee }) diff --git a/crates/payload/builder/src/payload.rs b/crates/payload/builder/src/payload.rs index 83cfe75ec1ed..c930585a457d 100644 --- a/crates/payload/builder/src/payload.rs +++ b/crates/payload/builder/src/payload.rs @@ -134,7 +134,9 @@ impl PayloadBuilderAttributes { prevrandao: Some(self.prev_randao), gas_limit: U256::from(parent.gas_limit), // calculate basefee based on parent block's gas usage - basefee: U256::from(parent.next_block_base_fee().unwrap_or_default()), + basefee: U256::from( + parent.next_block_base_fee(chain_spec.base_fee_params).unwrap_or_default(), + ), }; (cfg, block_env) diff --git a/crates/primitives/src/basefee.rs b/crates/primitives/src/basefee.rs index be60c3dcc4a2..3614c850aa17 100644 --- a/crates/primitives/src/basefee.rs +++ b/crates/primitives/src/basefee.rs @@ -1,11 +1,13 @@ //! Helpers for working with EIP-1559 base fee -use crate::constants; - /// Calculate base fee for next block. [EIP-1559](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-1559.md) spec -pub fn calculate_next_block_base_fee(gas_used: u64, gas_limit: u64, base_fee: u64) -> u64 { - let gas_target = gas_limit / constants::EIP1559_ELASTICITY_MULTIPLIER; - +pub fn calculate_next_block_base_fee( + gas_used: u64, + gas_limit: u64, + base_fee: u64, + base_fee_params: crate::BaseFeeParams, +) -> u64 { + let gas_target = gas_limit / base_fee_params.elasticity_multiplier; if gas_used == gas_target { return base_fee } @@ -15,14 +17,14 @@ pub fn calculate_next_block_base_fee(gas_used: u64, gas_limit: u64, base_fee: u6 1, base_fee as u128 * gas_used_delta as u128 / gas_target as u128 / - constants::EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR as u128, + base_fee_params.max_change_denominator as u128, ); base_fee + (base_fee_delta as u64) } else { let gas_used_delta = gas_target - gas_used; let base_fee_per_gas_delta = base_fee as u128 * gas_used_delta as u128 / gas_target as u128 / - constants::EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR as u128; + base_fee_params.max_change_denominator as u128; base_fee.saturating_sub(base_fee_per_gas_delta as u64) } @@ -54,7 +56,12 @@ mod tests { for i in 0..base_fee.len() { assert_eq!( next_base_fee[i], - calculate_next_block_base_fee(gas_used[i], gas_limit[i], base_fee[i]) + calculate_next_block_base_fee( + gas_used[i], + gas_limit[i], + base_fee[i], + crate::BaseFeeParams::ethereum(), + ) ); } } diff --git a/crates/primitives/src/chain/mod.rs b/crates/primitives/src/chain/mod.rs index b46519f32566..69afd1fe03fc 100644 --- a/crates/primitives/src/chain/mod.rs +++ b/crates/primitives/src/chain/mod.rs @@ -10,7 +10,7 @@ use std::{fmt, str::FromStr}; // The chain spec module. 
 mod spec;
 pub use spec::{
-    AllGenesisFormats, ChainSpec, ChainSpecBuilder, DisplayHardforks, ForkCondition,
+    AllGenesisFormats, BaseFeeParams, ChainSpec, ChainSpecBuilder, DisplayHardforks, ForkCondition,
     ForkTimestamps, DEV, GOERLI, MAINNET, SEPOLIA,
 };
diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs
index ca514a628808..f82d93052c22 100644
--- a/crates/primitives/src/chain/spec.rs
+++ b/crates/primitives/src/chain/spec.rs
@@ -1,5 +1,8 @@
 use crate::{
-    constants::{EIP1559_INITIAL_BASE_FEE, EMPTY_WITHDRAWALS},
+    constants::{
+        EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR, EIP1559_DEFAULT_ELASTICITY_MULTIPLIER,
+        EIP1559_INITIAL_BASE_FEE, EMPTY_WITHDRAWALS,
+    },
     forkid::ForkFilterKey,
     header::Head,
     proofs::genesis_state_root,
@@ -60,6 +63,7 @@ pub static MAINNET: Lazy<Arc<ChainSpec>> = Lazy::new(|| {
             11052984,
             H256(hex!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5")),
         )),
+        ..Default::default()
     }
     .into()
 });
@@ -100,6 +104,7 @@ pub static GOERLI: Lazy<Arc<ChainSpec>> = Lazy::new(|| {
             4367322,
             H256(hex!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5")),
         )),
+        ..Default::default()
     }
     .into()
 });
@@ -144,6 +149,7 @@ pub static SEPOLIA: Lazy<Arc<ChainSpec>> = Lazy::new(|| {
             1273020,
             H256(hex!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5")),
         )),
+        ..Default::default()
     }
     .into()
 });
@@ -182,10 +188,30 @@ pub static DEV: Lazy<Arc<ChainSpec>> = Lazy::new(|| {
             (Hardfork::Shanghai, ForkCondition::Timestamp(0)),
         ]),
         deposit_contract: None, // TODO: do we even have?
+        ..Default::default()
     }
     .into()
 });

+/// BaseFeeParams contains the config parameters that control block base fee computation
+#[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq, Eq)]
+pub struct BaseFeeParams {
+    /// The base_fee_max_change_denominator from EIP-1559
+    pub max_change_denominator: u64,
+    /// The elasticity multiplier from EIP-1559
+    pub elasticity_multiplier: u64,
+}
+
+impl BaseFeeParams {
+    /// Get the base fee parameters for ethereum mainnet
+    pub const fn ethereum() -> BaseFeeParams {
+        BaseFeeParams {
+            max_change_denominator: EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR,
+            elasticity_multiplier: EIP1559_DEFAULT_ELASTICITY_MULTIPLIER,
+        }
+    }
+}
+
 /// An Ethereum chain specification.
 ///
 /// A chain specification describes:
@@ -224,6 +250,24 @@ pub struct ChainSpec {
     /// The deposit contract deployed for PoS.
     #[serde(skip, default)]
     pub deposit_contract: Option<DepositContract>,
+
+    /// The parameters that configure how a block's base fee is computed
+    pub base_fee_params: BaseFeeParams,
+}
+
+impl Default for ChainSpec {
+    fn default() -> ChainSpec {
+        ChainSpec {
+            chain: Default::default(),
+            genesis_hash: Default::default(),
+            genesis: Default::default(),
+            paris_block_and_final_difficulty: Default::default(),
+            fork_timestamps: Default::default(),
+            hardforks: Default::default(),
+            deposit_contract: Default::default(),
+            base_fee_params: BaseFeeParams::ethereum(),
+        }
+    }
 }

 impl ChainSpec {
@@ -457,6 +501,7 @@ impl From<Genesis> for ChainSpec {
             hardforks,
             paris_block_and_final_difficulty: None,
             deposit_contract: None,
+            ..Default::default()
         }
     }
 }
@@ -680,6 +725,7 @@ impl ChainSpecBuilder {
             hardforks: self.hardforks,
             paris_block_and_final_difficulty: None,
             deposit_contract: None,
+            ..Default::default()
         }
     }
 }
diff --git a/crates/primitives/src/constants/mod.rs b/crates/primitives/src/constants/mod.rs
index 96c55cbbe11b..f3f09c4f3107 100644
--- a/crates/primitives/src/constants/mod.rs
+++ b/crates/primitives/src/constants/mod.rs
@@ -37,11 +37,15 @@ pub const BEACON_NONCE: u64 = 0u64;
 /// See .
 pub const ETHEREUM_BLOCK_GAS_LIMIT: u64 = 30_000_000;

-/// The minimal value the basefee can decrease to.
+/// The minimum tx fee below which the txpool will reject the transaction.
 ///
-/// The `BASE_FEE_MAX_CHANGE_DENOMINATOR` is `8`, or 12.5%.
-/// Once the base fee has dropped to `7` WEI it cannot decrease further because 12.5% of 7 is less
-/// than 1.
+/// Configured to `7` WEI which is the lowest possible value of base fee under mainnet EIP-1559
+/// parameters. `BASE_FEE_MAX_CHANGE_DENOMINATOR`
+/// is `8`, or 12.5%. Once the base fee has dropped to `7` WEI it cannot decrease further because
+/// 12.5% of 7 is less than 1.
+///
+/// Note that min base fee under different 1559 parameterizations may differ, but there's no
+/// significant harm in leaving this setting as is.
 pub const MIN_PROTOCOL_BASE_FEE: u64 = 7;

 /// Same as [MIN_PROTOCOL_BASE_FEE] but as a U256.
@@ -51,10 +55,10 @@ pub const MIN_PROTOCOL_BASE_FEE_U256: U256 = U256::from_limbs([7u64, 0, 0, 0]);
 pub const EIP1559_INITIAL_BASE_FEE: u64 = 1_000_000_000;

 /// Base fee max change denominator as defined in [EIP-1559](https://eips.ethereum.org/EIPS/eip-1559)
-pub const EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR: u64 = 8;
+pub const EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR: u64 = 8;

 /// Elasticity multiplier as defined in [EIP-1559](https://eips.ethereum.org/EIPS/eip-1559)
-pub const EIP1559_ELASTICITY_MULTIPLIER: u64 = 2;
+pub const EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u64 = 2;

 /// Multiplier for converting gwei to wei.
 pub const GWEI_TO_WEI: u64 = 1_000_000_000;
diff --git a/crates/primitives/src/hardfork.rs b/crates/primitives/src/hardfork.rs
index f82310d68de9..c30d6a865e6a 100644
--- a/crates/primitives/src/hardfork.rs
+++ b/crates/primitives/src/hardfork.rs
@@ -162,9 +162,9 @@ mod tests {
             genesis: Genesis::default(),
             genesis_hash: None,
             hardforks: BTreeMap::from([(Hardfork::Frontier, ForkCondition::Never)]),
-            fork_timestamps: Default::default(),
             paris_block_and_final_difficulty: None,
             deposit_contract: None,
+            ..Default::default()
         };

         assert_eq!(Hardfork::Frontier.fork_id(&spec), None);
@@ -177,9 +177,9 @@ mod tests {
             genesis: Genesis::default(),
             genesis_hash: None,
             hardforks: BTreeMap::from([(Hardfork::Shanghai, ForkCondition::Never)]),
-            fork_timestamps: Default::default(),
             paris_block_and_final_difficulty: None,
             deposit_contract: None,
+            ..Default::default()
         };

         assert_eq!(Hardfork::Shanghai.fork_filter(&spec), None);
diff --git a/crates/primitives/src/header.rs b/crates/primitives/src/header.rs
index e39c6a8287ad..8aeab18b84f8 100644
--- a/crates/primitives/src/header.rs
+++ b/crates/primitives/src/header.rs
@@ -3,7 +3,8 @@ use crate::{
     blobfee::calculate_excess_blob_gas,
     keccak256,
     proofs::{EMPTY_LIST_HASH, EMPTY_ROOT},
-    BlockBodyRoots, BlockHash, BlockNumHash, BlockNumber, Bloom, Bytes, H160, H256, H64, U256,
+    BaseFeeParams, BlockBodyRoots, BlockHash, BlockNumHash, BlockNumber, Bloom, Bytes, H160, H256,
+    H64, U256,
 };
 use bytes::{Buf, BufMut, BytesMut};
@@ -176,8 +177,13 @@ impl Header {
     /// Calculate base fee for next block according to the EIP-1559 spec.
     ///
     /// Returns a `None` if no base fee is set, no EIP-1559 support
-    pub fn next_block_base_fee(&self) -> Option<u64> {
-        Some(calculate_next_block_base_fee(self.gas_used, self.gas_limit, self.base_fee_per_gas?))
+    pub fn next_block_base_fee(&self, base_fee_params: BaseFeeParams) -> Option<u64> {
+        Some(calculate_next_block_base_fee(
+            self.gas_used,
+            self.gas_limit,
+            self.base_fee_per_gas?,
+            base_fee_params,
+        ))
     }

     /// Calculate excess blob gas for the next block according to the EIP-4844 spec.
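Taken together, the diffs above reduce to one rule: the block-to-block base fee update is unchanged EIP-1559 arithmetic, only with the change denominator and elasticity multiplier read from `BaseFeeParams` instead of hard-coded constants. A self-contained sketch of the patched `calculate_next_block_base_fee` logic follows, with a worked check for mainnet's `8`/`2` parameters; the free function and local struct here are illustrative stand-ins, not reth's exports.

/// The two EIP-1559 knobs that the patch moves into the chain spec.
#[derive(Clone, Copy)]
struct BaseFeeParams {
    max_change_denominator: u64,
    elasticity_multiplier: u64,
}

/// Next-block base fee, following the patched `calculate_next_block_base_fee`.
fn next_base_fee(gas_used: u64, gas_limit: u64, base_fee: u64, p: BaseFeeParams) -> u64 {
    let gas_target = gas_limit / p.elasticity_multiplier;
    if gas_used == gas_target {
        return base_fee
    }
    if gas_used > gas_target {
        // Above target: raise the fee by at least 1 wei, at most 1/denominator.
        let gas_used_delta = gas_used - gas_target;
        let delta = std::cmp::max(
            1,
            base_fee as u128 * gas_used_delta as u128 /
                gas_target as u128 /
                p.max_change_denominator as u128,
        );
        base_fee + delta as u64
    } else {
        // Below target: lower the fee proportionally, saturating at zero.
        let gas_used_delta = gas_target - gas_used;
        let delta = base_fee as u128 * gas_used_delta as u128 /
            gas_target as u128 /
            p.max_change_denominator as u128;
        base_fee.saturating_sub(delta as u64)
    }
}

fn main() {
    let eth = BaseFeeParams { max_change_denominator: 8, elasticity_multiplier: 2 };
    // A full 30M-gas block (twice the 15M target) raises a 1 gwei base fee by 12.5%.
    assert_eq!(next_base_fee(30_000_000, 30_000_000, 1_000_000_000, eth), 1_125_000_000);
    // An empty block lowers it by 12.5%.
    assert_eq!(next_base_fee(0, 30_000_000, 1_000_000_000, eth), 875_000_000);
}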
diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 249073804f1f..82c0929e150b 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -60,8 +60,8 @@ pub use block::{ }; pub use bloom::Bloom; pub use chain::{ - AllGenesisFormats, Chain, ChainInfo, ChainSpec, ChainSpecBuilder, DisplayHardforks, - ForkCondition, ForkTimestamps, DEV, GOERLI, MAINNET, SEPOLIA, + AllGenesisFormats, BaseFeeParams, Chain, ChainInfo, ChainSpec, ChainSpecBuilder, + DisplayHardforks, ForkCondition, ForkTimestamps, DEV, GOERLI, MAINNET, SEPOLIA, }; pub use compression::*; pub use constants::{ diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index f30b29ee35a2..d0450ab4b42f 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -12,7 +12,8 @@ use jsonrpsee::{ }; use reth_network_api::{NetworkInfo, Peers}; use reth_provider::{ - BlockReaderIdExt, EvmEnvProvider, HeaderProvider, ReceiptProviderIdExt, StateProviderFactory, + BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, HeaderProvider, ReceiptProviderIdExt, + StateProviderFactory, }; use reth_rpc::{ eth::{cache::EthStateCache, gas_oracle::GasPriceOracle}, @@ -40,10 +41,11 @@ pub async fn launch( ) -> Result where Provider: BlockReaderIdExt - + ReceiptProviderIdExt + + ChainSpecProvider + + EvmEnvProvider + HeaderProvider + + ReceiptProviderIdExt + StateProviderFactory - + EvmEnvProvider + Clone + Unpin + 'static, @@ -86,9 +88,10 @@ pub async fn launch_with_eth_api( ) -> Result where Provider: BlockReaderIdExt + + ChainSpecProvider + + EvmEnvProvider + HeaderProvider + StateProviderFactory - + EvmEnvProvider + Clone + Unpin + 'static, diff --git a/crates/rpc/rpc/src/eth/api/block.rs b/crates/rpc/rpc/src/eth/api/block.rs index 1cb547e31535..ab297add7114 100644 --- a/crates/rpc/rpc/src/eth/api/block.rs +++ b/crates/rpc/rpc/src/eth/api/block.rs @@ -9,13 +9,14 @@ use crate::{ }; use reth_network_api::NetworkInfo; use reth_primitives::{BlockId, BlockNumberOrTag, TransactionMeta}; -use reth_provider::{BlockReaderIdExt, EvmEnvProvider, StateProviderFactory}; +use reth_provider::{BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; use reth_rpc_types::{Block, Index, RichBlock, TransactionReceipt}; use reth_transaction_pool::TransactionPool; impl EthApi where - Provider: BlockReaderIdExt + StateProviderFactory + EvmEnvProvider + 'static, + Provider: + BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + 'static, Pool: TransactionPool + Clone + 'static, Network: NetworkInfo + Send + Sync + 'static, { diff --git a/crates/rpc/rpc/src/eth/api/call.rs b/crates/rpc/rpc/src/eth/api/call.rs index d7c0a065325c..e47555429ca8 100644 --- a/crates/rpc/rpc/src/eth/api/call.rs +++ b/crates/rpc/rpc/src/eth/api/call.rs @@ -14,7 +14,9 @@ use crate::{ use ethers_core::utils::get_contract_address; use reth_network_api::NetworkInfo; use reth_primitives::{AccessList, BlockId, BlockNumberOrTag, Bytes, U256}; -use reth_provider::{BlockReaderIdExt, EvmEnvProvider, StateProvider, StateProviderFactory}; +use reth_provider::{ + BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProvider, StateProviderFactory, +}; use reth_revm::{ access_list::AccessListInspector, database::{State, SubState}, @@ -38,7 +40,8 @@ const MIN_CREATE_GAS: u64 = 53_000u64; impl EthApi where Pool: TransactionPool + Clone + 'static, - Provider: BlockReaderIdExt + StateProviderFactory + EvmEnvProvider + 'static, + Provider: + BlockReaderIdExt + 
ChainSpecProvider + StateProviderFactory + EvmEnvProvider + 'static, Network: NetworkInfo + Send + Sync + 'static, { /// Estimate gas needed for execution of the `request` at the [BlockId]. diff --git a/crates/rpc/rpc/src/eth/api/fees.rs b/crates/rpc/rpc/src/eth/api/fees.rs index 49b0291a1195..5d8cbc4d1ff8 100644 --- a/crates/rpc/rpc/src/eth/api/fees.rs +++ b/crates/rpc/rpc/src/eth/api/fees.rs @@ -8,7 +8,7 @@ use reth_network_api::NetworkInfo; use reth_primitives::{ basefee::calculate_next_block_base_fee, BlockNumberOrTag, SealedHeader, U256, }; -use reth_provider::{BlockReaderIdExt, EvmEnvProvider, StateProviderFactory}; +use reth_provider::{BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; use reth_rpc_types::{FeeHistory, TxGasAndReward}; use reth_transaction_pool::TransactionPool; use tracing::debug; @@ -16,7 +16,8 @@ use tracing::debug; impl EthApi where Pool: TransactionPool + Clone + 'static, - Provider: BlockReaderIdExt + StateProviderFactory + EvmEnvProvider + 'static, + Provider: + BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + 'static, Network: NetworkInfo + Send + Sync + 'static, { /// Returns a suggestion for a gas price for legacy transactions. @@ -115,10 +116,12 @@ where // // The unwrap is safe since we checked earlier that we got at least 1 header. let last_header = headers.last().unwrap(); + let chain_spec = self.provider().chain_spec(); base_fee_per_gas.push(U256::from(calculate_next_block_base_fee( last_header.gas_used, last_header.gas_limit, last_header.base_fee_per_gas.unwrap_or_default(), + chain_spec.base_fee_params, ))); Ok(FeeHistory { diff --git a/crates/rpc/rpc/src/eth/api/mod.rs b/crates/rpc/rpc/src/eth/api/mod.rs index 20ecf114e2a9..d4f30a5ad665 100644 --- a/crates/rpc/rpc/src/eth/api/mod.rs +++ b/crates/rpc/rpc/src/eth/api/mod.rs @@ -16,7 +16,9 @@ use reth_network_api::NetworkInfo; use reth_primitives::{ Address, BlockId, BlockNumberOrTag, ChainInfo, SealedBlock, H256, U256, U64, }; -use reth_provider::{BlockReaderIdExt, EvmEnvProvider, StateProviderBox, StateProviderFactory}; +use reth_provider::{ + BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderBox, StateProviderFactory, +}; use reth_rpc_types::{SyncInfo, SyncStatus}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use reth_transaction_pool::TransactionPool; @@ -79,7 +81,7 @@ pub struct EthApi { impl EthApi where - Provider: BlockReaderIdExt, + Provider: BlockReaderIdExt + ChainSpecProvider, { /// Creates a new, shareable instance using the default tokio task spawner. pub fn new( @@ -194,7 +196,8 @@ where impl EthApi where - Provider: BlockReaderIdExt + StateProviderFactory + EvmEnvProvider + 'static, + Provider: + BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + 'static, { /// Returns the state at the given [BlockId] enum. 
pub fn state_at_block_id(&self, at: BlockId) -> EthResult> { @@ -228,7 +231,8 @@ where impl EthApi where - Provider: BlockReaderIdExt + StateProviderFactory + EvmEnvProvider + 'static, + Provider: + BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + 'static, Pool: TransactionPool + Clone + 'static, Network: NetworkInfo + Send + Sync + 'static, { @@ -249,7 +253,8 @@ where // assumed child block is in the next slot latest.timestamp += 12; // base fee of the child block - latest.base_fee_per_gas = latest.next_block_base_fee(); + let chain_spec = self.provider().chain_spec(); + latest.base_fee_per_gas = latest.next_block_base_fee(chain_spec.base_fee_params); PendingBlockEnvOrigin::DerivedFromLatest(latest) }; @@ -327,7 +332,8 @@ impl Clone for EthApi { impl EthApiSpec for EthApi where Pool: TransactionPool + Clone + 'static, - Provider: BlockReaderIdExt + StateProviderFactory + EvmEnvProvider + 'static, + Provider: + BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + 'static, Network: NetworkInfo + 'static, { /// Returns the current ethereum protocol version. diff --git a/crates/rpc/rpc/src/eth/api/server.rs b/crates/rpc/rpc/src/eth/api/server.rs index 32c985cf9831..1df566086d71 100644 --- a/crates/rpc/rpc/src/eth/api/server.rs +++ b/crates/rpc/rpc/src/eth/api/server.rs @@ -16,8 +16,8 @@ use reth_primitives::{ AccessListWithGasUsed, Address, BlockId, BlockNumberOrTag, Bytes, H256, H64, U256, U64, }; use reth_provider::{ - BlockIdReader, BlockReader, BlockReaderIdExt, EvmEnvProvider, HeaderProvider, - StateProviderFactory, + BlockIdReader, BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, + HeaderProvider, StateProviderFactory, }; use reth_rpc_api::EthApiServer; use reth_rpc_types::{ @@ -37,6 +37,7 @@ where Provider: BlockReader + BlockIdReader + BlockReaderIdExt + + ChainSpecProvider + HeaderProvider + StateProviderFactory + EvmEnvProvider @@ -410,12 +411,13 @@ mod tests { use reth_interfaces::test_utils::{generators, generators::Rng}; use reth_network_api::noop::NoopNetwork; use reth_primitives::{ - basefee::calculate_next_block_base_fee, constants::ETHEREUM_BLOCK_GAS_LIMIT, Block, - BlockNumberOrTag, Header, TransactionSigned, H256, U256, + basefee::calculate_next_block_base_fee, + constants::{self, ETHEREUM_BLOCK_GAS_LIMIT}, + BaseFeeParams, Block, BlockNumberOrTag, Header, TransactionSigned, H256, U256, }; use reth_provider::{ test_utils::{MockEthProvider, NoopProvider}, - BlockReader, BlockReaderIdExt, EvmEnvProvider, StateProviderFactory, + BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory, }; use reth_rpc_api::EthApiServer; use reth_rpc_types::FeeHistory; @@ -424,6 +426,7 @@ mod tests { fn build_test_eth_api< P: BlockReaderIdExt + BlockReader + + ChainSpecProvider + EvmEnvProvider + StateProviderFactory + Unpin @@ -538,6 +541,7 @@ mod tests { last_header.gas_used, last_header.gas_limit, last_header.base_fee_per_gas.unwrap_or_default(), + BaseFeeParams::ethereum(), ))); let eth_api = build_test_eth_api(mock_provider); diff --git a/crates/rpc/rpc/src/eth/api/state.rs b/crates/rpc/rpc/src/eth/api/state.rs index 2887ac58fb8f..4d4b29060e2a 100644 --- a/crates/rpc/rpc/src/eth/api/state.rs +++ b/crates/rpc/rpc/src/eth/api/state.rs @@ -9,14 +9,16 @@ use reth_primitives::{ U256, }; use reth_provider::{ - AccountReader, BlockReaderIdExt, EvmEnvProvider, StateProvider, StateProviderFactory, + AccountReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProvider, + 
StateProviderFactory, }; use reth_rpc_types::{EIP1186AccountProofResponse, StorageProof}; use reth_transaction_pool::{PoolTransaction, TransactionPool}; impl EthApi where - Provider: BlockReaderIdExt + StateProviderFactory + EvmEnvProvider + 'static, + Provider: + BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + 'static, Pool: TransactionPool + Clone + 'static, Network: Send + Sync + 'static, { diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 6d869daa6048..6784b62ec0c8 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -19,7 +19,9 @@ use reth_primitives::{ TransactionKind::{Call, Create}, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, H256, U128, U256, U64, }; -use reth_provider::{BlockReaderIdExt, EvmEnvProvider, StateProviderBox, StateProviderFactory}; +use reth_provider::{ + BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderBox, StateProviderFactory, +}; use reth_revm::{ database::{State, SubState}, env::{fill_block_env_with_coinbase, tx_env_with_recovered}, @@ -236,7 +238,8 @@ pub trait EthTransactions: Send + Sync { impl EthTransactions for EthApi where Pool: TransactionPool + Clone + 'static, - Provider: BlockReaderIdExt + StateProviderFactory + EvmEnvProvider + 'static, + Provider: + BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + 'static, Network: NetworkInfo + Send + Sync + 'static, { fn call_gas_limit(&self) -> u64 { @@ -668,7 +671,8 @@ where impl EthApi where - Provider: BlockReaderIdExt + StateProviderFactory + EvmEnvProvider + 'static, + Provider: + BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + 'static, Network: 'static, { /// Helper function for `eth_getTransactionReceipt` @@ -692,7 +696,8 @@ where impl EthApi where Pool: TransactionPool + 'static, - Provider: BlockReaderIdExt + StateProviderFactory + EvmEnvProvider + 'static, + Provider: + BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + 'static, Network: NetworkInfo + Send + Sync + 'static, { pub(crate) fn sign_request( diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index d679a7ed5446..ccb39f6c97b9 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -1,18 +1,18 @@ use crate::{ traits::{BlockSource, ReceiptProvider}, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, - EvmEnvProvider, HeaderProvider, PostState, PostStateDataProvider, ReceiptProviderIdExt, - StateProvider, StateProviderBox, StateProviderFactory, StateRootProvider, TransactionsProvider, - WithdrawalsProvider, + ChainSpecProvider, EvmEnvProvider, HeaderProvider, PostState, PostStateDataProvider, + ReceiptProviderIdExt, StateProvider, StateProviderBox, StateProviderFactory, StateRootProvider, + TransactionsProvider, WithdrawalsProvider, }; use parking_lot::Mutex; use reth_db::models::StoredBlockBodyIndices; use reth_interfaces::{provider::ProviderError, Result}; use reth_primitives::{ keccak256, Account, Address, Block, BlockHash, BlockHashOrNumber, BlockId, BlockNumber, - BlockWithSenders, Bytecode, Bytes, ChainInfo, Header, Receipt, SealedBlock, SealedHeader, - StorageKey, StorageValue, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, - TxNumber, H256, U256, + BlockWithSenders, Bytecode, Bytes, ChainInfo, 
ChainSpec, Header, Receipt, SealedBlock, + SealedHeader, StorageKey, StorageValue, TransactionMeta, TransactionSigned, + TransactionSignedNoHash, TxHash, TxNumber, H256, U256, }; use reth_revm_primitives::primitives::{BlockEnv, CfgEnv}; use std::{ @@ -22,7 +22,7 @@ use std::{ }; /// A mock implementation for Provider interfaces. -#[derive(Debug, Clone, Default)] +#[derive(Debug, Clone)] pub struct MockEthProvider { /// Local block store pub blocks: Arc>>, @@ -30,6 +30,19 @@ pub struct MockEthProvider { pub headers: Arc>>, /// Local account store pub accounts: Arc>>, + /// Local chain spec + pub chain_spec: Arc, +} + +impl Default for MockEthProvider { + fn default() -> MockEthProvider { + MockEthProvider { + blocks: Default::default(), + headers: Default::default(), + accounts: Default::default(), + chain_spec: Arc::new(reth_primitives::ChainSpecBuilder::mainnet().build()), + } + } } /// An extended account for local store @@ -160,6 +173,12 @@ impl HeaderProvider for MockEthProvider { } } +impl ChainSpecProvider for MockEthProvider { + fn chain_spec(&self) -> Arc { + self.chain_spec.clone() + } +} + impl TransactionsProvider for MockEthProvider { fn transaction_id(&self, _tx_hash: TxHash) -> Result> { todo!() diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 63e1936a76f7..ccdf752e0dc9 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -92,10 +92,10 @@ //! //! ``` //! use reth_primitives::MAINNET; -//! use reth_provider::StateProviderFactory; +//! use reth_provider::{ChainSpecProvider, StateProviderFactory}; //! use reth_tasks::TokioTaskExecutor; //! use reth_transaction_pool::{EthTransactionValidator, Pool, TransactionPool}; -//! async fn t(client: C) where C: StateProviderFactory + Clone + 'static{ +//! async fn t(client: C) where C: StateProviderFactory + ChainSpecProvider + Clone + 'static{ //! let pool = Pool::eth_pool( //! EthTransactionValidator::new(client, MAINNET.clone(), TokioTaskExecutor::default()), //! Default::default(), @@ -117,12 +117,12 @@ //! ``` //! use futures_util::Stream; //! use reth_primitives::MAINNET; -//! use reth_provider::{BlockReaderIdExt, CanonStateNotification, StateProviderFactory}; +//! use reth_provider::{BlockReaderIdExt, CanonStateNotification, ChainSpecProvider, StateProviderFactory}; //! use reth_tasks::TokioTaskExecutor; //! use reth_transaction_pool::{EthTransactionValidator, Pool}; //! use reth_transaction_pool::maintain::maintain_transaction_pool_future; //! async fn t(client: C, stream: St) -//! where C: StateProviderFactory + BlockReaderIdExt + Clone + 'static, +//! where C: StateProviderFactory + BlockReaderIdExt + ChainSpecProvider + Clone + 'static, //! St: Stream + Send + Unpin + 'static, //! { //! 
let pool = Pool::eth_pool( diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 22cc62ddaab3..1aa446345efd 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -10,7 +10,9 @@ use futures_util::{ FutureExt, Stream, StreamExt, }; use reth_primitives::{Address, BlockHash, BlockNumberOrTag, FromRecoveredTransaction}; -use reth_provider::{BlockReaderIdExt, CanonStateNotification, PostState, StateProviderFactory}; +use reth_provider::{ + BlockReaderIdExt, CanonStateNotification, ChainSpecProvider, PostState, StateProviderFactory, +}; use reth_tasks::TaskSpawner; use std::{ borrow::Borrow, @@ -49,7 +51,7 @@ pub fn maintain_transaction_pool_future( config: MaintainPoolConfig, ) -> BoxFuture<'static, ()> where - Client: StateProviderFactory + BlockReaderIdExt + Clone + Send + 'static, + Client: StateProviderFactory + BlockReaderIdExt + ChainSpecProvider + Clone + Send + 'static, P: TransactionPoolExt + 'static, St: Stream + Send + Unpin + 'static, Tasks: TaskSpawner + 'static, @@ -70,7 +72,7 @@ pub async fn maintain_transaction_pool( task_spawner: Tasks, config: MaintainPoolConfig, ) where - Client: StateProviderFactory + BlockReaderIdExt + Clone + Send + 'static, + Client: StateProviderFactory + BlockReaderIdExt + ChainSpecProvider + Clone + Send + 'static, P: TransactionPoolExt + 'static, St: Stream + Send + Unpin + 'static, Tasks: TaskSpawner + 'static, @@ -80,10 +82,13 @@ pub async fn maintain_transaction_pool( // ensure the pool points to latest state if let Ok(Some(latest)) = client.block_by_number_or_tag(BlockNumberOrTag::Latest) { let latest = latest.seal_slow(); + let chain_spec = client.chain_spec(); let info = BlockInfo { last_seen_block_hash: latest.hash, last_seen_block_number: latest.number, - pending_basefee: latest.next_block_base_fee().unwrap_or_default(), + pending_basefee: latest + .next_block_base_fee(chain_spec.base_fee_params) + .unwrap_or_default(), }; pool.set_block_info(info); } @@ -204,8 +209,11 @@ pub async fn maintain_transaction_pool( maintained_state = MaintainedPoolState::Drifted; } + let chain_spec = client.chain_spec(); + // base fee for the next block: `new_tip+1` - let pending_block_base_fee = new_tip.next_block_base_fee().unwrap_or_default(); + let pending_block_base_fee = + new_tip.next_block_base_fee(chain_spec.base_fee_params).unwrap_or_default(); // we know all changed account in the new chain let new_changed_accounts: HashSet<_> = @@ -279,9 +287,11 @@ pub async fn maintain_transaction_pool( CanonStateNotification::Commit { new } => { let (blocks, state) = new.inner(); let tip = blocks.tip(); + let chain_spec = client.chain_spec(); // base fee for the next block: `tip+1` - let pending_block_base_fee = tip.next_block_base_fee().unwrap_or_default(); + let pending_block_base_fee = + tip.next_block_base_fee(chain_spec.base_fee_params).unwrap_or_default(); let first_block = blocks.first(); trace!( From 269b878f5cdfd8e6444675fd1b25a8ceae02ecbb Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 7 Aug 2023 19:07:55 +0200 Subject: [PATCH 361/722] perf: no longer spawn filter tasks (#4096) --- crates/rpc/rpc/src/eth/filter.rs | 36 ++++++-------------------------- 1 file changed, 6 insertions(+), 30 deletions(-) diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 280bc0817f1a..a7c5ce53f36a 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -15,11 +15,8 @@ use reth_rpc_api::EthFilterApiServer; use 
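The `maintain.rs` changes above stop assuming mainnet constants and pass `chain_spec.base_fee_params` into the next-block base fee computation. For reference, a simplified sketch of the EIP-1559 update rule those parameters feed (field names here are illustrative, not reth's exact API):

```rust
// The two protocol constants carried by the params (mainnet: elasticity 2,
// change denominator 8). Simplified from the EIP-1559 spec.
struct BaseFeeParams {
    elasticity_multiplier: u128,
    max_change_denominator: u128,
}

fn next_block_base_fee(gas_used: u128, gas_limit: u128, base_fee: u128, p: &BaseFeeParams) -> u128 {
    let target = gas_limit / p.elasticity_multiplier;
    if gas_used == target {
        base_fee
    } else if gas_used > target {
        // Over-full block: raise the base fee proportionally, by at least 1 wei.
        let delta = base_fee * (gas_used - target) / target / p.max_change_denominator;
        base_fee + delta.max(1)
    } else {
        // Under-full block: lower the base fee proportionally.
        let delta = base_fee * (target - gas_used) / target / p.max_change_denominator;
        base_fee - delta
    }
}

fn main() {
    let p = BaseFeeParams { elasticity_multiplier: 2, max_change_denominator: 8 };
    // A completely full block (2x target) raises the base fee by 12.5%.
    assert_eq!(next_block_base_fee(30_000_000, 30_000_000, 1_000_000_000, &p), 1_125_000_000);
}
```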
reth_rpc_types::{Filter, FilterBlockOption, FilterChanges, FilterId, FilteredParams, Log}; use reth_tasks::TaskSpawner; use reth_transaction_pool::TransactionPool; -use std::{ - collections::HashMap, future::Future, iter::StepBy, ops::RangeInclusive, sync::Arc, - time::Instant, -}; -use tokio::sync::{oneshot, Mutex}; +use std::{collections::HashMap, iter::StepBy, ops::RangeInclusive, sync::Arc, time::Instant}; +use tokio::sync::Mutex; use tracing::trace; /// The maximum number of headers we read at once when handling a range filter. @@ -69,26 +66,6 @@ where Provider: BlockReader + BlockIdReader + EvmEnvProvider + 'static, Pool: TransactionPool + 'static, { - /// Executes the given filter on a new task. - /// - /// All the filter handles are implemented asynchronously. However, filtering is still a bit CPU - /// intensive. - async fn spawn_filter_task(&self, c: C) -> Result - where - C: FnOnce(Self) -> F, - F: Future> + Send + 'static, - R: Send + 'static, - { - let (tx, rx) = oneshot::channel(); - let this = self.clone(); - let f = c(this); - self.inner.task_spawner.spawn(Box::pin(async move { - let res = f.await; - let _ = tx.send(res); - })); - rx.await.map_err(|_| FilterError::InternalError)? - } - /// Returns all the filter changes for the given id, if any pub async fn filter_changes(&self, id: FilterId) -> Result { let info = self.inner.provider.chain_info()?; @@ -202,7 +179,7 @@ where /// Handler for `eth_getFilterChanges` async fn filter_changes(&self, id: FilterId) -> RpcResult { trace!(target: "rpc::eth", "Serving eth_getFilterChanges"); - Ok(self.spawn_filter_task(|this| async move { this.filter_changes(id).await }).await?) + Ok(EthFilter::filter_changes(self, id).await?) } /// Returns an array of all logs matching filter with given id. @@ -212,7 +189,7 @@ where /// Handler for `eth_getFilterLogs` async fn filter_logs(&self, id: FilterId) -> RpcResult> { trace!(target: "rpc::eth", "Serving eth_getFilterLogs"); - Ok(self.spawn_filter_task(|this| async move { this.filter_logs(id).await }).await?) + Ok(EthFilter::filter_logs(self, id).await?) } /// Handler for `eth_uninstallFilter` @@ -232,9 +209,7 @@ where /// Handler for `eth_getLogs` async fn logs(&self, filter: Filter) -> RpcResult> { trace!(target: "rpc::eth", "Serving eth_getLogs"); - Ok(self - .spawn_filter_task(|this| async move { this.inner.logs_for_filter(filter).await }) - .await?) + Ok(EthFilter::logs(self, filter).await?) } } @@ -269,6 +244,7 @@ struct EthFilterInner { /// maximum number of headers to read at once for range filter max_headers_range: u64, /// The type that can spawn tasks. 
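This patch deletes the oneshot-based `spawn_filter_task` helper and awaits the filter futures directly on the calling task. The removed indirection boiled down to the following shape, sketched here with plain tokio (assuming the `tokio` crate with its full feature set; an illustration, not reth's exact task spawner):

```rust
use tokio::sync::oneshot;

// Run a future on a separate task and relay its output through a oneshot
// channel. Awaiting in place, as the patch now does, drops a spawn, a
// channel, and an extra error path for a sender that went away.
async fn spawn_and_await<F, R>(fut: F) -> Result<R, &'static str>
where
    F: std::future::Future<Output = R> + Send + 'static,
    R: Send + 'static,
{
    let (tx, rx) = oneshot::channel();
    tokio::spawn(async move {
        let _ = tx.send(fut.await);
    });
    rx.await.map_err(|_| "task dropped before sending")
}

#[tokio::main]
async fn main() {
    assert_eq!(spawn_and_await(async { 41 + 1 }).await, Ok(42));
}
```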
+ #[allow(unused)] task_spawner: Box, } From 64c8dd259c79690bb20b00381541a4b83d20e30a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 7 Aug 2023 19:21:52 +0200 Subject: [PATCH 362/722] fix: record push stack as vec u256 (#4077) --- crates/revm/revm-inspectors/src/tracing/builder/parity.rs | 2 +- crates/revm/revm-inspectors/src/tracing/mod.rs | 5 +++-- crates/revm/revm-inspectors/src/tracing/types.rs | 4 ++-- crates/rpc/rpc-types/src/eth/trace/parity.rs | 2 +- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs index b70173a8dbcb..9ecddcfd45ee 100644 --- a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs +++ b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs @@ -353,7 +353,7 @@ impl ParityTraceBuilder { let maybe_execution = Some(VmExecutedOperation { used: step.gas_remaining, - push: step.new_stack.into_iter().map(|new_stack| new_stack.into()).collect(), + push: step.push_stack.clone().unwrap_or_default(), mem: maybe_memory, store: maybe_storage, }); diff --git a/crates/revm/revm-inspectors/src/tracing/mod.rs b/crates/revm/revm-inspectors/src/tracing/mod.rs index be6af4b53e6c..48f772ebde81 100644 --- a/crates/revm/revm-inspectors/src/tracing/mod.rs +++ b/crates/revm/revm-inspectors/src/tracing/mod.rs @@ -275,7 +275,7 @@ impl TracingInspector { op, contract: interp.contract.address, stack, - new_stack: None, + push_stack: None, memory, memory_size: interp.memory.len(), gas_remaining: self.gas_inspector.gas_remaining(), @@ -302,7 +302,8 @@ impl TracingInspector { let step = &mut self.traces.arena[trace_idx].trace.steps[step_idx]; if interp.stack.len() > step.stack.len() { - step.new_stack = interp.stack.data().last().copied(); + // if the stack grew, we need to record the new values + step.push_stack = Some(interp.stack.data()[step.stack.len()..].to_vec()); } if self.config.record_memory_snapshots { diff --git a/crates/revm/revm-inspectors/src/tracing/types.rs b/crates/revm/revm-inspectors/src/tracing/types.rs index 0aee8430533b..51fb1d21cb46 100644 --- a/crates/revm/revm-inspectors/src/tracing/types.rs +++ b/crates/revm/revm-inspectors/src/tracing/types.rs @@ -538,8 +538,8 @@ pub(crate) struct CallTraceStep { pub(crate) contract: Address, /// Stack before step execution pub(crate) stack: Stack, - /// The new stack item placed by this step if any - pub(crate) new_stack: Option, + /// The new stack items placed by this step if any + pub(crate) push_stack: Option>, /// All allocated memory in a step /// /// This will be empty if memory capture is disabled diff --git a/crates/rpc/rpc-types/src/eth/trace/parity.rs b/crates/rpc/rpc-types/src/eth/trace/parity.rs index 60de0b5d15ee..aa2732732fc3 100644 --- a/crates/rpc/rpc-types/src/eth/trace/parity.rs +++ b/crates/rpc/rpc-types/src/eth/trace/parity.rs @@ -314,7 +314,7 @@ pub struct VmExecutedOperation { /// The total gas used. pub used: u64, /// The stack item placed, if any. - pub push: Vec, + pub push: Vec, /// If altered, the memory delta. pub mem: Option, /// The altered storage value, if any. 
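The `push_stack` fix in the patch above addresses steps that push more than one value: the old `new_stack` field kept only the top-of-stack item, while the new code records the whole grown suffix. A small self-contained sketch of that logic, with `u64` standing in for 256-bit EVM words:

```rust
/// Compare the stack length before a step with the stack after it and
/// record *all* newly pushed values, not just the last one.
fn record_push_stack(stack_len_before: usize, stack_after: &[u64]) -> Option<Vec<u64>> {
    if stack_after.len() > stack_len_before {
        Some(stack_after[stack_len_before..].to_vec())
    } else {
        None
    }
}

fn main() {
    let after = [7, 8, 9, 10];
    // Two values pushed during one step: both are captured.
    assert_eq!(record_push_stack(2, &after), Some(vec![9, 10]));
    // Nothing pushed (or values popped): nothing is recorded.
    assert_eq!(record_push_stack(4, &after), None);
}
```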
From 310179a39a1e2a577b9cd6e12811d2260b351228 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 7 Aug 2023 14:49:18 -0400 Subject: [PATCH 363/722] feat: re-export `c-kzg` types and impl rlp traits (#4084) --- Cargo.lock | 49 ++++++++++++++++++++++++- Cargo.toml | 2 + crates/primitives/Cargo.toml | 5 ++- crates/primitives/src/lib.rs | 5 +++ crates/rlp/Cargo.toml | 5 +++ crates/rlp/src/encode.rs | 71 ++++++++++++++++++++++++++++++++++++ deny.toml | 7 ++++ 7 files changed, 142 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d19b7c5a5536..5fbbc8bb504a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -601,6 +601,27 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "bindgen" +version = "0.64.0" +source = "git+https://github.com/rust-lang/rust-bindgen?rev=0de11f0a521611ac8738b7b01d19dddaf3899e66#0de11f0a521611ac8738b7b01d19dddaf3899e66" +dependencies = [ + "bitflags 1.3.2", + "cexpr", + "clang-sys", + "lazy_static", + "lazycell", + "log", + "peeking_take_while", + "proc-macro2 1.0.66", + "quote 1.0.32", + "regex", + "rustc-hash", + "shlex", + "syn 2.0.27", + "which", +] + [[package]] name = "bindgen" version = "0.65.1" @@ -909,6 +930,19 @@ dependencies = [ "serde", ] +[[package]] +name = "c-kzg" +version = "0.1.0" +source = "git+https://github.com/ethereum/c-kzg-4844#13cec820c08f45318f82ed4e0da0300042758b92" +dependencies = [ + "bindgen 0.64.0 (git+https://github.com/rust-lang/rust-bindgen?rev=0de11f0a521611ac8738b7b01d19dddaf3899e66)", + "cc", + "glob", + "hex", + "libc", + "serde", +] + [[package]] name = "camino" version = "1.1.6" @@ -3703,7 +3737,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b18cbf29f8ff3542ba22bdce9ac610fcb75d74bb4e2b306b2a2762242025b4f" dependencies = [ - "bindgen 0.64.0", + "bindgen 0.64.0 (registry+https://github.com/rust-lang/crates.io-index)", "errno 0.2.8", "libc", ] @@ -5767,6 +5801,7 @@ dependencies = [ "arbitrary", "assert_matches", "bytes", + "c-kzg", "crc", "criterion", "crunchy", @@ -5893,6 +5928,7 @@ dependencies = [ "arrayvec", "auto_impl", "bytes", + "c-kzg", "criterion", "ethereum-types", "ethnum", @@ -8219,6 +8255,17 @@ dependencies = [ "rustls-webpki", ] +[[package]] +name = "which" +version = "4.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" +dependencies = [ + "either", + "libc", + "once_cell", +] + [[package]] name = "widestring" version = "1.0.2" diff --git a/Cargo.toml b/Cargo.toml index 602c36e327f4..4d083e629f1f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -143,6 +143,8 @@ jsonrpsee-types = { version = "0.19" } ## crypto secp256k1 = { version = "0.27.0", default-features = false, features = ["global-context", "rand-std", "recovery"] } +# for eip-4844 +c-kzg = { git = "https://github.com/ethereum/c-kzg-4844" } ### misc-testing proptest = "1.0" diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 0931d4fe4376..e73b59d88590 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -22,7 +22,7 @@ crunchy = { version = "0.2.2", default-features = false, features = ["limit_256" ruint = { version = "1.9.0", features = ["primitive-types", "rlp"] } # Bloom - fixed-hash = { version = "0.8", default-features = false, features = ["rustc-hex"] } +fixed-hash = { version = "0.8", default-features = false, features = ["rustc-hex"] } # crypto secp256k1 = { workspace = true, default-features = 
false, features = [ @@ -31,6 +31,9 @@ secp256k1 = { workspace = true, default-features = false, features = [ "recovery", ] } +# for eip-4844 +c-kzg = { workspace = true } + # used for forkid crc = "3" diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 82c0929e150b..02946310f13a 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -142,6 +142,11 @@ pub mod utils { pub use ethers_core::types::serde_helpers; } +/// EIP-4844 + KZG helpers +pub mod kzg { + pub use c_kzg::*; +} + /// Helpers for working with serde pub mod serde_helper; diff --git a/crates/rlp/Cargo.toml b/crates/rlp/Cargo.toml index 653543b51b94..98c25c7e31b6 100644 --- a/crates/rlp/Cargo.toml +++ b/crates/rlp/Cargo.toml @@ -18,6 +18,9 @@ ethereum-types = { version = "0.14", features = ["codec"], optional = true } revm-primitives = { workspace = true, features = ["serde"] } reth-rlp-derive = { path = "./rlp-derive", optional = true } +# for eip-4844 +c-kzg = { workspace = true, optional = true } + [dev-dependencies] reth-rlp = { workspace = true, features = [ "derive", @@ -31,9 +34,11 @@ criterion = "0.5.0" pprof = { version = "0.12", features = ["flamegraph", "frame-pointer", "criterion"] } [features] +default = ["kzg"] alloc = [] derive = ["reth-rlp-derive"] std = ["alloc"] +kzg = ["c-kzg"] [[bench]] name = "bench" diff --git a/crates/rlp/src/encode.rs b/crates/rlp/src/encode.rs index 759165a081a4..6d669fd667e1 100644 --- a/crates/rlp/src/encode.rs +++ b/crates/rlp/src/encode.rs @@ -413,6 +413,77 @@ pub fn encode_fixed_size, const LEN: usize>(v: &E) -> Arra out } +#[cfg(feature = "kzg")] +mod kzg_support { + extern crate c_kzg; + + use super::BufMut; + use crate::{Decodable, DecodeError, Encodable}; + use c_kzg::{Blob, Bytes48, KzgCommitment, KzgProof, BYTES_PER_BLOB, BYTES_PER_COMMITMENT}; + use core::ops::Deref; + + impl Encodable for Blob { + fn encode(&self, out: &mut dyn BufMut) { + // Deref is implemented to get the underlying bytes + self.deref().encode(out); + } + + fn length(&self) -> usize { + self.deref().length() + } + } + + impl Decodable for Blob { + fn decode(buf: &mut &[u8]) -> Result { + let bytes: [u8; BYTES_PER_BLOB] = Decodable::decode(buf)?; + Ok(Blob::from(bytes)) + } + } + + impl Encodable for Bytes48 { + fn encode(&self, out: &mut dyn BufMut) { + self.deref().encode(out); + } + + fn length(&self) -> usize { + self.deref().length() + } + } + + impl Decodable for Bytes48 { + fn decode(buf: &mut &[u8]) -> Result { + let bytes: [u8; BYTES_PER_COMMITMENT] = Decodable::decode(buf)?; + Ok(Bytes48::from(bytes)) + } + } + + /// Only [Encodable] is implemented for [KzgCommitment] because this is a validated type - it + /// should be decoded using [Decodable] into a [Bytes48] type, validated, _then_ converted + /// into a [KzgCommitment]. + impl Encodable for KzgCommitment { + fn encode(&self, out: &mut dyn BufMut) { + self.deref().encode(out); + } + + fn length(&self) -> usize { + self.deref().length() + } + } + + /// Only [Encodable] is implemented for [KzgProof] because this is a validated type - it should + /// be decoded using [Decodable] into a [Bytes48] type, validated, _then_ converted into a + /// [KzgProof]. 
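The KZG impls in this hunk all share one shape: the wrapper type derefs to its underlying bytes, so the RLP traits can be forwarded to the existing byte-slice implementations. A reduced sketch of that delegation (the `Encodable` trait here is a stand-in for `reth_rlp`'s, and the byte encoding is simplified):

```rust
use std::ops::Deref;

// Stand-in trait; the real `reth_rlp::Encodable` writes RLP headers too.
trait Encodable {
    fn encode(&self, out: &mut Vec<u8>);
}

impl Encodable for [u8] {
    fn encode(&self, out: &mut Vec<u8>) {
        out.extend_from_slice(self); // real RLP would prepend a length header
    }
}

// A fixed-size byte newtype, like `Bytes48` or `Blob` in the patch.
struct Bytes48([u8; 48]);

impl Deref for Bytes48 {
    type Target = [u8];
    fn deref(&self) -> &[u8] {
        &self.0
    }
}

impl Encodable for Bytes48 {
    fn encode(&self, out: &mut Vec<u8>) {
        // Forward through `Deref` to the existing slice implementation.
        self.deref().encode(out)
    }
}

fn main() {
    let mut out = Vec::new();
    Bytes48([0u8; 48]).encode(&mut out);
    assert_eq!(out.len(), 48);
}
```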
+ impl Encodable for KzgProof { + fn encode(&self, out: &mut dyn BufMut) { + self.deref().encode(out); + } + + fn length(&self) -> usize { + self.deref().length() + } + } +} + #[cfg(test)] mod tests { extern crate alloc; diff --git a/deny.toml b/deny.toml index c76bbe889cc0..3c59af057ea3 100644 --- a/deny.toml +++ b/deny.toml @@ -84,6 +84,13 @@ name = "rustls-webpki" expression = "LicenseRef-rustls-webpki" license-files = [{ path = "LICENSE", hash = 0x001c7e6c }] +[[licenses.clarify]] +name = "c-kzg" +expression = "Apache-2.0" +# The crate is in `bindings/rust` so we have to go up two directories for the +# license +license-files = [{ path = "../../LICENSE", hash = 0x13cec820 }] + # This section is considered when running `cargo deny check sources`. # More documentation about the 'sources' section can be found here: # https://embarkstudios.github.io/cargo-deny/checks/sources/cfg.html From c1dfa7beba808dd6edcca1f8395387722c2cd861 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 7 Aug 2023 21:10:12 +0200 Subject: [PATCH 364/722] feat(eip4844): include `TxEip4844` in the primitive `Transaction` enum (#4074) --- crates/consensus/common/src/validation.rs | 16 ++- crates/primitives/src/transaction/mod.rs | 111 +++++++++++++++++- crates/revm/revm-primitives/src/env.rs | 40 ++++++- .../rpc/rpc-types/src/eth/transaction/mod.rs | 10 ++ .../transaction-pool/src/test_utils/mock.rs | 108 +++++++++++++++-- crates/transaction-pool/src/traits.rs | 5 +- 6 files changed, 275 insertions(+), 15 deletions(-) diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index baf7d5edc104..ee055d1ffadd 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -7,7 +7,7 @@ use reth_primitives::{ eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, }, BlockNumber, ChainSpec, Hardfork, Header, InvalidTransactionError, SealedBlock, SealedHeader, - Transaction, TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxLegacy, + Transaction, TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxEip4844, TxLegacy, }; use reth_provider::{AccountReader, HeaderProvider, WithdrawalsProvider}; use std::collections::{hash_map::Entry, HashMap}; @@ -98,6 +98,20 @@ pub fn validate_transaction_regarding_header( return Err(InvalidTransactionError::TipAboveFeeCap.into()) } + Some(*chain_id) + } + Transaction::Eip4844(TxEip4844 { + chain_id, + max_fee_per_gas, + max_priority_fee_per_gas, + .. + }) => { + // EIP-1559: add more constraints to the tx validation + // https://github.com/ethereum/EIPs/pull/3594 + if max_priority_fee_per_gas > max_fee_per_gas { + return Err(InvalidTransactionError::TipAboveFeeCap.into()) + } + Some(*chain_id) } }; diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 4f6fdc0335e8..d3f03e082066 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -65,6 +65,18 @@ pub enum Transaction { /// transaction, incentivizing miners to include transactions with higher priority fees per /// gas. Eip1559(TxEip1559), + /// Shard Blob Transactions ([EIP-4844](https://eips.ethereum.org/EIPS/eip-4844)), type `0x3`. + /// + /// Shard Blob Transactions introduce a new transaction type called a blob-carrying transaction + /// to reduce gas costs. These transactions are similar to regular Ethereum transactions but + /// include additional data called a blob. 
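The validation hunk above extends the existing tip-versus-fee-cap constraint to the new transaction type; the check itself lives in `validate_transaction_regarding_header`. Stated on its own, the invariant is small enough to sketch directly (illustrative types):

```rust
#[derive(Debug, PartialEq)]
enum InvalidTransactionError {
    TipAboveFeeCap,
}

/// The constraint now enforced for both EIP-1559 and EIP-4844 transactions:
/// the priority fee ("tip") can never exceed the fee cap, since the tip is
/// paid out of whatever remains of the cap after the base fee is deducted.
fn check_fee_cap(
    max_fee_per_gas: u128,
    max_priority_fee_per_gas: u128,
) -> Result<(), InvalidTransactionError> {
    if max_priority_fee_per_gas > max_fee_per_gas {
        return Err(InvalidTransactionError::TipAboveFeeCap);
    }
    Ok(())
}

fn main() {
    assert_eq!(check_fee_cap(100, 101), Err(InvalidTransactionError::TipAboveFeeCap));
    assert!(check_fee_cap(100, 2).is_ok());
}
```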
+ /// + /// Blobs are larger (~125 kB) and cheaper than the current calldata, providing an immutable + /// and read-only memory for storing transaction data. + /// + /// EIP-4844, also known as proto-danksharding, implements the framework and logic of + /// danksharding, introducing new transaction formats and verification rules. + Eip4844(TxEip4844), } impl Transaction { @@ -116,6 +128,7 @@ impl Transaction { Transaction::Legacy(tx) => tx.nonce = nonce, Transaction::Eip2930(tx) => tx.nonce = nonce, Transaction::Eip1559(tx) => tx.nonce = nonce, + Transaction::Eip4844(tx) => tx.nonce = nonce, } } @@ -125,6 +138,7 @@ impl Transaction { Transaction::Legacy(tx) => tx.value = value, Transaction::Eip2930(tx) => tx.value = value, Transaction::Eip1559(tx) => tx.value = value, + Transaction::Eip4844(tx) => tx.value = value, } } @@ -134,6 +148,7 @@ impl Transaction { Transaction::Legacy(tx) => tx.input = input, Transaction::Eip2930(tx) => tx.input = input, Transaction::Eip1559(tx) => tx.input = input, + Transaction::Eip4844(tx) => tx.input = input, } } @@ -144,6 +159,7 @@ impl Transaction { Transaction::Legacy(tx) => tx.size(), Transaction::Eip2930(tx) => tx.size(), Transaction::Eip1559(tx) => tx.size(), + Transaction::Eip4844(tx) => tx.size(), } } } @@ -166,6 +182,10 @@ impl Compact for Transaction { tx.to_compact(buf); 2 } + Transaction::Eip4844(tx) => { + tx.to_compact(buf); + 3 + } } } @@ -183,6 +203,10 @@ impl Compact for Transaction { let (tx, buf) = TxEip1559::from_compact(buf, buf.len()); (Transaction::Eip1559(tx), buf) } + 3 => { + let (tx, buf) = TxEip4844::from_compact(buf, buf.len()); + (Transaction::Eip4844(tx), buf) + } _ => unreachable!("Junk data in database: unknown Transaction variant"), } } @@ -205,6 +229,7 @@ impl Transaction { Transaction::Legacy(TxLegacy { chain_id, .. }) => *chain_id, Transaction::Eip2930(TxEip2930 { chain_id, .. }) => Some(*chain_id), Transaction::Eip1559(TxEip1559 { chain_id, .. }) => Some(*chain_id), + Transaction::Eip4844(TxEip4844 { chain_id, .. }) => Some(*chain_id), } } @@ -214,6 +239,7 @@ impl Transaction { Transaction::Legacy(TxLegacy { chain_id: ref mut c, .. }) => *c = Some(chain_id), Transaction::Eip2930(TxEip2930 { chain_id: ref mut c, .. }) => *c = chain_id, Transaction::Eip1559(TxEip1559 { chain_id: ref mut c, .. }) => *c = chain_id, + Transaction::Eip4844(TxEip4844 { chain_id: ref mut c, .. }) => *c = chain_id, } } @@ -223,7 +249,8 @@ impl Transaction { match self { Transaction::Legacy(TxLegacy { to, .. }) | Transaction::Eip2930(TxEip2930 { to, .. }) | - Transaction::Eip1559(TxEip1559 { to, .. }) => to, + Transaction::Eip1559(TxEip1559 { to, .. }) | + Transaction::Eip4844(TxEip4844 { to, .. }) => to, } } @@ -238,6 +265,7 @@ impl Transaction { Transaction::Legacy { .. } => TxType::Legacy, Transaction::Eip2930 { .. } => TxType::EIP2930, Transaction::Eip1559 { .. } => TxType::EIP1559, + Transaction::Eip4844 { .. } => TxType::EIP4844, } } @@ -247,6 +275,7 @@ impl Transaction { Transaction::Legacy(TxLegacy { value, .. }) => value, Transaction::Eip2930(TxEip2930 { value, .. }) => value, Transaction::Eip1559(TxEip1559 { value, .. }) => value, + Transaction::Eip4844(TxEip4844 { value, .. }) => value, } } @@ -256,6 +285,7 @@ impl Transaction { Transaction::Legacy(TxLegacy { nonce, .. }) => *nonce, Transaction::Eip2930(TxEip2930 { nonce, .. }) => *nonce, Transaction::Eip1559(TxEip1559 { nonce, .. }) => *nonce, + Transaction::Eip4844(TxEip4844 { nonce, .. 
}) => *nonce, } } @@ -264,7 +294,8 @@ impl Transaction { match self { Transaction::Legacy(TxLegacy { gas_limit, .. }) | Transaction::Eip2930(TxEip2930 { gas_limit, .. }) | - Transaction::Eip1559(TxEip1559 { gas_limit, .. }) => *gas_limit, + Transaction::Eip1559(TxEip1559 { gas_limit, .. }) | + Transaction::Eip4844(TxEip4844 { gas_limit, .. }) => *gas_limit, } } @@ -275,7 +306,8 @@ impl Transaction { match self { Transaction::Legacy(TxLegacy { gas_price, .. }) | Transaction::Eip2930(TxEip2930 { gas_price, .. }) => *gas_price, - Transaction::Eip1559(TxEip1559 { max_fee_per_gas, .. }) => *max_fee_per_gas, + Transaction::Eip1559(TxEip1559 { max_fee_per_gas, .. }) | + Transaction::Eip4844(TxEip4844 { max_fee_per_gas, .. }) => *max_fee_per_gas, } } @@ -287,7 +319,8 @@ impl Transaction { match self { Transaction::Legacy(_) => None, Transaction::Eip2930(_) => None, - Transaction::Eip1559(TxEip1559 { max_priority_fee_per_gas, .. }) => { + Transaction::Eip1559(TxEip1559 { max_priority_fee_per_gas, .. }) | + Transaction::Eip4844(TxEip4844 { max_priority_fee_per_gas, .. }) => { Some(*max_priority_fee_per_gas) } } @@ -304,7 +337,8 @@ impl Transaction { match self { Transaction::Legacy(TxLegacy { gas_price, .. }) | Transaction::Eip2930(TxEip2930 { gas_price, .. }) => *gas_price, - Transaction::Eip1559(TxEip1559 { max_priority_fee_per_gas, .. }) => { + Transaction::Eip1559(TxEip1559 { max_priority_fee_per_gas, .. }) | + Transaction::Eip4844(TxEip4844 { max_priority_fee_per_gas, .. }) => { *max_priority_fee_per_gas } } @@ -318,6 +352,7 @@ impl Transaction { Transaction::Legacy(tx) => tx.gas_price, Transaction::Eip2930(tx) => tx.gas_price, Transaction::Eip1559(dynamic_tx) => dynamic_tx.effective_gas_price(base_fee), + Transaction::Eip4844(dynamic_tx) => dynamic_tx.effective_gas_price(base_fee), } } @@ -373,6 +408,7 @@ impl Transaction { Transaction::Legacy(TxLegacy { input, .. }) => input, Transaction::Eip2930(TxEip2930 { input, .. }) => input, Transaction::Eip1559(TxEip1559 { input, .. }) => input, + Transaction::Eip4844(TxEip4844 { input, .. 
}) => input, } } @@ -469,6 +505,33 @@ impl Transaction { len += access_list.length(); len } + Transaction::Eip4844(TxEip4844 { + chain_id, + nonce, + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + to, + value, + access_list, + blob_versioned_hashes, + max_fee_per_blob_gas, + input, + }) => { + let mut len = 0; + len += chain_id.length(); + len += nonce.length(); + len += gas_limit.length(); + len += max_fee_per_gas.length(); + len += max_priority_fee_per_gas.length(); + len += to.length(); + len += value.length(); + len += access_list.length(); + len += blob_versioned_hashes.length(); + len += max_fee_per_blob_gas.length(); + len += input.0.length(); + len + } } } @@ -531,6 +594,31 @@ impl Transaction { input.0.encode(out); access_list.encode(out); } + Transaction::Eip4844(TxEip4844 { + chain_id, + nonce, + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + to, + value, + access_list, + blob_versioned_hashes, + max_fee_per_blob_gas, + input, + }) => { + chain_id.encode(out); + nonce.encode(out); + max_priority_fee_per_gas.encode(out); + max_fee_per_gas.encode(out); + gas_limit.encode(out); + to.encode(out); + value.encode(out); + input.0.encode(out); + access_list.encode(out); + max_fee_per_blob_gas.encode(out); + blob_versioned_hashes.encode(out); + } } } } @@ -1012,6 +1100,19 @@ impl TransactionSigned { input: Bytes(Decodable::decode(data)?), access_list: Decodable::decode(data)?, }), + 3 => Transaction::Eip4844(TxEip4844 { + chain_id: Decodable::decode(data)?, + nonce: Decodable::decode(data)?, + max_priority_fee_per_gas: Decodable::decode(data)?, + max_fee_per_gas: Decodable::decode(data)?, + gas_limit: Decodable::decode(data)?, + to: Decodable::decode(data)?, + value: Decodable::decode(data)?, + input: Bytes(Decodable::decode(data)?), + access_list: Decodable::decode(data)?, + max_fee_per_blob_gas: Decodable::decode(data)?, + blob_versioned_hashes: Decodable::decode(data)?, + }), _ => return Err(DecodeError::Custom("unsupported typed transaction type")), }; diff --git a/crates/revm/revm-primitives/src/env.rs b/crates/revm/revm-primitives/src/env.rs index 4a3ee3239282..0d717a9e790e 100644 --- a/crates/revm/revm-primitives/src/env.rs +++ b/crates/revm/revm-primitives/src/env.rs @@ -1,7 +1,7 @@ use crate::config::revm_spec; use reth_primitives::{ recover_signer, Address, Bytes, Chain, ChainSpec, Head, Header, Transaction, TransactionKind, - TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxLegacy, U256, + TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxEip4844, TxLegacy, U256, }; use revm::primitives::{AnalysisKind, BlockEnv, CfgEnv, SpecId, TransactTo, TxEnv}; @@ -212,5 +212,43 @@ where }) .collect(); } + Transaction::Eip4844(TxEip4844 { + nonce, + chain_id, + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + to, + value, + access_list, + blob_versioned_hashes: _, + max_fee_per_blob_gas: _, + input, + }) => { + tx_env.gas_limit = *gas_limit; + tx_env.gas_price = U256::from(*max_fee_per_gas); + tx_env.gas_priority_fee = Some(U256::from(*max_priority_fee_per_gas)); + tx_env.transact_to = match to { + TransactionKind::Call(to) => TransactTo::Call(*to), + TransactionKind::Create => TransactTo::create(), + }; + tx_env.value = U256::from(*value); + tx_env.data = input.0.clone(); + tx_env.chain_id = Some(*chain_id); + tx_env.nonce = Some(*nonce); + tx_env.access_list = access_list + .0 + .iter() + .map(|l| { + ( + l.address, + l.storage_keys + .iter() + .map(|k| U256::from_be_bytes(k.to_fixed_bytes())) + .collect(), + ) + }) + .collect(); + } } } diff 
--git a/crates/rpc/rpc-types/src/eth/transaction/mod.rs b/crates/rpc/rpc-types/src/eth/transaction/mod.rs index cb920df7cf0a..734d33f1d22c 100644 --- a/crates/rpc/rpc-types/src/eth/transaction/mod.rs +++ b/crates/rpc/rpc-types/src/eth/transaction/mod.rs @@ -148,6 +148,16 @@ impl Transaction { }) .collect(), ), + PrimitiveTransaction::Eip4844(tx) => Some( + tx.access_list + .0 + .iter() + .map(|item| AccessListItem { + address: item.address.0.into(), + storage_keys: item.storage_keys.iter().map(|key| key.0.into()).collect(), + }) + .collect(), + ), }; let signature = Signature::from_primitive_signature( diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index ed38faf49d27..19d663593caf 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -14,7 +14,8 @@ use rand::{ use reth_primitives::{ constants::MIN_PROTOCOL_BASE_FEE, hex, Address, FromRecoveredTransaction, IntoRecoveredTransaction, Signature, Transaction, TransactionKind, TransactionSigned, - TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxHash, TxLegacy, TxType, H256, U128, U256, + TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxEip4844, TxHash, TxLegacy, TxType, H256, + U128, U256, }; use std::{ops::Range, sync::Arc, time::Instant}; @@ -38,6 +39,9 @@ macro_rules! set_value { MockTransaction::Eip1559 { ref mut $field, .. } => { *$field = new_value; } + MockTransaction::Eip4844 { ref mut $field, .. } => { + *$field = new_value; + } } }; } @@ -48,6 +52,7 @@ macro_rules! get_value { match $this { MockTransaction::Legacy { $field, .. } => $field, MockTransaction::Eip1559 { $field, .. } => $field, + MockTransaction::Eip4844 { $field, .. } => $field, } }; } @@ -99,6 +104,16 @@ pub enum MockTransaction { to: TransactionKind, value: U256, }, + Eip4844 { + hash: H256, + sender: Address, + nonce: u64, + max_fee_per_gas: u128, + max_priority_fee_per_gas: u128, + gas_limit: u64, + to: TransactionKind, + value: U256, + }, } // === impl MockTransaction === @@ -140,21 +155,27 @@ impl MockTransaction { } pub fn set_priority_fee(&mut self, val: u128) -> &mut Self { - if let MockTransaction::Eip1559 { max_priority_fee_per_gas, .. } = self { + if let (MockTransaction::Eip1559 { max_priority_fee_per_gas, .. } | + MockTransaction::Eip4844 { max_priority_fee_per_gas, .. }) = self + { *max_priority_fee_per_gas = val; } self } pub fn with_priority_fee(mut self, val: u128) -> Self { - if let MockTransaction::Eip1559 { ref mut max_priority_fee_per_gas, .. } = self { + if let (MockTransaction::Eip1559 { ref mut max_priority_fee_per_gas, .. } | + MockTransaction::Eip4844 { ref mut max_priority_fee_per_gas, .. }) = self + { *max_priority_fee_per_gas = val; } self } pub fn get_priority_fee(&self) -> Option { - if let MockTransaction::Eip1559 { max_priority_fee_per_gas, .. } = self { + if let (MockTransaction::Eip1559 { max_priority_fee_per_gas, .. } | + MockTransaction::Eip4844 { max_priority_fee_per_gas, .. }) = self + { Some(*max_priority_fee_per_gas) } else { None @@ -162,21 +183,27 @@ impl MockTransaction { } pub fn set_max_fee(&mut self, val: u128) -> &mut Self { - if let MockTransaction::Eip1559 { max_fee_per_gas, .. } = self { + if let (MockTransaction::Eip1559 { max_fee_per_gas, .. } | + MockTransaction::Eip4844 { max_fee_per_gas, .. }) = self + { *max_fee_per_gas = val; } self } pub fn with_max_fee(mut self, val: u128) -> Self { - if let MockTransaction::Eip1559 { ref mut max_fee_per_gas, .. 
} = self { + if let (MockTransaction::Eip1559 { ref mut max_fee_per_gas, .. } | + MockTransaction::Eip4844 { ref mut max_fee_per_gas, .. }) = self + { *max_fee_per_gas = val; } self } pub fn get_max_fee(&self) -> Option { - if let MockTransaction::Eip1559 { max_fee_per_gas, .. } = self { + if let (MockTransaction::Eip1559 { max_fee_per_gas, .. } | + MockTransaction::Eip4844 { max_fee_per_gas, .. }) = self + { Some(*max_fee_per_gas) } else { None @@ -192,6 +219,10 @@ impl MockTransaction { *max_fee_per_gas = val; *max_priority_fee_per_gas = val; } + MockTransaction::Eip4844 { max_fee_per_gas, max_priority_fee_per_gas, .. } => { + *max_fee_per_gas = val; + *max_priority_fee_per_gas = val; + } } self } @@ -209,6 +240,14 @@ impl MockTransaction { *max_fee_per_gas = val; *max_priority_fee_per_gas = val; } + MockTransaction::Eip4844 { + ref mut max_fee_per_gas, + ref mut max_priority_fee_per_gas, + .. + } => { + *max_fee_per_gas = val; + *max_priority_fee_per_gas = val; + } } self } @@ -217,6 +256,7 @@ impl MockTransaction { match self { MockTransaction::Legacy { gas_price, .. } => *gas_price, MockTransaction::Eip1559 { max_fee_per_gas, .. } => *max_fee_per_gas, + MockTransaction::Eip4844 { max_fee_per_gas, .. } => *max_fee_per_gas, } } @@ -301,6 +341,7 @@ impl PoolTransaction for MockTransaction { match self { MockTransaction::Legacy { hash, .. } => hash, MockTransaction::Eip1559 { hash, .. } => hash, + MockTransaction::Eip4844 { hash, .. } => hash, } } @@ -308,6 +349,7 @@ impl PoolTransaction for MockTransaction { match self { MockTransaction::Legacy { sender, .. } => *sender, MockTransaction::Eip1559 { sender, .. } => *sender, + MockTransaction::Eip4844 { sender, .. } => *sender, } } @@ -315,6 +357,7 @@ impl PoolTransaction for MockTransaction { match self { MockTransaction::Legacy { nonce, .. } => *nonce, MockTransaction::Eip1559 { nonce, .. } => *nonce, + MockTransaction::Eip4844 { nonce, .. } => *nonce, } } @@ -326,6 +369,9 @@ impl PoolTransaction for MockTransaction { MockTransaction::Eip1559 { max_fee_per_gas, value, gas_limit, .. } => { U256::from(*gas_limit) * U256::from(*max_fee_per_gas) + *value } + MockTransaction::Eip4844 { max_fee_per_gas, value, gas_limit, .. } => { + U256::from(*gas_limit) * U256::from(*max_fee_per_gas) + *value + } } } @@ -337,6 +383,7 @@ impl PoolTransaction for MockTransaction { match self { MockTransaction::Legacy { gas_price, .. } => *gas_price, MockTransaction::Eip1559 { max_fee_per_gas, .. } => *max_fee_per_gas, + MockTransaction::Eip4844 { max_fee_per_gas, .. } => *max_fee_per_gas, } } @@ -346,6 +393,9 @@ impl PoolTransaction for MockTransaction { MockTransaction::Eip1559 { max_priority_fee_per_gas, .. } => { Some(*max_priority_fee_per_gas) } + MockTransaction::Eip4844 { max_priority_fee_per_gas, .. } => { + Some(*max_priority_fee_per_gas) + } } } @@ -368,6 +418,7 @@ impl PoolTransaction for MockTransaction { match self { MockTransaction::Legacy { gas_price, .. } => *gas_price, MockTransaction::Eip1559 { max_priority_fee_per_gas, .. } => *max_priority_fee_per_gas, + MockTransaction::Eip4844 { max_priority_fee_per_gas, .. } => *max_priority_fee_per_gas, } } @@ -375,6 +426,7 @@ impl PoolTransaction for MockTransaction { match self { MockTransaction::Legacy { to, .. } => to, MockTransaction::Eip1559 { to, .. } => to, + MockTransaction::Eip4844 { to, .. } => to, } } @@ -386,6 +438,7 @@ impl PoolTransaction for MockTransaction { match self { MockTransaction::Legacy { .. } => TxType::Legacy.into(), MockTransaction::Eip1559 { .. 
} => TxType::EIP1559.into(), + MockTransaction::Eip4844 { .. } => TxType::EIP4844.into(), } } @@ -441,6 +494,28 @@ impl FromRecoveredTransaction for MockTransaction { to, value: U256::from(value), }, + Transaction::Eip4844(TxEip4844 { + chain_id, + nonce, + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + to, + value, + input, + access_list, + blob_versioned_hashes: _, + max_fee_per_blob_gas: _, + }) => MockTransaction::Eip4844 { + hash, + sender, + nonce, + max_fee_per_gas, + max_priority_fee_per_gas, + gas_limit, + to, + value: U256::from(value), + }, Transaction::Eip2930 { .. } => { unimplemented!() } @@ -527,6 +602,25 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { to: *to, value: U256::from(*value), }, + Transaction::Eip4844(TxEip4844 { + nonce, + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + to, + value, + input, + .. + }) => MockTransaction::Eip4844 { + sender, + hash: tx_hash, + nonce: *nonce, + max_fee_per_gas: *max_fee_per_gas, + max_priority_fee_per_gas: *max_priority_fee_per_gas, + gas_limit: *gas_limit, + to: *to, + value: U256::from(*value), + }, }) .boxed() } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index c002d0a79832..7d07a72a8759 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -407,7 +407,7 @@ pub enum TransactionOrigin { /// Transaction is originated locally and is intended to remain private. /// /// This type of transaction should not be propagated to the network. It's meant for - /// private usage within the local node only. + /// private usage within the local node only. Private, } @@ -582,6 +582,7 @@ impl PooledTransaction { Transaction::Legacy(t) => U256::from(t.gas_price) * U256::from(t.gas_limit), Transaction::Eip2930(t) => U256::from(t.gas_price) * U256::from(t.gas_limit), Transaction::Eip1559(t) => U256::from(t.max_fee_per_gas) * U256::from(t.gas_limit), + Transaction::Eip4844(t) => U256::from(t.max_fee_per_gas) * U256::from(t.gas_limit), }; let cost = gas_cost + U256::from(transaction.value()); @@ -633,6 +634,7 @@ impl PoolTransaction for PooledTransaction { Transaction::Legacy(tx) => tx.gas_price, Transaction::Eip2930(tx) => tx.gas_price, Transaction::Eip1559(tx) => tx.max_fee_per_gas, + Transaction::Eip4844(tx) => tx.max_fee_per_gas, } } @@ -644,6 +646,7 @@ impl PoolTransaction for PooledTransaction { Transaction::Legacy(_) => None, Transaction::Eip2930(_) => None, Transaction::Eip1559(tx) => Some(tx.max_priority_fee_per_gas), + Transaction::Eip4844(tx) => Some(tx.max_priority_fee_per_gas), } } From 5cc3db9932d468856271c1c038be7cf30d56ad34 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Mon, 7 Aug 2023 22:36:30 +0300 Subject: [PATCH 365/722] fix(txpool): pending worst transaction (#4100) --- crates/transaction-pool/src/pool/pending.rs | 22 +++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index 3455332090d7..eaff315459be 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -274,7 +274,7 @@ impl PendingPool { /// Removes the worst transaction from this pool. 
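The `pop_worst` fix just below is all about iteration order: the pending set is kept sorted ascending by priority, so its first element is the worst transaction and its last is the best. A minimal illustration with a plain `BTreeSet` standing in for the pool's ordered set:

```rust
use std::collections::BTreeSet;

fn main() {
    // Sorted ascending by priority, like the pending pool's internal set.
    let pool: BTreeSet<u32> = [30, 10, 20].into_iter().collect();

    // `next_back()` returns the best (highest-priority) entry, which is what
    // the old code evicted by mistake; `next()` returns the worst entry,
    // the correct eviction target for `pop_worst`.
    assert_eq!(pool.iter().next_back(), Some(&30)); // best
    assert_eq!(pool.iter().next(), Some(&10)); // worst
}
```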
pub(crate) fn pop_worst(&mut self) -> Option>> { - let worst = self.all.iter().next_back().map(|tx| *tx.transaction.id())?; + let worst = self.all.iter().next().map(|tx| *tx.transaction.id())?; self.remove_transaction(&worst) } @@ -350,7 +350,10 @@ impl Ord for PendingTransaction { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{MockOrdering, MockTransaction, MockTransactionFactory}; + use crate::{ + test_utils::{MockOrdering, MockTransaction, MockTransactionFactory}, + PoolTransaction, + }; #[test] fn test_enforce_basefee() { @@ -407,4 +410,19 @@ mod tests { assert_eq!(removed.len(), 2); assert!(pool.is_empty()); } + + #[test] + fn evict_worst() { + let mut f = MockTransactionFactory::default(); + let mut pool = PendingPool::new(MockOrdering::default()); + + let t = MockTransaction::eip1559(); + pool.add_transaction(f.validated_arc(t.clone()), 0); + + let t2 = MockTransaction::eip1559().inc_price_by(10); + pool.add_transaction(f.validated_arc(t2), 0); + + // First transaction should be evicted. + assert_eq!(pool.pop_worst().map(|tx| *tx.hash()), Some(*t.hash())); + } } From 40230e74f41685340b144ec2fcf2dd0bf3da0e14 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Mon, 7 Aug 2023 22:38:25 +0300 Subject: [PATCH 366/722] fix(txpool): emit events on discarding worst txs (#4101) --- crates/transaction-pool/src/pool/mod.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index a07565b7cae4..93003431c5e6 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -377,6 +377,9 @@ where return added } + let mut listener = self.event_listener.write(); + discarded.iter().for_each(|tx| listener.discarded(tx)); + // It may happen that a newly added transaction is immediately discarded, so we need to // adjust the result here added From 91751290c70e0f1b2e9e502af06e219398d2754d Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 8 Aug 2023 11:55:44 +0300 Subject: [PATCH 367/722] fix(trie): dangling storage hashes (#4108) --- .../src/providers/database/provider.rs | 49 ++++++++++++------- crates/storage/provider/src/traits/hashing.rs | 9 ++-- crates/trie/src/prefix_set/loader.rs | 40 +++++++++++---- crates/trie/src/prefix_set/mod.rs | 2 +- crates/trie/src/trie.rs | 30 ++++++++++-- crates/trie/src/updates.rs | 5 ++ 6 files changed, 97 insertions(+), 38 deletions(-) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 0acc9d6872d5..56d4c0f39233 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -40,7 +40,7 @@ use reth_revm_primitives::{ }; use reth_trie::{prefix_set::PrefixSetMut, StateRoot}; use std::{ - collections::{btree_map::Entry, BTreeMap, BTreeSet, HashMap}, + collections::{btree_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet}, fmt::Debug, ops::{Deref, DerefMut, Range, RangeBounds, RangeInclusive}, sync::Arc, @@ -1411,6 +1411,7 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> HashingWriter for DatabaseProvider // Initialize prefix sets. 
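The event fix in patch 366 above closes a gap where transactions evicted to make room were dropped silently: a subscriber waiting on one of those hashes would never learn it left the pool. The notification itself is a short loop over the discarded set, roughly as below (stand-in types, with `u64` as the transaction identifier):

```rust
// A subscriber registry that must hear about every transaction leaving
// the pool, including ones evicted as a side effect of an insertion.
struct EventListener {
    discarded: Vec<u64>,
}

impl EventListener {
    fn discarded(&mut self, tx: &u64) {
        self.discarded.push(*tx);
    }
}

fn notify_discarded(listener: &mut EventListener, discarded: &[u64]) {
    discarded.iter().for_each(|tx| listener.discarded(tx));
}

fn main() {
    let mut listener = EventListener { discarded: Vec::new() };
    notify_discarded(&mut listener, &[0xaa, 0xbb]);
    assert_eq!(listener.discarded, vec![0xaa, 0xbb]);
}
```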
let mut account_prefix_set = PrefixSetMut::default(); let mut storage_prefix_set: HashMap = HashMap::default(); + let mut destroyed_accounts = HashSet::default(); // storage hashing stage { @@ -1433,8 +1434,12 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> HashingWriter for DatabaseProvider let lists = self.changed_accounts_with_range(range.clone())?; let accounts = self.basic_accounts(lists)?; let hashed_addresses = self.insert_account_for_hashing(accounts)?; - for hashed_address in hashed_addresses { - account_prefix_set.insert(Nibbles::unpack(hashed_address)); + for (hashed_address, account) in hashed_addresses { + if account.is_some() { + account_prefix_set.insert(Nibbles::unpack(hashed_address)); + } else { + destroyed_accounts.insert(hashed_address); + } } } @@ -1447,6 +1452,7 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> HashingWriter for DatabaseProvider .with_changed_storage_prefixes( storage_prefix_set.into_iter().map(|(k, v)| (k, v.freeze())).collect(), ) + .with_destroyed_accounts(destroyed_accounts) .root_with_updates() .map_err(Into::::into)?; if state_root != expected_state_root { @@ -1561,7 +1567,10 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> HashingWriter for DatabaseProvider Ok(hashed_storage_keys) } - fn unwind_account_hashing(&self, range: RangeInclusive) -> Result> { + fn unwind_account_hashing( + &self, + range: RangeInclusive, + ) -> Result>> { let mut hashed_accounts_cursor = self.tx.cursor_write::()?; // Aggregate all block changesets and make a list of accounts that have been changed. @@ -1586,27 +1595,25 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> HashingWriter for DatabaseProvider .map(|(address, account)| (keccak256(address), account)) .collect::>(); - let hashed_account_keys = BTreeSet::from_iter(hashed_accounts.keys().copied()); - hashed_accounts - .into_iter() + .iter() // Apply values to HashedState (if Account is None remove it); .try_for_each(|(hashed_address, account)| -> Result<()> { if let Some(account) = account { - hashed_accounts_cursor.upsert(hashed_address, account)?; - } else if hashed_accounts_cursor.seek_exact(hashed_address)?.is_some() { + hashed_accounts_cursor.upsert(*hashed_address, *account)?; + } else if hashed_accounts_cursor.seek_exact(*hashed_address)?.is_some() { hashed_accounts_cursor.delete_current()?; } Ok(()) })?; - Ok(hashed_account_keys) + Ok(hashed_accounts) } fn insert_account_for_hashing( &self, accounts: impl IntoIterator)>, - ) -> Result> { + ) -> Result>> { let mut hashed_accounts_cursor = self.tx.cursor_write::()?; let hashed_accounts = accounts.into_iter().fold( @@ -1617,18 +1624,16 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> HashingWriter for DatabaseProvider }, ); - let hashed_addresses = BTreeSet::from_iter(hashed_accounts.keys().copied()); - - hashed_accounts.into_iter().try_for_each(|(hashed_address, account)| -> Result<()> { + hashed_accounts.iter().try_for_each(|(hashed_address, account)| -> Result<()> { if let Some(account) = account { - hashed_accounts_cursor.upsert(hashed_address, account)? - } else if hashed_accounts_cursor.seek_exact(hashed_address)?.is_some() { + hashed_accounts_cursor.upsert(*hashed_address, *account)? + } else if hashed_accounts_cursor.seek_exact(*hashed_address)?.is_some() { hashed_accounts_cursor.delete_current()?; } Ok(()) })?; - Ok(hashed_addresses) + Ok(hashed_accounts) } } @@ -1770,11 +1775,16 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> BlockExecutionWriter for DatabaseP // Initialize prefix sets. 
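The provider changes above all feed one split: hashed accounts that still exist go into the account prefix set so their trie paths are revisited, while `None` entries mark destroyed accounts whose storage tries must be deleted outright. A condensed sketch of that partitioning (with `u64` standing in for the `Account` type):

```rust
use std::collections::{BTreeMap, HashSet};

type HashedAddress = [u8; 32];

/// Accounts that survive feed the prefix set; `None` marks a destroyed
/// account whose dangling storage trie has to be wiped.
fn partition_accounts(
    hashed: BTreeMap<HashedAddress, Option<u64>>, // None = account destroyed
) -> (Vec<HashedAddress>, HashSet<HashedAddress>) {
    let mut prefix_set_targets = Vec::new();
    let mut destroyed = HashSet::new();
    for (address, account) in hashed {
        if account.is_some() {
            prefix_set_targets.push(address);
        } else {
            destroyed.insert(address);
        }
    }
    (prefix_set_targets, destroyed)
}

fn main() {
    let mut input = BTreeMap::new();
    input.insert([1u8; 32], Some(1)); // updated account
    input.insert([2u8; 32], None); // destroyed account
    let (targets, destroyed) = partition_accounts(input);
    assert_eq!(targets.len(), 1);
    assert!(destroyed.contains(&[2u8; 32]));
}
```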
let mut account_prefix_set = PrefixSetMut::default(); let mut storage_prefix_set: HashMap = HashMap::default(); + let mut destroyed_accounts = HashSet::default(); // Unwind account hashes. Add changed accounts to account prefix set. let hashed_addresses = self.unwind_account_hashing(range.clone())?; - for hashed_address in hashed_addresses { - account_prefix_set.insert(Nibbles::unpack(hashed_address)); + for (hashed_address, account) in hashed_addresses { + if account.is_some() { + account_prefix_set.insert(Nibbles::unpack(hashed_address)); + } else { + destroyed_accounts.insert(hashed_address); + } } // Unwind account history indices. @@ -1804,6 +1814,7 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> BlockExecutionWriter for DatabaseP .with_changed_storage_prefixes( storage_prefix_set.into_iter().map(|(k, v)| (k, v.freeze())).collect(), ) + .with_destroyed_accounts(destroyed_accounts) .root_with_updates() .map_err(Into::::into)?; diff --git a/crates/storage/provider/src/traits/hashing.rs b/crates/storage/provider/src/traits/hashing.rs index 5d882553b310..ffeb43926bc1 100644 --- a/crates/storage/provider/src/traits/hashing.rs +++ b/crates/storage/provider/src/traits/hashing.rs @@ -3,7 +3,7 @@ use reth_db::models::BlockNumberAddress; use reth_interfaces::Result; use reth_primitives::{Account, Address, BlockNumber, StorageEntry, H256}; use std::{ - collections::{BTreeSet, HashMap}, + collections::{BTreeMap, BTreeSet, HashMap}, ops::{Range, RangeInclusive}, }; @@ -15,7 +15,10 @@ pub trait HashingWriter: Send + Sync { /// # Returns /// /// Set of hashed keys of updated accounts. - fn unwind_account_hashing(&self, range: RangeInclusive) -> Result>; + fn unwind_account_hashing( + &self, + range: RangeInclusive, + ) -> Result>>; /// Inserts all accounts into [reth_db::tables::AccountHistory] table. /// @@ -25,7 +28,7 @@ pub trait HashingWriter: Send + Sync { fn insert_account_for_hashing( &self, accounts: impl IntoIterator)>, - ) -> Result>; + ) -> Result>>; /// Unwind and clear storage hashing /// diff --git a/crates/trie/src/prefix_set/loader.rs b/crates/trie/src/prefix_set/loader.rs index df6393841c6b..0755e32c7b7e 100644 --- a/crates/trie/src/prefix_set/loader.rs +++ b/crates/trie/src/prefix_set/loader.rs @@ -8,7 +8,21 @@ use reth_db::{ DatabaseError, }; use reth_primitives::{keccak256, trie::Nibbles, BlockNumber, StorageEntry, H256}; -use std::{collections::HashMap, ops::RangeInclusive}; +use std::{ + collections::{HashMap, HashSet}, + ops::RangeInclusive, +}; + +/// Loaded prefix sets. +#[derive(Debug, Default)] +pub struct LoadedPrefixSets { + /// The account prefix set + pub account_prefix_set: PrefixSetMut, + /// The mapping of hashed account key to the corresponding storage prefix set + pub storage_prefix_sets: HashMap, + /// The account keys of destroyed accounts + pub destroyed_accounts: HashSet, +} /// A wrapper around a database transaction that loads prefix sets within a given block range. #[derive(Deref)] @@ -29,16 +43,21 @@ where pub fn load( self, range: RangeInclusive, - ) -> Result<(PrefixSetMut, HashMap), DatabaseError> { + ) -> Result { // Initialize prefix sets. - let mut account_prefix_set = PrefixSetMut::default(); - let mut storage_prefix_set: HashMap = HashMap::default(); + let mut loaded_prefix_sets = LoadedPrefixSets::default(); // Walk account changeset and insert account prefixes. - let mut account_cursor = self.cursor_read::()?; - for account_entry in account_cursor.walk_range(range.clone())? 
{ + let mut account_changeset_cursor = self.cursor_read::()?; + let mut account_plain_state_cursor = self.cursor_read::()?; + for account_entry in account_changeset_cursor.walk_range(range.clone())? { let (_, AccountBeforeTx { address, .. }) = account_entry?; - account_prefix_set.insert(Nibbles::unpack(keccak256(address))); + let hashed_address = keccak256(address); + loaded_prefix_sets.account_prefix_set.insert(Nibbles::unpack(hashed_address)); + + if account_plain_state_cursor.seek_exact(address)?.is_none() { + loaded_prefix_sets.destroyed_accounts.insert(hashed_address); + } } // Walk storage changeset and insert storage prefixes as well as account prefixes if missing @@ -48,13 +67,14 @@ where for storage_entry in storage_cursor.walk_range(storage_range)? { let (BlockNumberAddress((_, address)), StorageEntry { key, .. }) = storage_entry?; let hashed_address = keccak256(address); - account_prefix_set.insert(Nibbles::unpack(hashed_address)); - storage_prefix_set + loaded_prefix_sets.account_prefix_set.insert(Nibbles::unpack(hashed_address)); + loaded_prefix_sets + .storage_prefix_sets .entry(hashed_address) .or_default() .insert(Nibbles::unpack(keccak256(key))); } - Ok((account_prefix_set, storage_prefix_set)) + Ok(loaded_prefix_sets) } } diff --git a/crates/trie/src/prefix_set/mod.rs b/crates/trie/src/prefix_set/mod.rs index ac25ab297c05..ce2823de9906 100644 --- a/crates/trie/src/prefix_set/mod.rs +++ b/crates/trie/src/prefix_set/mod.rs @@ -2,7 +2,7 @@ use reth_primitives::trie::Nibbles; use std::rc::Rc; mod loader; -pub use loader::PrefixSetLoader; +pub use loader::{LoadedPrefixSets, PrefixSetLoader}; /// A container for efficiently storing and checking for the presence of key prefixes. /// diff --git a/crates/trie/src/trie.rs b/crates/trie/src/trie.rs index c9df6c9f850b..cc443d668009 100644 --- a/crates/trie/src/trie.rs +++ b/crates/trie/src/trie.rs @@ -16,7 +16,10 @@ use reth_primitives::{ Address, BlockNumber, StorageEntry, H256, }; use reth_rlp::Encodable; -use std::{collections::HashMap, ops::RangeInclusive}; +use std::{ + collections::{HashMap, HashSet}, + ops::RangeInclusive, +}; /// StateRoot is used to compute the root node of a state trie. pub struct StateRoot<'a, 'b, TX, H> { @@ -29,6 +32,8 @@ pub struct StateRoot<'a, 'b, TX, H> { /// A map containing storage changes with the hashed address as key and a set of storage key /// prefixes as the value. pub changed_storage_prefixes: HashMap, + /// A map containing keys of accounts that were destroyed. + pub destroyed_accounts: HashSet, /// Previous intermediate state. previous_state: Option, /// The number of updates after which the intermediate progress should be returned. @@ -48,6 +53,12 @@ impl<'a, 'b, TX, H> StateRoot<'a, 'b, TX, H> { self } + /// Set the destroyed accounts. + pub fn with_destroyed_accounts(mut self, accounts: HashSet) -> Self { + self.destroyed_accounts = accounts; + self + } + /// Set the threshold. 
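The loader above classifies an account as destroyed when it appears in the range's changesets but no longer has an entry in the plain account state. Sketching that check with a `HashMap` in place of the `PlainAccountState` table (and skipping the keccak hashing of addresses that the real code performs):

```rust
use std::collections::{HashMap, HashSet};

type Address = [u8; 20];

/// An address that changed within the range but is absent from plain state
/// was destroyed, so its storage trie must be deleted rather than re-walked.
fn destroyed_accounts(
    changed: &[Address],
    plain_state: &HashMap<Address, u64>, // stand-in for the account table
) -> HashSet<Address> {
    changed
        .iter()
        .filter(|address| !plain_state.contains_key(*address))
        .copied()
        .collect()
}

fn main() {
    let mut plain_state = HashMap::new();
    plain_state.insert([1u8; 20], 7);
    let destroyed = destroyed_accounts(&[[1u8; 20], [2u8; 20]], &plain_state);
    assert_eq!(destroyed, HashSet::from([[2u8; 20]]));
}
```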
pub fn with_threshold(mut self, threshold: u64) -> Self { self.threshold = threshold; @@ -75,6 +86,7 @@ impl<'a, 'b, TX, H> StateRoot<'a, 'b, TX, H> { tx: self.tx, changed_account_prefixes: self.changed_account_prefixes, changed_storage_prefixes: self.changed_storage_prefixes, + destroyed_accounts: self.destroyed_accounts, threshold: self.threshold, previous_state: self.previous_state, hashed_cursor_factory, @@ -92,6 +104,7 @@ where tx, changed_account_prefixes: PrefixSetMut::default().freeze(), changed_storage_prefixes: HashMap::default(), + destroyed_accounts: HashSet::default(), previous_state: None, threshold: 100_000, hashed_cursor_factory: tx, @@ -108,12 +121,17 @@ where tx: &'a TX, range: RangeInclusive, ) -> Result { - let (account_prefixes, storage_prefixes) = PrefixSetLoader::new(tx).load(range)?; + let loaded_prefix_sets = PrefixSetLoader::new(tx).load(range)?; Ok(Self::new(tx) - .with_changed_account_prefixes(account_prefixes.freeze()) + .with_changed_account_prefixes(loaded_prefix_sets.account_prefix_set.freeze()) .with_changed_storage_prefixes( - storage_prefixes.into_iter().map(|(k, v)| (k, v.freeze())).collect(), - )) + loaded_prefix_sets + .storage_prefix_sets + .into_iter() + .map(|(k, v)| (k, v.freeze())) + .collect(), + ) + .with_destroyed_accounts(loaded_prefix_sets.destroyed_accounts)) } /// Computes the state root of the trie with the changed account and storage prefixes and @@ -342,6 +360,8 @@ where trie_updates.extend(walker_updates.into_iter()); trie_updates.extend_with_account_updates(hash_builder_updates); + trie_updates + .extend_with_deletes(self.destroyed_accounts.into_iter().map(TrieKey::StorageTrie)); Ok(StateRootProgress::Complete(root, hashed_entries_walked, trie_updates)) } diff --git a/crates/trie/src/updates.rs b/crates/trie/src/updates.rs index dc5c086e0d7a..f270f1854308 100644 --- a/crates/trie/src/updates.rs +++ b/crates/trie/src/updates.rs @@ -99,6 +99,11 @@ impl TrieUpdates { })); } + /// Extend the updates with deletes. + pub fn extend_with_deletes(&mut self, keys: impl Iterator) { + self.extend(keys.map(|key| (key, TrieOp::Delete))); + } + /// Flush updates all aggregated updates to the database. 
pub fn flush<'a, 'tx, TX>(self, tx: &'a TX) -> Result<(), reth_db::DatabaseError> where From 32dd9af5317d0a3a213dc47710ab9f4588af2dd5 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 8 Aug 2023 11:59:43 +0300 Subject: [PATCH 368/722] feat(cli): storage tries recovery (#4109) --- bin/reth/src/cli/mod.rs | 6 +- bin/reth/src/lib.rs | 1 + bin/reth/src/recover/mod.rs | 29 ++++++++ bin/reth/src/recover/storage_tries.rs | 96 +++++++++++++++++++++++++++ 4 files changed, 131 insertions(+), 1 deletion(-) create mode 100644 bin/reth/src/recover/mod.rs create mode 100644 bin/reth/src/recover/storage_tries.rs diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index 50acdf8c0540..979ffa5e601e 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -5,7 +5,7 @@ use crate::{ cli::ext::RethCliExt, db, debug_cmd, dirs::{LogsDir, PlatformPath}, - node, p2p, + node, p2p, recover, runner::CliRunner, stage, test_vectors, version::{LONG_VERSION, SHORT_VERSION}, @@ -77,6 +77,7 @@ impl Cli { Commands::TestVectors(command) => runner.run_until_ctrl_c(command.execute()), Commands::Config(command) => runner.run_until_ctrl_c(command.execute()), Commands::Debug(command) => runner.run_command_until_exit(|ctx| command.execute(ctx)), + Commands::Recover(command) => runner.run_command_until_exit(|ctx| command.execute(ctx)), } } @@ -132,6 +133,9 @@ pub enum Commands { /// Various debug routines #[command(name = "debug")] Debug(debug_cmd::Command), + /// Scripts for node recovery + #[command(name = "recover")] + Recover(recover::Command), } /// The log configuration. diff --git a/bin/reth/src/lib.rs b/bin/reth/src/lib.rs index 8320b6808321..dbaa2ce58a60 100644 --- a/bin/reth/src/lib.rs +++ b/bin/reth/src/lib.rs @@ -34,6 +34,7 @@ pub mod init; pub mod node; pub mod p2p; pub mod prometheus_exporter; +pub mod recover; pub mod runner; pub mod stage; pub mod test_vectors; diff --git a/bin/reth/src/recover/mod.rs b/bin/reth/src/recover/mod.rs new file mode 100644 index 000000000000..dd649abacc46 --- /dev/null +++ b/bin/reth/src/recover/mod.rs @@ -0,0 +1,29 @@ +//! `reth recover` command. +use clap::{Parser, Subcommand}; + +use crate::runner::CliContext; + +mod storage_tries; + +/// `reth recover` command +#[derive(Debug, Parser)] +pub struct Command { + #[clap(subcommand)] + command: Subcommands, +} + +/// `reth recover` subcommands +#[derive(Subcommand, Debug)] +pub enum Subcommands { + /// Recover the node by deleting dangling storage tries. + StorageTries(storage_tries::Command), +} + +impl Command { + /// Execute `recover` command + pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> { + match self.command { + Subcommands::StorageTries(command) => command.execute(ctx).await, + } + } +} diff --git a/bin/reth/src/recover/storage_tries.rs b/bin/reth/src/recover/storage_tries.rs new file mode 100644 index 000000000000..49e39011deba --- /dev/null +++ b/bin/reth/src/recover/storage_tries.rs @@ -0,0 +1,96 @@ +use crate::{ + args::utils::genesis_value_parser, + dirs::{DataDirPath, MaybePlatformPath}, + init::init_genesis, + runner::CliContext, +}; +use clap::Parser; +use reth_db::{ + cursor::{DbCursorRO, DbDupCursorRW}, + init_db, tables, + transaction::DbTx, +}; +use reth_primitives::{keccak256, ChainSpec}; +use reth_provider::{AccountExtReader, BlockNumReader, ProviderFactory}; +use std::{fs, sync::Arc}; +use tracing::*; + +/// `reth recover storage-tries` command +#[derive(Debug, Parser)] +pub struct Command { + /// The path to the data dir for all reth files and subdirectories. 
+ /// + /// Defaults to the OS-specific data directory: + /// + /// - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + /// - Windows: `{FOLDERID_RoamingAppData}/reth/` + /// - macOS: `$HOME/Library/Application Support/reth/` + #[arg(long, value_name = "DATA_DIR", verbatim_doc_comment, default_value_t)] + datadir: MaybePlatformPath, + + /// The chain this node is running. + /// + /// Possible values are either a built-in chain or the path to a chain specification file. + /// + /// Built-in chains: + /// - mainnet + /// - goerli + /// - sepolia + #[arg( + long, + value_name = "CHAIN_OR_PATH", + verbatim_doc_comment, + default_value = "mainnet", + value_parser = genesis_value_parser + )] + chain: Arc, + + /// The number of blocks in the past to look through. + #[arg(long, default_value_t = 100)] + lookback: u64, +} + +impl Command { + /// Execute `storage-tries` recovery command + pub async fn execute(self, _ctx: CliContext) -> eyre::Result<()> { + let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); + let db_path = data_dir.db_path(); + fs::create_dir_all(&db_path)?; + let db = Arc::new(init_db(db_path, None)?); + + debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis"); + init_genesis(db.clone(), self.chain.clone())?; + + let factory = ProviderFactory::new(&db, self.chain.clone()); + let mut provider = factory.provider_rw()?; + + let best_block = provider.best_block_number()?; + + let block_range = best_block.saturating_sub(self.lookback)..=best_block; + let changed_accounts = provider.changed_accounts_with_range(block_range)?; + let destroyed_accounts = provider + .basic_accounts(changed_accounts)? + .into_iter() + .filter_map(|(address, acc)| acc.is_none().then_some(address)) + .collect::>(); + + info!(target: "reth::cli", destroyed = destroyed_accounts.len(), "Starting recovery of storage tries"); + + let mut deleted_tries = 0; + let tx_mut = provider.tx_mut(); + let mut storage_trie_cursor = tx_mut.cursor_dup_read::()?; + for address in destroyed_accounts { + let hashed_address = keccak256(address); + if storage_trie_cursor.seek_exact(hashed_address)?.is_some() { + deleted_tries += 1; + trace!(target: "reth::cli", ?address, ?hashed_address, "Deleting storage trie"); + storage_trie_cursor.delete_current_duplicates()?; + } + } + + provider.commit()?; + info!(target: "reth::cli", deleted = deleted_tries, "Finished recovery"); + + Ok(()) + } +} From 759eaf59660fc4313c12244db7fe283ea8c3e8fc Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 8 Aug 2023 05:01:18 -0400 Subject: [PATCH 369/722] chore: unused deps sanity check (#4106) --- Cargo.lock | 2 +- crates/rpc/rpc/src/eth/api/server.rs | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5fbbc8bb504a..984cb8c1a0e1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -618,7 +618,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.27", + "syn 2.0.28", "which", ] diff --git a/crates/rpc/rpc/src/eth/api/server.rs b/crates/rpc/rpc/src/eth/api/server.rs index 1df566086d71..0ec7535a29a1 100644 --- a/crates/rpc/rpc/src/eth/api/server.rs +++ b/crates/rpc/rpc/src/eth/api/server.rs @@ -411,9 +411,8 @@ mod tests { use reth_interfaces::test_utils::{generators, generators::Rng}; use reth_network_api::noop::NoopNetwork; use reth_primitives::{ - basefee::calculate_next_block_base_fee, - constants::{self, ETHEREUM_BLOCK_GAS_LIMIT}, - BaseFeeParams, Block, BlockNumberOrTag, 
Header, TransactionSigned, H256, U256,
+ basefee::calculate_next_block_base_fee, constants::ETHEREUM_BLOCK_GAS_LIMIT, BaseFeeParams,
+ Block, BlockNumberOrTag, Header, TransactionSigned, H256, U256,
 };
 use reth_provider::{
 test_utils::{MockEthProvider, NoopProvider},

From f59f3a706db8660b95c5e4d62a81a3a10c41e929 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Tue, 8 Aug 2023 11:20:40 +0200
Subject: [PATCH 370/722] chore: update cargo.lock (#4110)

From 62f39a5a151c5f4ddc9bf0851725923989df0412 Mon Sep 17 00:00:00 2001
From: Dan Cline <6798349+Rjected@users.noreply.github.com>
Date: Tue, 8 Aug 2023 05:14:52 -0400
Subject: [PATCH 371/722] feat: do not propagate full 4844 transactions (#4105)

Co-authored-by: Matthias Seitz
---
 crates/net/network/src/transactions.rs | 28 +++++++++++++++++++++-----
 1 file changed, 23 insertions(+), 5 deletions(-)

diff --git a/crates/net/network/src/transactions.rs b/crates/net/network/src/transactions.rs
index 7cdf51adbfa9..9f9dfd0682ff 100644
--- a/crates/net/network/src/transactions.rs
+++ b/crates/net/network/src/transactions.rs
@@ -19,7 +19,8 @@ use reth_interfaces::{
 use reth_metrics::common::mpsc::UnboundedMeteredReceiver;
 use reth_network_api::{Peers, ReputationChangeKind};
 use reth_primitives::{
- FromRecoveredTransaction, IntoRecoveredTransaction, PeerId, TransactionSigned, TxHash, H256,
+ FromRecoveredTransaction, IntoRecoveredTransaction, PeerId, TransactionSigned, TxHash, TxType,
+ H256,
 };
 use reth_rlp::Encodable;
 use reth_transaction_pool::{
@@ -247,10 +248,24 @@ where
 let mut hashes = PooledTransactionsHashesBuilder::new(peer.version);
 let mut full_transactions = FullTransactionsBuilder::default();

+ // Iterate through the transactions to propagate and fill the hashes and full
+ // transaction lists, before deciding whether or not to send full transactions to the
+ // peer.
 for tx in to_propagate.iter() {
 if peer.transactions.insert(tx.hash()) {
 hashes.push(tx);
- full_transactions.push(tx);
+
+ // Do not broadcast full 4844 (blob) transactions to peers; only their hashes
+ // are announced.
+ //
+ // Nodes MUST NOT automatically broadcast blob transactions to their peers.
+ // Instead, those transactions are only announced using
+ // `NewPooledTransactionHashes` messages, and can then be manually requested
+ // via `GetPooledTransactions`.
+ //
+ // From: <https://eips.ethereum.org/EIPS/eip-4844#networking>
+ if tx.tx_type() != TxType::EIP4844 {
+ full_transactions.push(tx);
+ }
 }
 }
 let mut new_pooled_hashes = hashes.build();
@@ -612,7 +627,6 @@ where

 /// A transaction that's about to be propagated to multiple peers.
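 /// Note that blob (EIP-4844) transactions are wrapped here as well, but the
 /// propagation loop above only ever announces them by hash; they are never
 /// pushed into the full-transactions broadcast.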
struct PropagateTransaction { - tx_type: u8, size: usize, transaction: Arc, } @@ -624,8 +638,12 @@ impl PropagateTransaction { self.transaction.hash() } + fn tx_type(&self) -> TxType { + self.transaction.tx_type() + } + fn new(transaction: Arc) -> Self { - Self { tx_type: transaction.tx_type().into(), size: transaction.length(), transaction } + Self { size: transaction.length(), transaction } } } @@ -685,7 +703,7 @@ impl PooledTransactionsHashesBuilder { PooledTransactionsHashesBuilder::Eth68(msg) => { msg.hashes.push(tx.hash()); msg.sizes.push(tx.size); - msg.types.push(tx.tx_type); + msg.types.push(tx.transaction.tx_type().into()); } } } From 4b0b63766e7f25729be5a8bc614dfdcf17db97c0 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 8 Aug 2023 10:53:12 +0100 Subject: [PATCH 372/722] feat: add `TransactionSigned::recover_signers` (#4098) Co-authored-by: Matthias Seitz --- Cargo.lock | 1 + crates/consensus/auto-seal/src/lib.rs | 6 ++---- crates/primitives/Cargo.toml | 1 + crates/primitives/src/block.rs | 2 +- crates/primitives/src/transaction/mod.rs | 20 +++++++++++++++++++ crates/revm/src/executor.rs | 7 ++----- .../src/providers/database/provider.rs | 19 +++++++++--------- 7 files changed, 37 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 984cb8c1a0e1..997d83087b49 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5820,6 +5820,7 @@ dependencies = [ "proptest", "proptest-derive", "rand 0.8.5", + "rayon", "reth-codecs", "reth-rlp", "reth-rlp-derive", diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index 7b14f544a483..afd47662f10d 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -349,10 +349,8 @@ impl StorageInner { let block = Block { header, body: transactions, ommers: vec![], withdrawals: None }; - let senders = - block.body.iter().map(|tx| tx.recover_signer()).collect::>>().ok_or( - BlockExecutionError::Validation(BlockValidationError::SenderRecoveryError), - )?; + let senders = TransactionSigned::recover_signers(block.body.iter(), block.body.len()) + .ok_or(BlockExecutionError::Validation(BlockValidationError::SenderRecoveryError))?; trace!(target: "consensus::auto", transactions=?&block.body, "executing transactions"); diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index e73b59d88590..83945592190d 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -60,6 +60,7 @@ impl-serde = "0.4.0" once_cell = "1.17.0" zstd = { version = "0.12", features = ["experimental"] } paste = "1.0" +rayon = "1.7" # proof related triehash = "0.8" diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 0c9866ba28ef..6ae1a8e062e0 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -163,7 +163,7 @@ impl SealedBlock { /// Expensive operation that recovers transaction signer. See [SealedBlockWithSenders]. pub fn senders(&self) -> Option> { - self.body.iter().map(|tx| tx.recover_signer()).collect::>>() + TransactionSigned::recover_signers(self.body.iter(), self.body.len()) } /// Seal sealed block with recovered transaction senders. 
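This patch funnels every call site through `TransactionSigned::recover_signers`, which (in the next hunk) picks sequential or rayon-parallel recovery based on a batch-size threshold. A rough, self-contained sketch of that dispatch pattern follows; the threshold mirrors the patch's `PARALLEL_SENDER_RECOVERY_THRESHOLD`, while the item type and recovery function are illustrative stand-ins, not reth's API:

use rayon::prelude::*;

// Mirrors PARALLEL_SENDER_RECOVERY_THRESHOLD: below this size, rayon's
// scheduling overhead outweighs the parallel speed-up.
const PARALLEL_THRESHOLD: usize = 10;

/// Applies a fallible, CPU-heavy operation to every item, in parallel only for
/// large batches. Collecting `Option`s short-circuits on the first `None` in
/// both branches.
fn recover_all<T, U>(items: &[T], op: fn(&T) -> Option<U>) -> Option<Vec<U>>
where
    T: Sync,
    U: Send,
{
    if items.len() < PARALLEL_THRESHOLD {
        items.iter().map(op).collect()
    } else {
        // `par_iter` is indexed, so the collected `Vec` preserves input order.
        items.par_iter().map(op).collect()
    }
}

One subtlety worth flagging: the patch itself uses `par_bridge`, which does not guarantee that the collected output preserves input order, whereas an indexed `par_iter` does; when recovered signers must line up with their transactions, that distinction matters.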
diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index d3f03e082066..fae91a3318ba 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -7,6 +7,7 @@ use bytes::{Buf, BytesMut}; use derive_more::{AsRef, Deref}; pub use error::InvalidTransactionError; pub use meta::TransactionMeta; +use rayon::prelude::{ParallelBridge, ParallelIterator}; use reth_codecs::{add_arbitrary_tests, derive_arbitrary, Compact}; use reth_rlp::{ length_of_length, Decodable, DecodeError, Encodable, Header, EMPTY_LIST_CODE, EMPTY_STRING_CODE, @@ -32,6 +33,10 @@ mod signature; mod tx_type; pub(crate) mod util; +// Expected number of transactions where we can expect a speed-up by recovering the senders in +// parallel. +const PARALLEL_SENDER_RECOVERY_THRESHOLD: usize = 10; + /// A raw transaction. /// /// Transaction types were introduced in [EIP-2718](https://eips.ethereum.org/EIPS/eip-2718). @@ -937,6 +942,21 @@ impl TransactionSigned { self.signature.recover_signer(signature_hash) } + /// Recovers a list of signers from a transaction list iterator + /// + /// Returns `None`, if some transaction's signature is invalid, see also + /// [Self::recover_signer]. + pub fn recover_signers<'a>( + txes: impl Iterator + Send, + num_txes: usize, + ) -> Option> { + if num_txes < PARALLEL_SENDER_RECOVERY_THRESHOLD { + txes.map(|tx| tx.recover_signer()).collect() + } else { + txes.cloned().par_bridge().map(|tx| tx.recover_signer()).collect() + } + } + /// Consumes the type, recover signer and return [`TransactionSignedEcRecovered`] /// /// Returns `None` if the transaction's signature is invalid, see also [Self::recover_signer]. diff --git a/crates/revm/src/executor.rs b/crates/revm/src/executor.rs index 5ce714521fba..e52bdb4bc19f 100644 --- a/crates/revm/src/executor.rs +++ b/crates/revm/src/executor.rs @@ -84,11 +84,8 @@ where Err(BlockValidationError::SenderRecoveryError.into()) } } else { - body.iter() - .map(|tx| { - tx.recover_signer().ok_or(BlockValidationError::SenderRecoveryError.into()) - }) - .collect() + TransactionSigned::recover_signers(body.iter(), body.len()) + .ok_or(BlockValidationError::SenderRecoveryError.into()) } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 56d4c0f39233..fe691b0441d7 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -22,7 +22,10 @@ use reth_db::{ transaction::{DbTx, DbTxMut}, BlockNumberList, DatabaseError, }; -use reth_interfaces::Result; +use reth_interfaces::{ + executor::{BlockExecutionError, BlockValidationError}, + Result, +}; use reth_primitives::{ keccak256, stage::{StageCheckpoint, StageId}, @@ -1910,14 +1913,12 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> BlockWriter for DatabaseProvider<' let tx_iter = if Some(block.body.len()) == senders_len { block.body.into_iter().zip(senders.unwrap()).collect::>() } else { - block - .body - .into_iter() - .map(|tx| { - let signer = tx.recover_signer(); - (tx, signer.unwrap_or_default()) - }) - .collect::>() + let senders = TransactionSigned::recover_signers(block.body.iter(), block.body.len()) + .ok_or(BlockExecutionError::Validation( + BlockValidationError::SenderRecoveryError, + ))?; + + block.body.into_iter().zip(senders).collect() }; for (transaction, sender) in tx_iter { From 3d1857636dcac3fc510a7c754852438b74280944 Mon Sep 17 00:00:00 2001 From: Matthias 
Seitz Date: Tue, 8 Aug 2023 13:09:52 +0200 Subject: [PATCH 373/722] fix: bad recursion in logs (#4113) --- crates/rpc/rpc-api/src/lib.rs | 1 + crates/rpc/rpc-builder/tests/it/http.rs | 28 +++++++++++++++++++++++-- crates/rpc/rpc/src/eth/filter.rs | 2 +- 3 files changed, 28 insertions(+), 3 deletions(-) diff --git a/crates/rpc/rpc-api/src/lib.rs b/crates/rpc/rpc-api/src/lib.rs index 073631c986e7..27a4afc389c9 100644 --- a/crates/rpc/rpc-api/src/lib.rs +++ b/crates/rpc/rpc-api/src/lib.rs @@ -67,6 +67,7 @@ pub mod clients { debug::DebugApiClient, engine::{EngineApiClient, EngineEthApiClient}, eth::EthApiClient, + eth_filter::EthFilterApiClient, net::NetApiClient, otterscan::OtterscanClient, rpc::RpcApiServer, diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index 9dbfb2136f02..c1414d556c95 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -14,10 +14,11 @@ use reth_primitives::{ }; use reth_rpc_api::{ clients::{AdminApiClient, EthApiClient}, - DebugApiClient, NetApiClient, OtterscanClient, TraceApiClient, Web3ApiClient, + DebugApiClient, EthFilterApiClient, NetApiClient, OtterscanClient, TraceApiClient, + Web3ApiClient, }; use reth_rpc_builder::RethRpcModule; -use reth_rpc_types::{trace::filter::TraceFilter, CallRequest, Index, TransactionRequest}; +use reth_rpc_types::{trace::filter::TraceFilter, CallRequest, Filter, Index, TransactionRequest}; use std::collections::HashSet; fn is_unimplemented(err: Error) -> bool { @@ -30,6 +31,20 @@ fn is_unimplemented(err: Error) -> bool { } } +async fn test_filter_calls(client: &C) +where + C: ClientT + SubscriptionClientT + Sync, +{ + EthFilterApiClient::new_filter(client, Filter::default()).await.unwrap(); + EthFilterApiClient::new_pending_transaction_filter(client).await.unwrap(); + let id = EthFilterApiClient::new_block_filter(client).await.unwrap(); + EthFilterApiClient::filter_changes(client, id.clone()).await.unwrap(); + EthFilterApiClient::logs(client, Filter::default()).await.unwrap(); + let id = EthFilterApiClient::new_filter(client, Filter::default()).await.unwrap(); + EthFilterApiClient::filter_logs(client, id.clone()).await.unwrap(); + EthFilterApiClient::uninstall_filter(client, id).await.unwrap(); +} + async fn test_basic_admin_calls(client: &C) where C: ClientT + SubscriptionClientT + Sync, @@ -240,6 +255,15 @@ where )); } +#[tokio::test(flavor = "multi_thread")] +async fn test_call_filter_functions_http() { + reth_tracing::init_test_tracing(); + + let handle = launch_http(vec![RethRpcModule::Eth]).await; + let client = handle.http_client().unwrap(); + test_filter_calls(&client).await; +} + #[tokio::test(flavor = "multi_thread")] async fn test_call_admin_functions_http() { reth_tracing::init_test_tracing(); diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index a7c5ce53f36a..5332de726b0b 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -209,7 +209,7 @@ where /// Handler for `eth_getLogs` async fn logs(&self, filter: Filter) -> RpcResult> { trace!(target: "rpc::eth", "Serving eth_getLogs"); - Ok(EthFilter::logs(self, filter).await?) + Ok(self.inner.logs_for_filter(filter).await?) 
} } From 7fb9b8414f67b9c69732649738435c28ac35b99c Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 8 Aug 2023 12:30:50 +0100 Subject: [PATCH 374/722] feat(bin): temporarily disable full node (#4112) --- bin/reth/src/args/pruning_args.rs | 10 ++++++---- bin/reth/src/node/mod.rs | 2 +- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/bin/reth/src/args/pruning_args.rs b/bin/reth/src/args/pruning_args.rs index cb71dce15f0e..1c33ce644726 100644 --- a/bin/reth/src/args/pruning_args.rs +++ b/bin/reth/src/args/pruning_args.rs @@ -18,14 +18,16 @@ pub struct PruningArgs { impl PruningArgs { /// Returns pruning configuration. - pub fn prune_config(&self, chain_spec: Arc) -> Option { - if self.full { + pub fn prune_config(&self, _chain_spec: Arc) -> eyre::Result> { + Ok(if self.full { + eyre::bail!("full node is not supported yet, keep an eye on next releases"); + #[allow(unreachable_code)] Some(PruneConfig { block_interval: 5, parts: PruneModes { sender_recovery: Some(PruneMode::Distance(128)), transaction_lookup: None, - receipts: chain_spec + receipts: _chain_spec .deposit_contract .as_ref() .map(|contract| PruneMode::Before(contract.block)), @@ -35,6 +37,6 @@ impl PruningArgs { }) } else { None - } + }) } } diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 65657b71ff68..83e6cd1c86dc 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -337,7 +337,7 @@ impl NodeCommand { None }; - let prune_config = self.pruning.prune_config(Arc::clone(&self.chain)).or(config.prune); + let prune_config = self.pruning.prune_config(Arc::clone(&self.chain))?.or(config.prune); // Configure the pipeline let (mut pipeline, client) = if self.dev.dev { From bbe9d56ef40dfab9860f3bd22d9612b1851a1ec5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 8 Aug 2023 14:03:43 +0200 Subject: [PATCH 375/722] Revert "feat: add `TransactionSigned::recover_signers`" (#4115) --- Cargo.lock | 1 - crates/consensus/auto-seal/src/lib.rs | 6 ++++-- crates/primitives/Cargo.toml | 1 - crates/primitives/src/block.rs | 2 +- crates/primitives/src/transaction/mod.rs | 20 ------------------- crates/revm/src/executor.rs | 7 +++++-- .../src/providers/database/provider.rs | 19 +++++++++--------- 7 files changed, 19 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 997d83087b49..984cb8c1a0e1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5820,7 +5820,6 @@ dependencies = [ "proptest", "proptest-derive", "rand 0.8.5", - "rayon", "reth-codecs", "reth-rlp", "reth-rlp-derive", diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index afd47662f10d..7b14f544a483 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -349,8 +349,10 @@ impl StorageInner { let block = Block { header, body: transactions, ommers: vec![], withdrawals: None }; - let senders = TransactionSigned::recover_signers(block.body.iter(), block.body.len()) - .ok_or(BlockExecutionError::Validation(BlockValidationError::SenderRecoveryError))?; + let senders = + block.body.iter().map(|tx| tx.recover_signer()).collect::>>().ok_or( + BlockExecutionError::Validation(BlockValidationError::SenderRecoveryError), + )?; trace!(target: "consensus::auto", transactions=?&block.body, "executing transactions"); diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 83945592190d..e73b59d88590 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -60,7 +60,6 @@ impl-serde = "0.4.0" once_cell 
= "1.17.0" zstd = { version = "0.12", features = ["experimental"] } paste = "1.0" -rayon = "1.7" # proof related triehash = "0.8" diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 6ae1a8e062e0..0c9866ba28ef 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -163,7 +163,7 @@ impl SealedBlock { /// Expensive operation that recovers transaction signer. See [SealedBlockWithSenders]. pub fn senders(&self) -> Option> { - TransactionSigned::recover_signers(self.body.iter(), self.body.len()) + self.body.iter().map(|tx| tx.recover_signer()).collect::>>() } /// Seal sealed block with recovered transaction senders. diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index fae91a3318ba..d3f03e082066 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -7,7 +7,6 @@ use bytes::{Buf, BytesMut}; use derive_more::{AsRef, Deref}; pub use error::InvalidTransactionError; pub use meta::TransactionMeta; -use rayon::prelude::{ParallelBridge, ParallelIterator}; use reth_codecs::{add_arbitrary_tests, derive_arbitrary, Compact}; use reth_rlp::{ length_of_length, Decodable, DecodeError, Encodable, Header, EMPTY_LIST_CODE, EMPTY_STRING_CODE, @@ -33,10 +32,6 @@ mod signature; mod tx_type; pub(crate) mod util; -// Expected number of transactions where we can expect a speed-up by recovering the senders in -// parallel. -const PARALLEL_SENDER_RECOVERY_THRESHOLD: usize = 10; - /// A raw transaction. /// /// Transaction types were introduced in [EIP-2718](https://eips.ethereum.org/EIPS/eip-2718). @@ -942,21 +937,6 @@ impl TransactionSigned { self.signature.recover_signer(signature_hash) } - /// Recovers a list of signers from a transaction list iterator - /// - /// Returns `None`, if some transaction's signature is invalid, see also - /// [Self::recover_signer]. - pub fn recover_signers<'a>( - txes: impl Iterator + Send, - num_txes: usize, - ) -> Option> { - if num_txes < PARALLEL_SENDER_RECOVERY_THRESHOLD { - txes.map(|tx| tx.recover_signer()).collect() - } else { - txes.cloned().par_bridge().map(|tx| tx.recover_signer()).collect() - } - } - /// Consumes the type, recover signer and return [`TransactionSignedEcRecovered`] /// /// Returns `None` if the transaction's signature is invalid, see also [Self::recover_signer]. 
diff --git a/crates/revm/src/executor.rs b/crates/revm/src/executor.rs index e52bdb4bc19f..5ce714521fba 100644 --- a/crates/revm/src/executor.rs +++ b/crates/revm/src/executor.rs @@ -84,8 +84,11 @@ where Err(BlockValidationError::SenderRecoveryError.into()) } } else { - TransactionSigned::recover_signers(body.iter(), body.len()) - .ok_or(BlockValidationError::SenderRecoveryError.into()) + body.iter() + .map(|tx| { + tx.recover_signer().ok_or(BlockValidationError::SenderRecoveryError.into()) + }) + .collect() } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index fe691b0441d7..56d4c0f39233 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -22,10 +22,7 @@ use reth_db::{ transaction::{DbTx, DbTxMut}, BlockNumberList, DatabaseError, }; -use reth_interfaces::{ - executor::{BlockExecutionError, BlockValidationError}, - Result, -}; +use reth_interfaces::Result; use reth_primitives::{ keccak256, stage::{StageCheckpoint, StageId}, @@ -1913,12 +1910,14 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> BlockWriter for DatabaseProvider<' let tx_iter = if Some(block.body.len()) == senders_len { block.body.into_iter().zip(senders.unwrap()).collect::>() } else { - let senders = TransactionSigned::recover_signers(block.body.iter(), block.body.len()) - .ok_or(BlockExecutionError::Validation( - BlockValidationError::SenderRecoveryError, - ))?; - - block.body.into_iter().zip(senders).collect() + block + .body + .into_iter() + .map(|tx| { + let signer = tx.recover_signer(); + (tx, signer.unwrap_or_default()) + }) + .collect::>() }; for (transaction, sender) in tx_iter { From f47498f4e08a64d10f13edb329e4fe9d98859982 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 8 Aug 2023 18:46:18 +0300 Subject: [PATCH 376/722] feat(cli): full storage trie recovery (#4119) --- bin/reth/src/recover/storage_tries.rs | 44 ++++++++++++++------------- 1 file changed, 23 insertions(+), 21 deletions(-) diff --git a/bin/reth/src/recover/storage_tries.rs b/bin/reth/src/recover/storage_tries.rs index 49e39011deba..564410eca06f 100644 --- a/bin/reth/src/recover/storage_tries.rs +++ b/bin/reth/src/recover/storage_tries.rs @@ -10,8 +10,9 @@ use reth_db::{ init_db, tables, transaction::DbTx, }; -use reth_primitives::{keccak256, ChainSpec}; -use reth_provider::{AccountExtReader, BlockNumReader, ProviderFactory}; +use reth_primitives::ChainSpec; +use reth_provider::{BlockNumReader, HeaderProvider, ProviderError, ProviderFactory}; +use reth_trie::StateRoot; use std::{fs, sync::Arc}; use tracing::*; @@ -44,10 +45,6 @@ pub struct Command { value_parser = genesis_value_parser )] chain: Arc, - - /// The number of blocks in the past to look through. - #[arg(long, default_value_t = 100)] - lookback: u64, } impl Command { @@ -63,29 +60,34 @@ impl Command { let factory = ProviderFactory::new(&db, self.chain.clone()); let mut provider = factory.provider_rw()?; - let best_block = provider.best_block_number()?; - - let block_range = best_block.saturating_sub(self.lookback)..=best_block; - let changed_accounts = provider.changed_accounts_with_range(block_range)?; - let destroyed_accounts = provider - .basic_accounts(changed_accounts)? 
- .into_iter() - .filter_map(|(address, acc)| acc.is_none().then_some(address)) - .collect::>(); - - info!(target: "reth::cli", destroyed = destroyed_accounts.len(), "Starting recovery of storage tries"); + let best_header = provider + .sealed_header(best_block)? + .ok_or(ProviderError::HeaderNotFound(best_block.into()))?; let mut deleted_tries = 0; let tx_mut = provider.tx_mut(); + let mut hashed_account_cursor = tx_mut.cursor_read::()?; let mut storage_trie_cursor = tx_mut.cursor_dup_read::()?; - for address in destroyed_accounts { - let hashed_address = keccak256(address); - if storage_trie_cursor.seek_exact(hashed_address)?.is_some() { + let mut entry = storage_trie_cursor.first()?; + + info!(target: "reth::cli", "Starting pruning of storage tries"); + while let Some((hashed_address, _)) = entry { + if hashed_account_cursor.seek_exact(hashed_address)?.is_none() { deleted_tries += 1; - trace!(target: "reth::cli", ?address, ?hashed_address, "Deleting storage trie"); storage_trie_cursor.delete_current_duplicates()?; } + + entry = storage_trie_cursor.next()?; + } + + let state_root = StateRoot::new(tx_mut).root()?; + if state_root != best_header.state_root { + eyre::bail!( + "Recovery failed. Incorrect state root. Expected: {:?}. Received: {:?}", + best_header.state_root, + state_root + ); } provider.commit()?; From b823cc01778fd364a78279a2979f84ef79f954eb Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 8 Aug 2023 19:09:23 +0300 Subject: [PATCH 377/722] release: 0.1.0-alpha.5 (#4111) --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 4d083e629f1f..aac12498b577 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -56,7 +56,7 @@ default-members = ["bin/reth"] resolver = "2" [workspace.package] -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" edition = "2021" rust-version = "1.70" # Remember to update .clippy.toml and README.md license = "MIT OR Apache-2.0" From 058c55cd8cb78fa3aee97ad89cb39944df5d528b Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Wed, 9 Aug 2023 14:25:55 +0300 Subject: [PATCH 378/722] fix(trie): include destroyed accounts in account prefix set (#4126) --- Cargo.lock | 92 +++++++++---------- .../src/providers/database/provider.rs | 5 +- 2 files changed, 48 insertions(+), 49 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 984cb8c1a0e1..65fe6644c353 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1131,7 +1131,7 @@ checksum = "67ba02a97a2bd10f4b59b25c7973101c79642302776489e030cd13cdab09ed15" [[package]] name = "codecs-derive" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "convert_case 0.6.0", "parity-scale-codec", @@ -1930,7 +1930,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "reth-db", "reth-interfaces", @@ -5218,7 +5218,7 @@ dependencies = [ [[package]] name = "reth" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "backon", "clap", @@ -5288,7 +5288,7 @@ dependencies = [ [[package]] name = "reth-auto-seal-consensus" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "futures-util", "reth-beacon-consensus", @@ -5305,7 +5305,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "futures-core", "futures-util", @@ -5324,7 +5324,7 @@ dependencies = [ [[package]] name = "reth-beacon-consensus" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "assert_matches", 
"futures", @@ -5352,7 +5352,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "aquamarine", "assert_matches", @@ -5371,7 +5371,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "arbitrary", "bytes", @@ -5386,7 +5386,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "confy", "reth-discv4", @@ -5403,7 +5403,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "assert_matches", "mockall", @@ -5414,7 +5414,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "arbitrary", "assert_matches", @@ -5455,7 +5455,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "discv5", "enr 0.8.1", @@ -5478,7 +5478,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "async-trait", "data-encoding", @@ -5502,7 +5502,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "assert_matches", "futures", @@ -5527,7 +5527,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "aes 0.8.3", "block-padding", @@ -5558,7 +5558,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "arbitrary", "async-trait", @@ -5591,7 +5591,7 @@ dependencies = [ [[package]] name = "reth-interfaces" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "arbitrary", "async-trait", @@ -5619,7 +5619,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "async-trait", "bytes", @@ -5638,7 +5638,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "bitflags 2.3.3", "byteorder", @@ -5658,7 +5658,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "bindgen 0.65.1", "cc", @@ -5667,7 +5667,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "futures", "metrics 0.20.1", @@ -5677,7 +5677,7 @@ dependencies = [ [[package]] name = "reth-metrics-derive" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "metrics 0.20.1", "once_cell", @@ -5691,7 +5691,7 @@ dependencies = [ [[package]] name = "reth-net-common" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "pin-project", "reth-primitives", @@ -5700,7 +5700,7 @@ dependencies = [ [[package]] name = "reth-net-nat" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "igd", "pin-project-lite", @@ -5714,7 +5714,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "aquamarine", "async-trait", @@ -5764,7 +5764,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "async-trait", "reth-eth-wire", @@ -5777,7 +5777,7 @@ dependencies = [ [[package]] name = 
"reth-payload-builder" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "futures-util", "reth-interfaces", @@ -5796,7 +5796,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "arbitrary", "assert_matches", @@ -5845,7 +5845,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "auto_impl", "derive_more", @@ -5866,7 +5866,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "assert_matches", "itertools 0.11.0", @@ -5883,7 +5883,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "once_cell", "reth-consensus-common", @@ -5899,7 +5899,7 @@ dependencies = [ [[package]] name = "reth-revm-inspectors" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "boa_engine", "boa_gc", @@ -5915,7 +5915,7 @@ dependencies = [ [[package]] name = "reth-revm-primitives" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "reth-primitives", "revm", @@ -5923,7 +5923,7 @@ dependencies = [ [[package]] name = "reth-rlp" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "arrayvec", "auto_impl", @@ -5942,7 +5942,7 @@ dependencies = [ [[package]] name = "reth-rlp-derive" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", @@ -5951,7 +5951,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "assert_matches", "async-trait", @@ -5998,7 +5998,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "jsonrpsee", "reth-primitives", @@ -6008,7 +6008,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "async-trait", "futures", @@ -6022,7 +6022,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "hyper", "jsonrpsee", @@ -6053,7 +6053,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "assert_matches", "async-trait", @@ -6075,7 +6075,7 @@ dependencies = [ [[package]] name = "reth-rpc-types" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "itertools 0.11.0", "jsonrpsee-types", @@ -6090,7 +6090,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "aquamarine", "assert_matches", @@ -6126,7 +6126,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "dyn-clone", "futures-util", @@ -6139,7 +6139,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "tracing", "tracing-appender", @@ -6149,7 +6149,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "aquamarine", "assert_matches", @@ -6178,7 +6178,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" dependencies = [ "criterion", "derive_more", diff --git 
a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 56d4c0f39233..5c9bedc41083 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1435,9 +1435,8 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> HashingWriter for DatabaseProvider let accounts = self.basic_accounts(lists)?; let hashed_addresses = self.insert_account_for_hashing(accounts)?; for (hashed_address, account) in hashed_addresses { - if account.is_some() { - account_prefix_set.insert(Nibbles::unpack(hashed_address)); - } else { + account_prefix_set.insert(Nibbles::unpack(hashed_address)); + if account.is_none() { destroyed_accounts.insert(hashed_address); } } From ba7fa1a4cab393e99cf1de5a7e69ec04db43ff57 Mon Sep 17 00:00:00 2001 From: "Supernovahs.eth" <91280922+supernovahs@users.noreply.github.com> Date: Wed, 9 Aug 2023 17:28:37 +0530 Subject: [PATCH 379/722] standalone rpc_types (#4088) Co-authored-by: Matthias Seitz --- Cargo.lock | 76 ++++++---- Cargo.toml | 4 +- crates/rpc/rpc-types-compat/Cargo.toml | 16 ++ crates/rpc/rpc-types-compat/src/block.rs | 114 ++++++++++++++ crates/rpc/rpc-types-compat/src/lib.rs | 21 +++ .../rpc-types-compat/src/transaction/mod.rs | 126 ++++++++++++++++ crates/rpc/rpc-types/Cargo.toml | 1 - crates/rpc/rpc-types/src/eth/block.rs | 118 +-------------- .../rpc/rpc-types/src/eth/transaction/mod.rs | 142 +----------------- crates/rpc/rpc/Cargo.toml | 1 + crates/rpc/rpc/src/eth/api/block.rs | 15 +- crates/rpc/rpc/src/eth/api/transactions.rs | 8 +- crates/rpc/rpc/src/eth/pubsub.rs | 4 +- crates/rpc/rpc/src/txpool.rs | 2 +- 14 files changed, 349 insertions(+), 299 deletions(-) create mode 100644 crates/rpc/rpc-types-compat/Cargo.toml create mode 100644 crates/rpc/rpc-types-compat/src/block.rs create mode 100644 crates/rpc/rpc-types-compat/src/lib.rs create mode 100644 crates/rpc/rpc-types-compat/src/transaction/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 65fe6644c353..a11dd46e99a7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -723,7 +723,7 @@ dependencies = [ [[package]] name = "boa_ast" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#c2df31b781c115d6bdb5de64979d247af329ffcd" +source = "git+https://github.com/boa-dev/boa#9665f8be3be60f475d816ca10430631f43d6c962" dependencies = [ "bitflags 2.3.3", "boa_interner", @@ -736,7 +736,7 @@ dependencies = [ [[package]] name = "boa_engine" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#c2df31b781c115d6bdb5de64979d247af329ffcd" +source = "git+https://github.com/boa-dev/boa#9665f8be3be60f475d816ca10430631f43d6c962" dependencies = [ "bitflags 2.3.3", "boa_ast", @@ -774,7 +774,7 @@ dependencies = [ [[package]] name = "boa_gc" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#c2df31b781c115d6bdb5de64979d247af329ffcd" +source = "git+https://github.com/boa-dev/boa#9665f8be3be60f475d816ca10430631f43d6c962" dependencies = [ "boa_macros", "boa_profiler", @@ -785,7 +785,7 @@ dependencies = [ [[package]] name = "boa_icu_provider" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#c2df31b781c115d6bdb5de64979d247af329ffcd" +source = "git+https://github.com/boa-dev/boa#9665f8be3be60f475d816ca10430631f43d6c962" dependencies = [ "icu_collections", "icu_normalizer", @@ -798,7 +798,7 @@ dependencies = [ [[package]] name = "boa_interner" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#c2df31b781c115d6bdb5de64979d247af329ffcd" +source = 
"git+https://github.com/boa-dev/boa#9665f8be3be60f475d816ca10430631f43d6c962" dependencies = [ "boa_gc", "boa_macros", @@ -813,7 +813,7 @@ dependencies = [ [[package]] name = "boa_macros" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#c2df31b781c115d6bdb5de64979d247af329ffcd" +source = "git+https://github.com/boa-dev/boa#9665f8be3be60f475d816ca10430631f43d6c962" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", @@ -824,7 +824,7 @@ dependencies = [ [[package]] name = "boa_parser" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#c2df31b781c115d6bdb5de64979d247af329ffcd" +source = "git+https://github.com/boa-dev/boa#9665f8be3be60f475d816ca10430631f43d6c962" dependencies = [ "bitflags 2.3.3", "boa_ast", @@ -844,7 +844,7 @@ dependencies = [ [[package]] name = "boa_profiler" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#c2df31b781c115d6bdb5de64979d247af329ffcd" +source = "git+https://github.com/boa-dev/boa#9665f8be3be60f475d816ca10430631f43d6c962" [[package]] name = "brotli" @@ -989,9 +989,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.81" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c6b2562119bf28c3439f7f02db99faf0aa1a8cdfe5772a2ee155d32227239f0" +checksum = "305fe645edc1442a0fa8b6726ba61d422798d37a52e12eaecf4b022ebbb88f01" dependencies = [ "jobserver", "libc", @@ -1084,9 +1084,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.3.19" +version = "4.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fd304a20bff958a57f04c4e96a2e7594cc4490a0e809cbd48bb6437edaa452d" +checksum = "c27cdf28c0f604ba3f512b0c9a409f8de8513e4816705deb0498b627e7c3a3fd" dependencies = [ "clap_builder", "clap_derive", @@ -1095,9 +1095,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.3.19" +version = "4.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01c6a3f08f1fe5662a35cfe393aec09c4df95f60ee93b7556505260f75eee9e1" +checksum = "08a9f1ab5e9f01a9b81f202e8562eb9a10de70abf9eaeac1be465c28b75aa4aa" dependencies = [ "anstream", "anstyle", @@ -1635,9 +1635,9 @@ dependencies = [ [[package]] name = "der" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c7ed52955ce76b1554f509074bb357d3fb8ac9b51288a65a3fd480d1dfba946" +checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" dependencies = [ "const-oid", "zeroize", @@ -1810,7 +1810,7 @@ dependencies = [ [[package]] name = "discv5" version = "0.3.1" -source = "git+https://github.com/sigp/discv5#a9f1e99321aec746fb9d6e8df889aa515a5e1254" +source = "git+https://github.com/sigp/discv5#1439decd4e7d7c9de78ef61b5d67be3fee688510" dependencies = [ "aes 0.7.5", "aes-gcm", @@ -4565,18 +4565,18 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "030ad2bc4db10a8944cb0d837f158bdfec4d4a4873ab701a95046770d11f8842" +checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec2e072ecce94ec471b13398d5402c188e76ac03cf74dd1a975161b23a3f6d9c" +checksum = 
"4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", @@ -4585,9 +4585,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c40d25201921e5ff0c862a505c6557ea88568a4e3ace775ab55e93f2f4f9d57" +checksum = "2c516611246607d0c04186886dbb3a754368ef82c79e9827a802c6d836dd111c" [[package]] name = "pin-utils" @@ -5978,6 +5978,7 @@ dependencies = [ "reth-rpc-api", "reth-rpc-engine-api", "reth-rpc-types", + "reth-rpc-types-compat", "reth-tasks", "reth-transaction-pool", "revm", @@ -6088,6 +6089,15 @@ dependencies = [ "thiserror", ] +[[package]] +name = "reth-rpc-types-compat" +version = "0.1.0-alpha.5" +dependencies = [ + "reth-primitives", + "reth-rlp", + "reth-rpc-types", +] + [[package]] name = "reth-stages" version = "0.1.0-alpha.5" @@ -6685,9 +6695,9 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "serde" -version = "1.0.181" +version = "1.0.183" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d3e73c93c3240c0bda063c239298e633114c69a888c3e37ca8bb33f343e9890" +checksum = "32ac8da02677876d532745a130fc9d8e6edfa81a269b107c5b00829b91d8eb3c" dependencies = [ "serde_derive", ] @@ -6705,9 +6715,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.181" +version = "1.0.183" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be02f6cb0cd3a5ec20bbcfbcbd749f57daddb1a0882dc2e46a6c236c90b977ed" +checksum = "aafe972d60b0b9bee71a91b92fee2d4fb3c9d7e8f6b179aa99f27203d99a4816" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", @@ -7149,7 +7159,7 @@ version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" dependencies = [ - "strum_macros 0.25.1", + "strum_macros 0.25.2", ] [[package]] @@ -7167,9 +7177,9 @@ dependencies = [ [[package]] name = "strum_macros" -version = "0.25.1" +version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6069ca09d878a33f883cc06aaa9718ede171841d3832450354410b718b097232" +checksum = "ad8d03b598d3d0fff69bf533ee3ef19b8eeb342729596df84bcc7e1f96ec4059" dependencies = [ "heck", "proc-macro2 1.0.66", @@ -7304,9 +7314,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.7.0" +version = "3.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5486094ee78b2e5038a6382ed7645bc084dc2ec433426ca4c3cb61e2007b8998" +checksum = "dc02fddf48964c42031a0b3fe0428320ecf3a73c401040fc0096f97794310651" dependencies = [ "cfg-if", "fastrand 2.0.0", @@ -7352,7 +7362,7 @@ dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", "serde", - "strum_macros 0.25.1", + "strum_macros 0.25.2", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index aac12498b577..ad0431f2ac0a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -45,7 +45,7 @@ members = [ "crates/transaction-pool", "crates/trie", "testing/ef-tests", - + "crates/rpc/rpc-types-compat", "examples", "examples/additional-rpc-namespace-in-cli", ] @@ -102,6 +102,8 @@ reth-transaction-pool = { path = "./crates/transaction-pool" } reth-tasks = { path = "./crates/tasks" } reth-network = { path = "./crates/net/network" } reth-network-api = { path = "./crates/net/network-api" } +reth-rpc-types-compat = { path = 
"./crates/rpc/rpc-types-compat"} + ## eth ethers-core = { version = "2.0.8", default-features = false } diff --git a/crates/rpc/rpc-types-compat/Cargo.toml b/crates/rpc/rpc-types-compat/Cargo.toml new file mode 100644 index 000000000000..a7997e34102e --- /dev/null +++ b/crates/rpc/rpc-types-compat/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "reth-rpc-types-compat" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = """ +Compatibility layer for reth-primitives and ethereum RPC types +""" + +[dependencies] +reth-primitives.workspace = true +reth-rpc-types.workspace = true +reth-rlp.workspace = true diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs new file mode 100644 index 000000000000..6551124d9f74 --- /dev/null +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -0,0 +1,114 @@ +//! Compatibility functions for rpc `Block` type. + +use crate::transaction::from_recovered_with_block_context; +use reth_primitives::{Block as PrimitiveBlock, Header as PrimitiveHeader, H256, U256}; +use reth_rlp::Encodable; +use reth_rpc_types::{Block, BlockError, BlockTransactions, BlockTransactionsKind, Header}; + +/// Converts the given primitive block into a [Block] response with the given +/// [BlockTransactionsKind] +/// +/// If a `block_hash` is provided, then this is used, otherwise the block hash is computed. +pub fn from_block( + block: PrimitiveBlock, + total_difficulty: U256, + kind: BlockTransactionsKind, + block_hash: Option, +) -> Result { + match kind { + BlockTransactionsKind::Hashes => { + Ok(from_block_with_tx_hashes(block, total_difficulty, block_hash)) + } + BlockTransactionsKind::Full => from_block_full(block, total_difficulty, block_hash), + } +} + +/// Create a new [Block] response from a [primitive block](reth_primitives::Block), using the +/// total difficulty to populate its field in the rpc response. +/// +/// This will populate the `transactions` field with only the hashes of the transactions in the +/// block: [BlockTransactions::Hashes] +pub fn from_block_with_tx_hashes( + block: PrimitiveBlock, + total_difficulty: U256, + block_hash: Option, +) -> Block { + let block_hash = block_hash.unwrap_or_else(|| block.header.hash_slow()); + let transactions = block.body.iter().map(|tx| tx.hash()).collect(); + + from_block_with_transactions( + block_hash, + block, + total_difficulty, + BlockTransactions::Hashes(transactions), + ) +} + +/// Create a new [Block] response from a [primitive block](reth_primitives::Block), using the +/// total difficulty to populate its field in the rpc response. 
+///
+/// This will populate the `transactions` field with the _full_
+/// [Transaction](reth_rpc_types::Transaction) objects: [BlockTransactions::Full]
+pub fn from_block_full(
+ block: PrimitiveBlock,
+ total_difficulty: U256,
+ block_hash: Option<H256>,
+) -> Result<Block, BlockError> {
+ let block_hash = block_hash.unwrap_or_else(|| block.header.hash_slow());
+ let block_number = block.number;
+ let mut transactions = Vec::with_capacity(block.body.len());
+ for (idx, tx) in block.body.iter().enumerate() {
+ let signed_tx = tx.clone().into_ecrecovered().ok_or(BlockError::InvalidSignature)?;
+ transactions.push(from_recovered_with_block_context(
+ signed_tx,
+ block_hash,
+ block_number,
+ block.base_fee_per_gas,
+ U256::from(idx),
+ ))
+ }
+
+ Ok(from_block_with_transactions(
+ block_hash,
+ block,
+ total_difficulty,
+ BlockTransactions::Full(transactions),
+ ))
+}
+
+fn from_block_with_transactions(
+ block_hash: H256,
+ block: PrimitiveBlock,
+ total_difficulty: U256,
+ transactions: BlockTransactions,
+) -> Block {
+ let block_length = block.length();
+ let uncles = block.ommers.into_iter().map(|h| h.hash_slow()).collect();
+ let header = Header::from_primitive_with_hash(block.header.seal(block_hash));
+ let withdrawals = if header.withdrawals_root.is_some() { block.withdrawals } else { None };
+ Block {
+ header,
+ uncles,
+ transactions,
+ total_difficulty: Some(total_difficulty),
+ size: Some(U256::from(block_length)),
+ withdrawals,
+ }
+}
+
+/// Build an RPC block response representing
+/// an Uncle from its header.
+pub fn uncle_block_from_header(header: PrimitiveHeader) -> Block {
+ let hash = header.hash_slow();
+ let rpc_header = Header::from_primitive_with_hash(header.clone().seal(hash));
+ let uncle_block = PrimitiveBlock { header, ..Default::default() };
+ let size = Some(U256::from(uncle_block.length()));
+ Block {
+ uncles: vec![],
+ header: rpc_header,
+ transactions: BlockTransactions::Uncle,
+ withdrawals: Some(vec![]),
+ size,
+ total_difficulty: None,
+ }
+}
diff --git a/crates/rpc/rpc-types-compat/src/lib.rs b/crates/rpc/rpc-types-compat/src/lib.rs
new file mode 100644
index 000000000000..b4628cfcbab1
--- /dev/null
+++ b/crates/rpc/rpc-types-compat/src/lib.rs
@@ -0,0 +1,21 @@
+#![cfg_attr(docsrs, feature(doc_cfg))]
+#![doc(
+ html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
+ html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
+ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
+)]
+#![warn(missing_debug_implementations, missing_docs, unreachable_pub, unused_crate_dependencies)]
+#![deny(unused_must_use, rust_2018_idioms)]
+#![doc(test(
+ no_crate_inject,
+ attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables))
+))]
+
+//! Reth compatibility and utils for RPC types
+//!
+//! This crate provides various helper functions to convert between reth primitive types and rpc types.
+
+pub mod block;
+pub use block::*;
+pub mod transaction;
+pub use transaction::*;
diff --git a/crates/rpc/rpc-types-compat/src/transaction/mod.rs b/crates/rpc/rpc-types-compat/src/transaction/mod.rs
new file mode 100644
index 000000000000..02026b04fbec
--- /dev/null
+++ b/crates/rpc/rpc-types-compat/src/transaction/mod.rs
@@ -0,0 +1,126 @@
+//! Compatibility functions for rpc `Transaction` type.
+ +use reth_primitives::{ + AccessListItem, BlockNumber, Transaction as PrimitiveTransaction, + TransactionKind as PrimitiveTransactionKind, TransactionSignedEcRecovered, TxType, H256, U128, + U256, U64, +}; +use reth_rpc_types::{Signature, Transaction}; + +/// Create a new rpc transaction result for a mined transaction, using the given block hash, +/// number, and tx index fields to populate the corresponding fields in the rpc result. +/// +/// The block hash, number, and tx index fields should be from the original block where the +/// transaction was mined. +pub fn from_recovered_with_block_context( + tx: TransactionSignedEcRecovered, + block_hash: H256, + block_number: BlockNumber, + base_fee: Option, + tx_index: U256, +) -> Transaction { + fill(tx, Some(block_hash), Some(block_number), base_fee, Some(tx_index)) +} + +/// Create a new rpc transaction result for a _pending_ signed transaction, setting block +/// environment related fields to `None`. +pub fn from_recovered(tx: TransactionSignedEcRecovered) -> Transaction { + fill(tx, None, None, None, None) +} + +/// Create a new rpc transaction result for a _pending_ signed transaction, setting block +/// environment related fields to `None`. +fn fill( + tx: TransactionSignedEcRecovered, + block_hash: Option, + block_number: Option, + base_fee: Option, + transaction_index: Option, +) -> Transaction { + let signer = tx.signer(); + let signed_tx = tx.into_signed(); + + let to = match signed_tx.kind() { + PrimitiveTransactionKind::Create => None, + PrimitiveTransactionKind::Call(to) => Some(*to), + }; + + let (gas_price, max_fee_per_gas) = match signed_tx.tx_type() { + TxType::Legacy => (Some(U128::from(signed_tx.max_fee_per_gas())), None), + TxType::EIP2930 => (Some(U128::from(signed_tx.max_fee_per_gas())), None), + TxType::EIP1559 | TxType::EIP4844 => { + // the gas price field for EIP1559 is set to `min(tip, gasFeeCap - baseFee) + + // baseFee` + let gas_price = base_fee + .and_then(|base_fee| { + signed_tx.effective_tip_per_gas(base_fee).map(|tip| tip + base_fee as u128) + }) + .unwrap_or_else(|| signed_tx.max_fee_per_gas()); + + (Some(U128::from(gas_price)), Some(U128::from(signed_tx.max_fee_per_gas()))) + } + }; + + let chain_id = signed_tx.chain_id().map(U64::from); + let access_list = match &signed_tx.transaction { + PrimitiveTransaction::Legacy(_) => None, + PrimitiveTransaction::Eip2930(tx) => Some( + tx.access_list + .0 + .iter() + .map(|item| AccessListItem { + address: item.address.0.into(), + storage_keys: item.storage_keys.iter().map(|key| key.0.into()).collect(), + }) + .collect(), + ), + PrimitiveTransaction::Eip1559(tx) => Some( + tx.access_list + .0 + .iter() + .map(|item| AccessListItem { + address: item.address.0.into(), + storage_keys: item.storage_keys.iter().map(|key| key.0.into()).collect(), + }) + .collect(), + ), + PrimitiveTransaction::Eip4844(tx) => Some( + tx.access_list + .0 + .iter() + .map(|item| AccessListItem { + address: item.address.0.into(), + storage_keys: item.storage_keys.iter().map(|key| key.0.into()).collect(), + }) + .collect(), + ), + }; + + let signature = Signature::from_primitive_signature( + *signed_tx.signature(), + signed_tx.tx_type(), + signed_tx.chain_id(), + ); + + Transaction { + hash: signed_tx.hash(), + nonce: U256::from(signed_tx.nonce()), + from: signer, + to, + value: U256::from(signed_tx.value()), + gas_price, + max_fee_per_gas, + max_priority_fee_per_gas: signed_tx.max_priority_fee_per_gas().map(U128::from), + signature: Some(signature), + gas: U256::from(signed_tx.gas_limit()), 
+ input: signed_tx.input().clone(), + chain_id, + access_list, + transaction_type: Some(U64::from(signed_tx.tx_type() as u8)), + + // These fields are set to None because they are not stored as part of the transaction + block_hash, + block_number: block_number.map(U256::from), + transaction_index, + } +} diff --git a/crates/rpc/rpc-types/Cargo.toml b/crates/rpc/rpc-types/Cargo.toml index 31a09d60b5f4..3cce6c1a8417 100644 --- a/crates/rpc/rpc-types/Cargo.toml +++ b/crates/rpc/rpc-types/Cargo.toml @@ -14,7 +14,6 @@ Reth RPC types # reth reth-primitives.workspace = true reth-rlp.workspace = true - # errors thiserror.workspace = true diff --git a/crates/rpc/rpc-types/src/eth/block.rs b/crates/rpc/rpc-types/src/eth/block.rs index a3762aef76d1..5588d55611f6 100644 --- a/crates/rpc/rpc-types/src/eth/block.rs +++ b/crates/rpc/rpc-types/src/eth/block.rs @@ -1,13 +1,11 @@ //! Contains types that represent ethereum types in [reth_primitives] when used in RPC use crate::Transaction; use reth_primitives::{ - Address, Block as PrimitiveBlock, Bloom, Bytes, Header as PrimitiveHeader, SealedHeader, - Withdrawal, H256, H64, U256, U64, + Address, Bloom, Bytes, Header as PrimitiveHeader, SealedHeader, Withdrawal, H256, H64, U256, + U64, }; -use reth_rlp::Encodable; use serde::{ser::Error, Deserialize, Serialize, Serializer}; use std::{collections::BTreeMap, ops::Deref}; - /// Block Transactions depending on the boolean attribute of `eth_getBlockBy*`, /// or if used by `eth_getUncle*` #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] @@ -84,118 +82,6 @@ pub struct Block { pub withdrawals: Option>, } -impl Block { - /// Converts the given primitive block into a [Block] response with the given - /// [BlockTransactionsKind] - /// - /// If a `block_hash` is provided, then this is used, otherwise the block hash is computed. - pub fn from_block( - block: PrimitiveBlock, - total_difficulty: U256, - kind: BlockTransactionsKind, - block_hash: Option, - ) -> Result { - match kind { - BlockTransactionsKind::Hashes => { - Ok(Self::from_block_with_tx_hashes(block, total_difficulty, block_hash)) - } - BlockTransactionsKind::Full => { - Self::from_block_full(block, total_difficulty, block_hash) - } - } - } - - /// Create a new [Block] response from a [primitive block](reth_primitives::Block), using the - /// total difficulty to populate its field in the rpc response. - /// - /// This will populate the `transactions` field with only the hashes of the transactions in the - /// block: [BlockTransactions::Hashes] - pub fn from_block_with_tx_hashes( - block: PrimitiveBlock, - total_difficulty: U256, - block_hash: Option, - ) -> Self { - let block_hash = block_hash.unwrap_or_else(|| block.header.hash_slow()); - let transactions = block.body.iter().map(|tx| tx.hash()).collect(); - - Self::from_block_with_transactions( - block_hash, - block, - total_difficulty, - BlockTransactions::Hashes(transactions), - ) - } - - /// Create a new [Block] response from a [primitive block](reth_primitives::Block), using the - /// total difficulty to populate its field in the rpc response. 
-    ///
-    /// This will populate the `transactions` field with the _full_ [Transaction] objects:
-    /// [BlockTransactions::Full]
-    pub fn from_block_full(
-        block: PrimitiveBlock,
-        total_difficulty: U256,
-        block_hash: Option<H256>,
-    ) -> Result<Self, BlockError> {
-        let block_hash = block_hash.unwrap_or_else(|| block.header.hash_slow());
-        let block_number = block.number;
-        let mut transactions = Vec::with_capacity(block.body.len());
-        for (idx, tx) in block.body.iter().enumerate() {
-            let signed_tx = tx.clone().into_ecrecovered().ok_or(BlockError::InvalidSignature)?;
-            transactions.push(Transaction::from_recovered_with_block_context(
-                signed_tx,
-                block_hash,
-                block_number,
-                block.base_fee_per_gas,
-                U256::from(idx),
-            ))
-        }
-
-        Ok(Self::from_block_with_transactions(
-            block_hash,
-            block,
-            total_difficulty,
-            BlockTransactions::Full(transactions),
-        ))
-    }
-
-    fn from_block_with_transactions(
-        block_hash: H256,
-        block: PrimitiveBlock,
-        total_difficulty: U256,
-        transactions: BlockTransactions,
-    ) -> Self {
-        let block_length = block.length();
-        let uncles = block.ommers.into_iter().map(|h| h.hash_slow()).collect();
-        let header = Header::from_primitive_with_hash(block.header.seal(block_hash));
-        let withdrawals = if header.withdrawals_root.is_some() { block.withdrawals } else { None };
-        Self {
-            header,
-            uncles,
-            transactions,
-            total_difficulty: Some(total_difficulty),
-            size: Some(U256::from(block_length)),
-            withdrawals,
-        }
-    }
-
-    /// Build an RPC block response representing
-    /// an Uncle from its header.
-    pub fn uncle_block_from_header(header: PrimitiveHeader) -> Self {
-        let hash = header.hash_slow();
-        let rpc_header = Header::from_primitive_with_hash(header.clone().seal(hash));
-        let uncle_block = PrimitiveBlock { header, ..Default::default() };
-        let size = Some(U256::from(uncle_block.length()));
-        Self {
-            uncles: vec![],
-            header: rpc_header,
-            transactions: BlockTransactions::Uncle,
-            withdrawals: Some(vec![]),
-            size,
-            total_difficulty: None,
-        }
-    }
-}
-
 /// Block header representation.
 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
diff --git a/crates/rpc/rpc-types/src/eth/transaction/mod.rs b/crates/rpc/rpc-types/src/eth/transaction/mod.rs
index 734d33f1d22c..1e8b09aecb1e 100644
--- a/crates/rpc/rpc-types/src/eth/transaction/mod.rs
+++ b/crates/rpc/rpc-types/src/eth/transaction/mod.rs
@@ -1,21 +1,16 @@
-mod common;
-mod receipt;
-mod request;
-mod signature;
-mod typed;
-
 pub use common::TransactionInfo;
 pub use receipt::TransactionReceipt;
 pub use request::TransactionRequest;
+use reth_primitives::{AccessListItem, Address, Bytes, H256, U128, U256, U64};
+use serde::{Deserialize, Serialize};
 pub use signature::Signature;
 pub use typed::*;
 
-use reth_primitives::{
-    AccessListItem, Address, BlockNumber, Bytes, Transaction as PrimitiveTransaction,
-    TransactionKind as PrimitiveTransactionKind, TransactionSignedEcRecovered, TxType, H256, U128,
-    U256, U64,
-};
-use serde::{Deserialize, Serialize};
+mod common;
+mod receipt;
+mod request;
+mod signature;
+mod typed;
 
 /// Transaction object used in RPC
 #[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)]
@@ -70,131 +65,10 @@ pub struct Transaction {
     pub transaction_type: Option<U64>,
 }
 
-impl Transaction {
-    /// Create a new rpc transaction result for a mined transaction, using the given block hash,
-    /// number, and tx index fields to populate the corresponding fields in the rpc result.
-    ///
-    /// The block hash, number, and tx index fields should be from the original block where the
-    /// transaction was mined.
-    pub fn from_recovered_with_block_context(
-        tx: TransactionSignedEcRecovered,
-        block_hash: H256,
-        block_number: BlockNumber,
-        base_fee: Option<u64>,
-        tx_index: U256,
-    ) -> Self {
-        Self::fill(tx, Some(block_hash), Some(block_number), base_fee, Some(tx_index))
-    }
-
-    /// Create a new rpc transaction result for a _pending_ signed transaction, setting block
-    /// environment related fields to `None`.
-    pub fn from_recovered(tx: TransactionSignedEcRecovered) -> Self {
-        Self::fill(tx, None, None, None, None)
-    }
-
-    /// Create a new rpc transaction result for a _pending_ signed transaction, setting block
-    /// environment related fields to `None`.
-    fn fill(
-        tx: TransactionSignedEcRecovered,
-        block_hash: Option<H256>,
-        block_number: Option<BlockNumber>,
-        base_fee: Option<u64>,
-        transaction_index: Option<U256>,
-    ) -> Self {
-        let signer = tx.signer();
-        let signed_tx = tx.into_signed();
-
-        let to = match signed_tx.kind() {
-            PrimitiveTransactionKind::Create => None,
-            PrimitiveTransactionKind::Call(to) => Some(*to),
-        };
-
-        let (gas_price, max_fee_per_gas) = match signed_tx.tx_type() {
-            TxType::Legacy => (Some(U128::from(signed_tx.max_fee_per_gas())), None),
-            TxType::EIP2930 => (Some(U128::from(signed_tx.max_fee_per_gas())), None),
-            TxType::EIP1559 | TxType::EIP4844 => {
-                // the gas price field for EIP1559 is set to `min(tip, gasFeeCap - baseFee) +
-                // baseFee`
-                let gas_price = base_fee
-                    .and_then(|base_fee| {
-                        signed_tx.effective_tip_per_gas(base_fee).map(|tip| tip + base_fee as u128)
-                    })
-                    .unwrap_or_else(|| signed_tx.max_fee_per_gas());
-
-                (Some(U128::from(gas_price)), Some(U128::from(signed_tx.max_fee_per_gas())))
-            }
-        };
-
-        let chain_id = signed_tx.chain_id().map(U64::from);
-        let access_list = match &signed_tx.transaction {
-            PrimitiveTransaction::Legacy(_) => None,
-            PrimitiveTransaction::Eip2930(tx) => Some(
-                tx.access_list
-                    .0
-                    .iter()
-                    .map(|item| AccessListItem {
-                        address: item.address.0.into(),
-                        storage_keys: item.storage_keys.iter().map(|key| key.0.into()).collect(),
-                    })
-                    .collect(),
-            ),
-            PrimitiveTransaction::Eip1559(tx) => Some(
-                tx.access_list
-                    .0
-                    .iter()
-                    .map(|item| AccessListItem {
-                        address: item.address.0.into(),
-                        storage_keys: item.storage_keys.iter().map(|key| key.0.into()).collect(),
-                    })
-                    .collect(),
-            ),
-            PrimitiveTransaction::Eip4844(tx) => Some(
-                tx.access_list
-                    .0
-                    .iter()
-                    .map(|item| AccessListItem {
-                        address: item.address.0.into(),
-                        storage_keys: item.storage_keys.iter().map(|key| key.0.into()).collect(),
-                    })
-                    .collect(),
-            ),
-        };
-
-        let signature = Signature::from_primitive_signature(
-            *signed_tx.signature(),
-            signed_tx.tx_type(),
-            signed_tx.chain_id(),
-        );
-
-        Self {
-            hash: signed_tx.hash(),
-            nonce: U256::from(signed_tx.nonce()),
-            from: signer,
-            to,
-            value: U256::from(signed_tx.value()),
-            gas_price,
-            max_fee_per_gas,
-            max_priority_fee_per_gas: signed_tx.max_priority_fee_per_gas().map(U128::from),
-            signature: Some(signature),
-            gas: U256::from(signed_tx.gas_limit()),
-            input: signed_tx.input().clone(),
-            chain_id,
-            access_list,
-            transaction_type: Some(U64::from(signed_tx.tx_type() as u8)),
-
-            // These fields are set to None because they are not stored as part of the transaction
-            block_hash,
-            block_number: block_number.map(U256::from),
-            transaction_index,
-        }
-    }
-}
-
 #[cfg(test)]
 mod tests {
-    use crate::eth::transaction::signature::Parity;
-
     use super::*;
+    use crate::eth::transaction::signature::Parity;
 
     #[test]
     fn serde_transaction() {
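The access-list conversion deleted here (and re-added in `reth-rpc-types-compat`) is spelled out three times, once per typed-transaction variant. One way it could be factored, sketched over simplified stand-in structs rather than the real reth types:

// Hypothetical, simplified mirrors of the primitive and RPC access list entries.
struct PrimitiveAccessListItem {
    address: [u8; 20],
    storage_keys: Vec<[u8; 32]>,
}

struct RpcAccessListItem {
    address: [u8; 20],
    storage_keys: Vec<[u8; 32]>,
}

// A single shared conversion instead of one copy per EIP-2930/1559/4844 arm.
fn to_rpc_access_list(items: &[PrimitiveAccessListItem]) -> Vec<RpcAccessListItem> {
    items
        .iter()
        .map(|item| RpcAccessListItem {
            address: item.address,
            storage_keys: item.storage_keys.clone(),
        })
        .collect()
}

fn main() {
    let items = vec![PrimitiveAccessListItem { address: [0; 20], storage_keys: vec![[0; 32]] }];
    assert_eq!(to_rpc_access_list(&items).len(), 1);
}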
diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml
index 1791821f1ce3..756fefd8a1e3 100644
--- a/crates/rpc/rpc/Cargo.toml
+++ b/crates/rpc/rpc/Cargo.toml
@@ -24,6 +24,7 @@ reth-revm = { path = "../../revm" }
 reth-tasks.workspace = true
 reth-metrics.workspace = true
 reth-consensus-common = { path = "../../consensus/common" }
+reth-rpc-types-compat.workspace = true
 
 # eth
 revm = { workspace = true, features = [
diff --git a/crates/rpc/rpc/src/eth/api/block.rs b/crates/rpc/rpc/src/eth/api/block.rs
index ab297add7114..f575998fc385 100644
--- a/crates/rpc/rpc/src/eth/api/block.rs
+++ b/crates/rpc/rpc/src/eth/api/block.rs
@@ -9,10 +9,12 @@ use crate::{
 };
 use reth_network_api::NetworkInfo;
 use reth_primitives::{BlockId, BlockNumberOrTag, TransactionMeta};
+
 use reth_provider::{BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory};
-use reth_rpc_types::{Block, Index, RichBlock, TransactionReceipt};
-use reth_transaction_pool::TransactionPool;
+use reth_rpc_types::{Index, RichBlock, TransactionReceipt};
+use reth_rpc_types_compat::block::{from_block, uncle_block_from_header};
 
+use reth_transaction_pool::TransactionPool;
 impl<Provider, Pool, Network> EthApi<Provider, Pool, Network>
 where
     Provider:
@@ -47,10 +49,8 @@ where
             .unwrap_or_default();
         let index = usize::from(index);
 
-        let uncle = uncles
-            .into_iter()
-            .nth(index)
-            .map(|header| Block::uncle_block_from_header(header).into());
+        let uncle =
+            uncles.into_iter().nth(index).map(|header| uncle_block_from_header(header).into());
         Ok(uncle)
     }
 
@@ -160,8 +160,7 @@ where
             .provider()
             .header_td_by_number(block.number)?
             .ok_or(EthApiError::UnknownBlockNumber)?;
-        let block =
-            Block::from_block(block.into(), total_difficulty, full.into(), Some(block_hash))?;
+        let block = from_block(block.into(), total_difficulty, full.into(), Some(block_hash))?;
         Ok(Some(block.into()))
     }
 }
diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs
index 6784b62ec0c8..770a9e8c1e15 100644
--- a/crates/rpc/rpc/src/eth/api/transactions.rs
+++ b/crates/rpc/rpc/src/eth/api/transactions.rs
@@ -19,6 +19,8 @@ use reth_primitives::{
     TransactionKind::{Call, Create},
     TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, H256, U128, U256, U64,
 };
+use reth_rpc_types_compat::from_recovered_with_block_context;
+
 use reth_provider::{
     BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderBox, StateProviderFactory,
 };
@@ -732,7 +734,7 @@ where
         if let Some(tx_signed) = block.body.into_iter().nth(index.into()) {
             let tx =
                 tx_signed.into_ecrecovered().ok_or(EthApiError::InvalidTransactionSignature)?;
-            return Ok(Some(Transaction::from_recovered_with_block_context(
+            return Ok(Some(from_recovered_with_block_context(
                 tx,
                 block_hash,
                 block.header.number,
@@ -820,9 +822,9 @@ impl From<TransactionSource> for TransactionSignedEcRecovered {
 impl From<TransactionSource> for Transaction {
     fn from(value: TransactionSource) -> Self {
         match value {
-            TransactionSource::Pool(tx) => Transaction::from_recovered(tx),
+            TransactionSource::Pool(tx) => reth_rpc_types_compat::transaction::from_recovered(tx),
             TransactionSource::Block { transaction, index, block_hash, block_number, base_fee } => {
-                Transaction::from_recovered_with_block_context(
+                from_recovered_with_block_context(
                     transaction,
                     block_hash,
                     block_number,
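The call sites in this patch all migrate from inherent `Transaction::from_recovered*` constructors to free functions in the new `reth-rpc-types-compat` crate, so the RPC type crate stays a plain data-definition layer. A minimal mirror of that layering, with hypothetical module names standing in for the crates:

// stands in for reth-rpc-types: data only, no conversion logic
mod rpc_types {
    pub struct Transaction {
        pub nonce: u64,
    }
}

// stands in for reth-rpc-types-compat: owns the conversions
mod compat {
    pub struct Recovered {
        pub nonce: u64,
    }

    // a free function instead of an inherent `Transaction::from_recovered`
    pub fn from_recovered(tx: Recovered) -> super::rpc_types::Transaction {
        super::rpc_types::Transaction { nonce: tx.nonce }
    }
}

fn main() {
    let tx = compat::from_recovered(compat::Recovered { nonce: 7 });
    assert_eq!(tx.nonce, 7);
}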
diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs
index d5c0231ef849..4d71d3810dff 100644
--- a/crates/rpc/rpc/src/eth/pubsub.rs
+++ b/crates/rpc/rpc/src/eth/pubsub.rs
@@ -11,7 +11,7 @@ use reth_rpc_types::{
         Params, PubSubSyncStatus, SubscriptionKind, SubscriptionResult as EthSubscriptionResult,
         SyncStatusMetadata,
     },
-    FilteredParams, Header, Log, Transaction,
+    FilteredParams, Header, Log,
 };
 use reth_tasks::{TaskSpawner, TokioTaskExecutor};
 use reth_transaction_pool::{NewTransactionEvent, TransactionPool};
@@ -128,7 +128,7 @@ where
             // full transaction objects requested
             let stream = pubsub.full_pending_transaction_stream().map(|tx| {
                 EthSubscriptionResult::FullTransaction(Box::new(
-                    Transaction::from_recovered(
+                    reth_rpc_types_compat::transaction::from_recovered(
                         tx.transaction.to_recovered_transaction(),
                     ),
                 ))
diff --git a/crates/rpc/rpc/src/txpool.rs b/crates/rpc/rpc/src/txpool.rs
index 8ca4c13729a8..6458b015fb4d 100644
--- a/crates/rpc/rpc/src/txpool.rs
+++ b/crates/rpc/rpc/src/txpool.rs
@@ -39,7 +39,7 @@ where
                 let entry = content.entry(tx.sender()).or_default();
                 let key = tx.nonce().to_string();
                 let tx = tx.to_recovered_transaction();
-                let tx = Transaction::from_recovered(tx);
+                let tx = reth_rpc_types_compat::transaction::from_recovered(tx);
                 entry.insert(key, tx);
             }
 
From 6c90ec537dca94f1f096f024f3c483006802f25e Mon Sep 17 00:00:00 2001
From: Dan Cline <6798349+Rjected@users.noreply.github.com>
Date: Wed, 9 Aug 2023 07:59:14 -0400
Subject: [PATCH 380/722] feat: add BlobTransaction network type (#4102)

---
 crates/net/eth-wire/src/types/transactions.rs | 47 ++++++++++++++++++-
 1 file changed, 45 insertions(+), 2 deletions(-)

diff --git a/crates/net/eth-wire/src/types/transactions.rs b/crates/net/eth-wire/src/types/transactions.rs
index 7f38c0f69235..9df994004d6d 100644
--- a/crates/net/eth-wire/src/types/transactions.rs
+++ b/crates/net/eth-wire/src/types/transactions.rs
@@ -1,7 +1,10 @@
 //! Implements the `GetPooledTransactions` and `PooledTransactions` message types.
 use reth_codecs::derive_arbitrary;
-use reth_primitives::{TransactionSigned, H256};
-use reth_rlp::{RlpDecodableWrapper, RlpEncodableWrapper};
+use reth_primitives::{
+    kzg::{self, Blob, Bytes48, KzgProof, KzgSettings},
+    TransactionSigned, H256,
+};
+use reth_rlp::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper};
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
 
@@ -51,6 +54,46 @@ impl From<PooledTransactions> for Vec<TransactionSigned> {
     }
 }
 
+/// A response to [`GetPooledTransactions`] that includes blob data, their commitments, and their
+/// corresponding proofs.
+///
+/// This is defined in [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#networking) as an element
+/// of a [PooledTransactions] response.
+// TODO: derive_arbitrary
+#[derive(Clone, Debug, PartialEq, Eq, RlpEncodable, RlpDecodable, Default)]
+pub struct BlobTransaction {
+    /// The transaction payload.
+    pub transaction: TransactionSigned,
+    /// The transaction's blob data.
+    pub blobs: Vec<Blob>,
+    /// The transaction's blob commitments.
+    pub commitments: Vec<Bytes48>,
+    /// The transaction's blob proofs.
+    pub proofs: Vec<Bytes48>,
+}
+
+impl BlobTransaction {
+    /// Verifies that the transaction's blob data, commitments, and proofs are all valid.
+    ///
+    /// Takes as input the [KzgSettings], which should contain the parameters derived from the
+    /// KZG trusted setup.
+    ///
+    /// This ensures that the blob transaction payload has the same number of blob data elements,
+    /// commitments, and proofs. Each blob data element is verified against its commitment and
+    /// proof.
+    ///
+    /// Returns `false` if any blob KZG proof in the response fails to verify.
+    pub fn validate(&self, proof_settings: &KzgSettings) -> Result<bool, kzg::Error> {
+        // Verify as a batch
+        KzgProof::verify_blob_kzg_proof_batch(
+            self.blobs.as_slice(),
+            self.commitments.as_slice(),
+            self.proofs.as_slice(),
+            proof_settings,
+        )
+    }
+}
+
 #[cfg(test)]
 mod test {
     use crate::{message::RequestPair, GetPooledTransactions, PooledTransactions};
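A sketch of how a caller might drive `validate` end to end, assuming the `BlobTransaction` re-export and a trusted setup file at an illustrative path; `KzgSettings::load_trusted_setup_file` comes from the c-kzg bindings re-exported through `reth_primitives::kzg`, and its exact signature varies between c-kzg versions:

use std::path::PathBuf;

use reth_eth_wire::BlobTransaction; // assumed re-export of the type added above
use reth_primitives::kzg::KzgSettings;

fn is_valid(tx: &BlobTransaction) -> bool {
    // In real code the settings should be loaded once and shared; the path is illustrative.
    let settings = KzgSettings::load_trusted_setup_file(PathBuf::from("trusted_setup.txt"))
        .expect("trusted setup should load");

    // Batch verification: the three vectors must have matching lengths and every
    // (blob, commitment, proof) triple must check out.
    tx.validate(&settings).unwrap_or(false)
}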
From 517f86aa2f3595e291fe741ef67583a17543dfe0 Mon Sep 17 00:00:00 2001
From: Alessandro Mazza <121622391+alessandromazza98@users.noreply.github.com>
Date: Wed, 9 Aug 2023 14:08:45 +0200
Subject: [PATCH 381/722] ask for confirmation during reth db drop (#4118)

---
 bin/reth/src/db/mod.rs | 28 +++++++++++++++++++++++++---
 1 file changed, 25 insertions(+), 3 deletions(-)

diff --git a/bin/reth/src/db/mod.rs b/bin/reth/src/db/mod.rs
index a56ff9d9a1ff..9671f8d795ba 100644
--- a/bin/reth/src/db/mod.rs
+++ b/bin/reth/src/db/mod.rs
@@ -15,7 +15,10 @@ use reth_db::{
     Tables,
 };
 use reth_primitives::ChainSpec;
-use std::sync::Arc;
+use std::{
+    io::{self, Write},
+    sync::Arc,
+};
 
 mod clear;
 mod diff;
@@ -74,7 +77,11 @@ pub enum Subcommands {
     /// Gets the content of a table for the given key
     Get(get::Command),
     /// Deletes all database entries
-    Drop,
+    Drop {
+        /// Bypasses the interactive confirmation and drops the database directly
+        #[arg(short, long)]
+        force: bool,
+    },
     /// Deletes all table entries
     Clear(clear::Command),
     /// Lists current and local database versions
@@ -178,7 +185,22 @@ impl Command {
                 let tool = DbTool::new(&db, self.chain.clone())?;
                 command.execute(&tool)?;
             }
-            Subcommands::Drop => {
+            Subcommands::Drop { force } => {
+                if !force {
+                    // Ask for confirmation
+                    print!("Are you sure you want to drop the database? This cannot be undone. (y/N): ");
+                    // Flush the buffer to ensure the message is printed immediately
+                    io::stdout().flush().unwrap();
+
+                    let mut input = String::new();
+                    io::stdin().read_line(&mut input).expect("Failed to read line");
+
+                    if !input.trim().eq_ignore_ascii_case("y") {
+                        println!("Database drop aborted!");
+                        return Ok(())
+                    }
+                }
+
                 let db = open_db(&db_path, self.db.log_level)?;
                 let mut tool = DbTool::new(&db, self.chain.clone())?;
                 tool.drop(db_path)?;
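The prompt-and-confirm logic above is small enough to lift into a reusable helper; a self-contained sketch of the same pattern (the prompt text and function name are illustrative):

use std::io::{self, Write};

/// Ask a yes/no question on stdout and read the answer from stdin.
/// Anything other than `y`/`Y` counts as "no", matching the patch's default.
fn confirm(question: &str) -> io::Result<bool> {
    print!("{question} (y/N): ");
    // flush so the prompt appears before we block on stdin
    io::stdout().flush()?;

    let mut input = String::new();
    io::stdin().read_line(&mut input)?;
    Ok(input.trim().eq_ignore_ascii_case("y"))
}

fn main() -> io::Result<()> {
    if confirm("Are you sure you want to drop the database? This cannot be undone.")? {
        println!("dropping...");
    } else {
        println!("aborted");
    }
    Ok(())
}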
From 6581961d3f4f861df0cdc1b6b4f8f49ba70267df Mon Sep 17 00:00:00 2001
From: Roman Krasiuk
Date: Wed, 9 Aug 2023 15:17:19 +0300
Subject: [PATCH 382/722] chore: disable eta for headers & bodies (#4065)

---
 bin/reth/src/node/events.rs | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

diff --git a/bin/reth/src/node/events.rs b/bin/reth/src/node/events.rs
index cc8fe657b54b..d6f835db6b5f 100644
--- a/bin/reth/src/node/events.rs
+++ b/bin/reth/src/node/events.rs
@@ -66,7 +66,7 @@ impl NodeState {
                 stage = %stage_id,
                 from = self.current_checkpoint.block_number,
                 checkpoint = %self.current_checkpoint,
-                eta = %self.eta,
+                eta = %self.eta.fmt_for_stage(stage_id),
                 "Executing stage",
             );
         }
@@ -85,7 +85,7 @@ impl NodeState {
                     stage = %stage_id,
                     block = checkpoint.block_number,
                     %checkpoint,
-                    eta = %self.eta,
+                    eta = %self.eta.fmt_for_stage(stage_id),
                     "{}",
                     if done {
                         "Stage finished executing"
@@ -226,13 +226,13 @@ where
         let mut this = self.project();
 
         while this.info_interval.poll_tick(cx).is_ready() {
-            if let Some(stage) = this.state.current_stage.map(|id| id.to_string()) {
+            if let Some(stage_id) = this.state.current_stage {
                 info!(
                     target: "reth::cli",
                     connected_peers = this.state.num_connected_peers(),
-                    %stage,
+                    stage = %stage_id.to_string(),
                     checkpoint = %this.state.current_checkpoint,
-                    eta = %this.state.eta,
+                    eta = %this.state.eta.fmt_for_stage(stage_id),
                     "Status"
                 );
             } else {
@@ -299,6 +299,18 @@ impl Eta {
         self.last_checkpoint = current;
         self.last_checkpoint_time = Some(Instant::now());
    }
+
+    /// Format ETA for a given stage.
+    ///
+    /// NOTE: Currently ETA is disabled for Headers and Bodies stages until we find better
+    /// heuristics for calculation.
+    fn fmt_for_stage(&self, stage: StageId) -> String {
+        if matches!(stage, StageId::Headers | StageId::Bodies) {
+            String::from("unknown")
+        } else {
+            format!("{}", self)
+        }
+    }
 }
 
 impl std::fmt::Display for Eta {
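For context on what `Eta` is estimating: a checkpoint-based ETA reduces to entities-per-second since the last checkpoint, extrapolated over what remains. A minimal sketch of that idea, not reth's `Eta` type:

use std::time::{Duration, Instant};

/// Minimal rate-based ETA: entities processed since the last checkpoint,
/// divided by elapsed time, extrapolated over the remaining entities.
struct SimpleEta {
    last_processed: u64,
    last_instant: Instant,
}

impl SimpleEta {
    fn estimate(&self, processed: u64, total: u64) -> Option<Duration> {
        let done = processed.checked_sub(self.last_processed)?;
        if done == 0 {
            return None; // no progress since the checkpoint, no usable rate
        }
        let per_second = done as f64 / self.last_instant.elapsed().as_secs_f64();
        let remaining = total.saturating_sub(processed) as f64;
        Some(Duration::from_secs_f64(remaining / per_second))
    }
}

fn main() {
    let eta = SimpleEta { last_processed: 0, last_instant: Instant::now() };
    std::thread::sleep(Duration::from_millis(10));
    // pretend we processed 500 of 1000 entities since the checkpoint
    println!("{:?}", eta.estimate(500, 1000));
}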
From 31d1288d402817c16c404b5f625d8ed37d9bef16 Mon Sep 17 00:00:00 2001
From: joshieDo <93316087+joshieDo@users.noreply.github.com>
Date: Wed, 9 Aug 2023 13:30:44 +0100
Subject: [PATCH 383/722] feat: adds `arbitrary` to `BlobTransaction` and
 `KZG_TRUSTED_SETUP` (#4116)

Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com>
Co-authored-by: Matthias Seitz
---
 Cargo.lock                                    |    1 +
 crates/net/eth-wire/src/types/transactions.rs |   92 +-
 crates/primitives/Cargo.toml                  |    1 +
 .../primitives/res/eip4844/trusted_setup.txt  | 4163 +++++++++++++++++
 crates/primitives/src/constants/eip4844.rs    |   14 +
 5 files changed, 4269 insertions(+), 2 deletions(-)
 create mode 100644 crates/primitives/res/eip4844/trusted_setup.txt

diff --git a/Cargo.lock b/Cargo.lock
index a11dd46e99a7..8d15a79d1ac3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5831,6 +5831,7 @@ dependencies = [
  "serde_with",
  "strum 0.25.0",
  "sucds",
+ "tempfile",
 "test-fuzz",
 "thiserror",
 "tiny-keccak",
diff --git a/crates/net/eth-wire/src/types/transactions.rs b/crates/net/eth-wire/src/types/transactions.rs
index 9df994004d6d..35a8a7649127 100644
--- a/crates/net/eth-wire/src/types/transactions.rs
+++ b/crates/net/eth-wire/src/types/transactions.rs
@@ -1,5 +1,5 @@
 //! Implements the `GetPooledTransactions` and `PooledTransactions` message types.
-use reth_codecs::derive_arbitrary;
+use reth_codecs::{add_arbitrary_tests, derive_arbitrary};
 use reth_primitives::{
     kzg::{self, Blob, Bytes48, KzgProof, KzgSettings},
     TransactionSigned, H256,
@@ -9,6 +9,19 @@ use reth_rlp::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrap
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
 
+#[cfg(any(test, feature = "arbitrary"))]
+use proptest::{
+    arbitrary::{any as proptest_any, ParamsFor},
+    collection::vec as proptest_vec,
+    strategy::{BoxedStrategy, Strategy},
+};
+
+#[cfg(any(test, feature = "arbitrary"))]
+use reth_primitives::{
+    constants::eip4844::{FIELD_ELEMENTS_PER_BLOB, KZG_TRUSTED_SETUP},
+    kzg::{KzgCommitment, BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT},
+};
+
 /// A list of transaction hashes that the peer would like transaction bodies for.
 #[derive_arbitrary(rlp)]
 #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)]
@@ -59,7 +72,7 @@ impl From<PooledTransactions> for Vec<TransactionSigned> {
 ///
 /// This is defined in [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#networking) as an element
 /// of a [PooledTransactions] response.
-// TODO: derive_arbitrary
+#[add_arbitrary_tests(rlp, 20)]
 #[derive(Clone, Debug, PartialEq, Eq, RlpEncodable, RlpDecodable, Default)]
 pub struct BlobTransaction {
     /// The transaction payload.
@@ -94,6 +107,81 @@ impl BlobTransaction {
     }
 }
 
+#[cfg(any(test, feature = "arbitrary"))]
+impl<'a> arbitrary::Arbitrary<'a> for BlobTransaction {
+    fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
+        let mut arr = [0u8; BYTES_PER_BLOB];
+        let blobs: Vec<Blob> = (0..u.int_in_range(1..=16)?)
+            .map(|_| {
+                arr = arbitrary::Arbitrary::arbitrary(u).unwrap();
+
+                // Ensure that the blob is canonical by ensuring that
+                // each field element contained in the blob is < BLS_MODULUS
+                for i in 0..(FIELD_ELEMENTS_PER_BLOB as usize) {
+                    arr[i * BYTES_PER_FIELD_ELEMENT] = 0;
+                }
+                Blob::from(arr)
+            })
+            .collect();
+
+        Ok(generate_blob_transaction(blobs, TransactionSigned::arbitrary(u)?))
+    }
+}
+
+#[cfg(any(test, feature = "arbitrary"))]
+impl proptest::arbitrary::Arbitrary for BlobTransaction {
+    type Parameters = ParamsFor<String>;
+    fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy {
+        (
+            proptest_vec(proptest_vec(proptest_any::<u8>(), BYTES_PER_BLOB), 1..=5),
+            proptest_any::<TransactionSigned>(),
+        )
+            .prop_map(move |(blobs, tx)| {
+                let blobs = blobs
+                    .into_iter()
+                    .map(|mut blob| {
+                        let mut arr = [0u8; BYTES_PER_BLOB];
+
+                        // Ensure that the blob is canonical by ensuring that
+                        // each field element contained in the blob is < BLS_MODULUS
+                        for i in 0..(FIELD_ELEMENTS_PER_BLOB as usize) {
+                            blob[i * BYTES_PER_FIELD_ELEMENT] = 0;
+                        }
+
+                        arr.copy_from_slice(blob.as_slice());
+                        arr.into()
+                    })
+                    .collect();
+
+                generate_blob_transaction(blobs, tx)
+            })
+            .boxed()
+    }
+
+    type Strategy = BoxedStrategy<Self>;
+}
+
+#[cfg(any(test, feature = "arbitrary"))]
+fn generate_blob_transaction(blobs: Vec<Blob>, transaction: TransactionSigned) -> BlobTransaction {
+    let kzg_settings = KZG_TRUSTED_SETUP.clone();
+
+    let commitments: Vec<Bytes48> = blobs
+        .iter()
+        .map(|blob| KzgCommitment::blob_to_kzg_commitment(blob.clone(), &kzg_settings).unwrap())
+        .map(|commitment| commitment.to_bytes())
+        .collect();
+
+    let proofs: Vec<Bytes48> = blobs
+        .iter()
+        .zip(commitments.iter())
+        .map(|(blob, commitment)| {
+            KzgProof::compute_blob_kzg_proof(blob.clone(), *commitment, &kzg_settings).unwrap()
+        })
+        .map(|proof| proof.to_bytes())
+        .collect();
+
+    BlobTransaction { transaction, blobs, commitments, proofs }
+}
 #[cfg(test)]
 mod test {
     use crate::{message::RequestPair, GetPooledTransactions, PooledTransactions};
diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml
index e73b59d88590..69cf6d3e0f6e 100644
--- a/crates/primitives/Cargo.toml
+++ b/crates/primitives/Cargo.toml
@@ -60,6 +60,7 @@ impl-serde = "0.4.0"
 once_cell = "1.17.0"
 zstd = { version = "0.12", features = ["experimental"] }
 paste = "1.0"
+tempfile = "3.3"
 
 # proof related
 triehash = "0.8"
diff --git a/crates/primitives/res/eip4844/trusted_setup.txt b/crates/primitives/res/eip4844/trusted_setup.txt
new file mode 100644
index 000000000000..26612cb88767
--- /dev/null
+++ b/crates/primitives/res/eip4844/trusted_setup.txt
@@ -0,0 +1,4163 @@
+4096
+65
+8d0c6eeadd3f8529d67246f77404a4ac2d9d7fd7d50cf103d3e6abb9003e5e36d8f322663ebced6707a7f46d97b7566d
+a0d2392f030681c61c2a867862917e10f7678d882034bb89af3db87e6ab3883a304034643dc9688a04e41a5b831582bc
+94298073048d70c74f36685e547d04b7311479daa05912e18ead64b2099a194bf48ec344273d58daf0b86b1d8f1d318d
+85c4063d13499013dc2ccaa98c1606763e6b1e8cca20922d4cec12ecbaf006ea81ffabe6596d1ac7ba1daf7e63e30898
+84c64bce36c6b5145c6880113366025ab9a8f88e3948d374e27be8b8f9f87402c70fec9b3c621a2d1d26764a84370d0c
+8b206c823acf5294552ee54579fac0f45ea15bd273dbacd63b88cd7cddbcce23b56e52f8ea352e1e1d7dcd9b3991b413
+b70aaa4038ba3f5ff306c647b4392d004950c53ad8f6713b5c9c21ac99f5c56cf57323dac500a1f4e9507c4746b07a2f
+895f6d1fc70b52f838d81b24f4840729cd5988b649e9d6e6f6dbac4281d8818f39ebdae7e6ea139d7f98a832bd6f29f1
+a71a2832bbaade974c9ef7505dfa24e1ba466a9951b7c2db56886be31c9c7b871f3ee76cb1fcc1aab4b906d6502bc9b5
+9530ba64a21e27834609c00616bc63e8fc2dc7800e478ad728ec39c624f65bbc62cb48f59decb7fbf605ce1920d02622 +8d0609affaf8619bb2f6c80699e5bc7783becbd5973630cdd227ae52d6d701c45f4270becca97701b40279fab588cf64 +8f5d5b4c3bb8dc9a19e5a0f84df6322a79a00c7783c86254197d313a5b35d3965a1f7c0b9c4e39ec1e8f5d02d3aa0862 +96aa47a3ba20b1cfe81eb26bef503225037fdf4c9df53bea1b520841875cd1db6aa8e0f34685da08b55a3ce7289e6de0 +b4c27ee3f4b8c0031837160f0a75632f5b51b5850d52b530096443f54c2b264aeccc5c61b4fcc8de7074475f354fa0d8 +acfd735cda20be1d6f425a7886629c91732fbb5a4e0350ca740a8fb5b39f2001071cec0b2a0f6ca35e1f35a5ea18d00f +ae44d87b1d16d59504c602cbacde2c2791f1520391ca50154e6036d3953ca466cf93d6537da2adb729e6f9f4ffa87853 +97b492872ce44941ea4668ffca83b82fac0f4021bd47e0a5ffeaaacb1b3fc924ee4d53b99f7bcafe0985caf0fbe5d1d3 +b3fbe2f9103d293f49c6c6016d5913f041c9113295397388111a0fdf4245d8edd6e63b9a1a1c9c8f868d6e1988116880 +805efa08fd2046c44c427b225c17bed8a1eb3320cdf94026fdc24c6d345a6cfebfd7475f85d2d1bf22018ca72d2761d3 +9888bae0d83077d1dfde82fdffb1195565c31c519b80cba1e21aba58ee9ccb5677f74bfde13fa5723026514a7d839661 +922e19d2646ba90c9f56278bddf74621cc4518ae2f042fb8245843e87cd82724c6d7c9a99907ac6de5f2187fd2e77cbe +a38f0e1faf97dd1e0804b44e4d150dbfa48318442d1c5255eb0c14ea56b50502f3c7cb216a0336e7c140398088dc01cf +93598ea391c8735799a1d4cd0456f34994ccdf4883fad57419f634f30fee595938bc66b066dade9ae52578818c00d899 +a528dc920734cfaee9feacbc0baa5b73befb1ec6fbd422fcad09a9c1f8f8c40b5ea332b2cf04dc1d6d921e9da9ddfeb4 +b38d45316bf78d11e796a34ee535814e6cde0e642f14108329c5b21f4fec18cd61f84a3025824bb8dc4cbd26b2ecc9bf +8eec35a7404c9a35dc6ad0260b7f0f7fd1bfe92a2e08bc72548b99ed9acdc378728a8ea9c6879a6e47e37edb0d28c193 +a68a4446274ccd947c61bf736c5219dad680b99c6085a26719793e0d9dab26d5f8a0b28e71be6e1b9ea4ae39139f7f57 +a0acb543f41ad12e3b2e096629ccdd719a001d0ff53bb151e9a37aa57852f7275a7bbd06dc2a06af9144524548164af5 +b271e74cdbcf8b9143f8472174bdb068c23308ea807c60a554c185f7be6f231aac13347139837514171a876dfac5baa5 +8195a460719000cd1df379ebbf7918f71301a50a2fa587505cc5b8c4534c3d2343f63d28e7ee991d7a1cebb15d380696 +96202b60426773e8731dcbedbf613477f65940a19fb4be0f4f742b0c76ae9d88ecdb6d36cd4f12bb404dd5d360c819e2 +b0a80fe60b71ca9e80157138de8787b8a786326179604b8a15a744e52662645987e5f859ef5c76492d560daf4624b9a7 +a331ea8adf87daa5e2d458d0113c307edae1a84927bca7d484aca5f8c1b6378ab42981c44b0d916d7249f4b475f926f1 +aa1a8f59ae0912abf191ea7e209ff401628278dfb2269db6d87cf33bd52af3dbffbe96513a8b210e965c853a554b787a +ac4f4a0e1b1a155e1f22a9085b0b047fe54c8437dbbb8e9720fd6b0cdd76557d19ca2e885a48890f0247b1a72be0e287 +a428465505eac7b9660eb0d495a7a00c8cc238de3a02ebbd2eb07e502e9868086e9584b59953cf1480c0b781295db339 +b7b77e21e08f6357cbd3dcd3035c3e8ec84cdfa13c7baef6c67e0ef43095e61fd549694263d7def8b8adc3a0fdcc7987 +abb991d17c5bdd264c592c55101e265cb3210c4157aee4079173fd51da1e0199eed1d6c890aab95817ec078561d771af +846a8e4f801faf5fbec078b09c362ee30a00b2b58a4871744d03cd118b913464233ff926e52b0c75fbfcf098ad25a1e6 +947e91ffa32f38c1ccb72cca4bfabaee9e63ab74a16f034cabba25e462f7331ebe5a7ba393f69e91830415fa75b1b52e +8dc5e26adc693f4e300cab7385edca1a2fe14c8ee6dc0cd6d013cb5aa154dc380e9e81e259cbc59c1f38f7c4a57f1c7d +9818ef6605d6ea3b7bf4da5c6d6d8ed540bb94df4d14c974e1b79ed2fd1a0b897b8cf1ff671a181a697effd66b1644a5 +b5eab6baf03af994fc32cc9dce388394c18c01cdafe7909fde948f3e00a72dc8f30d15977d0f114bd7c140f5f94cf005 +83b2e9858d3b929f9a2ad66a91a2c0c44d15d288c17c12a1614301a6f2d61d31eaa540ca7781520fe4420afae0ec0208 +ab338fbd38bce4d1b7a759f71e5e5673746c52846eff3d0b6825e390aeeca8f9f123ee88c78fe4d520cc415cbae32bf1 
+81adb6322b8db95d1711304e5b59f37640ca88c03e6c7e15de932be5267dff7351fa17664113ecc528e8920f5bfdc0d1 +89e2e0c0d769e4107232df741678a6bacb041d0154385450aaca8be9c3c18c42f817373962e7569d33935c35666a8a6a +8f0756fea8b34a2b471ec39e4448a6a6935e5432ec2859d222964a4c82777a340e1d702777aeb946fa405afc0438221a +a2bf90c505a6f03b3dd09d04e1e7cf301fe3415b273e263f15fdfe5d0e40f619b95e8bf00916d3eaa7d7f8c0bae41c8e +91d5c76b5542637588cd47279d0bd74a25dbda0d8ec0ff68b62d7e01e34a63fc3e06d116ee75c803864b1cf330f6c360 +a9958c388d25315a979566174b0622446335cb559aff1992bd71910c47497536019c6854d31c0e22df07505963fc44ff +91d82b09d5726077eed6c19bcb398abe79d87ce16c413df6bf5932b8fd64b4c0fd19c9bf0fa8db657a4a4d4c0d8f5a2d +ac6e0a86e0ee416855c3e9eef2526c43835f5245527ed0038bc83b4fcadb4ea5beb91143cc674486681a9f0e63f856b1 +aaf00d6efd0c6efb9f7d6a42555abec05c5af8f324e2e579fc2ac83bdc937cc682d9bc2ffd250619c8bb098b8c84db80 +963f5fcd8476d0dbeb03a62cde40e3deee25f55e7ded7572d8884975f38eddc5406fc4b0adff602a1cca90f7205a7fdc +a3805ee01512f644d2679511bd8607890ee9721e75ac9a85ab9fd6fceb1308d5b9b0e9907686b4e683b34aed0f34cd81 +a483d7708465cd4e33b4407fe82c84ef6bc7fa21475d961fe2e99802d0c999b6474ef7a46dd615b219c9c7e9faec45ee +b6b5f9456f12d6781c41f17cdc9d259f9515994d5dee49bb701a33fa2e8dcbb2c8c13f822b51ad232fc5e05bff2f68ef +8766b721b0cf9b1a42614c7d29aad2d89da4996dc9e2a3baeba4b33ca74100ab0b83f55c546c963e3b6af1dcf9ca067c +ac5e8da1154cf4be8df2bbd2e212b7f8077099b2010c99e739441198f65337c6f7ef0d9136453a7668fde6e1389c32c7 +a9d6d2c8845e5f1fec183c5153f1f6e23421e28ce0c86b0ce993b30b87869065acad9e6d9927d9f03c590852821b2f9c +a320ca07c44f7ea3ff858fe18395a86f59559617f13ec96d1e8b4a3f01d9c066a45c8d8cf8f1f14a360bb774d55f5f18 +b3adb00e1312dce73b74fbd2ea16f0fb0085bd0db10772e9c260e9ed9f8829ff690e3dfffacaddc8233d484bb69778b3 +87b0c8d8a167d5199d0b0743c20fb83ec8a1c442f0204bcc53bf292ba382bef58a58a6d1e2467920e32c290fdc6dae7c +a74fa436a5adc280a68e0c56b28ac33647bdfc8c5326f4c99db6dbd1b98d91afb1f41f5fffd6bcc31c1f8789c148e2db +8a37349e4ba7558965077f7f9d839c61b7dcb857fcc7965c76a64a75e377bfea8cd09b7a269ce602cc4472affc483b69 +8af813f62c5962ff96bf73e33f47fd5a8e3e55651d429e77d2ce64a63c535ecc5cfc749bb120c489b7ea1d9b2a5d233c +833021445b7d9817caa33d6853fa25efc38e9d62494d209627d26799432ea7b87a96de4694967151abc1252dd2d04dfc +8f78a715107e0ace3a41bff0385fd75c13bf1250f9e5ddecf39e81bacc1244b978e3464892f7fb2596957855b8bf9fc7 +aed144134dc1cc6c671f70ebe71a3aadf7511eea382969bc5d499a678d2d8ce249ebf1a06b51183f61413eba0517012b +b39a53e82c5553943a5e45bc5116d8672ec44bed96b3541dead40344b287a7b02dbf7107372effb067edd946f47de500 +b383844c3b20a8bc06098046ec6b406df9419ad86fac4a000905c01325426903a5e369af856d71ccd52fea362ed29db5 +83815a7098283723eec6aa6451b5d99578bf28a02971375a1fe90c15a20963e129372ac4af7b306ee2e7316472c5d66d +b426b4e185806a31febd745fa8d26b6397832a04e33c9a7eb460cbf302b4c134a8a01d4e5e40bc9b73296c539e60b3ca +a6cabf8205711457e6363ef4379ebc1226001e1aaea3002b25bfd9e173f4368002f4461e79eeb9f4aa46f1b56c739ab9 +a6e88ab01282313269cd2d8c0df1a79dada5b565d6623900af9e7e15351de2b0105cc55d3e9080e1e41efe48be32a622 +b2b106db3d56d189ea57afa133ae4941b4eb1dc168357af488e46811c687713fc66bbd6f8500bbd13cdb45cb82c14d1d +b3a74780ff949d19e6438db280e53632c60dc544f41320d40297fe5bb7fcee7e7931111053c30fb1ed9019ab28965b44 +8c67f32b9fdc04ec291cc0d928841ab09b08e87356e43fbbf7ac3ff0f955642628f661b6f0c8e2192a887489fddf07bb +b3be58bd628383352e6473fe9a1a27cf17242df0b1273f5867e9119e908969b9e9e7e294a83b9ea14825003cb652d80c +a867acf6ab03e50936c19a21d4040bfd97eb5a89852bd9967da0e326d67ce839937cab4e910d1149ecef9d5f1b2d8f08 
+8006b19126bd49cbb40d73a99a37c2e02d6d37065bbe0cfcee888280176184964bd8f222f85960667c5b36dfaee0ee35 +ac50967b8b7840bf9d51216d68a274f1d3431c7d4031fbac75a754befbbb707c2bb184867db6b9d957f3ba0fd0a26231 +b5a794c928aff0c4271674eb0a02143ed9b4d3bc950584c7cd97b7d3c3f2e323798fd5ccc6fcc0eb2e417d87f4c542a2 +a2ca3d6509f04b37091ce6697672ee6495b42d986d75bd2d2058faa100d09fd0a145350f2d280d2cb36516171bd97dbf +92cfa293469967a9207b37cd70392312faf81b52963bfbad5f9f3da00817d26e10faf469e0e720c3bb195f23dda8c696 +a0dd5135da0a0e33fa922c623263b29518d7fa000e5beefc66faa4d6201516d058f155475c4806917a3259db4377c38a +8fc3ae8ea6231aa9afb245a0af437e88ebca2c9ab76850c731981afba90d5add0ea254053449355eccf39df55bd912ed +9727afe1f0804297717cec9dc96d2d27024a6ae6d352fee5d25377ee858ee801593df6124b79cb62ddc9235ec1ade4ac +8bcb2c53fcaa38e8e2e0fd0929bc4d9ddce73c0282c8675676950ff806cb9f56ebd398b269f9a8c2a6265b15faf25fca +a8bd9007fbbdd4b8c049d0eb7d3649bd6a3e5097372fa8ea4b8821ba955c9ef3f39ac8b19f39d3af98640c74b9595005 +92c7e851c8bd6b09dfcbfdb644725c4f65e1c3dbd111df9d85d14a0bb2d7b657eb0c7db796b42bf447b3912ef1d3b8c3 +98c499b494d5b2b8bea97d00ac3a6d826ab3045bb35424575c87117fc2a1958f3829813e266630749caf0fa6eeb76819 +8df190d71e432fe8691d843f6eb563445805c372eb5b6b064ec4e939be3e07526b5b7f5a289ede44ae6116a91357b8b1 +b5010243f7c760fb52a935f6d8ed8fc12c0c2f57db3de8bb01fdeedf7e1c87b08f3dd3c649b65751f9fd27afa6be34c7 +889c8057402cc18649f5f943aed38d6ef609b66c583f75584f3b876c1f50c5dc7d738dc7642135742e1f13fa87be46c1 +996087337f69a19a4ebe8e764acf7af8170a7ad733cd201b0e4efde6ea11039a1853e115ad11387e0fb30ab655a666d8 +902732c429e767ab895f47b2e72f7facad5ef05a72c36a5f9762c2194eb559f22845bbb87c1acc985306ecb4b4fbbf79 +8519b62a150ea805cdfc05788b8d4e797d8396a7306b41777c438c2e8b5c38839cfec5e7dc5d546b42b7b76e062982a7 +862a53ba169e6842a72763f9082ff48fbfbb63129d5a26513917c2bca9ad6362c624ce6fc973cf464f2eb4892131eb04 +b86cd67c809d75fdb9f1c9453a39870f448b138f2b4058d07a707b88bb37f29d42e33ce444f4fbe50d6be13339cae8a6 +8cf5d8365dbbafc0af192feb4fc00c181e2c3babc5d253268ef5564934555fb1e9b1d85ec46f0ca4709b7d5b27169b89 +b48f11a1809ec780bf6181fae3b8d14f8d4dc7d1721128854354be691c7fc7695d60624f84016c1cea29a02aaf28bfbc +8b46b695a08cb9a2f29ab9dd79ab8a39ec7f0086995b8685568e007cd73aa2cd650d4fae6c3fb109c35612f751ba225e +8d2f9f0a5a7de894d6c50baceb8d75c96082df1dcf893ac95f420a93acbbf910204903d2eb6012b1b0495f08aaf9992f +b334db00a770394a84ec55c1bd5440b7d9f2521029030ef3411b0c2e0a34c75c827fd629c561ea76bd21cd6cf47027f4 +96e9ff76c42bcb36f2fb7819e9123420ed5608132f7c791f95cb657a61b13041e9ba2b36f798a0fdb484878cbe015905 +99f8d701e889abd7815d43ba99e0a85776ec48311fa7cb719d049f73b5d530fa950746ffbbb7beb9e30c39d864891dc2 +98169c20df7c15d7543991f9c68e40ac66607cbd43fc6195416e40009917039357e932d6e807f3a40bc4503ad01ae80a +84bd97dd9e4e2ba75d0dee7d4418c720d4746203d847ce2bdd6ed17d492023df48d7b1de27e3f5cb8660c4bb9519ae1b +a54319e06db7f5f826277a54734a875c5b3fd2fa09d36d8b73594137aa62774b7356560157bc9e3fdf1046dc57b6006a +90cfff7cd4e7c73b84f63455d31b0d428cb5eee53e378028591478511985bcc95eb94f79ad28af5b3bed864e422d7b06 +a11c23cc8dce26ac35aea9abe911905a32616a259fa7da3a20f42dc853ad31b2634007aa110c360d3771ff19851f4fb4 +9856fbee9095074ad0568498ff45f13fe81e84ea5edaf04127d9ee7e35e730c6d23fa7f8f49d092cf06b222f94ab7f36 +818862dec89f0dc314629fffbca9b96f24dfde2d835fa8bde21b30dc99fe46d837d8f745e41b39b8cf26bfe7f338f582 +831819d41524c50d19f7720bf48f65346b42fb7955ee6ecc192f7e9fed2e7010abccdfdeac2b0c7c599bc83ac70be371 +b367e588eb96aa8a908d8cc354706fee97e092d1bc7a836dbcc97c6ed4de349643a783fb4ddf0dec85a32060318efa85 
+b7aaef729befd4ab2be5ec957d7d1dbe6178de1d05c2b230d8c4b0574a3363e2d51bc54ea0279a49cc7adffa15a5a43a +ae2891d848822794ecb641e12e30701f571431821d281ceecbccaaa69b8cd8242495dc5dbf38f7d8ed98f6c6919038aa +872cf2f230d3fffce17bf6f70739084876dc13596415644d151e477ce04170d6ab5a40773557eeb3600c1ad953a0bfce +b853d0a14cef7893ba1efb8f4c0fdb61342d30fa66f8e3d2ca5208826ce1db5c8a99aa5b64c97e9d90857d53beb93d67 +910b434536cec39a2c47ca396e279afdbc997a1c0192a7d8be2ba24126b4d762b4525a94cea593a7c1f707ba39f17c0c +b6511e9dea1fbccedd7b8bb0a790a71db3999bd4e3db91be2f1e25062fae9bb4e94e50d8ec0dcc67b7a0abce985200b2 +936885c90ebe5a231d9c2eb0dfd8d08a55ecaa8e0db31c28b7416869b3cc0371448168cbec968d4d26d1cb5a16ebe541 +b71c2ac873b27fe3da67036ca546d31ca7f7a3dc13070f1530fce566e7a707daeb22b80423d505f1835fe557173754f8 +85acb64140915c940b078478b7d4dadd4d8504cde595e64f60bd6c21e426b4e422608df1ed2dd94709c190e8592c22d7 +b5831c7d7c413278070a4ef1653cec9c4c029ee27a209a6ea0ad09b299309dea70a7aef4ff9c6bdeda87dcda8fa0c318 +aa0e56e3205751b4b8f8fa2b6d68b25121f2b2468df9f1bd4ef55f236b031805a7d9fd6f3bba876c69cdba8c5ea5e05f +b021f5ae4ed50f9b53f66dd326e3f49a96f4314fc7986ace23c1f4be9955ec61d8f7c74961b5fdeabcd0b9bccbf92ce8 +88df439f485c297469e04a1d407e738e4e6ac09a7a0e14e2df66681e562fdb637a996df4b9df4e185faab8914a5cef76 +8e7ae06baa69cb23ca3575205920cb74ac3cda9eb316f4eef7b46e2bff549175a751226d5b5c65fe631a35c3f8e34d61 +99b26ff174418d1efc07dfbed70be8e0cb86ac0cec84e7524677161f519977d9ca3e2bbe76face8fe9016f994dafc0ff +a5f17fe28992be57abd2d2dcaa6f7c085522795bfdf87ba9d762a0070ad4630a42aa1e809801bc9f2a5daf46a03e0c22 +8d673c7934d0e072b9d844994f30c384e55cec8d37ce88d3ad21f8bb1c90ecc770a0eaf2945851e5dab697c3fc2814a9 +a003ed4eb401cfe08d56405442ca572f29728cfff8f682ef4d0e56dd06557750f6a9f28a20c033bc6bbb792cc76cc1a8 +8010408f845cf1185b381fed0e03c53b33b86ea4912426819d431477bd61c534df25b6d3cf40042583543093e5f4bb44 +9021a1ae2eb501134e0f51093c9f9ac7d276d10b14471b14f4a9e386256e8c155bef59973a3d81c38bdab683cd5c10e0 +a5abf269ceabbb1cf0b75d5b9c720a3d230d38f284ed787b6a05145d697a01909662a5b095269996e6fa021849d0f41f +b4b260af0a005220deb2266518d11dbc36d17e59fc7b4780ab20a813f2412ebd568b1f8adc45bf045fcbe0e60c65fd24 +b8c4cb93bedbb75d058269dfccda44ae92fe37b3ab2ef3d95c4a907e1fadf77c3db0fa5869c19843e14b122e01e5c1f4 +ac818f7cdecc7b495779d8d0ff487f23ab36a61d0cf073e11000349747537b5b77044203585a55214bb34f67ef76f2d2 +86215799c25356904611e71271327ca4882f19a889938839c80a30d319ddbe6c0f1dfa9d5523813a096048c4aef338cd +a9204889b9388bf713ca59ea35d288cd692285a34e4aa47f3751453589eb3b03a9cc49a40d82ec2c913c736752d8674d +893aecf973c862c71602ffb9f5ac7bf9c256db36e909c95fe093d871aab2499e7a248f924f72dea604de14abfc00e21c +b8882ee51cfe4acba958fa6f19102aa5471b1fbaf3c00292e474e3e2ec0d5b79af3748b7eea7489b17920ce29efc4139 +8350813d2ec66ef35f1efa6c129e2ebaedc082c5160507bcf04018e170fc0731858ad417a017dadbd9ade78015312e7f +83f6829532be8cd92f3bf1fef264ee5b7466b96e2821d097f56cbb292d605a6fb26cd3a01d4037a3b1681d8143ae54d7 +87d6258777347e4c1428ba3dcbf87fdd5113d5c30cf329e89fa3c9c1d954d031e8acacb4eed9dca8d44507c65e47e7cd +a05669a1e561b1c131b0f70e3d9fc846dc320dc0872334d07347e260d40b2e51fdbabeb0d1ae1fb89fba70af51f25a1a +819925c23fd4d851ea0eecc8c581f4a0047f5449c821d34eccc59a2911f1bd4c319dab6ece19411d028b7fdedece366b +b831b762254afd35364a04966d07b3c97e0b883c27444ff939c2ab1b649dc21ac8915b99dc6903623ed7adaae44870ac +93ec0190f47deffe74179879d3df8113a720423f5ca211d56db9654db20afe10371f3f8ec491d4e166609b9b9a82d0d4 +8f4aa6313719bcfad7ca1ed0af2d2ee10424ea303177466915839f17d2c5df84cc28fcef192cbb91bb696dd383efd3b2 
+8d9c9fdf4b8b6a0a702959cf784ad43d550834e5ab2cd3bebede7773c0c755417ad2de7d25b7ff579f377f0800234b44 +99d9427c20752f89049195a91cf85e7082f9150c3b5cb66b267be44c89d41e7cc269a66dacabacadab62f2fa00cc03be +b37709d1aca976cbbf3dc4f08d9c35924d1b8b0f1c465bd92e4c8ff9708e7d045c423183b04a0e0ab4c29efd99ef6f0e +a163f42fb371b138d59c683c2a4db4ca8cbc971ae13f9a9cc39d7f253b7ee46a207b804360e05e8938c73bf3193bab55 +87a037aa558508773fc9a0b9ba18e3d368ffe47dfaf1afacee4748f72e9d3decc2f7c44b7bf0b0268873a9c2ef5fe916 +a1f20cb535cc3aebd6e738491fe3446478f7609d210af56a4004d72500b3ec2236e93446783fe628c9337bcd89c1e8e1 +9757aa358dfbba4f7116da00fe9af97f7ac6d390792ea07682b984aa853379ac525222ac8a83de802859c6dec9182ef7 +815daca1eded189ec7cb7cbc8ad443f38e6ddb3fb1301d1e5a1b02586f1329035209b7c9232dc4dff3fc546cb5ac7835 +aed86dfaf9c4f0a4b2a183f70f9041172002a773482a8ebf3d9d5f97d37ee7c6767badfda15476b3b243931235c7831c +8d032e681e89e41b29f26be02f80030fa888f6967061d2204c1ebb2279a3211d759d187bce6408c6830affa1337fb4e0 +877bff5c2db06116f918a722b26422c920aeade1efa02fa61773fca77f0ea4a7e4ee0ecaaa5cfe98044c0ff91b627588 +b9ee5310d0996a10a242738d846565bdb343a4049a24cd4868db318ea6168a32548efaf4ab84edfbf27ce8aec1be2d1c +b59f6928167323037c6296dd7697846e80a7a4b81320cfae9073ebd2002a03bdf6933e887f33ad83eda8468876c2c4fb +8167686245149dc116a175331c25301e18bb48a6627e2835ae3dd80dd373d029129c50ab2aebeaf2c2ccddc58dcc72ec +82b7dcc29803f916effb67c5ba96a1c067ed8ca43ad0e8d61a510ab067baefd4d6b49e3886b863da2de1d8f2979a4baa +b43824cd6f6872a576d64372dde466fef6decdbb5ad5db55791249fde0a483e4e40c6e1c221e923e096a038fe47dab5e +ab1e9884cf5a8444140cf4a22b9a4311a266db11b392e06c89843ac9d027729fee410560bcd35626fd8de3aad19afc4a +a0dbd92a8d955eb1d24887ca739c639bdee8493506d7344aadb28c929f9eb3b4ebaae6bd7fd9ffe8abb83d0d29091e43 +8352a47a70e343f21b55da541b8c0e35cd88731276a1550d45792c738c4d4d7dc664f447c3933daabd4dbb29bb83be4a +8ce4a1e3c4370346d6f58528a5ef1a85360d964f89e54867ba09c985c1e6c07e710a32cdda8da9fa0e3b26622d866874 +b5e356d67dd70b6f01dd6181611d89f30ea00b179ae1fa42c7eadb0b077fb52b19212b0b9a075ebd6dc62c74050b2d2f +b68f2cd1db8e4ad5efdba3c6eaa60bfcc7b51c2b0ce8bb943a4bc6968995abe8a45fe7f12434e5b0076f148d942786be +b5c7b07f80cd05c0b0840a9f634845928210433b549fb0f84a36c87bf5f7d7eb854736c4083445c952348482a300226a +8cfd9ea5185ff9779dee35efe0252957d6a74693104fb7c2ea989252a1aa99d19abaab76b2d7416eb99145c6fdb89506 +8cc8e2c5c6ddee7ef720052a39cab1ecc5e1d4c5f00fb6989731a23f6d87ac4b055abb47da7202a98c674684d103152a +8c95394c9ed45e1bf1b7cfe93b2694f6a01ff5fed8f6064e673ba3e67551829949f6885963d11860d005e6fabd5ac32c +adf00b86f4a295b607df157f14195d6b51e18e2757778fde0006289fabba8c0a4ab8fad5e3e68ddbb16ccb196cc5973f +b1714b95c4885aac0ee978e6bbabbc9596f92b8858cb953df077511d178527c462cbe1d97fdc898938bae2cd560f7b66 +adf103f4344feb6b9c8104105d64475abc697e5f805e9b08aa874e4953d56605677ef7ff4b0b97987dc47257168ae94d +b0ce6ede9edb272d8769aed7c9c7a7c9df2fb83d31cc16771f13173bcdc209daf2f35887dcca85522d5fdae39f7b8e36 +ad698d1154f7eda04e2e65f66f7fcdb7b0391f248ba37d210a18db75dafd10aedc8a4d6f9299d5b6a77964c58b380126 +904856cd3ecdbb1742239441f92d579beb5616a6e46a953cf2f1dd4a83a147679fc45270dcac3e9e3d346b46ab061757 +b600b5b521af51cdfcb75581e1eccc666a7078d6a7f49f4fdb0d73c9b2dab4ce0ecafcbd71f6dd22636e135c634ee055 +a170c5d31f6657f85078c48c7bbf11687ce032ab2ff4b9b3aee5af742baecf41ea1c2db83bcba00bccc977af7d0c5c8e +a9ef1cbb6a7acb54faf1bcbd4676cdeba36013ca5d1ac1914c3ff353954f42e152b16da2bdf4a7d423b986d62b831974 +aa706d88d3bd2ce9e992547e285788295fd3e2bbf88e329fae91e772248aa68fdfdb52f0b766746a3d7991308c725f47 
+911a837dfff2062bae6bcd1fe41032e889eb397e8206cedadf888c9a427a0afe8c88dcb24579be7bfa502a40f6a8c1cc +ae80382929b7a9b6f51fe0439528a7b1a78f97a8565ba8cddb9ee4ba488f2ab710e7923443f8759a10f670087e1292c4 +b8962de382aaa844d45a882ffb7cd0cd1ab2ef073bce510a0d18a119f7a3f9088a7e06d8864a69b13dc2f66840af35ae +954538ffff65191538dca17ec1df5876cb2cd63023ff2665cc3954143e318ece7d14d64548929e939b86038f6c323fc1 +89efa770de15201a41f298020d1d6880c032e3fb8de3690d482843eb859e286acabb1a6dc001c94185494759f47a0c83 +a7a22d95b97c7c07b555764069adaa31b00b6738d853a5da0fe7dc47297d4912a0add87b14fa7db0a087a9de402ea281 +9190d60740c0813ba2ae1a7a1400fa75d6db4d5ce88b4db0626922647f0c50796a4e724e9cc67d635b8a03c5f41978f7 +ab07c30b95477c65f35dc4c56d164e9346d393ad1c2f989326763a4cc04b2cb0386e263007cc5d0125631a09ad3b874c +9398d8e243147de3f70ce60f162c56c6c75f29feb7bc913512420ee3f992e3c3fb964d84ef8de70ef2c118db7d6d7fd5 +b161b15b38cbd581f51ca991d1d897e0710cd6fdf672b9467af612cd26ec30e770c2553469de587af44b17e3d7fea9f7 +8c5d0260b6eb71375c7ad2e243257065e4ea15501190371e9c33721a121c8111e68387db278e8f1a206c0cce478aaa2b +b54ac06a0fb7711d701c0cd25c01ef640e60e3cb669f76e530a97615680905b5c5eac3c653ce6f97ceca2b04f6248e46 +b5c7f76e3ed6dc6c5d45494f851fa1b5eaf3b89adac7c34ad66c730e10488928f6ef0c399c4c26cbeb231e6e0d3d5022 +b6cd90bdd011ac1370a7bbc9c111489da2968d7b50bf1c40330375d1a405c62a31e338e89842fe67982f8165b03480c7 +b0afcaf8d01f5b57cdeb54393f27b27dc81922aa9eaccc411de3b03d920ae7b45295b090ef65685457b1f8045c435587 +b2786c0460e5057f94d346c8ebe194f994f6556ab2904a1d1afd66c0ff36391b56f72ed769dcc58558ee5efaa2ed6785 +965dbb0cb671be339afcb2d6f56e3c386fb5d28536d61d6073b420ee15dee79c205af2f089fbb07514a03c71bf54b4e2 +90f2003e2286bba9cebff3a6791637ca83b6509201c6aed1d47f27097d383d5c2d8532bff9e3541d2c34259841cf26ab +902142d1224e1888ebbfef66aaf8d5b98c27927a00b950753a41d1d28a687a8286b51655da9a60db285b20dc81d5ea89 +a5d364448bf0d0849e5104bdaef9cb2cc8c555f5d6d34239c68671fbe1252f7c8c75b83cea10159dee4da73298f39a12 +b013a54c5b99e296d9419ad5c2aaf4545acd34405e57d13cb764e92132cc20d1a14b33e10caf22d898b608670c04f273 +b92976dceda373331804d48a7847f508cafde8d15949df53dbda09d03908678db1e61ee637baad5f05b2b03ea6f5a870 +968bcb308c7ad0813dc9b3170f23f419aecd7b42176f27fac698811795bf42659fea6b04dab4ef43595dcc990622041b +a9d0a20e9367ea831dccd37f4d97ea75e9aeec952947a7946d95e0d249c94024183ef79a624bdea782469824df0ee4e4 +8521b9667453c3658703e5db365b13f0e0d2331ce611ff1e708f8124d8a81bb5e82871de4a66d45c1a6b0a3901bd901e +b9c88e76e69b0722c0a2f97e57dbc4a6f7456434cd694e2ff67f4e24740cffa4db03e2b18f07f22954ae7db2286e1fa2 +8400e55aa9ab01d4cc0affd611127b5d8d9a9dbd897f3cb8e2050379983aa54249be17d7b7891977b2515bb44a483f65 +8cbb967b4ed31dc40ea06822a94d54cbfc8845c66fbafa3474c8f5fe1ada97299ed4ca955d9d7a39af8821eabf711854 +b4d266ee3fea264a6c563fd6bed46f958c2d7bd328225f6e47faf41a0916aef3b697574322f8b814dfb2f5c242022bf6 +8f7c72d69a919450215ead660ffa9637642c5306354888d549fd4a42e11c649b389f67cc802a0184d10fdb261351140c +a5f9e494ea9b2393ec32c48aac76c04158ccef436d4e70ad930cba20c55fbf61e8f239f70b9d75462405c4b6317c71a1 +b3befb259b52a44a6f44345859e315c20efa48c0c992b0b1621d903164a77667a93f13859790a5e4acb9f3ec6c5a3c6e +b9e4ca259b4ee490d0824207d4d05baf0910d3fe5561ff8b514d8aa5c646417ca76f36ab7c6a9d0fb04c279742f6167a +98fa8c32a39092edb3c2c65c811d2a553931010ccb18d2124d5b96debd8b637d42b8a80111289f2079d9ebca2131a6dc +a65e5aa4631ab168b0954e404006ce05ac088fd3d8692d48af2de5fd47edbf306c80e1c7529697754dbbba1b54164ba0 +b94b7d37e4d970b4bb67bf324ebf80961a1b5a1fa7d9531286ab81a71d6c5f79886f8ef59d38ae35b518a10ed8176dcc 
+b5ed2f4b0a9ae9ace2e8f6a7fd6560d17c90ae11a74fa8bef2c6c0e38bfd2b9dd2984480633bca276cb73137467e2ce3 +a18556fe291d87a2358e804ee62ddff2c1d53569858b8ae9b4949d117e3bfb4aefce1950be8b6545277f112bebeeb93d +a0d60b9def5d3c05856dff874b4b66ec6e6f0a55c7b33060cc26206c266017cdcf79b1d6f6be93ed7005a932f9c6a0b9 +801fced58a3537c69c232ce846b7517efd958e57c4d7cd262dbec9038d71246dafad124aa48e47fe84ecc786433747c7 +a5e9a8ea302524323aa64a7c26274f08d497df3d570676ecc86bd753c96a487a650389a85f0bc8f5ea94fe6819dc14e5 +a8a2963dc9238a268045d103db101adc3b2f3ab4651b7703b2fe40ece06f66bf60af91369c712aa176df6ed3d64a82fa +a4a8ff0a9a98442357bcdd9a44665919c5d9da6a7d7d21ccdbbd8f3079b1e01125af054b43b37fc303941d0a2e7baee0 +90ef893350f50d6f61ee13dfab6e3121f4a06a1908a707b5f0036cdc2fe483614de3b1445df663934036784342b0106f +84e74d5bc40aaab2cc1d52946b7e06781fbef9d8de6f8b50cd74955d6bdb724864c0e31d5ac57bf271a521db6a352bd6 +832cdf653bbbd128e2e36e7360354a9e82813737c8ab194303d76667a27aa95252756c1514b9e4257db1875f70f73eb4 +a0af8660ed32e6dbcc4d5d21b0a79a25ff49394224f14e6e47604cf3b00136de8f9ab92e82814a595bf65340271c16c3 +9040b5caf5e4dc4118572a2df6176716b5b79d510877bbb4a1211b046596899ea193be4d889e11e464ffb445ab71907b +b9bf8354c70238ab084b028f59e379b8a65c21604034d1b8c9b975f35a476e3c0ba09dd25bf95c5d8ffb25832537319b +a7b492cc1df2a8f62c935d49770d5078586bd0fefda262eb5622033e867e0b9dc0ffc2ce61cd678136a3878d4cbb2b56 +95a5ef06f38743bba187a7a977023b1d9d5ec9ef95ba4343ad149a7b8b0db0e8e528bfb268dc7e5c708bc614dc3d02c8 +99dcf7f123df6c55aeff0a20885a73e84d861ec95cf9208ba90494f37a2dcaacebc8344f392547d3046616d9753c7217 +b3e14f309281a3685ceb14f8921c1e021b7e93c9e9595596b9fb627e60d09ed9e5534733fcbdf2fbc8c981698f5e62ac +816a5e0463074f8c7fb2998e0f0cf89b55790bdbbb573715f6268afb0492453bd640dd07a9953d0400169d555fdf4ac8 +8356d68f3fe7e02a751f579813bd888c9f4edcc568142307d1c9259caef692800e1581d14225e3a3585dac667928fa94 +8d70ea3314c91bfc3f7c1dcf08328ae96f857d98c6aac12ad9eebc2f77e514afdbaf728dfcb192ed29e7ce9a0623ecbb +b68280e7f62ced834b55bc2fcc38d9ea0b1fbcd67cc1682622231894d707c51478ed5edf657d68e0b1b734d9f814b731 +b712dd539e1d79a6222328615d548612eab564ace9737d0249aa2eefed556bbcf3101eba35a8d429d4a5f9828c2ac1fe +8da42ca096419f267f0680fd3067a5dbb790bc815606800ae87fe0263cae47c29a9a1d8233b19fe89f8cc8df6f64697e +8cb2ffd647e07a6754b606bde29582c0665ac4dde30ebdda0144d3479998948dae9eb0f65f82a6c5630210449fbd59f7 +8064c3ef96c8e04398d49e665d6de714de6ee0fced836695baa2aa31139373fad63a7fc3d40600d69799c9df1374a791 +aec99bea8ab4e6d4b246c364b5edc27631c0acc619687941d83fa5ba087dd41f8eaec024c7e5c97cf83b141b6fb135da +8db6051f48901308b08bb1feb8fd2bceaedde560548e79223bd87e485ea45d28c6dcec58030537406ed2b7a9e94e60cc +a5b812c92d0081833dcf9e54f2e1979a919b01302535d10b03b779330c6d25d2de1f374b77fe357db65d24f9cbcd5572 +967d442485c44cf94971d035040e090c98264e3348f55deabd9b48366ec8fe0d5a52e4b2c9a96780a94fc1340338484e +a4b4110bef27f55d70f2765fc3f83c5ddcdfe7f8c341ea9d7c5bcee2f6341bcfbf7b170b52e51480e9b5509f3b52048f +a0d39e4eb013da967a6ac808625122a1c69bf589e3855482dedb6847bb78adc0c8366612c1886d485b31cda7304ec987 +a92f756b44d44b4e22ad265b688b13c9358114557489b8fb0d9720a35e1773b3f0fa7805ac59b35d119a57fe0f596692 +aa27e4b979af6742b49db8bf73c064afd83a9cfe9016131a10381f35a46169e8cfd1a466f295fcc432c217c7c9fa44a5 +845961319cc10bcfbb1f3cb414a5c6a6d008fb3aac42c7d5d74e892cc998af97bc9a9120c3f794e4078135e16a416e38 +a18dbe3015c26ae3e95034c01d7898e3c884d49cc82e71ddb2cf89d11cec34cc2a3dff0fafb464e8e59b82ce1a0a7a11 +a954aed6d7124fa5bd5074bd65be4d28547a665fb4fe5a31c75a5313b77d1c6fc3c978e24c9591a2774f97f76632bdde 
+8f983b2da584bdff598fcb83c4caa367b4542f4417cc9fa05265ff11d6e12143c384b4398d3745a2d826235c72186a79 +b2caa17d434982d8dd59a9427307dfe4416b0efc8df627dd5fc20d2c11046c93461d669cab2862c094eec6a9845990c6 +8c2baa5a97ee3154cce9fa24f6b54b23e9d073e222220fdd0e83e210c0058fb45ce844382828b0cb21438cf4cad76ee6 +b93437406e4755ccf1de89f5cbe89e939490a2a5cf1585d4363c21ae35b986cb0b981dec02be2940b4ec429cc7a64d4c +a90ac36c97b7ea2eddb65e98e0d08a61e5253019eeb138b9f68f82bb61cdbadf06245b9dfffe851dfa3aa0667c6ac4b8 +8bcdd7b92f43b721ddbfd7596e104bc30b8b43bdaee098aac11222903c37f860df29d888a44aa19f6041da8400ddd062 +98f62d96bdf4e93ed25b2184598081f77732795b06b3041515aa95ffda18eb2af5da1db0e7cfed3899143e4a5d5e7d6c +ad541e3d7f24e4546b4ae1160c1c359f531099dab4be3c077e446c82cb41b9e20b35fa7569798a9f72c1fae312b140b4 +8844a1471ff3f868c6465459a5e0f2fb4d93c65021641760f1bb84f792b151bc04b5a0421bbc72cf978e038edc046b8f +af895aebe27f8357ae6d991c2841572c2063b8d0b05a2a35e51d9b58944c425c764f45a3f3b13f50b1b1f3d9025e52ad +adf85265bb8ee7fead68d676a8301129a6b4984149f0eb4701eae82ec50120ddad657d8798af533e2295877309366e9c +962e157fe343d7296b45f88d9495d2e5481e05ea44ca7661c1fdf8cc0ac87c403753ca81101c1294f248e09089c090eb +a7c8959548c7ae2338b083172fee07543dc14b25860538b48c76ef98ab8f2f126ecb53f8576b8a2b5813ecb152867f18 +ae71680366e11471e1c9a0bc7ea3095bc4d6ceb6cf15b51f1b6061b043f6d5941c9f869be7cb5513e8450dca16df2547 +831290201f42ebf21f611ca769477b767cf0ee58d549fcd9e993fae39d07745813c5ce66afa61b55bb5b4664f400ece7 +af5879e992f86de4787f1bc6decbc4de7d340367b420a99a6c34ac4650d2a40cbe1cef5c6470fc6c72de8ee1fe6bcce4 +8d3c27e1b2ef88d76ac0b1441d327567c761962779c8b1f746e3c976acb63b21d03e5e76589ce9bb0d9ba6e849ed3d53 +ab23b09c9f4151e22654d43c1523f009623b01fe1953d343107cef38b95bd10afd898964946d3cb8521bcbe893e1c84d +8a6acade9520e7a8c07f33d60a87fd53faa6fbf7f018735bffcbbb757c3bafb26f547ceb68e7b8b6bca74819bfcd521a +94db50080d557440a46b6b45ee8083bc90e9267d40489040cbed6234bebf350c788ec51557b969f95194102fde8e9713 +8be8031f32504e0c44958d893649f76cec17af79efcd22bbedb78378f0a150845467e59f79a3f2a3b6a66bdf0d71d13c +a69a4ac47fd92e1926b5e14adcbebbef049848e8a00d4bb387340892e5a9333cae512f447201728d3b53c6cf980a5fdc +8fc713825277c5a8d9ef0a1f6219d141def6d8b30aff0d901026280a17d1265d563ff5192a0817e0e1a04ff447fb6643 +8bf0a85569c4f0770ff09db30b8b2ea6c687630c7801302c17986c69a57c30f0781d14b3f98a10b50c4ecebc16a5b5ec +896baa4135d5621fd6b6a19c6d20b47415923c6e10f76c03a8879fd8354e853b0b98993aa44e334623d60166ba3e3ca9 +b82cde1c2e75a519ef727b17f1e76f4a858857261be9d866a4429d9facf9ea71d16b8af53c26bde34739fe6ea99edc73 +b1a9e1f2e34895a7c5711b983220580589713306837c14073d952fe2aef0297135de0be4b25cbfaed5e2566727fb32ef +b42ed0e9eaf02312d1dba19a044702038cf72d02944d3018960077effc6da86c5753036a85d93cd7233671f03d78d49a +a402e34849e911dbf0981328b9fe6fff834c1b8683591efd3b85aa7d249811d6b460a534d95e7a96fdd7f821a201c2c4 +a774417470c1532f39923d499566af762fa176c9d533767efd457cc5e4a27f60e9217f4b84a9343ecb133d9a9aab96b7 +83dc340541b9ef2eb8394d957cd07b996d2b52ac6eb5562cbba8f1a3312f941c424c12d1341a6dc19d18d289c681ef40 +b2906c32d5756b5712e45dec53782494a81e80f887c6e1ef76e79c737625eccecb8fd17b20e6f84890d322b6ffde6eab +b89705c30cec4d50691bc9f4d461c902d6a4d147cf75ee2f1c542ad73e5f0dabe3d04cd41c6c04ab1422be4134cf1ad7 +8c3293651f4c4fac688bf5837c208b15e5a19ce51b20dd80ffc7fca12d3e615b2773cfc3ed62a1b39c66808a116bde06 +8fceb8ef481163527d1fc3abc7e1a5b3b6de2f654c3fe116d1367b177dcba2e0d2124a7216803513a3d53fc1e30435b9 +b2a42c827da630aaa3eb20ed07d136aa11ba01b4c8efc0a57ebab7d5b851a15daa6ba118bcffbc20703916e430e30a87 
+a86340153abb3fe97414e2fde857e15aac27c9bb9b61258eea6766024f426ed0753f08f07f6b02b5375e1587ea3afcab +b006465e258e646f91ba889765113d3dc9bd657246c533cab6516d55ba054baa9d7276a3b0fa31730c3bd824845bf107 +a08aadc09428719cde0050d064c0f42c5b7c4f6c158227d7636f870957d6cfe821b4c62d39279a7c98f5a75fcb7bbfba +885e7d47ce9b50d21b95116be195be25f15223a6a189387575cc76740174c3e9044f1196986d82856b3fb25cdd562049 +b18c3780362d822cc06910743c4cbcef044823a22d12987fe2e56f3801e417f2e9cd31574ea1c5c6ee7673a14aa56e3e +a625570ef7d31c042d968018865aeeba34ee65a059ab1ec079c7a8ba1be9e24bce6afb7036c07d9d6c96ab014f95d661 +8fc9bd4764adc4c300b5bd49a06dce885d1d8aff9bae68a47976d0cd42110aa6afa2d7b90b64e81c0f14de729f2fb851 +91d88714cb669f5f00241aa5ab80dffb04109492ea9c72b59645eb1f85f3539c61db2ab418af986f42241df8b35445e9 +b98f14e664df2590dd2d00b5b5c817e388e5d9fb074f718637c33b3d4969c89e82fdd12db8997f5ff3bf5bb5ca5dd839 +86cb3d9f148cb2170317a4c22af7092155aa66ecff7ab1299b102fbbaa33ed2a284b97b08f529d2da9faea63fb98972c +92449f6b8a7c737ecef291c947cbd602c47d7fe47dc3426c2b413f3019169aa56e14c2a7216adce713e1c7bd5c08a83f +b08c1b9080bba88b44a65070948142d73c00730715fbdd01e13fc3415c5b4f3248ef514fa3ade4a918c9a820cccae97c +b0a05297da76e37c22be7383e60bba1cbc4f98ba650e12d4afcfcea569842003644a10ad73c9148958f7bf1ffa0a27d0 +839092c1f4e9fb1ec0dde8176f013b0d706ab275079f00f8e774287dd658d1b5638d5fe206f5f2a141911a74bb120f75 +a36bd669bdc055ece4b17ff6eac4c60a2f23324a5eb6d0d6c16a2fce44c39cfd52d1fa2b67f3f5e83504e36426fbfc40 +8aa428323512cf769645e2913a72976d32da4c0062ffe468a6062fd009340f0f23c6b63285848a0e7631a907adb032a0 +944800f7d43f41283eb56115ac39ccc5bf107ae5db6abcaba6936b896260cd09428a6b828c0bccebeb00541073dbf38e +8e700ca7c9e1538cf64e161dd8d16af56fc29d53c79648150d6d8c268b0c95c76acded723e29918690d66252bd75f5b3 +b9c4ce35b5b16b4c39b6e85800c76b26e8d0999500fabc1e5b6234a7f8da18c621266ac0d5ebc085354297ff21ac89a5 +a0c706d32063f1877f7e903048ce885f5d012008d4a8019dd00261a8bbc30834bffeba56cdeddc59167d54cc9e65f8fa +839813b736225087cbbcf24506ea7bf69138605036b764ec0514055ac174bbc67c786a405708eb39a6c14c8d7e0ec6ee +b1a5fef055a7e921c664f1a6d3cb8b21943c89b7e61524a307d8e45aa432e5765a27c32efdb32d88062cd80800a260de +b17f8202d9ed42f0f5cb1b1dbda60711de3b917a77f6069546fa3f86d21f372b8dd5cb86f1994b873ba9982404e08daf +b5211d54bd02d44d4d808ad57067606f3e9fa2cad244a5f2acef0edf82de3c496d2b800f7c05f175d01fa6ace28b44d1 +aa9c6f8f489b35fdb7544116fe5102a34ff542de29262f156df4db4ea6e064f5ea20c4bd877d40377ed5d58114b68f19 +826668b1f32e85844ff85dd7e2a8e7f4e0fd349162428bc9d91626b5ab21bdbacd1c9e30cf16f5809b8bf5da4f4fe364 +b30d14917b49437f9fdbae13d50aee3d8a18da3a7f247b39e5d3e975c60bd269da32da4e4cc8844666fca0d65f4e3640 +8c6918d8d94b36c6b9e772e9a432e66df16724e3b0660bde5ea397e6ef88028bb7d26184fbe266a1e86aef4a0dfe5faa +906d80ffd692c1dd03ab89be52e0a5a9e90a9cdbfc523d2b99c138ae81f45d24c34703f9cb5a666b67416e3bb6272bc4 +8b07e8ba22b436e64f011cacf5e89c55cd3bfb72ae8b32a3a8922c4fccb29de6f73662d6e330da6aa6e732a2187ef3c9 +9547466b4553a49adf59cc65d4c3c9401b2178947ebe3bd33c6e63cfb67d6be8729033158594f6f244b272c4487d6958 +aafcccea41e05cb47223fa8dfec0dd55964268bd4d05e24469614077668655ac8a51d2ac2bfb22862f8f4fa817048c2f +870f8c1173e8fd365b0a2e55c66eea3ab55355990c311f3042377803d37e68d712edcc5a0a2e2f5a46df0c1c8e6310c2 +b4288f792008f342935f18d8d9447fe4ddcfea350566e13dba451f58c68e27241af1367f2603a9dff6748e7fe0c53de4 +91c58c0e537d3afdcf7783601dd9cda2aa9956e11f711b15403760cf15fc6dffb40ed643886854571da8c0f84e17adfe +a43fec8ee92febed32e7cdd4e6314a62d9d3052c7a9504057dfba6c71fdfbeff1cef945d8f087bd106b5bec7478ad51f 
+99cf5e0e3593a92f2ec12eb71d00eccec3eec8662333471b2cb3a7826b7daca2c4d57ffba18299189cf7364e2af5df6d +af50f9ab890b7517ff1f1194c5b3b6f7f82eabc607687a8380be371a6a67b117aeb9b6f725556551b81f8117971706a2 +aa352430887053602a54403bd0d24d6b5181b44aa976dfa190e21851699a88127dcc904c90a48ec44610056b5dcd36c4 +964c821ea1902354736fa382a929c156bd67b9468d6920d47c27b9d0d304b6144118888d124c1f6785da596435ed2410 +b2284a67af26b5f5aff87b4d8e12c78ab37c5eb6e92718fca8549f86f4f001b660fc4520456aff72c9bcddd686603942 +83c54cbb997ea493dc75df4023071dce6da94268feaa2352373789616f012098270ba4fd60c791796a6f5062fb2cd35e +9143e8fee0b8f0f34c65c7750858093dcf165c6a83c026bfac2d5ffa746361eb4b6a14fdb43e403add901ac3735735a3 +97d7748a5b278ee47b18c9e60689b12a0a05be47e58e78bf8c04b9e8b34e2e2f2d3ac3c25c76ab2e0a75e8a54777b7c8 +b4e68f6f2d978a5411414c164c81ddb2a141b01ebe18c65a8626ca75d6432e5988310b50a888a78c3a0a242353525af5 +8976f4cc3eaf2684718cf584712c4adaf00a4d9c521f395f937e13233b30329658b3deacfe7e29fac84c496047f2d36b +a40bcdf4b6e95f1535c88dddcbf2074ef2e746b7fd232bdfd2b88f2f6d4bbf21c6b263cf5fd3e12a03476f2f5ffe00d2 +88c7b6337ee705acd8358ef6d2242d36b140afff0579a7784b3928a0c49698bd39c1f400e8a2e3eda5fbfb2e8f28fe51 +a98612ba8b450a71d2075d51617ebeb7ca401ad3cbd9b8554850c65ef4f093ba78defb00638428c9f1f6f850d619287f +b7e71d3ffa18b185c1a6bd75668ff65d985efc0a0c19f3812cafde9adbfb59ffd108abeb376e6a8877fdf5061562f82b +8a3e5fd776cc26908a108a22b1b122d60cb8c4f483cbedcd8af78a85217bb5a887df3efed2b8b4ec66e68eb02a56ca93 +b0d92b28b169d9422c75f9d5cb0a701e2e47b051e4eacd2fd1aa46e25581a711c16caf32f40de7c7721f5bf19f48b3f5 +88895739d5152282f23e5909cf4beebda0425116eb45fc5a6a162e19207686d164506c53b745fb2e051bb493f6dbad74 +adbccfed12085cd3930bd97534980888ee564dda49e510c4e3ca0c088894855ef6178d5b060bca8a8a1a427afdbec8a8 +87d00674abd3d2e7047a07ed82d887e1d8b8155635887f232dd50d6a0de3fb8e45b80b5a05bc2ec0dea9497b4aa783ac +806e1d3dfadd91cbf10e0d6a5e61738d0dbff83407b523720dce8f21f8468b8a3fc8102acf6ba3cf632ca1cb2af54675 +95a9dff67cf30e993071edede12623d60031fa684dfbe1654f278a1eb1eb7e1be47886d3f8a46c29b032da3176c0d857 +9721973288384c70a9b191436029e85be57970ad001717edc76d44cbfa0dff74f8af61d5279c5cd5c92c9d0f6c793f63 +95c22d1d9b51ef36ba30ee059dcd61d22be3c65f245d0a5179186874219c08e1a4266f687fc973e71f3e33df2b0f7fd3 +b53ec083dd12cc42ae2bae46883a71f2a35443c9ce4ed43aa341eb5f616a53b64211ed5aac717fe09ef1d50f551ed9f0 +a103dab6695c682400f60be8d5851ce07f12e4bd9f454d83b39c41ddcf1443bb14c719b00b4da477a03f341aa1e920cb +b522236988518e5363b1c4bb3f641ff91d3d4c4d64c5f065415b738160b4ce4b0c22e1e054a876aa6c6a52fa4a21dfa2 +a6a00562f0879702cdba5befd256a09f44bf48e61780e0677ff8c3fda81d8e6dc76ba1b05e3494ca9a4cef057eba6610 +b974a2ae631e0b348421f0cda5bd4ce7d73c22dd0fc30404c28852c33499818cab89fbf5c95436d56a0aab3bf2bbab51 +9148cf2a7b7e773245d4df5a9d34cf6d9d42b1a26a4ca6bc3013feca6f3941d6c44f29ba9328b7fe6ce6d7f6565f8e4a +a34035c4a63e98528a135cc53bbbcfcda75572bc4c765f212507f33ac1a4f55563c1a2991624f7133c77b748bbe1a6da +a0c45923cfb7bd272ee113aecb21ae8c94dda7ad1fe051ddb37ab13d3bb7da5d52d86fff9f807273476c24f606a21521 +81ec2ca57f4e7d47897d0c5b232c59d7b56fe9ce0a204be28256a7472808de93d99b43c824a0cd26391e6cac59171daa +8373852f14a3366d46c7a4fc470199f4eebe8ee40379bd5aae36e9dd3336decaead2a284975ba8c84d08236e6b87c369 +b47e878a93779f71773af471ba372cb998f43baca1ae85ea7ff1b93a4dee9327e2fb79691c468ec6e61ab0eae7ceb9f1 +8fc8f260f74303f26360464cfef5ee7eebcbb06073cef3b1b71dab806d7c22f6b3244ce21d0945b35c41f032f7929683 +87e3c4e1dab00596e051ce780b9a8dba02ecdc358f6ddaeb4ec03c326e4b7da248404745392658eb1defff75b1ba25c8 
+aac95d8e3b7fe236a7ca347d12a13ec33073f2b2b5a220ecfd1986ca5c3889f0e6a9d9c377a721949aa8991c1821953a +91a483679437ae126a16f5dc3bba6e9bb199dfbba417f0dc479f22819b018c420edc79b602db6183c6591b1909df4488 +94a4b2c663aa87a2417cad4daf21a88b84983a7b212ffcd18048a297b98e07dd4c059617136976fac1d9e94c8c25b8d2 +83e2a690bfa93c79f878a63c0f69f57aabdd8bede16b5966ffba7903dc6ad76775df1fd5347e6f2825f6cd7640f45a45 +a316af7ac11b7780d15312dc729499a1a63b61c4283e103ecce43c3b0cbb0f4bce6ff04e403f5c7cb670dee80c75ab99 +8d0a911c54ee1f9f7e7794732ad87b434c3f356294d196a5e35eac871727fd32a49c27c2dfa10833f9e6f9c7ccbe0064 +8b8db09028298a1f6362b346c8bfeced7cb5d13165a67c0559a9798a95b7a4a9810c02bb852289d47c59f507bd24ce77 +962d57305c518f175ed5d0847fb52ddc4258ca0e4c9ddfc8c333a2ee9f8b4e48d25a3d7e644b785a5953e2e4063da224 +92e0799491898271769250fe88b0cb9dadec98ac92f79de58c418d23ef8c47fcf21ddc90e0cd68bb8f1deb5da82da183 +99855067125f6a6c3a3e58d3bd2700a73ef558926bd8320d2c805a68e94207b63eda6bdc5a925ec36556045900802d51 +a724ae105ab4364a17ddb43d93da1e3fc6b50213f99b7be60954b24dc375c4f93a0737f4a10b4499b6f52667d5f3a64e +82070fb43a63fb50869b118f8940108f0a3e4cc5e4618948417e5cc3801996f2c869d22f90ca4ca1fdbef83c4778421a +b25c04365d6f24d5d3296c10d85a5de87d52a139ddbcbf9e0142074bc18b63a8bc5f5d135bd1e06c111702a4db4cee28 +851093282dcda93e5c98d687a17a7ee828cf868f6c85d372d9ae87f55d0593d8f9f0c273d31f7afa031cf6aea6a7ef93 +93f04f086fa48578210ed207065d80a40abcc82d8bfc99386a4044561d35748ff6c3da6489933c23644ad4b60726da8a +84b1b50d1e876ca5fc341bbedab5b3cc0f6a3f43ea7dd72605f74d0d9c781297b2f12b7872dd600924f1659a4cdf8089 +81b0ba88c582d3956f6b49ca3e031c6400f2ec7e1cd73684f380f608101e9807f54866be0bb9a09c03953c4c74fbb3c8 +a641af6ac644c41a55dee2ef55d3c37abdb19d52bc1835d88e7adda6b6ccd13987c5fd9cba9d318cabb541aa6a0c652e +a7b75b0624d04ad0901070e691eb2d2645b60f87e9d6b26e77a5fb843f846c32fc26e76ae93fd33fe3b857f87bc25162 +a81ba3e2ed0f94c67cd02ba7360e134f8becf7ed2ed2db09b9f5ef0942f7073bfee74ca446067db6092f7b38f74ccc11 +ab80edcabab5830a24210420f880ebac4e41bf7650c11ba230f4889634dbf8e8e2309f36be892b071c67a3bab8fc7ed6 +94d69b64675076fecad40fae4887fb13a8b991b325fa84e9d2d66e3b57646de71a58ad8fd8700fefb46975b18289250b +b44fc0df480cd753a041620fa655be9df74963ae03d4625847d5bb025ceb37f48d19c8c9c444546fba5fe5abb2868506 +b56e2c51324d6200b3d9781b68b5b5e1617a68afccd28b3a12a4be498d2e3aafcd86514c373a9f3a001db733010c29cf +a359a0c172e5cd7ce25080dd2652d863d7c95a4a502ae277ac47f613be5991300f05978404a0acb3bcda93524dcf36e4 +b01427a3dfdf8888727c0c9b01590b8ae372b7b4080d61e17ccb581bac21e61c4a58c75db7a410d1b2a367304e1e4943 +95cb08be4a96c18fbf9d32a4bbf632242029d039a5fdea811488d3634cd86520d4f9806250a8c01855ee2481210f542a +b8594fe6c0717164058f08aedeed1853523f56cec5edbf0d2be271fa5e8bfd61f2974b0f3988d70f5baa2e7888c7ec1f +8f64ee89f59daf74fa1056803247c9d678783ee3917b12a201f30f7523957763e979ceaddb38bae20de40b9885728049 +b6093ee4bdb837bcc59172e236f4bdbd439c0a5a50e2aa16636cbff81b51e92989eb5f80a3f75c37ae7b5b942e55b3d2 +913b6fbb7b43e3e5c49e96cd8e82ed25c655e51c7b8ca82e8fbf92b01ac83c39d52f6f4efab5d39b0591a0538601a86f +81f42668479ca0bec589678dc0973bf716b632578690efe1a0f13de630f306fb4a189a98c2302572fd85d3877ee030b5 +90ff89c38a9a7189f28d35a088657f52283670e7fec842fa91c265660ea2e73b0ad6c46703d649f406f787490b7a7e4b +9077b8b5f1e083183f3152ceb9c5491b5d4b86525a08879f7fb6d5e27f9f1a6867cf0d81b669a4a2d1f1654b67fa8d9c +a7a0275cf5b894adbf2e54a972310cfe113e811872111d6ee497d03750d9f6ffa5517b6c13a99b111a4a91e8e4dfeeee +a08976bf8125b7538313a584bbe710741d630cab067a204ad4501cc4938874ce7aa6a1a826259c2e82ef10a66f1f36fa 
+8aa45385b5b97f1f3e45f2bbf7a4f3e8ef068e628608484971c97adeb610ebd5deec31317e03eb6536808921062c04db +945b106b8f3ae85e60dfd34ef3dcc079bc6f0aab6df279ed000856efd51321462038ac0a1ca5db3ebf6379bc341e7c55 +a4199c87a96f98cc9d8776fe6de131d2c706b481eb9e9a3bbc50a93d492d7fd724ea469f723fbcfb94920cb5b32c1d76 +a5347b1b2f6149805de67546c5ed72253311099bf1473dbc63edcf14a0a5e68d401f5341338623fbe2e2715b8257e386 +af5dcd03ddc3769e83351d6b958d47a06d4e5224bd5b0ec40ffe6b319763fab8572002f4da294a9673d47762fd0e6e1d +82ec1031b7430419d83b3eea10a4af4c7027f32b91c3ae723de043233b4a2e0c022c9e0f5a1ac49753800f119159112d +8a744d911b67d03b69811f72e9b40d77084547e4da5c05ff33893468b029a08266fc07303f7005fd6099683ca42b3db4 +93ab566bd62d3439b8fc620f3313ef0d4cb369f0f0c352cdaf8e5c9e50b9950ac3540b72f4bf5adcb9635f9f7ce74219 +b2a211d72e314799bc2ac7030b8bbb8ef4c38ebd0ebb09d6cbd43bd40c6c61d80a3aad02cc73f5775a08b9657da20a48 +98d60f0a98d28718e0c6dcccc35a53521ea7f2d8fe08ea474374a336b44cea4cd1c63b31f2ad10186822bfb54aca53e6 +831f89cb94627cfe554d46ae1aad8c1cde7ebe86c4bd8fac4ef73ac2d5b491f5efa5dc4198cb8ffbec563e0606b91d89 +8f8552583bc6cb3fb176b7202236ee4128faf0c8ec608f9150f8e011d8c80b42aab5242c434d622b6d43510eaef752c0 +897bf27baaee0f9a8445200c3d688ae04789c380d1b795557841606a2031092328eb4c47fef31c27fdd64ba841d9d691 +b57589a4af8184b4a8ceb6d8657a35522672229b91692c1cec3ac632951e707922a00086d55d7550d699c4828bcfaab1 +98c2fe98095e026aa34074bcff1215e5a8595076167b6023311176e1c314b92b5a6d5faa9599d28fca286fadd4e3b26c +a034992e563bd31ede3360efd9987ecddc289bc31046aa8680903bb82345724805e6f6cf30f7889b6b95cf7319c3aea1 +85c33d9f10cc7185f54d53c24095e621966065e0ff2689a9aa6bb3d63706796c37a95021738df990c2c19493c0d44b64 +a8c1247d6de2215f45b50dd2dc24945ff9b93184bcc2159b69703b0bba246adcd1a70a12659f34c4ca4ba27dea6e3df5 +83ebdad2834c97bf92aac8717bab2f5cb1f01026b964d78e2f3b44e99d7908e419165b345d2b2f125b903096584e6683 +b0af6f7f81780ceb6e70adfd98e7702ec930c8ca854b50704c4a0fc8b887b9df60a6fe9038b487f3ed0eb8eb457307ea +933ec7e53882453898617f842ab2efae4756eb6f6ea0161cced5b62a0cdde4c08c7700d52f7546d4dd11a4c9e25d624e +adf6e6d4706025f85eb734f506dde66459c9537a1abf6189199cf219ae583b461e11c6242fce5f0795e4d9025270fabf +89e4316319483098761b0b065df4cfb542963b7a2556ba5425b6442fb0e596eb2a4f03e2dc8c617eebe8f243a12e7d10 +90c5a147555759ebc4d0e15e957a548315f9994ef0c7a3f53f2d18da44fb93bf051d96ba8551597a6f3e701b926fd791 +a151a9a5199c72c697b771cd81e550fc6f9596c752ae686ad988b316a7548360cf9785ab4645164d96cfdf9069a94020 +80cba11a3977729d7948db5bcc186159f4cae7c0a835bb38bb781e287dd6c238508e748f23454405c9d5eed28e77df02 +ae4b92ea03cb8ad12ad3ec76869ad05acb09f9d07a3c9a87dec0e50d9a276fe5d3d515a8c446f3aa35cd7d340a22c369 +8630062709a1f180f952de9f1ca3f41acce5420677f43d9619097e905a6237f1908d66db7a4dfdf1b2b92fb087e9944f +81defc33dd383d984c902c014424bddd5e53b013f67f791a919446daa103b09b972fa5242aba1b1dbe4a93149373f6c3 +963891ecaea97e661bac2594642327a54f5a0beb38fcb1c642c44b0b61faab9c87b0c9f544a3369171b533d3ab22f8f1 +932fadbff5f922ddcd4da942d57fe3e6da45c3d230808d800a3ca55f39b0b62f159be31a5924b395d577a259f48c6400 +992ce13bd037723447f88aeb6c7722fd9510c7474192b174ea914ed57c195c44c298aec9a8cabac103f0a5b50051c70b +b032157b3e4fe69db6ce6bb10bdf706a853fbd0bee08c2ab89da51ad827425df5df498b90e7a30247a7f9e954ca986e5 +b2478d4874578da3d5000893736bb65712e6aafe96e6fa5cf5878ae59ba0ce640dbe5d76ec2b5baca75af57def471719 +a387c17b14dd54910fecf472f760e67cf71a95e9e965cc09484e19581ada65e79938b86136a93e287e615fbd4908e080 +98f02be271d0f8841d8d561163f9e55e99b57aff121a93fba7a4654bcf15a0899811f00f5bcbfbebd98e365a0e332e97 
+a3c34f01d54cab52a8890391b8cf152cc9cdc16e7e53794ed11aa7b1a21e9a84d39ddcfbcb36c5df6891c12307efc2e0 +a940331f491ec7ad4a9236ca581b280688d7015eb839ee6a64415827693d82d01710dc4bbd5352396be22781fea7a900 +b10874ed88423731535094031c40c4b82af407160dfade4229ac8f4ef09d57b3db95c4a9d73c1a35704f6bd0d5f6c561 +a9c5a4a7680261c1b0596f8ab631d73d4a7881b01e6559c628b5cdafa6dd2b6db2db64f3f2ab5841413a8a52b966a0da +8fc154564a61d5e799badc98b43a3587f804385a850adce9a115cbd2ad911f3fd4072b8e6b22fc6c025a6b7e7ea5a49f +b9caf7c6dcce3d378aa62c182b50bc9c6f651eb791d20fffa37ef4c9925962335fe0b3bc90190539312aa9ccf596b3b9 +90c5b7acf5cb37596d1f64fc91dee90f625f4219fa05e03e29aebea416c8e13384f2996f8d56791bcf44ae67dc808945 +ab8d311fc78f8a1b98830555a447c230c03981f59089e3d8a73069d402a3c7485abe3db82faf6304aaca488a12dbe921 +8a74fda6100c1f8810a8cacc41b62875dd46d5c4a869e3db46202d45a8d9c733b9299dda17ce2ad3e159122412a29372 +8769dcacba90e6fc8cab8592f996c95a9991a3efecfb8646555f93c8e208af9b57cf15569e1d6e603edac0148a94eb87 +854fd65eea71247df6963499bafc7d0e4e9649f970716d5c02fbd8708346dcde878253febb5797a0690bd45a2779fa04 +83e12dc75ef79fd4cc0c89c99d2dace612956723fb2e888432ec15b858545f94c16fae6230561458ceee658738db55ba +8416ef9ac4e93deff8a571f10ed05588bef96a379a4bdcc1d4b31891a922951fa9580e032610ac1bb694f01cb78e099b +93aea6e5561c9470b69d6a3a1801c7eef59d792d2795a428970185c0d59b883ab12e5e30612d5b6cde60323d8b6a4619 +91d383035aa4ec3d71e84675be54f763f03427d26c83afb229f9a59e748fb1919a81aca9c049f2f2b69c17207b0fb410 +b1c438956f015aef0d89304beb1477a82aed7b01703c89372b0e6f114c1d6e02a1b90d961b4acbb411cd730e8cacc022 +a1ee864a62ca6007681d1f859d868e0bcd9e0d27d1da220a983106dc695cb440980cfdb286e31768b0324b39ae797f18 +b57881eba0712599d588258ceada1f9e59c246cc38959747d86e5a286d5780d72d09e77fd1284614122e73da30d5cf5c +a48f9ae05ba0e3a506ba2e8bbce0d04e10c9238fa3dffa273ef3ffe9ec2ed929198a46507c0c9d9b54653427f12160f9 +8db18da7426c7779756790c62daf32ae40d4b797073cd07d74e5a7a3858c73850a3060f5a3506aae904c3219a149e35d +a2bf815f1a18d7be8ce0c452dfc421da00dcd17e794300cdd536e4c195b8c5b7ccc9729f78936940a527672ac538c470 +a34c6f1f2398c5712acc84e2314f16d656055adcafad765575ae909f80ab706cf526d59e5a43074d671c55b3a4c3c718 +b19357c82069a51a856f74cbb848d99166ce37bd9aca993467d5c480a1b54e6122ebddb6aa86d798188ea9f3087f7534 +b440eac6f24d12c293d21f88e7c57c17be2bdb2a0569a593766ae90d43eccf813a884f09d45a0fb044ee0b74ff54146a +b585d42ef5c7f8d5a1f47aa1329f3b1a566c38bf812af522aa26553010a02bfd6e9cc78fdb940ef413e163c836396a5f +aca213b27f3718348e5496342c89fffc7335f6792283084458c4a1aa5fe0a1e534fcec8e7c002f36141308faae73ef2a +b24c07359769f8ffc33bb60c1f463ea2baad440687ef83d8b7c77931592d534b2c44953c405914ace5b90b65646c1913 +b53dfaf381205a87ca4347328ff14a27541fa6436538f697824071d02d4a737ceb76a38dcc6e8dadef3b5bc6442f5109 +b55972d8ed5197215c0a9144fc76f2cd562ca5f4e28c33a4df913363fd1388978b224c44814adb4c065c588a4ac1fe10 +a3303bc650e120c2e9b8e964ad550eb6ac65ffe6b520768b3e8735565ae37eafdc00e3c15fae766d812f66956a460733 +b11e53912ea0e40c3636d81d7637e10c94cc7ed9330a7e78171a66d02b7603f4cb9b3f6968104b158de254e65b81640f +b076bb9f6d396aa09c2f4706ea553b426fdfd87d7d69e438285b74d334e82f73973cb4dbd6cb1647493433dad65dbc41 +9415828b1632175f0b733541e32c26a9c88fe12c721c23e595f2efceaa7f867f359e32564b7c032185686587ac935cf4 +89579a112c306181c79aabdbf683e7806357febcb73bf5e8883862ae29618ef89498b62634404bb612d618fcd16da415 +8761bcd55d04297c4f24899e8fb9f7c1fcd7449ae86371ee985b6a262e228f561c2584980694d9bf354bdf01543edb6a +9100c88bf5f6f00305de0c9cf73555f16a2016d71c50cb77438e8062bd549fa5407793a8a6a7e06398756777680a2069 
+9235dfef45aeff9c174898b0755881b7171ed86362854f0eabc3bc9256176c05a5dc27ca527c91c3fa70c0ec5fd5e160 +ac53b1d677cebab6a99381dd9072b8ac1abae9870ec04a1f8d2a59b6f1de797c1492b59af6948f5cf2b20599170f5bba +946542936b0c59156e8fd5c1623b41369bc2cbcc46ece80360dcb5e7cce718a3dd8a021f0b9c223062a4e43d910b634f +b1e9939b34e1fcc026e820fcfa9ce748b79499f8e81d24a3ef0457b3f507fe5fa37b975a47c143e92eb695623b4e253b +9382d9b5766f6ae960d8a8435e8b5666e57ef8e5f56219e7bfd02857afe5cb16f44d70a9e444cfb1008649ae9b863857 +91770ed1215ed97dca1282b60b960be69c78e1473edb17cd833e712632f4338ff74bf435c3b257439497c72d535ae31f +8eb2cbe8681bb289781bf5250e8fa332141548234c5c428ff648700103a7cd31fdc2f17230992516c674aa0ab211af02 +a823b71c82481bc6ac4f157d5c7f84b893a326bbb498c74222427ded463d231bc6e0240d572ab96266e60eb7c8486aea +a13ce4f482089d867e5babcd11c39fa9a9facd41a2c34ee2577de9ce9c249187e16f2b3a984cc55f9e45b9343462d6d2 +8d80e7bc706059cf5151f9f90e761b033db35d16b80b34dc8b538adc8709d305a0c06933dcd391e96629cf3888c8bf87 +abcd36cdd86c0fb57fb7c0d7a3b9af5fd9aed14e9f4e7e84b0796c5c0ad18c41585e8c46e511cef73dc486fe43f6a014 +a947a5b6916f416fa5a69c31aba94add48584791148b27d0b3ed32c02a05dfc06f7fdc5006e3b2503bdf6e410e30f2fb +b158e621580659f1fa061d976b8591ac03b53ecd23d9eb2b08c1a20353d78438287749664d196020d469ef44b3b8752e +90a5a9540281e481ac4b8d29968f477cb006b56bd145529da855d65d7db0cf610062418c41a1d80c4a5a880c0abe62a0 +b2c91808b6289d08a395204a5c416d4e50a8bb1a8d04a4117c596c4ad8f4dd9e3fb9ce5336d745fc6566086ae2b8e94f +af6767c9b4a444b90aeb69dfddae5ee05d73b5d96e307ce0f3c12bccca7bc16475b237ba3bc401d8dafb413865edf71e +8dcecf624419f6517ef038748ac50797623b771d6111aa29194f7d44cfb30097ced26879e24f1b12a1f6b4591af4639b +954437559d082a718b0d6d7cec090532104ab4e85088e1fc8ee781d42e1a7f4cdb99960429707d72f195ff5d00928793 +80f0b7d190baa6e6ab859dc5baab355e277b00ddcca32e5cebe192877ad1b90ead9e4e846ca0c94c26315465aeb21108 +b8c29f181ed0bb6ac5f6a8d9016980303bb9a6e3bd63ce7a1a03b73829ac306d4fab306ac21c4d285e0d9acb289c8f2a +a7685079fe73ecaeabf2a0ef56bad8b8afb6aeca50f550c97bf27e6b4a8b6866601427fcd741dc9cb4ce67a223d52990 +ada2ebf6f2a05708d3757fbf91365ec4d8747eb4c9d7a8728de3198ceac5694516ab6fd6235568aecd8d6d21fef5ef48 +846bc5da33d969c53ab98765396cab8dcdbb73b9836c9bda176470582a3427cb6de26d9732fab5395d042a66bdba704c +800a3a7ea83ce858b5ebc80820f4117efa5e3927a7350d9771cad9cb38b8299a5ad6d1593682bba281c23a48d8b2aa71 +a002b18595dec90b5b7103a5e3ec55bdd7a5602ee2d3e5bd4d635730483d42745d339521c824128423dfe7571e66cbaf +b6b4e2067ac00a32f74b71007d8ab058c2ef6b7f57249cb02301085e1a1e71d5de8f24f79b463376fd5c848f2ab1c5bc +a3e03036db1b6117efe995bf238b0353ad6f12809630dca51f7daaaf69f7db18702e6b265208944bfb1e8d3897878a51 +add16712f66d48aab0885bd8f0f1fb8230227b8e0ffca751951c97077888e496d6bfab678cb8f9ffba34cee7a8027634 +ad211af2dd0748f85a9701b68c19edd4a7c420e497cb2e20afdc9df0e79663841e03b3c52b66d4474736f50d66c713ce +8c8a899ce0f16d797b342dc03c2212dda9ee02244c73c7511626dba845d11a0feb138441da5459c42f97209bf758cd9b +a17efc75c7d34326564ec2fdc3b7450e08ad5d1de4eb353de9d1cd919d90f4be99f7d8e236908b1f29cf07ae1ffe0f84 +862d4a8b844e1b0dd9f4deff180456ebed5333b54290b84f23c0ddb2725ac20307e21cbb7343feac598756fe36d39053 +9187fbb19e728a95629deda66a59e178f3fcd6e9d7877465aa5a02cea3baba2b684bd247b4afbf4aa466b64cb6460485 +85ae5636688d06eab3be16e44fe148515d9448c6123af2365d2c997f511764f16830610a58d747adab6db5031bea3981 +8aa8a82891f4e041ce6df3d6d5d7e5c9aaaffe08e0a345ac0a34df218272664c1b7be2450abb9bc428bd4077e6e5dcc4 +8c3bcc85ea574dfe1b9ca8748565c88024e94374434612925b4e9a09fa9d49c0a56b8d0e44de7bd49a587ef71c4bff5f 
+9524f9dd866fe62faf8049a0a3f1572b024120d2e27d1be90ad8b8805b4e2c14a58614516281cc646c19460a6b75587c +84580d9c72cfa6726ff07e8d9628f0382dc84ce586d616c0c1bd1fd193d0a49305893eae97388de45ba79afe88052ee9 +b5573e7b9e5f0e423548f0583423a5db453790ab4869bd83d4d860167e13fd78f49f9a1ffe93ddddf5d7cd6ec1402bc4 +aff658033db3dad70170decb471aee2cf477cf4d7e03267a45f1af5fd18200f5505c7ce75516d70af0b0804ec5868a05 +84a0eab4e732a0484c6c9ed51431e80cea807702fa99c8209f4371e55551088a12e33a11a7ef69012202b0bc2b063159 +a68f8e730f8eb49420fe9d7d39bb986f0584c1775817e35bb3f7dae02fd860cddf44f1788dc9e10d5bf837886b51947f +946002dd6cf7a4fd3be4bf451440e3f3fd7e9b09f609fa4e64767180b43146095dfc4b6994287f8cfa6d1390d144be71 +b7f19777d0da06f2ab53d6382751dc5e415249d2c96fce94ef971401935c1d1f7d3b678501e785cf04b237efe2fe736e +81e5c66dd404fc8ffd3ac5fe5e69ead7b32a5a7bc8605a2c19185efcc65c5073e7817be41e1c49143e191c63f35239c1 +b5f49c523532dfa897034977b9151d753e8a0fc834fa326d0f3d6dacc7c7370a53fc6e80f6d5a90a3fbec9bbb61b4b7c +8fc8e78c07319877adfaa154a339e408a4ae7572c4fb33c8c5950376060667fbfc8ede31e1b067933d47e3fdbf8564d7 +859cfef032a1a044532e2346975679545fbb3993a34497ce81bdcc312e8d51b021f153090724e4b08214f38276ee1e0d +ae476722f456c79a9c9dfdc1c501efa37f2bff19ab33a049908409c7309d8dd2c2912aa138a57a8d5cb3790ca3c0ba2f +89acbbeffb37a19d89cfe8ed9aa8b6acf332767a4c54900428dd9ab3bf223b97315aca399c6971fe3b73a10a5e95a325 +90a4a00418fdf4420a4f48e920622aae6feb5bf41fd21a54e44039378e24f0d93ccc858d2d8a302200c199987d7cb5e4 +a3f316b0bd603143eba4c3d2f8efe51173c48afe3c25b4ca69d862c44922c441bd50d9a5040b7b42ba5685b44071c272 +a22f4dc96fedd62b9a9f51812349e04d42d81d0103465c09295a26544e394a34abdc6ded37902d913d7f99752dbfb627 +a49f51baf32d0b228f76796a0fef0fe48a0c43ec5d6af1aa437603d7332505be8b57b1c5e133bc5d413739f5ae2ce9d0 +a9e4fe133057a0cd991898e119b735b31a79811307625277c97491ff5d864c428cfa42ae843601d7bb05c0313472d086 +b987edfe0add1463a797ff3de10492b2b6b7ef0da67c221ab6f0f2b259445768a73fbe495de238c4abbe4d328e817c49 +b7f0e4532a379a4c306bbef98b45af3b82b17175dfe0f884222ed954c12f27d8a5bdd0cdeb1df27ff5832ba42a6dd521 +9471bc5ad5ec554acfd61b2eb97b752cb754536f95ae54ca2cbd1dc2b32eb618881f6d8a8b2802c1a4e58c927067d6cf +b4c84f09225cf963c7cc9d082efe51afbbbe33469dd90b072807438e6bde71db8352a31bb0efde6cd3529619812ef067 +8f08005a83e716062d6659c7e86c7d3b51e27b22be70371c125046de08f10ea51db12d616fbf43e47a52e546e7acaac7 +a8937e66a23f9d9b353224491f06e98750b04eca14a88021ee72caf41bdce17d128957c78127fba8ef3dc47598d768a7 +80ad991de9bd3ad543cddeaa1d69ca4e749aaefb461644de9fc4bd18c3b4376c6555fc73517a8b1268d0e1e1628d3c1f +b22f98bca8fe5a048ba0e155c03e7df3e3cee2bfe8d50e110159abdb16b316d6948f983c056991a737b646b4d1807866 +b0bb925c19ca875cf8cdbefa8879b950016cc98b1deb59df8b819018e8c0ad71ea7413733286f9a1db457066965ce452 +95a991e66d00dd99a1f4753f6171046a5ab4f4d5d4fe0adfe9842795348a772d5a4a714dba06b4264b30f22dafa1322f +ad91e781fa68527a37c7d43dd242455752da9c3f6065cd954c46ae23ce2db08f9df9fec3917e80912f391c7a7f2f7ffa +a202d3becbf28d899fe28f09a58a0a742617c1b9b03209eca1be7f072a8ada1f7eac2cc47e08788d85e1908eb9d3d8ee +a360ccb27e40d774d5a07b4ebed713e59a0d71b3ee3f02374e7582b59ec4a5ce22cc69c55e89742ba036dd9b4edd8f34 +a10b897a946882b7c9e28abbb512a603ffa18f9274369843eb3491524a321df1f572eea349099ac6e749ea253c901ea0 +b782a672cd344da368732ecd7e0a1476c2af04613d3eb6da0e322f80438af932bd6d49be7a6f69f7c877512731723d89 +aeccee8dfd764e1adcfc4bf669e0fa87a94e7c79324333e958df47888bff5cec358b8b5bbb48db54822b54d11bbb4bc6 +ad4953913662a9ee8753a354864339f43916f2c2390d0a3f847c712b42718ee00ee14158d730709971941e8680d54560 
+92ccb31d6c9e8940c7e8a4873e7eb9de9fb2fa2bac344fa367062ea451fd49a6920a45218dca3ee968711397d2a01536 +9448d9b2b3d12dde9b702f53373db8b8595f9d1f9de2ebee76de292f966f375316953aadf6bfc0e4e853e1fa12d8f02c +8919230878a7219da8c80a4b7d00b9169fb503e72d79789dd53863c243b8d0fb0a819d46fa636d805d0b9b1d15d1f2d9 +b6581ab01215aac023f5e6f57419b6aa63c0743c07caf57d4e146b56b02d90ce1423f70489ac3a11e5c968cb924f937c +a793ec1b1fe56a76920296af06073caadfd6f1d7e30950f8ca13de3de45fe275ca4b361f5249d9405264c3a06ebb5502 +86385b4a4e1bfb5efe7bfef8fd0dfeba7f4400852237cab60febb1dfa409e497a649e81284b5a15fe680b78927256756 +85d10600de96103daa7c90657174b6cb4a1286df5379f1eda9f11c97f9df57043c290eb1ae83658530fe0fd264867b86 +ae01b2396d0f598c21659cd854c15edd4904a34d22278aef97c9260a14a8b250b52d972d304ac4b187c24d08795d5355 +b91b3e4b6fc06e88081fe023ef1b773d82c628eb0f73a2731a9aa05b0dc89b7aeef2eea60125d302e696f45c407aeac2 +986d0f478e33af7568eab6bb26a55c13ffd7cae27525b4abe2f3a994bdb11bbc73d59bdb9a2f6b6ba420a26f8f620ba6 +9746f4fdeef35feaff1def0ea5366b64f21ed29749ae6349f9cb75987e7f931952f913f446100f2a6b182561f382e8eb +a34a116cfde1acbce0d7de037f72a7ca30ab126d8f4815b2b8bcb88e0e6c89015a4daaf4d4ce8eae23eb5d059cf9a5cf +80c3ea37f6a44f07cc9c9c881990f2a5deb9f9489a382718b18a287aa3c50ee6ebe8fd1b3afb84a3cf87f06556f4ca15 +97cff3bc88cfc72ce5e561f7eeb95d4ffb32697e290190c7902e9570c56b3854753777fc417fd27536fc398c8fefb63b +b8807232455833e4072df9bffa388ae6e8099758c2a739194719af7d9ed4041974a6cd9605f089de8b43f0e12f181358 +96f79fca72f75dc182c71f2343f0c43b06d98563fd02d2e1fbc031b96601608d8a726c811a74bb51ab8b0a3ce3632dc4 +b5262761680a4235a8c1257de4735cdcadf08d5d12c6e9d4f628464d5c05dfff3884a9ef2af3b7724b5a8c97e6be74eb +b6ce0eada73433d98f8fae7d55e4ea2b9d9d7a0ae850d328dd06991f27b1f03e470868fb102800ff3efe4ee1698531b9 +a37b7d9fe9d3fdfbc72c59cf6cacc7e7a89d534dea3d73121f7483331aec8ab3fbff58ffabb943b75d6f86df0ba43262 +93fce9be8a27fcaa1283d90d3e87265a6221ee302ec708161a42bd00ffe8e726743d9e187e1bf4307c0e3f25afbb1d44 +a4ea919021346ae7ea69d5e8f46d860b24c35c676b62f4e577c90e0c05c5646fe73721b143b7c38835dd4b443e6c3676 +b79983a5948453f70dfa4c396ce1945204498fe79f40c0667291bd0fdd96ed0b9ea424571f7ade342275c854c9f03d9e +866f8e395ed730b614b70bf999cad6e87e9086c1f5aea8d69020b562ee285dd0fb93afaca0dd13a0713f74a3f9340f01 +a3fef158782292c6139f9a0d01711aa4ed6f5cac11d4c499e9e65c60469ae3afbde44fb059845973a4b3bbca627b7eb7 +b4a2c0321b68f056e7d8051beede396fa2f0704d8aa34224f79f7b7a62eb485fc81889cb617019622fd5b5fa604516f5 +8f0e3edddbaead9059df94de4139e3a70693c9ea9bc6baaa5695dddfd67263b33926670159846292801941b9a0c6545b +9804e850f961e091dadd985d43d526ba8054d1bf9c573ed38f24bbd87aeaad4dcba4c321480abc515a16b3b28f27bb2a +95f330da28af29e362da3776f153f391703a0595323585220712dae2b54362cc6222070edd2f0dd970acfbe2e3147d5c +82d03b771231179cc31b29fe1e53379d77b5273b5c0a68d973accd7a757c7584dbb37f0507cdfde8807313ec733a6393 +81b3c39a9f632086e97b7c1f0ec7e2eaf9dc3cb0d84dec18a4441dbdc9fe9878fde4bcfa686bca1a9522632a353a5566 +a2db124ab2b493d5f9a1e4ca6b3144593c2fc8bfac129fd79da11dfbb7ef410a234fda9273a50a5ca05d7b37cc2088a2 +aa8550633c9449228702690cc505c0fc4837ea40862058e8f9713622b34d49fdc3a979b9317993c5da53b5bb5b7f4974 +ae783bcf7a736fdc815d0205b4c2c2b2fee0a854765228f76c39638ba503e2d37f1e28f6bdf263923f96fead76b4187b +b5ec86092c1d250251e93bab2f24e321afd2cd24cf49adfcbed9e8bc5142343ae750206c556320551e50fc972142f0da +b3b5791b590a6e9b3f473d5148624014aa244495249322a5d75cde2c64117ff9d32f4b0698b0e4382e5e7f72933061f8 +876c6a9162c17b16d6b35e6ce1ba32e26aec7dd1368bceab261ab880ad845c91e54b96a52c7d3aafbfbafc0e37139dca 
+902ddb5774d20b0707a704486457c29048776a5b88c377b14af6616c8ddf6cd34f49807df9c9d8866d6b39685cfb0f19 +8b87f71f94bc96de927d77a5d7123fa9cdda8c76aff64a5e6112cbc2eca43b07f8376db3e330f8af6a1db9b948908a6a +a69a5922e572b13d6778218e3657f1e1eea9a9682f6eb1b731d676d03563e14a37ff69bc5e673c74090ecb0969a593f7 +aff3510d78ba72f3cf5e3101847b7c4a956815aa77148689c07864e8a12dd0ef33d5f6c8cb486e0ea55850161f6afed0 +aa9c459cb2a008d94cbee2c6b561d18b0d7c6ffa8a65cbf86ae2c14eec070ee9d5324f5d38f25a945ddcd70307e964c4 +8310e15b050b1e40ece7530b22964bde0fd04f48dfffdec5a0d1fb8af0799a7fdc1d878139fb7cb8d043d3a52c2d1605 +b8f0856ce2c4034ee4041d0383f25fb0eeefc00b82443311a466fc18608313683af2e70e333eb87e7c687e8498e8a1ce +a8200a75c158fbb78474cab8a543caecd430b5d8b9964fc45d2d494dd938021cd00c7c33413ad53aa437d508f460a42a +a310091472b5b42b02176b72d5f8120bdb173025de24b420e3ca3fb9a386c39092a1d1bb591c6f68ee97a268a7ff9e95 +b23f1bf8bcec9cb5232b407115eead855fd06f5bf86ba322ad61d45460c84f0f36911aba303de788c9a0878207eac288 +ae4c129ad6d08be44690bb84370e48bfd92c5d87940750ee2c98c9a2604456f7f42727ab211989657bb202f6d907df04 +95992057d654f3e189a859346aa9aa009f074cb193b7f5720fa70c2b7c9ce887d886f6cff93fa57c1f7c8eaa187603f6 +ad12d560273963da94151dd6be49c665d7624011c67d54ab41447452a866bc997e92a80bdd9ca56a03528e72c456dc76 +8e4eda72e9cfcaa07265bb6a66d88e9ce3390ae1a6b8831045b36ea4156b53d23724824d0f0bca250ce850c5926fa38f +980fe29c1a267c556532c46130fb54a811944bdfea263f1afcdab248fa85591c22ac26167f4133372b18d9f5cce83707 +a7da9f99ddde16c0eac63d534a6b6776ad89b48a5b9718a2f2331dce903a100a2b7855cf7b257565a326ddc76adc71a5 +8ca854c55e256efd790940cb01125f293e60a390b5bd3e7a60e13ac11a24f350a7eb5ebddfa0a2890905ca0f1980b315 +9440335818859b5e8f180893a8acedceabaaa44e320286506721c639a489b5bfb80b42b28902ee87237b0bd3dd49552a +b9da545a20a5e7d60fd0c376dcaf4b144f5c5a62c8ffa7b250c53ce44be69c4e0d5e4e11422ef90593ae58ae1df0e5d3 +b75852a850687f477849fc51e0479703cd44428671c71bfdd27fe3e7930b97d2fc55f20348ca4e5bc08db2fc16a4f23c +b515081d8d099e4b6253c991ca2d3e42633f5832c64aa8f9cde23cb42c097c2c3717c46c5f178f16c58295f97b2b3fe7 +9506c9902419243e73d3197e407985dd5113f16c6be492651bbbf9576621942710aea74522d6fb56d5b52c6ccdaa4307 +952673ae27462a0f6c9545eede245c2f8e2fd6077b72a71f5672f1a5a02c263bc2a66f24f0e30376feb7a8187b715f08 +a8f1e2085ed666a8f86b474d9589dc309d5c83bd53e745f8e09abe0dfbaf53e5384c68580672990344d4aa739438b4d8 +ad6e04d4a67a5a5529ceaf7de6e19416be5b4c436610aa576ac04aee3b73317da88f891121f966393a37f52b775a2dd8 +a35a884736f08c7f76923ae7adb17fdac04e6c505178bca9502eaa2ed16d4d93fa953fb6dcf99e9e9962a6eb3eeead00 +b8af72273360bab4b3ca302cf0659717cbfb335fbc9ad4ffdd3340113ece9e63b2bdbd611e5f6b740a4689286f9a452d +b1a1f4ba2640800c3ed3892e049f6e10f8a571efa3bbe21fe2d6cee8fded171c675a3bb8aa121e2d1d715de84bad2e2b +8102a6c3598b40da4d6e8eccfdd5dadc8d6262e38b69c5b211b0732f4c6e3045d79fba12770a0b2b66f1e9f4664b1510 +90979587d75bf12819f63832beea7dcbef101f6814bf88db4575bfcd9cf0ea8eceba76d4d6db17630b73b46c1acfe011 +8dd98f14d2beb5b5b79cc30f6825ec11ed76bd5a8864593ffc0c2baffab6872bad182e1c64b93aab8dd5adb465fa5cec +8083334dadc49c84f936c603a2857f174eda5659ab2b7214572f318aba3ebd7b1c50e7cbea57272b9edf106bd016df3b +a634d08d2e8641b852e89d7ccab1bab700c32fb143bcbea132f2a5fb2968d74ded2af4107f69818798f0128cc245a8cb +94fc2dccf746d5b3027f7cf4547edf97097cd11db8d6a304c1c2ca6b3aba28c1af17c08d2bbb66f88c14472e0196a45e +b257a6fb01424b35e414c1c002e60487abb3b889d74c60cbdbf591e222739c6f97b95f6962842401f5e2009e91b28c55 +81955bdbf25741f3b85d5044898dc76ae51b1b805a51f7c72a389d3b4d94b2e3e0aa1ec271685bbcf192ed80db7367ab 
+86eb229b66c542514e42b113b9de7d4f146861a60f2a253264873e7de7da2ac206e156ff11f2de88491b9897174fe2f4 +8b8db00533afbb56b3d7d7a9a4a6af3cebb523699ffcb974603e54f268b3ef739c41cd11850b9651d9640d72217c3402 +8b7cbb72a6c4408d5f1b61001e65de459790444530245d47d4ee8e2d17716695283f21540bd7ac4f5a793a0d00bdf1d4 +875920b9bab4bc1712e6af89ae2e58e9928c22095026070b07e338421b554d9f96e549ac3706c6c8d73f502913a27553 +9455d192db7b039b3e8f0bc186c25ff07dfbe90dab911e3c62e3bd636db8019ed712cbb0ecd5cbb9a36c11034e102aba +8cb0b28e5d3838d69f6c12274d6b1250f8843938065d0665b347977fa3c1c685caef6930bae9483ed0d0a67005baad76 +94df2e14aae1ae2882ab22a7baf3dc768c4a72b346c2d46bfd93d394458398f91315e85dc68be371f35d5720d6ca8e11 +aacd94b416bfbeb5334032701214dd453ad6be312f303b7bec16a9b7d46ab95432a14c0fbf21a90f26aafb50ec7bb887 +b43d26963665244633cbb9b3c000cacce068c688119e94cc0dac7df0e6ee30188e53befff255977788be888a74c60fc2 +b40d67c9ad0078f61e8744be175e19c659a12065fe4363b0e88482b098b2431612e7c2fa7e519a092965de09ceafe25c +82cd4a4e547c798f89ce8b59687614aa128877e6d38b761646d03dc78f6cdd28054649fb3441bcd95c59b65a6d0dd158 +a058e9700f05cef6e40c88b154d66a818298e71ae9c2cf23e2af99a0a7dc8f57fbe529d566cb4247432e3c1dee839b08 +95c6f84406466346c0b4a2a7331ac266177fb08c493d9febb284c5ca0b141ccc17aa32407f579666b208fb187c0227dd +905d1d47a26b154f44d7531c53efbc3743ff70bd7dba50c9b9d26636767b0ae80de3963c56d4604399126f4ad41a0574 +83dfa11c520b4abaefe1b2bc1ce117806e222f373cd4fb724f3c037c228e3379d27a364e68faa73984ba73a0845f1b9a +a16e54786ba308a9c0241aff8f1bf785dece387d93bd74aa31de0969e3431479e2c0abebff9939a6644d2b0af44f80bb +81ac565212365176f5be1c0217f4e7c9fdbc9fe90f16161367635d52edcf57af79290531d2e8b585e1223d33febd957d +a296f4b09915e5d80ff7274dc3ffc9b04f0427e049ea4ef83dca91095275e8a260ef0335c7b6585953b62682da8c8e99 +a9150626208168a21ae871192ca9f11c1f7f6e41e8e02de00732de2324d0d69fe52f8762155c9913ee408a034552e49a +a42a56008ca340c6e9ff5a68c8778bb899ba5de9e7508c0cac355c157979a7ff6a6bd64f98b182114d3831cfa97ee72b +a4f05adf22c051812279258eea9eb00956b04ef095f2ca175f775ff53c710fb0020266adabd1dacaee814c4f1d965299 +967492e78ac0bceb8ad726ea0d2292b760043d16d64a6b1bb896e32630a7bf405c2b20e4e00842ae519a21697ff8db2d +adbf05e9b5931ae3dd24d105b5c523c221a486a4123c727069b9e295a5bc94f3e647a3c2cde1f9f45dbd89df411453c9 +a1759c0ebebd146ee3be0e5461a642938a8e6d0cdd2253ebd61645b227624c10c711e12615cd1e7ea9de9b83d63d1a25 +a4c5945d635b9efc89ad51f5428862aefe3d868d8fb8661911338a6d9e12b6c4e5c15a25e8cb4a7edc889b9fa2b57592 +aff127675ea6ad99cb51c6e17c055c9f8fd6c40130c195a78afdf4f9f7bc9c21eed56230adb316d681fc5cacc97187da +9071294e8ff05b246ff4526105742c8bf2d97a7e7913f4541080838ecfd2dbc67c7be664a8521af48dbc417c1b466a85 +990880b0dd576b04f4b4ce6f0c5d9ff4606ec9d3f56743ac2f469ac6a78c33d25c3105cf54f675e300ac68073b61b97a +a8d1a62ce47a4648988633ed1f22b6dea50a31d11fdddf490c81de08599f6b665e785d9d2a56be05844bd27e6d2e0933 +8ea5a6c06f2096ded450c9538da7d9e402a27d070f43646533c69de8ea7993545673a469c0e59c31520e973de71db1b4 +99d3a098782520612b98a5b1862ae91bcb338ab97d1a75536e44b36a22885f1450a50af05c76da3dd5ca3c718e69fdd4 +b987451526e0389b5fe94c8be92f4e792405745b0a76acd6f777053d0809868657ba630aa5945f4bd7ce51319f8996f7 +afffccc5ddd41313888a4f9fee189f3d20d8b2918aa5ad0617009ea6d608e7968063c71bd5e6a1d7557880d9a639328d +8ac51a02505d5cadfd158dde44932ab33984c420aeceb032ed1ee3a72770d268f9e60ccf80ce8494dfc7434b440daafd +b6543e50bd9c6f8e0862850c3d89835ddd96231527681d4ab7ae039c4a3a5a0b133a6d40cdb35c8a6c8dbb8d421d3e2b +a2ba901f4fde2b62274d0c5b4dbbea8f89518571d8f95ec0705b303b91832f7027704790a30f7d9d2cdafde92f241b3e 
+a6974b09280591c86998a6854a7d790f2a6fbe544770e062845cfc8f25eb48c58f5dfb1b325b21f049d81998029ad221 +890baeb336bbf6c16a65c839ffaab7b13dd3e55a3e7189f7732dbcb281b2901b6d8ba896650a55caa71f0c2219d9b70e +b694211e0556aebbe4baf9940326e648c34fda17a34e16aa4cefd0133558c8513ffb3b35e4ee436d9d879e11a44ec193 +97cf9eb2611d467421a3e0bfe5c75382696b15346f781311e4c9192b7bca5eb8eaf24fa16156f91248053d44de8c7c6f +8247f88605bd576e97128d4115a53ab1f33a730dc646c40d76c172ca2aa8641c511dddad60ee3a6fbe1bb15cac94a36c +ae7ecd1c4a5e9e6b46b67366bc85b540915623a63ab67e401d42ca1d34ae210a0d5487f2eef96d0021ebecfd8d4cd9a8 +aec5123fff0e5d395babe3cb7c3813e2888eb8d9056ad4777097e4309fb9d0928f5c224c00260a006f0e881be6a3bf8f +8101724fa0ce7c40ea165e81f3c8d52aa55951cc49b4da0696d98c9fafd933e7b6c28119aa33f12928d9f2339a1075d1 +a8360843bab19590e6f20694cdd8c15717a8539616f2c41a3e1690f904b5575adb0849226502a305baefb2ead2024974 +ade5cad933e6ed26bba796c9997b057c68821e87645c4079e38e3048ea75d8372758f8819cde85a3ab3ab8e44a7d9742 +ab1fe373fb2454174bd2bd1fe15251c6140b4ac07bda1a15e5eabf74b6f9a5b47581ef5f0dbd99fdf4d1c8c56a072af7 +b425e1af8651e2be3891213ff47a4d92df7432b8d8ea045bb6670caf37800a4cd563931a4eb13bff77575cbcae8bc14f +b274799fe9dd410e7aed7436f0c562010b3da9106dc867405822b1e593f56478645492dbc101a871f1d20acf554c3be6 +b01a62a9d529cc3156bc3e07f70e7a5614b8d005646c0d193c4feb68be0b449d02b8f0000da3404e75dbdfa9ca655186 +878b95e692d938573cdb8c3a5841de0b05e5484a61e36ea14042f4eadb8b54a24038d2f09745455715d7562b38a8e0df +a89e998e979dba65c5b1a9000ad0fd9bb1b2e1c168970f2744982781306bbe338857e2fac49c8cafda23f7cc7c22f945 +85880fdf30faed6acce9973225e8fe160e680a55fc77a31daacf9df185453ad0c0552eb3fd874698ad8e33c224f7f615 +ac28d20d4bbb35ba77366272474f90f0ed1519a0e4d5de737adee2de774ccd5f115949e309e85c5883dbc63daaa6e27b +a1758ac86db859e323f5231ad82d78acbe11d53d3ebf7e644e581b646eede079d86f90dc23b54e5de55f5b75f7ea7758 +ae4c0b84903f89353bf9a462370f0bf22c04628c38bb0caae23d6e2d91699a58bd064e3c2b1cbda7f0a675d129f67930 +95f21a099ffc21a0f9064d9b94ce227b3ff0a8c5a2af06ff5ee6b7f3248a17a8ca2f78cd7929ef1d0784f81eddefcd48 +8d06fbc1b468f12b381fd1e6108c63c0d898ddf123ea4e2e1247af115043c4f90b52796076277b722dd2b92708f80c21 +a300f39039d8b2452e63b272c6d1f6d14a808b2cd646e04476545da65b71a6e29060f879409f6941c84bde9abe3c7d01 +adecce1ccc5373072ba73930e47b17298e16d19dbb512eed88ad58d3046bb7eec9d90b3e6c9ba6b51e9119cf27ce53f2 +941a7e03a64a2885d9e7bee604ddc186f93ff792877a04209bbee2361ab4cb2aed3291f51a39be10900a1a11479282ca +acbcb1ab19f3add61d4544c5e3c1f6022e5cc20672b5dc28586e0e653819bdae18cda221bb9017dfaa89c217f9394f63 +b8d92cea7766d3562772b0f287df4d2e486657b7ab743ed31ec48fdc15b271c2b41d6264697282b359f5cb4d91200195 +957360ecb5d242f06d13c1b6d4fcd19897fb50a9a27eb1bd4882b400dc3851d0871c0c52716c05c6c6cf3dee3d389002 +abd2a23abbc903fbb00454c44b9fb4a03554a5ef04101b2f66b259101125058346d44d315b903c6d8d678132f30b1393 +ae9572beff080dd51d3c132006107a99c4271210af8fbe78beb98d24a40b782537c89308c5a2bddfdfe770f01f482550 +82c7e5a5e723938eb698602dc84d629042c1999938ebd0a55411be894bccfb2c0206ac1644e11fddd7f7ab5ee3de9fdc +aba22f23c458757dc71adb1ce7ef158f50fdd1917b24d09cfc2fbbcbe430b2d60785ab141cf35ad9f3d0a2b3e2c7f058 +8eff41278e6c512c7552469b74abedf29efa4632f800f1a1058a0b7a9d23da55d21d07fdbb954acb99de3a3e56f12df6 +8abd591e99b7e0169459861a3c2429d1087b4f5c7b3814e8cee12ecc527a14a3bdda3472409f62f49a1eb4b473f92dbf +82dcbff4c49a9970893afc965f1264fcab9bae65e8fb057f883d4417b09e547924123493501c3d6c23a5160277d22a8e +b5a919fcb448a8203ad3a271c618e7824a33fd523ed638c9af7cfe2c23e3290e904d2cd217a7f1f7170a5545f7e49264 
+96d6834b592ddb9cf999ad314c89c09bedc34545eeda4698507676674b62c06cc9b5256483f4f114cd1ed9aaec2fba5e +a4e878cf4976eb5ff3b0c8f19b87de0ef10cd8ec06fe3cd0677bd6be80ba052ff721a4b836841bdffb1df79639d0446c +8e15787a8075fd45ab92503120de67beb6d37c1cc0843c4d3774e1f939ac5ed0a85dad7090d92fa217bd9d831319021b +8506c7fea5a90cd12b68fdbbae4486a630372e6fd97a96eea83a31863905def661c5cdead3cf8819515afe258dbcd4d9 +952ef3bc16a93714d611072a6d54008b5e1bf138fd92e57f40a6efb1290d6a1ffcc0e55ff7e1a6f5d106702bd06807cd +a5f7761fa0be1e160470e3e9e6ab4715992587c0a81b028c9e2cf89d6f9531c2f83c31d42b71fca4cc873d85eba74f33 +b4811f0df11ff05bf4c2c108a48eece601109304f48cde358400d4d2fa5c1fdaaf3627f31cb3a1bdd3c98862b221720d +9207ad280b0832f8687def16ad8686f6ce19beb1ca20c01b40dd49b1313f486f2cb837cfbbf243be64d1c2ab9d497c3f +b18a8c1e6363fadd881efb638013e980e4edb68c1313f3744e781ce38730e7777f0cba70ea97440318d93a77059d4a2b +901faf777867995aac092f23c99c61f97eeadf4ac6bcb7791c67fa3c495947baef494b2aace77077c966c5d427abbf92 +a123281aca1c4f98f56cff7ff2ae36862449f234d1723b2f54ebfccd2740d83bd768f9f4008b4771e56c302d7bfc764f +8cffe1266468cad1075652d0765ff9b89f19b3d385e29b40f5395b5a3ad4b157eed62e94279ac3ec5090a6bad089d8b3 +8d39870719bc4ebbcecba2c54322111b949a6ed22bda28a6cea4b150272e98c9ded48cc58fc5c6e3a6002327856726ec +b3d482c00301f6e7667aaeaf261150b322164a5a19a2fa3d7e7c7bf77dc12fa74f5b5685228ab8bf0daf4b87d9092447 +801acb8e2204afb513187936d30eb7cab61f3fbb87bfd4cd69d7f3b3ddba8e232b93050616c5a2e6daa0e64cef6d106f +ac11e18adda82d2a65e1363eb21bda612414b20202ecc0e2e80cc95679a9efa73029034b38fd8745ce7f85172a9ab639 +b631d6990d0f975a3394f800f3df1174a850b60111567784f1c4d5bba709739d8af934acfa4efc784b8fc151e3e4e423 +aeda6279b136b043415479a18b3bbff83f50e4207b113e30a9ccfd16bd1756065fc3b97553a97998a66013c6ac28f3d8 +8840b305dc893f1cb7ad9dd288f40774ec29ea7545477573a6f1b23eaee11b20304939797fd4bcab8703567929ce93ad +963cc84505a28571b705166592bffa4ea5c4eeafe86be90b3e4ae7b699aaaca968a151fe3d1e89709fe0a3f0edf5d61a +8e1ec0d0e51f89afea325051fc2fa69ab77d6c7363cc762e470a9dfa28d4827de5e50f0b474c407b8c8713bad85c4acd +909f313420403cb36c11d392cf929a4c20514aa2cb2d9c80565f79029121efd5410ef74e51faba4e9ba6d06fcf9f1bd1 +b2992b45da467e9c327ac4d8815467cf4d47518fc2094870d4355eb941534d102354fbda5ab7f53fbf9defa7e767ca13 +9563b50feb99df160946da0b435ac26f9c8b26f4470c88a62755cdf57faebeefffff41c7bdc6711511b1f33e025f6870 +a2a364d9536cd5537a4add24867deec61e38d3f5eb3490b649f61c72b20205a17545e61403d1fb0d3a6f382c75da1eb3 +89b6d7c56251304b57b1d1a4255cb588bd7a851e33bf9070ee0b1d841d5c35870f359bc0fdc0c69afe4e0a99f3b16ec2 +a8ae1ee0484fe46b13a627741ddcdae6a71c863b78aafe3852b49775a0e44732eaf54d81715b1dca06bb0f51a604b7e2 +b814ecbfbc9645c46fc3d81c7917268e86314162d270aed649171db8c8603f2bd01370f181f77dbcbcc5caf263bedc6c +8e5d7cc8aad908f3b4e96af00e108754915fecebdb54f0d78d03153d63267b67682e72cd9b427839dca94902d2f3cda7 +8fc5ff6d61dd5b1de8c94053aef5861009cb6781efcca5050172ef9502e727d648838f43df567f2e777b7d3a47c235dd +8788eea19d09e42b0e3e35eb9bcd14f643751c80c6e69a6ff3a9f1711e8031bbe82ccd854a74a5cfcf25dda663a49a62 +95d441d8cd715596343182ddcecb8566d47eaa2d957d8aea1313bbed9d643a52b954443deb90a8037a7fa51c88eec942 +a15efd36ef72783ccdc6336ef22a68cc46b1ecec0f660cfe8a055952a974342bf30f08cb808214bce69e516ff94c14c5 +acc084d36907a16de09a5299f183391e597beaf9fa27d905f74dc227701a7678a0f5a5d1be83657de45c9270a287ec69 +b3fd385764356346061570beb760ccf3808619618fd7521eb0feadc55b8153ef4986ff0cbfcbd4153ad4ea566989d72a +91ec6b26725532e8edfda109daa7ce578235f33bd858238dfa2eb6f3cd214115b44cce262a0f2f46727a96b7311d32e1 
+96b867ccddb73afe1049bda018c96cfe4083fff5bb499e6a4d9fd1a88a325144f9a08cb0aee310e1bb4f6a5793777e80 +ad10c18465910152676f1bc6a40986119607b5c272488e6422cfda2eb31da741af13a50f5de84037348014a869c8e686 +86ade2dbc4cceb52b84afe1c874d1e3644691284c189761febc4804b520adf60b25817e46f3f3c08d2ab227d00b93076 +998b949af82065c709fc8f63113a9fecdd1367fc84fc3b88857d92321ba795e630ce1396a39c2e056b5acd206ee011d8 +8dec440bbd17b47dfd04e566c2d1b46f9133023b982fdc5eaeae51404bc83a593f8d10c30b24e13aec709549137cae47 +89436ff47431b99f037cddaee08bb199be836587a7db6ed740317888638e5f4bebbb86b80549edff89678fc137dfb40a +a8e9960746769b3f76246c82cd722d46d66625e124d99a1f71a790c01cec842bcf6c23c19cc7011ec972cedf54dc8a4c +980979dafedfd75ff235b37e09e17361cfdda14a5ac3db0b90ed491abfd551916016b2254538da7f4b86ece3038b1b1c +8ec340ca7654720bb9d2f209985439ebbc3f9990ef27e7d7ae366e0c45b4ed973316943122119604ea9a87fc41ebd29f +ab24440a40ab238d8cd811edb3ef99948ae0f33bf3d257b22c445204016cce22b6f06a1ca979fa72a36c4ddedc2b3195 +a1bcd2473ac7cfebfa61c10e56cae5422c6b261a4a1be60b763fcbcdf2eae4ccf80695f09b062b6cf5654dfab0ee62a5 +9027a613ce7bd827110a3a0e63e83f652e9bc7f4ce8da26c38b28ee893fd0c38bdb20f63a33470a73cb77f776244ab4a +86911cc8aeb628197a22bf44d95a0b49afb8332c38857fba8e390c27c527b8b45335e22b0f2e0a3395c16ced3c1ed2e8 +8f0529a330a3e9967dce09357d774715fd305bd9e47b53b8b71a2a1303d390942a835aa02fb865a14cfed4f6f2f33fe6 +b71ec81a64c834e7e6ef75b7f321a308943b4bad55b92f4dbaf46658613cebf7e4b5b1bc7f1cdc5d50d1a2a0690e2766 +98d66aaed9fb92f4c7bb1b488ccbca5e570aa14433028867562a561d84f673ac72e971cbe2cb3cbbb0a702797dc45a7e +8380aa94d96c6b3efd178de39f92f12ca4edd49fe3fe098b2b7781e7f3e5f81ee71d196fb8e260d1d52f2e300e72e7bc +8c36296ff907893ac58cecadd957b29f5508ae75c6cc61b15ae147b789e38c0eace67963ae62eff556221b3d64a257a2 +97e17676cbc0f62a93555375e82422ee49bc7cf56ad6c3d69bb1989d1dc043f9f7113d0ed84616dde310441b795db843 +a952229615534c7e9a715409d68e33086cdaddf0aec51f4369c4017a94ec3d7113a045054d695fb9d7fd335527259012 +817b90958246f15cbd73a9679e10192ca7f5325b41af6388b666d8436706dea94eafffbc3b8d53057f67ad726dbcd528 +95776e378c8abd9223c55cd6a2608e42e851c827b6f71ad3d4dc255c400f9eccf4847c43155f2d56af0c881abef4acfa +8476c254f4b82858ecbe128ed7d4d69a6563fd9c5f7d4defc3c67e0bfa44e41cfd78b8e2a63b0773ce3076e01d3f6a7d +a64b0b189063d31bcae1d13931e92d5ab0cfc23bf40566ac34b5b8b711d0e7d941102e6beb140547512e1fe2d9342e6c +9678460acff1f6eae81a14d5c8049cdcd50779a8719b5c5861762a035b07f7fa1b1ada8b6173f9decf051fd5a55bebd8 +88398758ce86ed0388b13413a73062adb8a026d6b044cd1e7f52142758bed397befee46f161f8a99900ae6a2b8f6b89f +a7dfaf40637c81d8b28358b6135bd7ad9cc59177bd9bc8e42ba54d687d974cdf56be0457638c46b6a18ceaa02d3c53f3 +b0e885e5d48aa8d7af498c5e00b7862ed4be1dad52002f2135d98e8f2e89ca0b36cf95b3218aad71d5b4ada403b7045b +803b0e69a89e8de138123f8da76f6c3e433402d80d2baba98cde3b775a8eda4168530a49345962c4b25a57257ba9f0a7 +8ce6ef80dadb4b1790167fbc48be10ef24248536834ff2b74887b1716c75cb5480c30aa8439c20474477f1ac69734e61 +824764396e2b1e8dcc9f83827a665ef493faec007276f118b5a1f32526340b117c0df12bea630030a131bf389ec78fc3 +874edb379ce4cc8247d071ef86e6efbd8890ba6fcb41ea7427942c140347ebf93e8cf369d1c91bd5f486eb69b45bce70 +adadcb6eb4cafa1e2a9aef3efb5b09ffa2a5cf3ce21f886d96a136336be680dabc0a7c96ec327d172072f66d6dcdbb39 +b993591b280e1f3527f083d238a8f7cf516d3cf00c3690d384881911c1495192a419b8e37872a565ce8007eb04ebe1b6 +b125faaeca3f0b9af7cb51bb30a7c446adbb9a993b11600c8b533bff43c1278de5cdda8cb46a4df46f2e42adb995bce8 +a7efe1b57326b57c2c01720d4fdf348d6a84d35f229d32a8f2eb5d2be4e561ef8aea4d4d0bcfcbf17da10a8e49835031 
+a6bd4f5a87574b90a37b44f778d5c7117d78eb38f3d7874bad15ae141b60eed4ab0a7281ed747297f92e0b3fe5f9cafa +94b5e3067ca1db3c4e82daf6189d7d00246b0360cb863940840358daa36cb33857fde4c01acd0457a90e15accee7d764 +a5ff3ab12197b8a07dd80222a709271ab3b07beba453aacbaf225cfb055d729e5a17a20f0ff9e08febf307823cba4383 +a76dd8aa2b6a957ed82ecec49b72085394af22843272f19360a5b5f700910c6ec65bf2a832e1d70aa53fd6baa43c24f6 +8dfcbe4143ae63c6515f151e78e6690078a349a69bb1602b79f59dc51dea7d00d808cf3e9a88b3f390f29aaae6e69834 +8c6134b95946a1dd54126952e805aeb682bc634c17fe642d5d3d8deffffd7693c90c4cd7d112890abfd874aa26736a93 +933531875561d327c181a2e89aaaac0b53e7f506d59ef2dfc930c166446565bd3df03bab8f7d0da7c65624949cfbae2f +ac6937c5e2193395e5bb69fd45aa6a9ae76b336ea7b6fd3e6aeac124365edcba7e918ec2c663fb5142df2f3ad03411a6 +a8f0f968f2a61d61d2cf01625e6ac423b447d3e48378ea70d6ff38bc98c42e222fe3cbcb04662b19973a160dc9f868a2 +94100a36f63d5c3a6cfb903c25a228389921684cc84f123390f38f90859f37ec9714942ffe6766f9b615101a3c009e43 +b5321b07f5b1eb2c1c20b0c8ab407f72f9705b55a761ec5176c5bcc6e585a01cae78546c54117ca3428b2b63793f2e65 +9922f61ed6763d1c4d12485c142b8ff02119066b5011c43e78da1ee51f10a1cf514329874061e67b55597ca01a7b92ab +a212eb2d72af0c45c9ef547d7c34ac5c4f81a4f5ec41459c4abd83d06ec6b09fdab52f801a2209b79612ae797fa4507b +8577d2d8f17c7d90a90bab477a432602d6918ca3d2af082fbb9e83644b93e21ca0bced7f90f6e9279eaa590f4e41dc4d +9002d424e3bebd908b95c5e6a47180b7e1d83e507bfb81d6ad7903aa106df4808c55f10aa34d1dccad3fab4d3f7a453e +b9050299bf9163f6ebeff57c748cb86f587aea153c2e06e334b709a7c48c4cbfba427babf6188786a0387b0c4f50b5ce +852ae1195cc657c4d4690d4b9a5dea8e0baaa59c8de363ba5fccd9e39ec50c6aa8d2087c8b7589b19248c84608f5d0a8 +a02ff5781417ca0c476d82cf55b35615f9995dc7a482124bc486e29b0b06a215fbe3e79228c04547c143d32cd3bac645 +8d7bc95e34bc914642e514a401448b23cf58bce767bab1277697327eb47c4a99214a78b04c92d2e3f99a654308b96e34 +adb28445d3b1cc7d4e4dd1f8b992a668f6b6f777810465fdab231fd42f06b5bada290ba9ae0472110366fad033da514e +a0c72b15a609f56ff71da17b5b744d8701af24b99fbc24a88588213864f511bfa592775e9ab4d11959f4c8538dc015b8 +933205a40379d5f5a7fb62cda17873fbbd99a0aaa8773ddf4cd2707966d8f3b93a107ebfe98b2bb222fe0de33ef68d03 +90690c1a4635e2e165773249477fc07bf48b1fd4d27c1b41a8f83a898c8d3763efb289867f8d6b0d354d7f4c3f5c7320 +99858d8c4f1be5a462e17a349b60991cb8ce9990895d6e42ae762ce144abc65b5a6f6e14df6592a4a07a680e0f103b2a +b354a7da06bd93fb5269e44925295b7c5049467b5cacce68cbb3cab60135b15e2010037a889cb927e6065053af9ccb77 +af01fc4ac396d9b15a4bbd8cc4fe7b30c32a9f544d39e88cdcb9b20c1c3056f56d92583a9781ddb039ec2eeda31fb653 +a8d889fb7155f7900982cf2a65eb2121eb1cc8525bbee48fae70e5f6275c5b554e923d29ebbd9772b62109ff48fb7c99 +b80edae6e26364c28749fd17c7c10eb96787053c7744a5cc6c44082ae96c5d3a4008c899a284f2747d25b72ecb9cb3d0 +b495b37503d77e7aafc226fca575e974b7bb6af2b7488372b32055feecc465a9f2909729e6114b52a69d8726e08739cb +a877f18b1144ff22e10a4879539968a01321cecde898894cbe0c34348b5e6faa85e1597105c49653faed631b1e913ec7 +8c235c558a065f64e06b4bb4f876fe549aab73302a25d8c06a60df9fad05843915ac91b507febca6fe78c69b51b597de +b4c31398b854ccc3847065e79329a3fdae960f200c1cce020234778d9c519a244ff1988c1fbc12eb3da2540a5fa33327 +b7bd134b3460cb05abf5aed0bc3f9d0ccbfac4647324bedbdf5011da18d8b85dc4178dd128f6ddbe9d56ea58f59d0b5d +92594c786c810cf3b5d24c433c8a947f9277fe6c669e51ceb359f0ae8a2c4e513a6dad1ae71b7ded3cdca823a51e849b +b178535e043f1efcce10fbec720c05458e459fdda727753e0e412ef0114db957dc9793e58ec2c031008e8fb994145d59 +b31da7189abf3e66042053f0261c248d4da142861bfd76a9aced19559be5284523d3e309ef69843772b05e03741a13fe 
+b190a8c1a477e4187fecff2a93033e77e02de20aae93dda1e154598814b78fdf8b9ff574c5f63047d97e736e69621462 +98234bd1d079c52f404bf5e7f68b349a948ec1f770c999c3c98888a55d370982bfa976e7e32848a1ebb4c7694acc1740 +99b9eeb33a6fb104bba5571a3822ebe612bf4b07d720d46bde17f0db0b8e8b52165f9b569be9356a302614e43df3e087 +a1e3915b0dd90625b424303860d78e243dda73eecd01cba7c33100b30471d0a1ec378c29da0f5a297008b115be366160 +975118bf6ca718671335a427b6f2946ee7ece2d09ccfb1df08aa1e98ff8863b6c8b174c608b6b2f4b1176fb3cbc1e30d +903cb1e469694b99360a5850e2ca4201cad23cfccce15de9441e9065eb3e6e87f51cba774ab9015852abd51194c25e57 +821f7ff4d0b133e3be4e91d7ff241fa46c649ff61fc25a9fdcf23d685fe74cf6fade5729763f206876764a3d1a8e9b24 +a1ee8db859439c17e737b4b789023d8b3ce15f3294ec39684f019e1ea94b234ec8a5402bc6e910c2ed1cd22ff3add4de +af27383148757bdf6631c0ea8a5c382f65fc6ab09f3d342a808ca7e18401e437cd1df3b4383190fdf437a3b35cbcc069 +8310551d240750cef8232cd935869bad092b81add09e2e638e41aa8a50042ce25742120b25fb54ebece0b9f9bdb3f255 +8b1954e0761a6397e8da47dc07133434ebe2f32c1c80cd1f7f941f9965acdf3d0c0b1eb57f7ff45a55697d8b804e1d03 +8c11612381c6be93df17851d9f516395a14a13c7816c8556d9510472b858184bf3cc5b9d14ded8d72e8fb4729f0b23ba +b413ac49121c7e8731e536b59d5f40d73a200c4e8300f8b9f2b01df95a3dc5fe85404027fc79b0e52946e8679b3a8e43 +8451e5c1c83df9b590ec53d1f1717d44229ed0f0b6e7011d01ea355d8b351f572866b88032030af372bd9104124df55a +8d0a5c848ec43299bc3ea106847ed418876bc3cd09b2280c2a9b798c469661505ed147a8f4ffba33af0e1167fdb17508 +a6aa97a1f10709582471000b54ec046925a6ad72f2b37c4435621c9f48026d3e332b8e205b6518f11b90b476405960a9 +97696635b5a2a6c51de823eea97d529f6c94846abb0bd4c322b108825589eba9af97762484efaac04ee4847fb2fb7439 +92fd142181fe6ca8d648736866fed8bc3a158af2a305084442155ba8ce85fa1dfb31af7610c1c52a1d38686ac1306b70 +ae3da824ecc863b5229a1a683145be51dd5b81c042b3910a5409ca5009ba63330e4983020271aa4a1304b63b2a2df69e +aecc0fe31432c577c3592110c2f4058c7681c1d15cd8ed8ffb137da4de53188a5f34ca3593160936119bdcf3502bff7c +821eac5545e7f345a865a65e54807e66de3b114a31ddeb716f38fe76fdd9d117bee0d870dd37f34b91d4c070a60d81f4 +91a02abb7923f37d9d8aa9e22ded576c558188c5f6093c891c04d98ab9886893f82b25b962e9b87f3bf93d2c37a53cb9 +99a96f5d6c612ee68e840d5f052bf6a90fecfd61891d8a973e64be2e2bdd5de555b1d8bffbd2d3c66621f6e8a5072106 +b1d5ec8f833d8fbb0e320ff03141868d4a8fff09d6a401c22dbefadbb64323e6d65932879291090daf25658844c91f2e +a06afd66ebc68af507c7cf5ab514947ca7d6ccc89fb2e2e8cb6e5ae0f471473e5fba40bb84d05f2c0f97c87f9a50cb73 +83de3ca182bcf1eac0cc1db6ad9b1c2a1ecd5e394e78add7faa36e039a1b13cb0d1d2639892489df080fbf43e5cef8d5 +adf77fc7b342ff67a2eddaa4be2f04b4e6ceaca8ea89a9fc45cc892fcce8ac3cf8646cfa5aab10ac9d9706ce4c48a636 +8509a430ef8dc9a0abc30ef8f8ccdb349d66d40390fb39f0d3281f3f44acb034625361270162822ef0743d458a82b836 +8350fc09e8617826f708e8154a3280d8753e7dbbcf87e852f9b789fdbeb10bf3fed84fb76edd7b8239a920c449e2f4b7 +a2e7a29da8391a5b2d762bf86cb6ae855cdfad49821175f83f4713dd0c342a0784beba98d4948356985a44d9b8b9d0f7 +a99c50a1a88b8efe540e0f246439db73263648546d199ef0d5bc941524a07d7e02b3ef6e5b08dc9e316b0b4c6966823e +b34ba55136c341f4ca2927080a07476915b86aa820066230903f1f503afebd79f2acf52a0bc8589b148d3a9a4a99f536 +af637be5a3e71c172af1f2644d3674e022bc49c393df565ea5b05ce6401a27718c38a9232049dd18cbd5bf4f2ce65b32 +a2972ba7bfa7f40c2e175bb35048a8ef9bc296d5e5a6c4ca7ab3728f4264d64f2d81d29dce518dc86849485ff9703d7d +8c9db203e8726299adeb331d6f4c235dc3873a8022138d35796fb7098887e95e06dcfad5d766ceaa2c4fb0f8857f37fa +a82bfbaa9a6379442109e89aad0c0cfc6a27d4a5db5480741a509d549c229cb847b46a974dde9f1398c6b3010530f612 
+b2d8ef6e091a76dfc04ab85a24dbe8b5a611c85f0ed529a752c2e4c04500de5b305c539d807184e05f120be2c4a05fc3 +8c6ffc66a87d38cea485d16ee6c63ce79c56b64ae413b7593f99cc9c6d3cd78ef3fa2ab8a7943d2f0e182176642adadb +acbc92de68b2b04e3dc128109511a1cbe07518042f365d5634e8b651cb1ac435ea48eeeb2b921876239183096ef6edee +979c4e1165e0ecfa17ed59fb33f70797e000ddbb64acf5fc478cccde940451df051e51b6449c5b11a36afa7868af82e3 +a5a017c5a94952aeae473976027124231abe50460cec4db3ebeb8b1290525776be7c15d108b749c2a1e4b018de827915 +8b6922ab1db925eed24b2586e95f5c709b79d2408a8fa2a71057045ead3ebdd0cc72bee23d9064cd824166eda1e29318 +89a991087a0b5805fcc5c6c5f6ac27e100da0d3713645aa9c90114e68ca9f185f21155eb7645a2c6c0616a47291fe129 +ae6ef954c942cbfd37f8f2dc58a649e2584d6777e7eb09ae6992ccde283ac4f4ec39e3a5cda7f7c60f467fb308d37f08 +9335ca5ccac59b39eb2bcef09c54b778ebb690415ba13fe5c8e4b6091d9343a01cc9baa6228cefd8dba98f0710f714da +a0211c9328be2b46f90ff13614eeffb4c1285e55580db3874610653219926af1d83bda5b089fd37a7c7440a0f1d94984 +a82e097dfa782c40808fac5d8ed1c4fccf6b95ef92e22276fd8d285303fcf18c46d8f752595a658ee5294088b9dc6fc0 +ad108fcd0ead65f7f839a1337d520f5bd0cb665ee7100fc3f0563ff1d2959eb01617de8eb7a67c9b98b7b4892082acdb +b89e6aeabcb3ee3cbf12e3c836bab29e59d49676bcf17a922f861d63141076833f4149fe9e9c3beed24edfacdf1e248b +8477501bd91211e3b1f66c3bfd399ef785271511bc9366366ce95ec5ea95d9288ab0928a6b7887aba62de4da754d3eaf +aeec40c04b279096946b743ad8171bf27988405e1321c04894d9a34e2cbd71f444ff0d14da6cda47e93aa6fe9c780d50 +a703bd2d8a5c3521a8aad92afef5162aed64e9e6343d5b0096ca87b5b5d05e28ed31ba235ab1a626943533a57872dd01 +b52d9dfc12c359efb548d7e2b36ddedaefdec0ef78eda8ac49a990b3eb0ed7668690a98d4d3c7bec4748a43df73f0271 +af887c008bad761ee267b9c1600054c9f17f9fc71acfe0d26d3b9b55536bca5c8aebe403a80aa66a1e3748bb150b20ef +ad2f7a545ef2c2a2978f25cf2402813665c156bab52c9e436d962e54913c85d815f0ba1ce57f61e944f84d9835ce05ea +91a0a9b3cfd05baf9b7df8e1fb42577ec873f8a46bb69a777a6ac9f702735d6e75e66c9257822c781c47b9f78993a46b +939fdc380fb527f9a1ddecf9c9460f37e406cd06c59ce988e361404acbfcb6379f2664a078531705dbc0c375d724137b +8bbbe5d5a0d102b8e0c8a62e7542e13c8c8a6acb88859e78d8e1d01ec0ddff71d429fcb98099e09ff0aa673c8b399dc4 +b67a70e4ef138f48258f7d905af753c962c3cc21b7b8ae8b311a2356c4753f8cd42fdee09ac5ed6de31296ead88c351a +8d21539e7dca02a271ce7d16431773bbe30e6a03f5aff517132d34cdd215ad0da2f06aa4a2a595be489234b233e0852e +892ae11513f572cc5dc8b734b716bb38c0876e50e5e942631bb380b754e9114c34b0606740301e29b27d88439fb32071 +a8780dc9faa485f51b6f93a986bc4e15b166986b13d22ec2fefc6b25403b8b81c15cc9ac0025acc09d84932b15afa09b +b01af013360cd9f2bb9789a2b909c5e010fe6ff179f15997dee1a2ba9ef1ccec19545afdecfcb476f92fcdd482bb2b5a +b5202e5d5053d3af21375d50ad1ccd92538ef9916d17c60eb55c164767c3c74681886297b6f52e258c98d0304d195d3d +8f6adbcfbb0734bf3a4609d75cf2e10f74ed855a8b07cf04ac89a73d23b2e3e5cf270a1f2547b3d73e9da033a3c514b0 +8abe529cd31e4cb2bd75fa2a5e45bd92cbe3b281e90ffc7dea01ba0df17c9a3df97a3fde373cce5d25b5814cf1128fed +b8bbf51187bb3bb124da3870e2dfecb326f25a9383e5cc3323813487457010b9055811669c3da87105050825dc98a743 +a5c83875fe61ebbdd3fd478540d7e5a1ad0f8c790bad0b7dd3a44831e2c376c4fffbc6b988667afa1b67bfaa2dbbb256 +a0606b3062e4beba9031ba2a8e6e90aa5a43ba7321003976e721fd4eedb56486f2c5b10ba7a7f5383272f4022092eacb +b485cc5e001de6bd1bbc9cd8d777098e426d88275aaa659232f317352e1ddff3478262d06b46a573c45409bc461883e1 +916449580b64a9d8510e2f8c7aee0b467a0e93b11edc3d50725bcbc3ca53c2b8bb231fdc0fc0ed5270bf2df3f64750d9 +b2e687caa9f148c2b20a27a91bada01a88bff47faaf6ed87815db26bb6cdd93672199661654763a6b8b4b2012f59dcca 
+b6933f7f9dabc8fb69197571366ac61295160d25881adf2fcc8aaabc9c5ed7cf229a493fd9e2f1c2f84facd1f55fee84 +b01eb8b2cf88c75c3e31807cfc7a4d5cafded88b1974ba0a9d5aaeda95a788030898239e12843eda02873b0cabe30e2b +a3ca290fa6ce064514a3431b44ecdb390ef500629270202041f23bc2f74038147f338189c497949fb3126bae3a6e3524 +93b0f8d02bd08af74918b1c22131865aa82aba9429dc47f6b51354ba72e33a8b56684b335a44661aa87774931eb85974 +81eebeb9bd92546c37c98e0a5deba012c159f69331a89615cf40c5b95c73dcdbf3ceb46b8620d94ff44fcdad88020c1e +b350e497932382c453a27bb33d2a9e0dbadf4cd8a858b6b72d1f3a0921afc571371e22b051b97da3bb08694c4ca3a4e8 +8c7052f63ba16f14fa85d885aa857d52f04b3a899a4108493799c90c0410de7549be85bec1f539f1608924668df48e5a +b397574d1fb43de0faaea67d1d9348d67b712b1adce300d6dc497bca94e0994eef8707c285c5c9ac0a66022655a8420b +a934661d2168ae1bd95b1143c2e5c19261708aeb795abad8ec87f23dc1b352fa436de997ebb4903d97cb875adb40dc2b +acf535fa1b77255210e1b8975e0e195624c9e9ffd150286ccd531a276cadc12047a4ded6362977891e145a2bd765e6b9 +8cc32356015d7fd29738dcc13c8008cdbe487755dd87d449ab569c85d0556a1ec520dbce6c3698fc413d470c93cb0c92 +8787c7b3b890e0d3734ac1c196588cacf0a3bde65e2cf42e961e23dbf784eef14c07337d3300ed430f518b03037bd558 +99da90994030cbc2fb8a057350765acac66129a62514bbd3f4ec29d5aab8acdd5f4d69ca83efe7f62b96b36116181e79 +a306424f71e8b58dfa0a0564b2b249f0d02c795c30eee5b0ad276db60423210bba33380fb45dbe2c7fedd6ee83794819 +b207a35d31ce966282348792d53d354bbd29ac1f496f16f3d916e9adbf321dc8a14112ca44965eb67370a42f64ca1850 +89e62e208147a7f57e72290eefccb9d681baa505d615ca33325dfa7b91919214646ca9bdc7749d89c9a2ce78c1b55936 +ac2d0ec2b26552335c6c30f56925baa7f68886a0917e41cfbc6358a7c82c1cb1b536246f59638fb2de84b9e66d2e57eb +8f1487659ecc3b383cebc23a1dc417e5e1808e5c8ae77c7c9d86d5ab705e8041ce5a906a700d1e06921f899f9f0ee615 +a58f1d414f662f4b78b86cae7b0e85dfddae33c15431af47352b6e7168a96c1d307d8b93f9888871fc859f3ed61c6efc +94f3626a225ac8e38a592b9c894e3b9168f9cf7116d5e43e570368ee6ee4ab76e725a59029006a9b12d5c19ddce8f811 +b5986e2601ad9b3260e691c34f78e1a015c3286fdd55101dcef7921f6cbcc910c79025d5b2b336d2b2f6fd86ee4e041e +b6e6798ddd0255fbe5cb04a551a32d4c5d21bdfd8444ff2c879afe722af8878d0a3a2fe92d63936f1f63fea2d213febf +86bea9bfffef8bc11758f93928c9fdfae916703b110c61fa7d8fe65653f8c62c6fecd4ff66a1f1a7f3c5e349492e334c +9595a4606284569f4b41d88111320840159fd3b446e00ec8afd7ddaa53dd5268db523f011074a092f8e931fc301a8081 +83b540a6bc119bf604a7db5f6c0665c33b41c365c12c72ca4fa7b0724115bbb0ff1ae38532c3356e8bb3ac551285929f +92c6daf961ca4eb25293e1794cf85cda4333cf1c128207af8a434e7e0b45d365f0f5baaefc4ebd5cd9720c245139c6e2 +b71465f3d7dba67990afc321384a8bb17f6d59243098dbed5abd9a6ffc7a3133b301dd0c6ca3843abbaa51d0953abbed +b15d93482d2ee5b1fec7921fcc5e218c1f4a9105a554220a4fb1895c7b1d7a41f90bbf8463d195eecf919fcbe8738c51 +a79c98e70931ffd64f4dcf7157fbae601a358261e280fe607eb70cef7d87f03efa44cf6ba0f17fbb283a9c8a437d2fdb +9019d51a6873331f8fe04cb45e728a0c8724a93d904522a9915c748360ddf5cdbf426a47b24abf2005295ed2a676cbf0 +b34cc339fec9a903a0c92ce265e64626029497762ff4dcaaf9bb3994298400ce80f4fb7dbe9ec55fe0c4a522c495cb69 +8fda9be7abfe3b2033cad31661432300e2905aef45a6f9a884e97729224887a6ec13368075df88bd75c11d05247bef15 +9417d120e70d6d5ca4b9369cba255805b5083c84d62dc8afec1a716ead1f874c71a98ad102dac4224467178fe3228f62 +a0a06b64867eebb70d3ce8aaa62908a767fb55438a0af3edf9a8249cd115879cde9f7425778b66bb6778cb0afeb44512 +a44309d3e1624b62754a3a4de28b4421f1969870f005ac5dc7e15183fa5b3ad182bcd09cca44924e03fbdb22f92f8cf8 +aea80f1c3a8fc36cfb5c9357d59470915370b2bec05f51f1d0e1d4437657e2303ba2d1ac3f64cf88f2df412dff158160 
+b3f1557883d91b24485123d2f3ae0fce65caa533c09345ae6b30d2ac49953acee61c880c57975be7b4f5558d3a081305 +b52cb1e56f0d147cfb58528b29c7a40bab7cfc9365f2409df7299bfc92614269ff9de3cb2500bbc4909f6a56cf4b9984 +aa4f8fd0f5f87c177ee7242f7da76d352db161846cd31523a2100c069d9e4464170eec0bffc6d4da4f9e87017b415dbd +b5b61f52242985c718461a34504f82495d73cbb4bc51f9554b7fe9799491f26826d773656225f52a1531cd5bd6103cde +ad12ba9697804ede96001181c048f95b24ba60761c93fb41f4b4a27e0f361e6b1434e9b61391bacaf0705fdaa4a3a90e +9319286cbda236f19192ae9eb8177e5a57a195c261082ba1385b20328fb83ed438f29d263dddae2f5278c09548830c4a +88b01ee88c3a7ae2c9f80317dddbaa2b7b0c3a3c23828f03ff196e244500410c9ac81c2e2d3e1f609d4b36ee1732738c +8e31f30600a9d629488d44a008c821c3c57f13734eaee5a19f0182a2de9e538fff7d982980d7fcc725c969f29f7c2572 +b215740eea98b4bb14197a803a8975700ad2f25a25ef3628eae10166d56c823301f6dd62ce3f9ebf2d42d1f33d535004 +8fb0fdb253d4bcc6693642779be13a5b816189532763dfd7da868cfacfdb87cb5ebe53b18b69dfd721f8d4baf3c1d22d +8cdd050a447f431ff792156d10381aaf83c6634a94b614dd5b428274538a9cc1f830073533b4fd0a734d6dd4f8d9c4ce +81b01ee8c72ac668ad9dd19ead2d69cac28c3525e613e036e87aa455c2da9651cc8fcc97c451a8c8a071a4eb69623cd1 +8d9e02dc9ac83f861b3745bd69216232144c47cb468a7dbc49083ed961f978e34265b3f42c400339120bdc4644fe5711 +89e9410455b34cba9db0a5ea738e150fae54dd000d61e614f3274a6c8102ba7cd05b0936f484a85711ad9da7946f51ea +91f9d4949678f8e6f4b8499899818bdd0f510da552b5d79d8e09bf3b69d706ab36524b5e86d3251318899b9223debf6b +8b3c38eec7e1926a4be5e6863038c2d38ab41057bcfa20f2b494e9a0c13bc74c3a44c653402eb62a98e934928d0ebccb +a5cfe465bfbf6e8bfbd19d5e2da2fc434bd71acd651371087450c041aa55e3c4f822361e113c6c3d58646ed3ba89d6da +918665b8810bcb8d573ca88b02a02c62eaa5a4a689efb5c564b0c9183f78144e75d91fd1603e17d2c77586cbe5932954 +997dace0b739aeb52ba786faae5bdf1d48630a90321f9ceebfa9e86d189a3d79d7b04e459ac8e4adcfe83a5ce964eb1c +a5a1ca9f0ccc88017a616d481d912aab3f0e154b673f1131c5d9c9c3f5f147d25b6392b2c31e49f7bb7eb2697d05dbec +a76e99bec509eff01bf6767a06ac97ebc6671cb58bc3d4acc2803580a874885453dbba2e1bba26e45f8d2bda5f688860 +956c1362c8123c5d9ebff7049e851235d69fa645f211ef98e2b6564f2871114a12224e0ec676738d77d23c709dd28a6c +885efede83b1a3e96417e9f2858ab0c7a576fc420e8f1f26cabf3b1abeec36bcaa63e535da177847f5e0afdb211bf347 +affca2257f292a2db52f8b1bab350093f16f27ef17e724728eeaec324e2513cd576f6d2e003cc1c6e881334cb2e8bf22 +8dac963d34dcc9d479207a586715e938c232612107bb2d0af534d8da57ad678555d7c1887fadca6551c4f736ffa61739 +b55e600a6bbde81f5a0384f17679d3facb93a7c62ca50c81a1d520cf6e8008ac0160e9763cb2ca6f2e65d93ca458783b +9485e6c5ab2ebfb51498017e3823547b6ab297d818521ceac85cd6c3aa2d85ae075a0a264ae748fc76ce96a601462ffa +b4d8abca786c0db304a6634fba9b2a40d055c737ed0f933e1739354befdae138dae3c8620a44138f50ebeaf13b91929f +8bde7ca39c7bda95b1677a206b16c3a752db76869ea23c4b445c2ff320f2ee01f7358d67a514982ee3d1fb92b7bd7229 +8f8cd0acc689b6403ee401383e36cae5db2ff36fc2311bbadf8ebb6c31cbcc2ca4ffac4c049da5ba387761ef5ec93b02 +a06f42d5f69a566ff959139c707355bbf7aa033c08d853dce43f74a9933e6d7b90e72010ef3fcb3d12e25852343d1d31 +b10ece7cf6b69a76dba453b41049db0cdf13d116cf09c625312b150ee7437abd71d921eda872403d7d7ce7af1e6dccb7 +a3d820318e0f3b54fba7a4567912a82d6e6adf22b67cfc39784683a8e75f77538e793d9708aae228fa48a71abb596195 +8758fad55b68a260bea3bd113e078fd58d64a92f7935ff877f9f77d8adc0994b27040cfc850126c7777cfdfb2428a3e5 +b504913ee96c10f00b848cd417c555a24bc549bf5c7306140eff0af2ada8cb5e76bed1adb188e494332b210fbf24e781 +a00e019a40acc7aab84c1cc27c69920ad7205c2a3dc9e908a7ef59383695c9cb7093c4bcbc2945aab2655119552e3810 
+b1000b4c4f306672e39d634e5e2026886a99930d81b8670a5d4046db9621e44997c4b78f583374a09c60995f18a6fd4f +a6c5053c4e748540ad2b622c28896c9d4ca3978ca4784ac8f09da5314a245f5cdc5d6203c84e6e0bcb3081829720a56d +8e37e67a70205a5c7da95de94ac4d0ebd287c1c9922d60c18eec1705030dfcbf74ae179e377c008bf5a8bc29c7c07cce +a66bd7c0243319b553d5cb7013f17e3504216e8b51ba4f0947b008c53bcb6b4979286b614a4a828ee40d58b5ef83e527 +97e2110b0fb485508a2d82ecc2ce1fbe9e12e188f06c7ef2ac81caeeb3aca2c00e5e6c031243b5ca870a9692e1c4e69b +8734ce8bbc862e12bea5f18d8a8d941d7b16a56ef714792fed912ca9c087497e69b6481fdf14efe1f9d1af0a77dac9b1 +b441dddac94a6a6ae967e0e8d7ab9a52eb9525fb7039e42665e33d697e9a39c7dcef19c28932fb3736e5651d56944756 +918b8997f2d99a3a6150d738daed2ff9eb1f5ed4a1c432d18eab4a898297f7ffbffd1e4ae9037acf589b1cd9e1185ef6 +a0247b8ac4d708cf6b398dc2d5c127a291d98e8bef5f195f820c4fddb490574ba4f62647c2d725237a3e4856eec73af0 +b45636e7e0a823c2a32e8529bb06fcccfd88e9964f61201ee116279223ed77458811d1b23bcb6b70508d16d4570a7afb +a99c1188fa22b30b04fda180d2733586ea6ef414618f1f766d240c71f66b453900d3645541c019361027aebe0a0f305f +b4c2f758e27fe233f7e590e8e0c6de88441164da3fcd5211a228318d3066dfdafc1d40246dd194f2b597f6fe9600b3d7 +972530819445b11374c3043d7855d5f1d3c4922b3b205d0bf40162c51605375dd0b61f49cd7f3d39a533a86a13005989 +992b533a13e5d790259bfdfdf1074f84a5e5a0a0d7be9cd6568cdc1662524f1a6666a46da36cea3792ba6707850f4d86 +9875d130457e04dc6ea2607309bfbb900ad3cb5f3e0574f808d27b20cbf6f88389d87dca19998680c5bc30d1df30a41b +adea8494a69e83221edf360ab847272b5c47eba5404665fb743d98c0682732c30085ae3ec82bc1e8e4aba8454c9b1849 +887d4c624ce05e224216c5f6fa13c5741012ac33330bc291754782f0bfe668decdc98c0e43a1ce28323effe6b639f477 +ab6b167aeb5e93ab155990b94895e7e7ff6dea91384854a42cc8a3b9983495b4b3c33ab1b60b2b6450ccf0418fada158 +a7588d0b7c6a6bc32fc474aa0f4e51dfb8e6e010346ad32c59d6f99e6f0522424111a03a4f56ba4075da8009ee7a63e9 +94d645cc3936db1563568193639badfc064dd5bda8d0631804ee00b09e141b200619e07506b5a8225130541436327194 +8d695c03cc51530bdc01ee8afcd424e1460d2c009e1d7765c335368e5c563cf01a2373c32a36400c10e2bf23c185ed19 +ad824a0a7ed5528e1f9992cbb2050785e092b1ea73edd7fb92b174849794a5b04059e276f2941e945bc0f3e46172f2af +ad6ed2af077a495d84f8eeed7d340b75c0d1c8b7c5a854dfc63ab40a3d0c2b0d45016d30b3373a13f0caae549f657976 +82454126c666023c5028599a24be76d8776d49951dfe403ebf9a5739b8eb2480c6934a34010d32cd384c91c62a9aa251 +b57070006793eca9fa2f5237453ed853994ad22c21deb9b835e1fb3fbc5ac73aec265a4a08de7afae1610dc8c42b7745 +ad94667c791cf58875eb77eb17b6ad02de44e4ba2ddc2efe4d0ff22a5e1a090c670354437847349fd61edc4ba5606f07 +b2aac0c345ffc00badaab331c12a22019617b004d32c099c78fa406d683744d96d51d1237ad0842f9f54655186f8f95b +8fed51076cc939b354e3b69034a594e6c9c98425ccf546154ab087a195375128444732388d2eb28f82877de971ec2f58 +8e521c0093deb9dff37888893db8ffebc139984e7701e68b94d053c544c1be0d85f0f98d84b2657933647b17e10a474c +a2c6c9a307aff9b1dea85f90fa9e3b8057fd854835055edeb73842a7ef7c5ae63d97c51fec19dd8f15d696a18a0424a6 +a3390b25a9c11344ed1e8a0de44c848313026067a0f289481673c2c0e7883a8fc9f6cab6ccd9129729a6d8d0a2498dc2 +82770c42b1c67bbd8698c7fe84dd38cc5f2ad69a898097a33b5d7c5638928eb1520df2cb29853d1fa86a0f1bcc1187e8 +a6fdf7a4af67bc4708b1d589135df81607332a410741f6e1cc87b92362a4d7a1a791b191e145be915aa2d8531ee7a150 +aecac69574188afc5b6394f48ba39607fe5bb2aa1bd606bc0848128a3630d7d27101eb2cea1fb3e6f9380353a1bb2acc +a23fd0c52c95d0dffb7c17ec45b79bf48ed3f760a3a035626f00b6fe151af2e8b83561d0b9f042eaae99fde4cbd0788d +a5f98068525cdd9b9af60e0353beb3ac5ac61e6d3bac1322e55c94b3d29909d414f7f3a3f897d5ae61f86226219215c6 
+b2a4d724faac0adf0637c303ff493a1d269b2cdbec5f514c027d2d81af0d740de04fb40c07344e224908f81f5e303c61 +adeadb3521e1f32ef7def50512854b5d99552e540ec0a58ea8e601008de377538c44e593e99060af76f6126d40477641 +a18b7fc2fcd78404fed664272e0fef08766a3e2bc2a46301451df158bd6c1c8aa8cf674dd4d5b3dedfaceb9dd8a68ae3 +83bcfb49313d6db08b58c6827486224115ceef01ca96c620e105f06954298e301399cdd657a5ff6df0b0c696feec1a08 +8c94391eba496e53428ec76dfe5fa38f773c55c0f34a567823316522a0664a3d92bff38ec21cf62ac061d7d1030650c5 +b1fa196ccfd7d5f1535b2e1c002b5cde01165c444757c606b9848bc5f11b7960973038fb7cc3da24300fc1848e34c9af +b139f6c6449449638de220c9d294e53fc09865a171756d63bbf28ec7916bf554f587c24bddf51dd44372d15260d8fe25 +b716242299d4ee72b5b218781b38ca5e005dcf52333364f85130615d1dbf56216af8ee2c9c652d82f7aab5345356538c +9909f24e4ad561aa31afd3a3b9456b2bd13a1d2e21e809a66af62fec5f95b504507ac50e81d2233da2b223f5443e7585 +ae863530a02cf3a757f72b945c8c0725d9f634d2ff26233478d1883595ff9a1eef69e8babffdbfa161452fc204f5b5a1 +8eb82bde283b6a6e692b30236cbf41433b03eda8dad121282772edd56f144b1ebf5fb489d18c6ce8776135771cbb91e2 +9296141fadf8dadc885fff4999c36efa25ec76c5637a8300a1a7dc9cf55bcedfe159e0ef33f90eee9be8c4f085734e10 +b6c07f2e6fcbd6c42a8b51e52fbcd5df3aa9f7c3f0b3c31021de1aec2111d0a1c36b5ab489ba126af44fd43cf31c2594 +a70ca669c357535b363d16b240fd9cb9c5ba1b648510afc21218ea034e9bf5f22717ae31ff43ef89dded95b7132fa58f +b350721f8f6b4d164fd08aca30cd4dece9b4a81aed0ac12119c9399bab691d5945814306f9a61f0106b76d4d96f7b9d6 +b6886076c9d8c344bf3fb6975173d00fa82866012894f31c17e6fc784fbc0dd2d24d6a1cddd17f7379c74566a23219aa +87636e4a83ceadc170a4b2517b19525c98e2163900401996b7a995b2f3da8d6ba2ab92f909eade65074fac07cf42f6fa +8ff61d87c4699a067a54b8540e8642f4c7be09d3783ec18318bcba903c6714fcd61be69165e07e1ca561fe98e07507de +85485d6b569ac20e6b81a9e97ef724e038f4fee482f0c294c755c7b6dad91293814f143bfcfc157f6cfa50b77b677f37 +a49256cb1970cc1011a7aed489128f9b6981f228c68d53b1214d28fbcfb921386cc7cf5059027e667a18073efa525a74 +87bc710444b0c3e6682d19307bedc99c22952af76e2d851465ee4f60e5e1146a69f9e0f0314f38a18342e04ece8e3ed3 +a671a6cabfd19121a421fdfe7732eccbb5105dfb68e8cbcf2b44ae8465c99e78c31b99730beca5bc47db6fc2f167203a +a2f3270c184629f6dfc5bf4bdd6e1b8a41e8840a1e4b152253c35c3d9e7ab4b8e3516dc999c31f567e246243e4a92141 +b9795a5a44f3f68a2460be69ecacdbb4664991ebbedffed5c95952147ad739e2874c099029412b9653d980a2d4307462 +959053faec9a966dd5a4a767a3154e4b8e4f56ca540ae53e373c565dda99fb626f725e5a5e3721c82918f8c5f2e9e0a3 +b3ef9d6a1b3cd44a3e5112819fa91cb8a7becc3f5b164c6f759f93171d568497b01c8e743f4727b341a1296a0dbadf4f +b852dfdfbe2b8c77d938fad45f00737e14eacf71d5fecbb3e4f60052ec9efb502c38c1fcecaf71da69eabe8b33852a67 +921c7007f26bdd4139e919dfe27d87b489a0bc5bd6fb341e949e4451f14c74add0489b108c9c9666a54c5455ac914a9f +86b63d73ba31c02e5337f4138e1684eccdc45ab5e4f30e952fb37d638b54ecec11010414d7a4b7aa91f7cc658f638845 +853c55e0720b66708a648933407795571fc11ad5c234e97f92faabce9e592983dfb97a1705047ee803648ecf9fbb2e5c +995fe7d1dc09bb0c3c3f9557c4146534778f5ea9c1d731c57440fdcf8094f82debf19090b5d23298da1ed71c283b3ae5 +b9c49c911a0c4d716b7baec130f9e615bfa7d504aa8766ed38878a93c22b1f6353503d4f7f425d4902239fb4689429df +80504d964246789a09dcd5c0298680afb6fe50bca3bb9c43d088f044df2424a1828de10e0dbdc5c0aac114fa6d9cf5d1 +90249351f109f6b23a49a610aaa3b2032189fd50e5e87cdc3b20f23ed4998af3a8b292bf9fbab9bd1cbe0a1371081878 +abb5f0148850f0d80b429c2b9e0038772432340ef0862ccb5dcb7347026ca95bf9a5857f538e295aebd3a6a5027adb4c +b92ac9c0f7e73150798348265e5f01f3c752480c72613c6894a95e9330bba1c642b21b9cbd8988442b5975476634b4fa 
+af3fbcc825abd92c6d7ea259467f27045e288f27a505e6a3c9ec864aa08fcaca0d4123034513dbd4c82d4814075708ab +a738232a66030e0e9c78e093a92fcc545b10e62fb0ecb832bbbc71471b28eb6ec422a498c2402e2c6d74983df801e947 +ae60194ce2035edd1af253b9eefbb4b1b7609c9678256c89c3cb076c332a9f4442c3441ad2ecc9d73265359bdadc926c +8b2fd55e686f16725fc0addb4065f696275852320b03221fd22889825d66fae5bb986b03c47452e32b3a32c1fdfc8dfd +8e2e1a36673b7729b07e7bc5014584e1c03e9552f7440fbfda0a6a7f41953947fcdf8d666f843bfc03dcca5b06a14318 +95a3df04368c069f3fd32a20b627c5f043e952167c9e80bf5914bbf2086879909c60e089bbd488725ab977c0e6051728 +9856403b2211d0152d4eec10db7ec34c16ac35170714b75af3ebc398a676c171b24b6f370361de0f9057ba444293db14 +a2cb484b758af5fd8e2baca7f0406f849c71255e58ef110d685cd0c1137893a25d85a4d8582e3ced7dd14287faa95476 +b0f697b6a42f37916b90ab91994ae4a92c96dc71e4da527af41b9d510bc2db5a9b4f29183a758074b6437a1e62b2d1d7 +b39c49266aae46f257b7ae57322972fb1483125298f9f04c30910a70fe5629dba0ec86b94cc6ba16df3537a55e06f189 +86cd5595b5b769dfd9ceb68b11b451f6c5b2e7a9f6f6958eac8037db1c616e8a9defb68a0d6c2287494d1f18076072c1 +b462e8fa9a372d4c1888fd20708c3bed1cb00c17f7d91a0481238d6584fbbf2d238e25931154f78a17296a12825d7053 +a5ef28286628ba509bac34c9f13158d0013239fdca96b5165161f90b89d6e46295822ebdf63f22d7739911363a0e0e86 +a629a95a24e2545862b41a97ecba61b1efa792fd5555dc0599c175947e9501bffc82b05a605fd5aabc06969ccf14fff4 +af83467e4b1f23a641630cc00c38d4225ff2b4277612b204d88de12a07d9de52fb4d54a2375a7fd91eb768623c255376 +a630f29fb2e9a9e2096d7f3b2f6814ee046ebc515f6911d4bc54ad8a5a821a41511ff9dcfbe3176f35c444338ecd0288 +950dedc11bd29e01ba9744bec681ad9462127c35e9fcadfacc9405ec86b985a1b1c4f9ac374c0f1fa248212e5e170503 +82e8e7be8011ee0fd9c682d26a0ef992d0191e621d07fd46a3a5640ef93a42e1b98a33cad1f8017341a671d28caebb03 +a075860554e712398dac2fb0375067a48d0e4ca655195cefc5ccb1feb8900d77124aa52a12e4f54f7dab2a8f1c905b5b +81d2183d868f08714046128df0525653a2dc2ff9e2c3b17900139c9e315b9f4f796e0fb9d1d8cbadbaa439931c0e0879 +81fb1456969579515a75fb66560f873302088cde2edc67659b99a29172165482ca1f563758c750f00086b362ae405322 +a13c15ab19203c89208c6af48d2734bb0873b70edb660d1d5953141f44db9012528d48fb05aa91d16638cbda2ca8f0cc +8ba46eef93e4ec8d7818124a0b9fcfe2bcf84a98db3545d2b3d0192cfadc81fc667dcc22ab833c3e71508d0f3c621fe4 +b9bd60d2266a7d01e1665631a6ed6d80ffc0cd7f088f115a5d4ea785c518a8f97d955e2115b13c4960302b9825526c92 +b26fa4e87142150250876083a70c229249099331410f0e09096077fdf97b31b88dc57a3e3568d2a66a39af161cf5dfec +b9d147564124728b813d8660ba15fa030c924f0e381ad51d4e0cf11cc92537c512499d3c2983dd15f2e24ca166070d70 +b6fb44e1a111efb3890306fa911fafda88324335da07f7de729b2239921ef15b481630a89c80e228bec7ab6444a0b719 +a6cd9c7acac052909ef0cf848b6012375486b59b7bac55b42c41f0255b332c1d45a801f6212d735be8341053bd5070b9 +864258d69234786af5de874c02856fc64df51eff16d43bfb351b410402ab28f66895aec4025e370a4864f19ff30fd683 +84370fa1243b64b3669dd62e1e041ff9bd62810752603486aac3cba69978bd5f525c93cbc5f120d6f2af24db31ec3638 +b983c2cdc1a310446de71a7380b916f9866d16837855b7d4a3a6c56c54dab3e373a6fc6563b8309dc3b984d4e09275d6 +914f8587f876470f7812fa11c6f67e2dd38bf3090e8928e91fe2fe5595bee96cbe5f93d26fdced6b4e7b94f75662b35d +8b47bcb111d91aa3d80e4ceef283824aa00d1faeb6fe4111aecd9819869c0e1f6f4b6fb2018aebb07a0f997412cda031 +95b2befa98f9992450ca7ff715ae4da8c36dd8adcfef3f0097de6e3a0b68674b05cbf98734f9665051bb4562692641e0 +8bcd1651a2bfce390873a958e5ff9ca62aac5edd1b2fd0f414d6bcf2f4cf5fa828e9004a9d0629621b5e80fbbd5edb90 +af79bed3c4d63239ac050e4fa1516c8ad990e2f3d5cb0930fc9d3ce36c81c1426e6b9fe26ac6a416d148bf5025d29f8b 
+881257e86b7ab5af385c567fde5badf67a8e7fff9b7521931b3ce3bac60485c0fe7497339194fb7d40e1fad727c5c558 +a1b40b63482cd5109990dfb5a1f1084b114696cbbf444bf3b4200ab78c51dad62c84731879ea9d5d8d1220e297d6e78a +b472212baa2a31480791828ca5538c3dcc92e23f561b0412f8cc9e58839d1625ddcaf09c8078d31ac93470436843cd74 +8f516d252b1863cd3608d852a2857052bb2a3570066d4332fa61cb684b10ac8d1a31c8d32f2a0d1c77eee2ad7a49643d +8d20b75c51daa56117eda2fd5d7a80a62226074b6a3ff201519f2054eecfeff0aa2b2f34b63bea3f53d7d0ce5c036db9 +8282f433229e7948a286ba7f4a25deb0e0a3c5da8870562c3646757bef90ca1e8d3390b0a25b3f2bf45bf259a4569b77 +8a2dbf4b55cc74f0a085d143a88ebc8c2a75a08eab2703d13a00b747eaddc259a3dd57f7330be938131835a6da9a6a68 +aa0bc51617a938ea6a7b0570e98b8a80862dd9e1cf87e572b51b2a973e027bcd444ef08e0d7b5dee642e0da894435e91 +aa7319ca1ac4fe3cc7835e255419eeb7d5b2d9680769cc0ca11283e6147295db75713b71a9312418a8f5505cd45b783d +ab3f9c465663dc90fae327a2ee9cb7b55361a9b6fbe713540a7edd3cff1c716802fb8ad4dd8fb0c945d96b3b44c5795b +913a2ae88acffab12541fc08920ee13ab949f985a117efe9a5b2c76f69f327f60c5b5ad3fa5afa748034ac14298fc45a +9008f044183d2237b723b235953e4d8b47bec6a7b300d98075555478da173b599ba9c7c547c2f111ce1fae5ac646e7a3 +a26b4cc42b353e1c18222d2e088d7f705c36be12e01179db440f10fcfa9691d31fc4fc7e7ee47876f1624e6d44be1021 +995e75824f322294336bfa2c5d1a319f0d77f6a0709beabaf1b43015d8a78d62447eab907349524734170f0294d1ca7a +8b96f04a19dbe4edc71d1f2c6d3475ae77962e070ec5797752453283c027c6b29b6e58e8b7eb5c3f9770557be7e80b67 +8621459865234734bcfaa492ca1b89899525198a7916ccc6f078fb24c8bf01154815bb5b12e1c3d0a10bd4f1e2ea2338 +ab52174541185b72650212e10a0fe2e18ccfd4b266a81233706e6988c4af751b89af87de0989875f7b5107d8d34c6108 +966819d637bdd36db686be5a85065071cf17e1b2c53b0e59594897afc29354ecba73bf5fc6fa8d332959607f8c0a9c27 +b7411209b5ab50b3292c3a30e16f50d46351b67b716b0efb7853f75dc4e59ec530a48c121b0b5410854cd830f6c4b3ea +a5dc04adbadce0af5dc1d6096bad47081110d4233c1bf59a5c48a8e8422858620f4be89bf1f770681be2f4684ee4cce7 +af77a8f83cffb5f8d17be0ab628dedcad63226c9b13ce4975fb047f44bfef7d85e7179aa485abb581624913eddbb27ec +82bf28dc58c893c93712ce297cc0d64f70acb73a641cb4954ccf9bf17597f6d85eecf5a77c8984ab9afbe588562a0ee9 +988a7cef9a178e8edb91f3ec12f878fd68af2ac0762fa0a48a2423e24f765ed8f7837429fd8bc0e547e82e6894e63008 +a5d5969311056d84b3ee87f49286fac0bd9a7220c196cea4f9dced3b858dcdba74718eab95b38bd5d38d2d1184679c98 +af4d51b3ded0aaad8f12bef66c0616e9398fc42618852ac958e6ab2984a720a6111ac55b249d7e4523051740e12b346f +ac635b4a49f6fbb94a5f663660f28431ba9f7c5c18c36ebc84fd51e16077de7753595f64619b10c16510ecbc94c2052d +ae25eb349735ced1fe8952c023a9b186a1f628a7ddf1a4b6f682354a88f98987ac35b80b33189b016182f3428a276936 +ae3ab269690fdd94134403691ba4f5ed291c837c1f5fdc56b63b44e716526e18abb54f68ca5d880e2fb7bea38e74c287 +a748b03b2bd3fbc862572bc4ddc0579fa268ee7089bcfd0d07d0c5776afcd721302dbb67cb94128e0b1b25c75f28e09a +8f09a2aaa9ba3dfe7271f06648aba9cc1ea149e500a7902d94bb9c941a4b01d1bb80226fd0fd2a59ad72c4f85a2a95d0 +853d55ad8446fd7034e67d79e55d73a0afcb5e473ed290e1c3c7aa5497e7f6e9bbf12d513fc29e394a3dc84158a6d630 +b1610417fb404336354f384d0bf9e0eb085073005d236a0b25c515d28235cea5733d6fbd0ac0483d23d4960064306745 +86de805b3d4f6fbb75233b2cf4d22fcc589faa2ac9688b26730cb5f487a3c6800c09bb041b2c6ab0807bfd61b255d4c9 +893b38c72cf2566282ee558d8928588dca01def9ba665fcb9a8d0164ee00dedafbf9d7c6c13bcc6b823294b2e8a6a32c +8e50de7a70ac9a25b0b5cf4abc188d88141605e60ce16d74a17913a2aff3862dec8fbbf7c242cf956f0caae5bcc4c6bf +b5cf09886a4fb4ce9ea07d1601d648f9f9d1a435b5e1e216826c75197cd6dafd6b2b07d0425a4397a38d859a13fdb6dc 
+859dc05daf98e7f778a7e96591cc344159c1cbe1a7d017d77111db95b491da0a9272866d2638a731923ca559b2345ebe +8ff1792f77ecdfbd9962f791a89521561c7b82031a4e53725f32fe7d99634a97b43af04cbf3e0b0fdff4afa84c49eb99 +81e2cd8a221b68ae46dd7ce97563bd58767dc4ce1192b50ff385423de92206ff585107865c693c707e9d4ed05f3149fb +8fce7da7574e915def0d1a3780aa47ef79b6d13c474192bd1f510539359494ddc07e5412f9aac4fc6c8725ade4529173 +ac02f5df60242734f5ead3b8a62f712fefdb33f434f019868a0b8ddf286770244e2ddfb35e04e5243ba1e42bcd98a6a5 +a8d69783349a442c4a21ecb3abd478a63e2c24312cb2d2b3e10ea37829eb2226a9b8d05a8c9b56db79ffaa10d1f582d1 +b25b5cca48bf01535aba6d435f0d999282845d07ac168f2ca7d5dba56ee556b37eab9221abdb1809767b2de7c01866c1 +8af7e1d1f4df21857d84e5767c3abe9a04de3256652b882672b056a3ab9528e404a8597b1ad87b6644243f8c4cd3799f +a6718308dfa6992ae84fcb5361e172dbbb24a1258a6bd108fd7fc78f44cc1d91be36e423204a219a259be4ab030f27ff +b99cbe3552c1a5259e354c008b58767c53451932162e92231b1bebfc6a962eb97535966a9bd1dfd39010dfcda622d62a +a8458f6b8b259581f894e4b5ce04d865f80c5a900736ca5b7c303c64eaf11fe9cb75e094eece0424ba871b2aee9f7a46 +914f763e646107b513c88f899335d0c93688ffa6e56c3d76bff6c7d35cb35a09f70dc9f2fe31673a364119c67cd21939 +9210f2d39e04374f39b7650debe4aceeb21508f6110ab6fc0ab105ec7b99b825e65753d4d40f35fad283eeff22a63db0 +98729cf927a4222c643b2aa45b3957b418bce3f20715dd9d07997a3c66daa48dd62355dbd95a73be9f1d1516d1910964 +a602c399f1217264325b82e5467a67afed333651c9f97230baf86aec0dd4edeae1e973cafef2ea2236d6d5b26719954d +ac9632921d45900bf3be122c229ba20b105b84d0f0cba208ccdce867d3e9addfb3ef6ece9955950d41e1b98e9191ef42 +a76ce1f53e1dc82245679077cb3bca622558f2269f2d1a1d76b053896eba1c3fc29d6c94d67523beb38a47998b8c0aa7 +b22b51fcc1b328caa67cc97fb4966cb27d0916488a43248309c745cd6e2225f55ad8736d049250fa0d647e5f8daa713c +b7645c1923a6243fa652494fe9033fa0da2d32a0fb3ab7fcb40a97d784282a1ffad3646c499961d4b16dadbc3cbb6fd6 +acab12b490da690db77c4efdc8b2fe6c97ac4ba5afb5165d6647fdd743b4edbad4e78d939fc512bebcf73019c73bae40 +ad7a0fcd4e4ccb937a20e46232a6938fccf66c48a858cf14c8e3035d63db9d1486e68a6bf113227406087b94a0ece6a0 +a78605beaa50c7db7f81ab5d77a8e64180feea00347c059b15dc44c7274f542dc4c6c3a9c3760240df5f196d40f3e78b +8763315981c8efa9b8ae531b5b21cfc1bbc3da3d6de8628a11dcc79dee8706bd8309f9524ec84915f234e685dd744b69 +b4a6c48531190219bf11be8336ec32593b58ff8c789ee0b1024414179814df20402c94f5bfd3157f40eb50e4ef30c520 +8dac8a3f152f608ce07b44aee9f0ed6030fa993fd902e3d12f5ac70bf19f9cde2168777d2683952a00b4b3027d7b45ea +8baf7dfae8a5840c5d94eabfe8960265f6287bb8bc9d0794a6d142266667a48bec99b11d91120907592950a0dddc97d9 +b8595e6ea6b8734d8ae02118da161d3d8d47298d43128a47e13557976032dad8c2ccbfff7080261c741d84d973f65961 +8b93979c51c8d49f4f3825826a5b9121c4351e0241b60434a3c94f2c84f0b46bbf8245f4d03068676166d0280cf4f90c +aceb0fdaf20bf3be6daebf53719604d3ab865807cc2523285f8fef6f3fc4f86f92a83ad65da39de5bd3d73718a9c4bd2 +814dd41764a7d0f1a14a9c92e585f154a26c8dbf2f9bff7c63ae47f1ac588cec94f601ccc12e8a63a7a7fce75a4287f2 +b47b711848e54fa5c73efc079d0a51a095fa6f176e1e4047e4dac4a1c609e72099df905958421aee0460a645cba14006 +aaf7bd7e1282e9449c0bc3a61a4fca3e8e1f55b1c65b29e9c642bb30a8381ce6451f60c5e0403abc8cee91c121fa001f +b8b0e16e93b47f7828826e550f68e71a578a567055c83e031033c1b7f854e7fc8359662a32cc5f857b6de4aff49e8828 +b3eb70b8c8743a64e1657be22a0d5aeb093070f85a5795f0c4cb35dc555958b857c6c6b7727f45bf5bedf6e6dc079f40 +ae68987acd1666f9d5fa8b51a6d760a7fb9f85bf9413a6c80e5a4837eb8e3651a12e4d1c5105bfb5cfa0d134d0d9cfc2 +acd8fa5742b0bac8bd2e68c037b9a940f62284ff74c717f0db0c033bf8637e4f50774a25eb57f17b2db46e5a05e1d13d 
+a98dac386e7b00397f623f5f4b6c742c48ab3c75d619f3eaf87b1a0692baf7cb7deac13f61e7035423e339c5f9ae8abf +99169bd4d1b4c72852245ebfbc08f18a68fb5bcce6208dd6d78b512b0bc7461f5caf70472b8babf3e6be2b0276e12296 +937d908967f12bf7f728fe7287988c9b3f06c1006d7cd082e079d9820d67080736910bc7e0e458df5bae77adb9a7cbc1 +8c50e90ce67c6b297fd9406c8f9174058c29e861597a0f4ed2126d854a5632fa408dfa62ad9bb8b6b9b6b67b895d5a4d +8f4840a91b0a198226631a28e7a2e893fc6fed4d5eb3cb87b585aac7f4e780855a353631ad56731803296f931e68a8d0 +96a4b8c64d3d29765e877345383bf0e59f4ac08798ac79dd530acd7f3e693256f85823ad3130fb373d21a546fe3ca883 +b0dce7a6ab5e6e98b362442d6e365f8063ba9fef4b2461809b756b5da6f310839ac19b01d3fd96e6d6b178db4ff90ee1 +8f012cb2be5f7cb842b1ffc5b9137cafef4bd807188c1791936248570138f59f646230a1876f45b38a396cbdd3d02e08 +94a87b5ce36253491739ca5325e84d84aaff9556d83dcb718e93f3ff5d1eecf9ae09d0800a20b9e5c54a95dfebfcecd3 +b993ec5f9e82cc9ceeb7c5755d768bc68af92cc84f109dfaf9cf5feb3aa54881e43c3f598ba74ed98e8d6163377440ca +92f845d4d06a5b27d16aef942f1e3bcbe479b10fef313f9ca995315983090511701b39ccbb86b62d0c7c90a2d1f0c071 +b6ec6da0f9e7881e57fa3385f712e77f798abc523609a5b23e017bb05acc6898825541aed7fe2416c4873de129feceea +86b181183655badfe222161d4adf92a59371624a358d0ec10e72ee9fa439d8418f03d635435ec431161b79fd3fa0d611 +b5e28eeed55fb5318b06a0f63dbf23e00128d3b70358f1c6549fd21c08ef34cb1372bc0d4b0906cc18005a2f4cd349bf +85c4d3fddda61dbfb802214aa0f7fc68e81230fb6a99f312848df76cddc7b6dfd02860e8a4feb085dad1c92d9c6c65e0 +80f7fdec119309b2ac575562854f6c2918f80fc51346de4523ec32154d278f95364fdef6f93c7d3537a298dd88df7be6 +9192c1949d058614c25f99d4db48f97d64e265a15254aa6ed429e1ef61d46aa12355769f1909a5545cd925d455a57dbe +a0b1e7d928efc4dcbd79db45df026ae59c20c1a4538d650c0415ab7cb0657bc1e9daeacc3053ee547e8f9c01bdbd59c4 +893e84c41d3a56bca35652983c53c906143b9ad8d37b7c57f9dacbeb7b8dd34defc6a841f5b9857ffb90062bbd8e9dee +a7f89a448349dbc79854cf888980327f92aedc383c7fadd34fdc0eaa4f63d751315b4f979e14f188854ef4d16c9e8107 +833f2774a96187805f8d6b139c22e7476bce93bc5507344d345008080fb01b36d702b96e4c045617a23a8ca1770b4901 +80e46e86d68bd0a48ac6fa0b376d5bb93a5d6b14f08b3a47efa02bb604c8828c2047695f1f88fc5080e5548e1a37130f +943f42b7b4ad930059a26ad06b62e639f06c1c425d66066c55134e97c49abe412358c7cb994fcc1cf517ea296bca1f68 +8b9d4fe835dc6a2cbf85738937bbfb03f0119ab8df04a7d68860716ce6ee757dbe388a1e8854ddb69fe0c9fa7ed51822 +909030c7fde2591f9ea41ae6b8fa6095e6e1a14180dda478e23f9c1a87b42c082a1ea5489c98702f6ccd2ba5812d1133 +a715ec1beb421b41c5155c7ef065bbb50b691d0fa76d7df7ee47683d9e4eb69b9ea3e62fc65196a405d6e5e29e6c2c60 +8c9e801cb7ef780a535be5c2a59b03e56912acbfdb00447bfa22e8fc4b11dceecc528f848d5fba0eec4237d6f81f4c79 +b96b6af857c3bc0344082bd08ec49a9bed478d4d35b85a2099b1849cd6997521c42225305f414cdd82aef94b9e1007d3 +8764db720b4e44a4d2527f7f9b535a494a46c60e28eac06bf1569d0703c4284aefa6cb81fbba9d967286f9202d4b59ea +a66fd2f9158e1ffcdd576cba1413081f43eed00c7eb8f5919226f7b423f34ac783c1c06247819b238de150eb5a48d977 +82c52e817ac3bb0833ea055dec58c276c86ca5181811cf7a483b3703a06ea1bee90ae3aeaa2cffeaeba0b15fe5bf99be +987d07cb276f7f03a492cfb82bba6d841981518286402d3e69e730a9a0e29689a3619298124030da494e2a91974e0258 +b34f2c5740236bc6d4ae940920c5bc2d89ff62a3dd3a3ec9a0d904d812b16f483073db1e53b07f2b62e23f381d7bdbe5 +a1c0679331ab779501516681b3db9eefb7e3c0affb689e33326306ada6d7115fafd2cc8c1c57b2fa6c2072552f90a86e +94805e30d7852fc746e0c105f36961cc62648e438e8b9182fc0140dbf566ec14a37ad6e7f61cacb82596fc82aed321e5 +a42fb00b29a760141ff0faaeb7aca50b44e7bbc0a3f00e9fb8842da7abfcaae6fae9450abe6ba11e8ecf11d449cbe792 
+8fb36ce4cfa6187bfe8080ac86b0fa4994f20575fb853bd8ffa57c696179cc39f58ff3b4bd5a2542ff1c8b09015539df +a1c54e7aa64df7fb85ce26521ecfc319563b687ffecd7ca9b9da594bbef03f2d39f51f6aaff9a3b5872d59388c0511c6 +855e48fdb8f771d4e824dbedff79f372fd2d9b71aa3c3ecf39e25bf935e2d6e0429934817d7356429d26bf5fd9f3dd79 +8ae6157a8026352a564de5ee76b9abb292ae598694d0ea16c60f9379e3bb9838ce7fd21def755f331482dc1c880f2306 +a78de754e826989de56fe4f52047b3ffd683c6ceaf3e569a7926f51f0a4c4203354f7b5cfa10c4880ba2a034d55a9b0d +97609477d0a1af746455bbd8cb2216adacc42f22bfd21f0d6124588cd4fec0c74d5bde2cdba04cdbfbff4ac6041b61b1 +a03dc3173417381eb427a4949c2dbfa0835ef6032e038bf4f99297acf4f0ba34a5fc8ccf7e11f95d701f24ee45b70e27 +aad6283e85cd1b873aeb8b5a3759b43343fdadc9c814a5bf2e8cf3137d686b3270f1ec2fb20d155bbfd38c7091f82c44 +92ab94ed989203a283d9c190f84479c2b683615438d37018e9c8de29c2610bb8fccd97bb935dca000d97d91f11a98d65 +8c0444a0b9feb3acb65a53014742e764fa07105e1c1db016aec84f7a3011d9adc168dbba8034da8d0d5db177a244d655 +95a33d25e682f6c542d4e81716cc1c57ef19938409df38bf8f434bc03193b07cedd4e0563414ce00ab1eebbd3256f3e7 +8716c30e3e4b3778f25c021946c6fb5813db765fde55e7e9083a8985c7c815e1b3d3b74925ba108d9a733ddf93b056af +a186aabc10f1fff820376fa4cc254211c850c23a224f967d602324daec041bbe0996bf359ed26806a8c18e13633a18a8 +a1e8489f3db6487c81be0c93bade31e4d56ec89d1a1b00c7df847f5cd7b878671015f5eaa42ee02165087991936660b9 +8f688c969c1304dfa6c1a370119d1988604026a2ab8e059016c5d33393d149aae6e56f3ee2b5d25edc20d4c6c9666ad9 +91950b651fefd13d2fa383fd0fdc022138ce064ee3b0911157768ad67ed1fb862257c06211cf429fba0865e0b1d06fc8 +86cff4080870d3e94ed5c51226a64d0e30266641272666c2348671a02049ae2e8530f5fb1c866c89b28740a9110e8478 +88732c4d9e165d4bb40fb5f98c6d17744a91ff72ca344bc0623d4b215849a420f23338d571a03dd3e973877228334111 +afcc476ad92f09cf2ac7297c5f2eb24d27896d7648ba3e78e1f538c353ceeb1e569917a2447f03f3d4d7735b92687ba5 +b622aa475e70d9b47b56f8f5026e2304d207684726fb470a0f36da7cb17c30dd952813fab6c7eb9c14579aacca76f391 +802cf5630c0407ae0d3c5cf3bef84e223e9eb81e7c697ea10ec12e029fc4697ce7385b5efab7014976dacc4eb834a841 +a08596493f4cd1b8ac2ec8604496ee66aa77f79454bb8ab6fdf84208dc7607b81406c31845d386f6ac8326a9a90e7fc5 +a54652ca9e6b7515cb16e5e60e9eabbccbc40bb52423d56f0532d0bac068aec659a16103342971f2cc68178f29f695db +a3ab54875cb4914c3a75b35d47855df50694310c49eb567f12bbc5fa56296e11f4930162700e85ba2dbfdd94c7339f91 +94183a040285259c8f56bef0f03975a75d4def33222cc7f615f0463798f01b1c25756502385020750ac20ae247f649a1 +b0004261cc47b0dc0b554b7c6ebf7adf3a5ece004f06e6db3bbac880634cdf100523b952256a796998a5c25359f12665 +a25dfeb0e18ebe0eb47339190f6a16f8e116509ab2eef4920f0d3ff354e3ead5abe7f5050b2f74f00b0885ea75b4b590 +ab10ef2f5dc0ede54e20fa8b0bce4439543db8d8b31e7f8600f926b87ec5b8eea0ac2153685c7585e062ffac9e8633c3 +8386eac1d34d033df85690807251e47d0eaacb5fe219df410ab492e9004e8adabb91de7c3e162de5388f30e03336d922 +b6f44245a7d0cb6b1e1a68f5003a9461c3d950c60b2c802e904bc4bc976d79e051900168b17c5ac70a0aed531e442964 +ad12f06af4aa5030b506e6c6f3244f79f139f48aec9fc9e89bbfbd839674cfd5b74cea5b118fb8434ba035bda20180af +88511306dfe1e480a17dba764de9b11b9126b99f340ceb17598b1c1f1e5acbdd1932301806fe7e7e5e9aa487a35e85de +a17cdf656e1492e73321134a7678296a144c9c88c9a413932d1e4ca0983e63afc9cdc20fd34b5c6a545436b4db50f699 +b555b11598a76de00df0f83f0a6b8c866c5b07f7ac2325f64fb4a0c2db5b84e0e094d747186c3c698ee4d0af259dc4c7 +88014560587365e1138d5b95c2a69bdae5d64eb475838fee387b7eb4c41d8c11925c4402b33d6360f0da257907aa2650 +b220634e6adee56e250e211e0339701b09bf1ea21cd68a6bd6ee79b37750da4efe9402001ba0b5f5cbbfcb6a29b20b0c 
+ac5970adc08bc9acec46121b168af1b3f4697fb38a2f90a0fbc53416a2030da4c7e5864321225526662d26f162559230 +97667115b459b270e6e0f42475f5bce4f143188efc886e0e0977fb6a31aba831a8e8149f39bc8f61848e19bcd60ceb52 +b6c456b36c40a0914417dd7395da9ed608b1d09e228c4f0880719549367f6398116bf215db67efe2813aa2d8122048f2 +ab7aef0d6cda6b4e5b82d554bd8416a566d38ded953ffd61ef1fcca92df96cdcc75b99a266205ff84180ab1c3de852a4 +81d354c70ce31174888c94e6cf28b426e7d5c4f324dc005cd3b13e22d3080f3881d883ca009800f21b0bb32fa323a0cf +94f3440965f12bee4916fcc46723135b56773adba612f5ce5400f58e4d4c21435e70518bdef4f81e595fa89e76d08fc6 +a6683e7a1147f87cbeeb5601184cc10f81bca4c3c257fd7b796a2786c83381e7698fb5d1898eb5b5457571619e89e7d6 +8ca29539600f8040793b3e25d28808127f7dc20c191827a26b830fff284739fb3fc111453ff7333d63bce334653a0875 +98a69644048b63e92670e3e460f9587cf545a05882eb5cba0bcbd2d080636a0a48147048a26743509ab3729484b3cc12 +84d40302889c03c3578c93aca9d09a1b072aadd51873a19ef4a371ca4427267615050c320165abece7f37c13a73d4857 +87954271e3de3f0b061c6469d038108aac36f148c3c97aefb24bf1d3563f342ea6c1c1c44c703e1587a801708a5e03f8 +86b6f5367e04c5caa3ec95fd5678c0df650371edac68f8719910adf1c3b9df902cc709a2bddc4b6dde334568ca8f98ac +a95fed2895a035811a5fee66ca796fdecce1157481dd422f8427033ed50c559692908d05f39cb6bea5b17f78a924633c +8ba05bdadde08a6592a506ea438dbdc3211b97ea853d1ad995681a1065ececce80f954552b1685ef8def4d2d6a72e279 +90b6b7494687923e9c5eb350e4b4b2e2fa362764d9a9d2ebb60ee2ad15b761e0850c9a293123cf2ef74d087693e41015 +8819ea00c5ea7b960eb96ab56a18c10a41fd77e150ab6c409258bc7f88a8d718d053e8f6cb5879825b86563e8740808d +91e42031d866a6c7b4fd336a2ae25da28f8bde7ace6ff15dc509708b693327884e270d889fff725e6403914546055c28 +85763642052f21cf1d8bd15fd2dc0c2b91bba076751e4c4f7a31fbdb28787b4c6a74d434d6ef58b10f3ad5cde53ef56d +8b61c36c7342a1967a1e7b4c01cddf4dce0e2025bc4a4a827c64994825f53e45277550ceb73c34bb277323fb784aa3c6 +80b9634a45c8b3770e993257bd14df6a17709243d5429969ab8b9a4645bf2a94f9b3cd3d759169887b4aa0eb50f4f78c +b5c44db9439dd8aa4edd151d95e48a25c1154e1525c337f97109f40131db81a4898344c8c3144c270bdc835c269b3477 +863080fcbc227eea32d0dc844f42dc642fbda7efc398ab698be3a3c6f3bf8803dea6ba2b51fed6151f9522b4ab2a8722 +8481e871129e9cb9d2d109c513cbba264053e75192e967f89659dcfcc1499de9ae7a1ac4f88f02289150231c70b4da01 +834d8183698d3d2d1352c22c373222cb78d0f4c8cb15e0ad82073dde273b613515ebcd184aa020f48f8e6fc18f3e223c +a227e300f0c5bc1b8d9138411413d56c274cc014ae8747ec9713f3314d5fae48bb6f8cc896f232fd066182af12c924e4 +ab7242835e91ba273de1c21eb4fca8312bdda5b63b080888b96a67a819b50294a7f17a7dc0cd87fae5e7f34bc24c209a +86eb27c898a5d6c3618c3b8927acee195d45fe3f27b0991903520a26fb8021b279e2a8015fbbba5352223ae906c7c5d6 +a61b1c200b0af25da8ad8e29f78d000a98683d1508ae92ee7f4326a7c88e0edb645b6cb5dde393ac74d322895e77ba24 +887739318c710aae457b9fe709debff63bfbb3ffbbb48a582c758b45d6bf47a7d563f954b1f085c3bc633ffd68c93902 +aacfcb0e2b0a868b1c41680487dc6600577ce00aa2edeee8c6141f4dc407217ddb4d37b79e7c9182258c750d12a91508 +ad8cd2cf5ccd350cd675a17f31b86a0e47499c6c4c11df640a5391bb10989c9c70df0a3ddeba9c89c51e15fedaf67644 +8aba897d32c7ef615c4dfa9498436529c91c488a83efc07ba9600875c90c08b00f66a51469eb901451b6e18e7f38ffd7 +aab8a600609b80e36b4a6772308bac77929a0c5d8d92bbc38e9999186a1c2bfdbef4f7a2b1efba9c17a68dc15a9373ab +b95811d1454307a30c2ac8588c8104804b06c1aec783fed75a6f12c9df626be57865850100f1ad28073e3867aca941cf +8b119d3bd4ee644469457df5d8a0020fd99b8b20bd65ab121cf95a7f55e50dd8945fcf1dff9d269d9d0b74b4edbc7726 +a980b912df832ea09353fd755aa3eec9eb4cfd07ca04387f02a27feab26efa036fca54cc290bb0c04a8a42fdfd94ce2f 
+91288e84da1d4ee2a4dad2df712544da3a098fdb06a5470c981fb6d6f3dcc1c141b6f426d6196ff3df6f551287179820 +98b0473bcffcbd478fd1b49895c61dd2311dab3cdec84f8e3402f8add126c439ffcb09cae3b7f8523754090d8487b5a9 +abe76988cf3065801f62a1eb3cfe9f8185bd6ab6f126c1b4b4fde497ca9118d02a0db3fadccd4ca98826b30475fa67ef +94a316a0faa177273574e9e31989576a43e9feb4cc0f67aa14d5c1967c4e10fc99db3ef4fdca2e63800a0b75f4b84256 +975ad39adadc7e69e34981be2e5dc379b325dc24dddacc0bb22311ff4a551a0020a8bdecf8ab8ac5830ca651b7b630ce +8b3bc73b640dc80ac828541b723a968fb1b51a70fa05872b5db2c2f9b16242c5fe2e8d1d01a1dbeaac67262e0088b7b0 +aa8d892a6c23dbc028aae82c1534acb430a1e7891b2a9337cedb913ff286da5715209cffb4a11008eae2578f072836cb +8dee9747a3ae8ed43ce47d3b4db24905c651663e0f70e2d6d2ddb84841272848a1106c1aa6ba7800c5a9693c8ac2804e +81e2c651b8b448f7b2319173ecdc35005c2180a1263e685a7e3a8af05e27d57ec96d1b2af2cae4e16f6382b9f6ec917c +98a9a47635de61462943f4a9098747a9cf6a9072a6d71903d2173d17c073eff3fc59b2db4168515be31e6867210ecbcd +912b2398505c45b0bb4a749c3f690b1553b76f580b57007f82f7f6cce4fadd290d6df9048258978c8a95ef9c751a59a2 +8ac8f0893fe642111ef98ae4e7b6378313a12041bbca52141e94d23152f78c2e4747ae50521fc9c5874f5eb06976e5cf +946b4a8eb05b529aaed56ac05e7abeb307b186a7835623fa4e85ed9eb41a4910663c09ea1bd932a2c467d28776b67811 +a4be51abeddd40e1da6fdb395d1c741244988ff30e10705417b508574b32dce14d08b464486114709339549794df9254 +b33b6b7d66cb013e7afeabbd7ed1e0734eb0364afe4f0f4c3093938eec15f808985fb7f3976969bf059fa95f4d8e335b +a808adbcf0049f394914482483ee0f711d9a865615ff39b5313ed997f7a0d202ad9ed6e6de5be8a5c1aaafe61df84bca +8856268be15a78465ad00b495162dc14f28d4ef4dcf2b5cba4f383472363716f66dabc961a6dbdda396e900551411e41 +b16ba931e570e1bf124ea3bd3bdf79aed8aa556697ea333e6a7d3f11d41538f98dcde893d0d9ba7050442f1515fb83b1 +91ecde1864c1a9c950fd28fa4c160958246b6f0aa9dda2a442f7222641433f1592d38763c77d3f036a3dbb535b8c6d8f +92cda991f69fbf8e55c6bf281b07fff5dbbb79d1222b8c55686480669247b60212aac27aa7cccd12fcee94e7a759b8af +b1d9b5b4e996b375d505d7250a54c12d32372c004a9cabf1497899054cb8b5584b1cef1105f87b6e97603ccbf2035260 +86e98bde8b484fb809b100f150199f13a70c80813ad8b673bf38e291595e2e362ad1fa6470d07d6fbe2cf7aeac08effc +aa12f7c39ba0597a8b15405057822e083aca3cee6ed30c4e0861eeb22620823588d96c97bb1c3776b711041c4dc3d85d +b477b34f29334f3bae69c7781d574342b7c27326088f9a622559ab93075c7357953ae84eb40e3421f453e04e9b4d5877 +9625067cb2120ce8220a469900aa1d1bb10db8fe1609988786b07eb2b88e0ddb35a3eccd4b6741e1fa2365c0db6b1134 +997b92af7765f587d70ea9718e78a05498cd523fc675ad7b0e54a4aae75fbeac55d0c8d72471471439dacd5bfcfae78d +88b59eaea802e6a2cf0c0075bf3fd6891515adcb9adf480b793f87f1e38d2188c5ed61ac83d16268182771237047ec8a +a57d078b230b1532c706a81eaabfef190fb3eb2932f4764631e916a0f0d837d6014da84ede100abaf610519b01054976 +94ed5c5b96f6afa9f2d5e57e1c847ae711839126ab6efb4b0cf10c4564ff63c819d206fdc706178eb6a0301df2434c01 +980296511019c86cc32212bad6e4a77bc5668b82a2321a1ecabc759a8bbc516183a4787c7f75f9ee7f1338691dc426cc +b10ef97db257343474601fd95f9016c205e28bd22bf7b8f9e30c3b14aca1cc9a11e6404ff412ef269c55fb101fee5a37 +b670d5d9c77fc6aa14212dd3eae100620f3510031b11a9625aa40bf31835c9fd717753b555bd159b1aa64a2104929340 +862054fabf6d6d529a7584d1a48f72d2eb216caf959c782ec36c69c26aef4595415d19a28b041946811b34a629105241 +ae4bf2ccd7b0f3774653848b5b4d39e5517dcbcff30d8441d78bc387ff42b573f16b7b0a7366e6ca5cef1dd9f0816df9 +8f810527badcb49f1542a0ccd12e3541efa084243f7106eae003458c176f4c1f01daae9d4a073c2cb2aced747e8a4576 +8a32c2067aaf6baf32db67acd4974a22a6da33db5444028a7c8c4135f9c84e102dc3b2c635b15afa6dc907d0270daffb 
+b15fc057f306a60b20c8487125b6b334ab749cf70eb8a30c962f625bb203ebd0d2a315949ee3b7a99e3d91acec384806 +a37f145d321359b21cba7be8b64dfae7c67a20b7b324f27c9db172d58e77a49fa02ed3d06d09d7644bf1fd81f4aab44b +b338d2e39a485ee4297adcf5e58e16c3cc331c5dffeade0be190907c1c5bdfed38537a6d81dc39a2cdfc1bc45e677886 +b69d84d8511b3aedfdc7c7e66f68b24e12e5a2365dbbe014bddd2e99e54143428cf8b74cf12c0e71316038aa5300e87e +ab210cc38661667450561a1857337879633f5d5bf2c434a3df74ff67f5c3ba69a7880872f19ae4dcbbb426462cd7d0fb +94538ef487a58a5ff93a5e9616494c5f066715d02be5b249d881a00bd0edfe2fe19dd7a5daa27f043d1dbb5ac69cf58d +afb47a899c1b25fe800241635fa05de9687a69722802ad45434f551971df91d8ca9edda0d835d82eb8f36ff9378ed7e8 +827a10d7536230887283a9b1dedccc6b95ef89cb883c4ee7b9821058b0f559704d1636670c0ada2b253bf60b7cb8a820 +97cc07965065d64409f19fb2c833b89ca3a249694b16b58818a6f49d3800926627ce0f87e5c0853ae868b4699cfdee5e +ae0c93d44780ef48ea537cf4cb8713fd49227f4b233bc074e339d754b5953e637a7289c6f965162701e4b64e4eaec26d +80953053397c4c0ba9b8e434707f183f9ced2a4c00d5c83b7dc204e247ad7febc1855daeb906c53abfdf3fe3caca30c4 +80f017e87b471b5216ebe25d807be6c027614572337f59f0b19d2d1f3125537478cb58e148f3f29b94985eac526cd92f +8a8e1c0d49801a8dd97e9e7c6955fc8b2c163a63bd6a4be90bb13e7809bb0dddc7a5025cc7d289a165d24048eac4e496 +8530e5b5c551a2e513d04e046672902c29e3bb3436b54869c6dea21bab872d84c4b90465de25dff58669c87c4c7d2292 +ae3589d389766b94428e9bde35e937ed11aac7ead3ce1b8efe4916c9bfff231d83b7e904fe203884825b41022988897a +ac02e629a900438350dd0df7134dfa33e3624169a5386ea7411177b40aa7a638e8d8aef8a528535efdbe1ca549911c0b +b1ac60b7270e789422c3871db0fa6c52946d709087b3b82e6eba0d54f478520b1dc366bb8b7f00ff4cf76e065c4146eb +a7465e1f8e57de1a087144d3c735fee2b8213fcbf2b9e987bb33c2d4f811de237bf007402e8d7f895563e88b864f7933 +8ab0007ba8984dee8695ec831d3c07524c5d253e04ec074f4d9f8bd36e076b7160eb150d33d15de5dd6e6fb94f709006 +9605bbe98dadd29504ce13078c1891eca955f08f366e681d8b5c691eadb74d6b1f2620220b823f90ef72eb4ab7098e16 +942a083d07c9cb7f415fedef01e86af4019b14ef72d8ab39fe6bd474f61ba444b9aac7776bea7e975724adb737e6337a +b9a49a8c4e210022d013b42363ac3609f90ea94b111af014f2c5754fbc2270f6846fa6a8deb81b1513bb8a5d442ea8dc +99cd62b177d5d7ce922e980cc891b4f0a5a8fa5b96dfc3204673fbef2e7fb2d7553bbacd7b2e6eca4efb5e9a86096e2e +94e30b65b3edd7472111566dde7fab0e39a17e1f462686050f7134c7d3897e977550faf00174748cbeaec6c9c928baa8 +a32fbcb29f3391d62092f2720e92b6ef4d687d8a3eae39395e0464669a64a38fe21a887f60bc9519d831b9efde27f0f4 +8f1492c4890d8f9deecb4adada35656e078754dcf40b81291e7ef9666d11ba3747a478f9420a17409d7d242cecd2808f +8942960b319ef65812d74cb1d08a492334db58d41e8437e83ddf32e387d9f3ad36834f59e6a71d1afb31263773c3ec49 +88d692f4976c99e763b027df9c2d95744d224724041dfbe35afc78b1f12626db60b9d0056b3673af3a1741eaf5f61b43 +9920cd37eab256108249a34d3f1cc487829cc5f16d1bce3a2328fe48b4de735ebde56c8b5cf4e532a4d68792387257c5 +87d34c9f5a913b806504a458c843eda9f00ff02ad982142543aa85551208cab36ebf8b3409f1c566a09a60001891a921 +a2ee8339c96f790b3cf86435860219322428b03ea7909784f750fe222bc99128d1da2670ad0b1f45e71a6856c7744e09 +84bd257f755de6e729cc3798777c8e688da0251a2c66d7ba2e0ce5470414db607f94572f5559f55648373ce70e0b560e +8d0e170714ddf5dde98b670846307ab7346d623f7e504874bfd19fbf2a96c85e91351ba198d09caa63489552b708fbc8 +9484cc95d64f5a913ed15d380c2301a74da3d489b8689f92c03c6109a99f7431feb8a07d9f39905dcef25a8e04bcec9b +b14685f67dd781f8ef3f20b4370e8a77fef558aa212982f1014f14b1bdd8b375c8a782d1b8c79efc31b41eec5aa10731 +b22fb1541aa7d2b792aa25d335d66e364193fdbf51b24a90677191cae443f0ce40a52faf5983d2cb5f91f0b62a5f20e1 
+b06fa9489123ab7209d85e8c34d7122eb0c35c88ee6c4c5e8ae03a5f1ae7c497c859b0d62e0e91f5e549952330aa95a4 +b5cd71617ff848178650e6f54836d83947714d2e074d8954cfb361d9a01e578e8537d4a42eb345031e3566c294813f73 +848d39ea2975d5de89125a5cbe421496d32414032c1e2fbc96af33501d3062745b94e27dfe1798acaf9626eabff66c79 +ad35955efd5a7b6d06b15d8738c32067ffa7dd21cf24afc8ea4772e11b79b657af706ce58a7adcc3947e026768d9cdaf +aff6d7c4861ff06da7cb9252e3bd447309ad553b2f529200df304953f76b712ac8b24925cf4d80a80b1adaa2396f259a +b4b88d35e03b7404fc14880b029c188feecb4d712057f7ba9dedb77a25d4023e5a2eb29c408fde2c0329718bdaf1ff63 +88e96720e2f7c63236cca923e017ca665b867ba363bc72e653830caf585d802fad485199055b5dba94a4af2c3130a6f6 +982675dc0299aeedba4b122b9b5f523ca06d54dc35da0f21b24f7c56c07f4280265fb64cec2f130993521272c3470504 +95c77d418490e7e28293169cf7a491a7dcc138362f444c65b75d245c1b986d67c9e979a43c6bd8634dae3052df975124 +8fd6c4dff54fb2edc0bdd44ccd1f18238c145859ccd40fbfbc1cf485264445b9d55ffd4089c31a9c7a0543cc411a0398 +b153eb30af9807b5fe05d99735c97471d369c8a1af06b2e2f0b903b991eb787ab5a88c6e406e86225582acf8186ad5ef +826b55de54496751b0134583b35c0c2049b38de82821177e893feeeeb76ceeb747c7a18312cb79a6fc52f2c18f62f33e +91650d7205b232c495f1386bea0c36e136a22b645ffd4f5207f5870b9ce329c44524781c983adf2769f4c05b28a8f385 +b8d51a39162ebb38625e341caacc030913f7971f178b3eee62dc96f979495a94763ea52152198919c6dd4733bc234f64 +a1fbd3673f2ae18a61e402fe3129b7506d9142f2baca78f461579a99183c596b17d65821f00d797519e9d3c44884d8a6 +b7c5f5407263398cf0ed3f0cf3e6fcebdd05c4b8fd4656a152cedcdbf9204315f265fd8a34a2206131585fad978a0d6c +94fa71804e90f0e530a3f2853164bc90929af242e8703671aa33d2baad57928f5336e67c9efdcbd92c5e32a220b4df07 +b75dcea5ad5e3ed9d49062713c158ebc244c2e4455e7a930239998b16836b737dd632a00664fded275abe4f40a286952 +a02f7b37fc30874898618bfcc5b8ff8d85ef19f455f2120c36f4014549d68a60a0473ddfd294530dfd47f87fbd5e992d +8b48e1626917b8ba70c945fe0d92d65cab0609f0a1371fd6614d262d49fe037f96991c697904d02031ec47aab4b32f48 +b368f02c21d4af59c4d11027e583ca03ef727f2b2b7918ef623f529ceac76753a05a4ce724ce2e018da6ecc5c1c1261b +a95cba06eeae3b846fc19a36d840cbcf8036c6b0dc8c2a090afcf3434aaf5f51ef5d14b1e9189b1d8f6e4961bf39bbf8 +b32ca4dfbeb1d3114163152361754e97d3300e0647d255c34ec3025d867ed99e36d67ebafe8255b8c29be41864c08edc +8e4eddefa27d4fe581f331314d203a6a0417c481085134d8376898f9260f133e2bf48576528d62adf29953ad303e63a7 +92b7d5505833f00d5901ae16c87af028de6921c2d1752a4d08a594eb15446756ea905b0036ae6ffe6b8374e85eb49348 +b50e9018d3c4e05ba9b28b74b6634043f622d06aa8123da7cd0bc482b3131912149214d51bdfd887484422e143c3c1c0 +ab980a2f5317dfcb92baa4e2b3eb64a9ac2a755da6c11094d57e781ae5cf43e351824f1dd3abb4c6df75065b3784210b +aaabb009dfcb0bae65a0aee26ed74872c226965c52a6ed0998209e020a9ee806297dba4b15845cf61e1a514de5d125db +a1fe78f67000ebb6e90fe33e1a9dd5489be6e15fedb93b2a37a961932b77137fe85d46e89a132ecf7bcfb7aa95e16757 +85bc6e7d660180de2803d87b19ed719d3f195ea0a92baf9bfff6113c743f4237f51355b048549913e95be8ddf237864d +87a167968c4973105710e6d24ad550302ee47fe1f5079d0f9f9d49f829b9f5c1cd65d832d10fe63533e9ad1fa0ad20f5 +b2ad1a7b95b8a89d58e0b05c8b04ae6b21b571d035ae56dc935f673d2813418e21a271cccaf9d03f0d6fa311f512d28c +8268e555319992d5ac50cb457516bd80c69888d4afa5795fcc693d48a297034f51e79f877487b6f7219cfdd34f373e14 +b235411f1f6d89de3898642f9f110811e82b04ad7e960d1dd66ec7a9bf21de60e00cfabcd3004f3b5c4f89f5d9c7422a +b6963effcfe883f7ed782a3df3c40edd70f54ceca551859bcccb5d3e28fd2c1fcbdd7acc7af24a104687fd02b53c704d +862645c944e1e2909b941578cc5071afd7353fed1c2c99517e2de7573037704ef5d35accf6ec79b8269da27564209d50 
+90f585eeb1a053e2f18c1280c9d6a561c0bc510b5f43cd68370ed6daac4b3749852b66c371397b6a7c1ece05ee5906c9 +876d9a3686feb79ce781e87ac3e3fbeef747b6ab031285e808c8a73f73f55b44507850dcaa745c0791d2cae8ad61d74e +a7ecc3b8c10de41a7bd9527228a0d3b695a651a5b5cb552a3664a887077d39ee60e649aecd68ed630da6288d9c3074ad +83529f1f2b4dc731ea05c1ee602fa2e4c3eebe2f963f3625959ba47657be30716d64e05e8b7e645a98bf71c237d9c189 +834ca6b14428c30a4bc8d5a795596820af6f3606f85bee9f3008f3fb94b3adffa968d21a29e2588d7a473d8b5d3a8b42 +b8d08cd8b73430984fd16e8db0525ae2b76253c92cccd7b3470add4d12d082eafb55a72bde04870924d0bdaf61f76c5d +96ef32df669690c2391f82136fc720231e4a185c90ba79eef7beaadedf7fbeb56ed264825564bdc7da01829b47f4aa88 +93d637b2f04d71891a80a1ee93fd9c9046d671bc4c15c4e597cfcc36f4ae85a7efc111359628965fd10d36c39129b160 +89f28dd3f7bc43749d0e3750c136385d4ffaf2c40354d3be38341416d755de7886d8108d83721b36f99feb3bccd73c88 +ac6392e274659f4c293e5cb19859828f101959c4c0939920a8dfed0e2df24a0cbf89a7aa983e947318c58791c893928e +83b2d4ce42c2fa0f672cd911365d1f1a3e19f1c38f32bedc82820ad665d83ae5fac4068e4eca6907bd116898966fed92 +b5e0144d6e59a9d178d4ee9f8c5dba18d22747fcdf8dc4d96d4596a6e048e384cd1e211065f34109c9ed6b96010d37e5 +b1a65e6b38c9e84b3937404d5b86c803c2dac2b369a97cbf532cfdd9478ee7972cf42677296ad23a094da748d910bc48 +849d7f012df85c4c881b4d5c5859ab3fb12407b3258799cfc2cb0a48ae07305923d7c984ff168b3e7166698368a0653d +84d9b7ee22bf4e779c5b1dd5f2d378ef74878899e9dbb475dfdcd30c2d13460f97f71c2e142c4442160b467a84f1c57d +964e497ef289fac7e67673a6cb0e6f0462cd27fc417479ecb5eb882e83be594977fb0c15a360418886aece1aaf9f4828 +ae1226222098a38ce71f88ab72de6ededb2497e30580e7ae63d4829dcc9c093bdd486102b7a7441cb06253cf0df93772 +a72865b66d79009b759022e53b9eedbd647ff4b1aab5d98b188100d01fc6b5d8c02b80eb6f53dc686f1fdda47d4722b8 +93aa8d7d8400bdfa736521133c8485c973d6d989ec0a81db503074fe46957a3999880fd9e4e7f44de92adf6ac0abe99b +a75e5ab84399962ada1f9ebcfc29f64405a1b17cd0a983950d0595b17f66386393d95a5aa4c6c878408984141625141c +91b1e5e75f4b55ec2e8f922897537082a1414eedc2bc92608376a626d8752d5d94f22f0e78ea1970eb0e7969874ad203 +83bf9c308424ef4711bfa2324d722f550d95f37d7f7b4de0487ccf952b89d7219ca94e7fa25bee60309efefd9a0e4716 +a42060476c425ff7979456d3c5484bc205fb1ef2d7149554a4d483d48e2a19119f708c263e902943bcf20a47e6c7d605 +8170c45ea126e6367aa5f4a44b27f7489a5dd50202cb7c69f27a2bdf86d22cf6b00613b0080d75fca22439eeaaaa9707 +8e5a82da70617697e42c6b829e1889b550c9d481408fe4cf8dc9d01daccabdec01f9e1b8c27dc84902a615d539bf9bc6 +80606c51401d0bf5f2700ebce694c807ab1f7d668920bdcccef2775e0939472419a8f404567bd4f9355095517eb4d628 +a40314565d60d0ddf8995673e8c643b1baa77a143b3d29433263730a6871032260abc1320e95af8287b90aa316133da0 +a87e07e84435f9e8a51ce155cd3096aa4b20d18e493c9dcbc0ac997ac180f3a255bf68ccd8195f2564d35ec60551a628 +84d2ab98416643c457bf7ddd9f1aa82967ecea189db08f3558f56803fe7001693ed67ec6ca8574c81ec1293b84a7c542 +937c3b955889ceae77f28054ce53d75f33cfe3a04f28e049cea8b8ade2a0440d5e2e8c4f377e6c1ae2115d68cc95fc16 +885a911f16845fe587b15ce7cd18cc2a84295bf609732340f74e0f5275b698cffed3e9aa1440e19e6940a7fa8f24c89c +ad90059a50c399996aaa0a10a8f637b7bab0dd5d9100301f0159a2c816596da55c30b2568d1717705fd2826b117a42d6 +828de9ff1e095c189da1f1ee18009afe14613ac696025add6f4e330488e02d5f1a90be69edd9a17bfb3355a0ca77b525 +b7aedb8394064a58dd802be6457555c0cf7b94805ed00cc66f38449773f4b1865feaee3a6f166eb51b2123b89d853a4d +b09c564ff37ccea34e90f2d50a40919a94c2e10d4fa58ffeaed656f88f9f4ae712d51c751b1b8f443dc6c9506d442301 +b24882d66b2ebb0271ebb939c72308d81f653940e70d6f1bcaae352f829134aff7f37522cc42de9e7fe6243db2c4806f 
+8e6f8dd906e0d4eb8d883f527e926ad1d8156b500c4cfa27214450c8112267c319900de2443c87bed1e4bb4466297dd5 +ae42f4578e8d79b6aa2dca422ada767e63553a5ee913ff09cb18918116905b68f365720a1a8c54c62cce4475ba5cdd47 +ade639bcd5017ea83ec84689874175ed9835c91f4ec858039948010a50c2b62abc46b9aee66a26bf9387ab78f968b73e +8d310a57aeb123cc895ee2fd37edc3e36ce12743f1a794ad0e1a46d0f5e4c9a68b3f128719ed003e010f717ec8949f43 +8606c086fcf3e2f92c1b483f7e2a4d034f08aef1a9d5db9e8a598718e544b82544268a0a54dfed65b4d0e6027a901d47 +8ccd95dd673d8cfdfa5554c61bcdbe6bb5b026403a320856fe51571e7c59504fe1c035f2ad87d67827339d84c0e1a0c6 +955a7cb4afcf70f2eb78756fc3a82e85ab4330eb89a87117294809beb197d1d474001e25306e8ad71daab6928abf6d64 +ae6b44ec6294736ea853ddeb18fc00cce0ac63b38170ff0416a7825cd9a0450e2f2b340d27a7f2e9c5ac479b4cb8a5fe +a88ec3f12b7020dd593c54376597b056e70c772c0ec62c24c5bfd258b02f772161b66e5dcd95c0c0fceb23433df9ff23 +b4a83933b4de552dba45eedf3711f32714e58ae41d4dab8a6114daeb06e90a5a5732c70384150d04124ac6936ca9804b +b8b7c4fa549b0fa1dc9c1f0af0750d6573f1648767751882d41f0dd7e430e3934590757e1c8b436ac35381bdde808117 +ab598b911234a98cfde07234cfc0d2fddfc5cb9ea760212aa3e175a787ce012965c8fcfdf52d30347f5f1b79cf4a0f54 +a9d354f9dfbd1976e5921dd80cbb56b2e15df53ce099ecb4368eff416998130d7830209282aaf1d4354129845f47eb80 +8c889afff546c721969e4d8aae6e6716ad7c2e9c1914dd650e30419ee77d630efb54dfffb4ec4ff487687b1864bf5667 +94ed2fa79116c7c8c554dc306b1617834dd3eab58baf8f0d085132c4688ca4a6bd38420281283678b38970a3f02b9a94 +944fdc8f0516d22f1672193d183833d3e3b043e26807fb2123729a0216c299785b1c4e24b5aa56e9bbe74fa54d43e22a +a48521454a3e0c10a13d8e810fad9d0522c68eea841821a8e0e57811362f7064a8f9c50f79c780a02df7df8c277feaef +8f3d26670ab55e1bd63144e785203373b2b13b76cac305f0363e48a6339fac3364caa3fceb245527161fc2fac9890912 +b4d6fe71001cb4141f6d8174dd7586d617cfccb54471e1fbce30debc2b1dead62cab29565abb140b682811c6231acb03 +91dc8afc4934fcc53ef851462a055cc1c3c87d7d767e128806891738427606d2fbfa832664d2a7f95f8ffe2cf0c44dc6 +b297eb432c74071764272c1b1663547ba753e66bf026643bfc0e42a9c5cdfb05a88083ad67d6ddfe6ab290678c607b29 +b343d1df85be154faeb5b21741a5ac454ca93f70a0b83a98f5901d1be173a1b2969d43e646363c5d4975924e1912599e +b2d74a66e4dfc41128aee6a3f0ff1e5137a953ed7a2a0ab5a08d7ea75642f12bd150b965c8f786ad0caf55ef7c26be4f +a54141faa8dd9a567c3cd507e4fc9057535ffe352fa1e8a311538fe17e4a72df073fbf9371523e5390303db02321650e +8e229a58f1acc641202d2a7c7e120210b9924e048603b9f785a9787ad4688294140ef3f4508c8c332d2dedafff2485be +9523554c11d39b56e6a38b3b0fadb7a9a32a73c55e455efdcfda923aff1e9f457d1b7cbc859b5ecbb03094eae8b87d38 +a199ffdff1812aaea10cd21a02b3e7bf3d8e80e501aa20bb2105b5f4cb3d37265abcda4fd4c298d6c555e43fa34517f8 +97f1285229b07f6f9acd84559afef5daad4320de633c9898b8068c6cb3b19b4468b4445607559ddf719f97d2410e2872 +a1dfff82908c90fc38ec7108c484735f104e6ce7f06097e1e80f6545702b6a0bc2a2706203cd85162edb7e9294fdedba +b12a706311c617d6c19e964e296072afce520c2711086b827cff43a18e26577e103434c0086d9d880c709df53947b48c +88503a6f48cef2f5cd3efa96a5aacc85dc3712a3b9abbb720a2cff582a6ea3c2afc49288b6832c8599f894950843ac11 +83ed63e38dfbe062fe8c7e6bc2eeb5a116f1cc505c6b038990038de6051281f9062e761ea882906ccff69c9c5b8a4a25 +911090d5d0231dde1189408dca939daddcb69a812ac408d1326060f0220781bcc131c9229e6015540f529d9fb33d9b0a +8a8352f1d9e5c7e80276e4448f997d420d5a7e0e2d5be58ae4106f47f867d1caa478b2e714d9c3263e93e5cc4c7be08b +9362f1ea9995f9b3850ebb7c8d5bf95927ab5ea25ee00e85d7456b3bf54459798b1fffde049d445c0d0587b0ab0a1694 +8859502b391273f4a00b6c0e87e5cdae676b7baf6c402f12b3360db6a5dfb4931ece4da0e1e4d98c7a71c3d01a183a9b 
+a9a5edf474120f9bbec9485d8b1e6f83be68b10de3d765219b0bf3e5d2840e478f1fb2bf806d78a8b8ad22ec50cf7555 +82c75daf983b06e49f0d75a042dfaae8cc92af050293d9059d6e8b01ca3ab2597e7adfc1159ed805513488944e739fa5 +a5cf240f04a9bfa65b811702c923d209e01f9535e217fa55ae3e0d1eb3257d6749e5587e727091e860609d1df29a1305 +95608ab8ade1c9fb814bad78d9cc99a36ad3e9562d5319830e4611ceea508ef76be04639294be9062f938667e33bce6e +8e44181f35c38b02133473de15560ae6588ac744cfdaf5cdfc34f30ca8e5ff6c85eb67dddc1c7d764f96ed7717c89f06 +8007b6ddece0646b7e9b694931a6a59e65a5660c723ebdffb036cf3eb4564177725b1e858ed8bc8561220e9352f23166 +a2d9d10fa3879de69c2a5325f31d36e26a7fb789dc3058ee12e6ccdda3394b8b33f6287ba1699fce7989d81f51390465 +81993d0806f877ca59d7ffa97bd9b90c4ebf16455ea44b9fe894323c8de036c5cc64eacf3f53b51461f18fa701a5860d +a20030f457874d903b2940ec32fa482410efecb8a20e93f7406fc55ab444e6c93fa46561786e40e9bf1e3c7d5d130bc8 +80c72d4985346ac71a231e7bbbb3e4a91bf50142af0927e8eb86069303eb4ce7fca1aa5b919d5efc82f2f09b41949acb +91b857d2f47f1408494940281127ba4b9ac93525788e700889aa43402eedea002e70eded017f5f5263741ed3ee53a36c +97445d007f08e285ea7f4d25e34890e955dac97448f87d8baa408e826763c06cbd58dd26416ba038d6c28f55bcea2d3a +a409c89526c2886f6a6439e2cd477351fc7f886d1a48acc221d628e11895a4eedd426112a368a0dbd02440cd577880a8 +a2c6adc7866535f6ffc29e00be4a20fa301357e1b86dff6df5f8b395ad9fb1cdc981ff3f101a1d66672b9b22bd94ec0f +8887fc53ffc45e4335778325463b3242190f65ae5d086c294a1dd587f62dd0d6dc57ca0c784bf1acaa5bbba996af201c +9731d3261a7a0e8c7d2b11886cd7c0b6bb1f5c57816944cc146caa518565034cea250eeee44ddffaeb6e818c6b519f4d +afe91c706efb9ee9e9c871e46abde63573baa8b2ea2b61e426cd70d25de3cc8b46d94c142749094287a71f4dfadd3507 +ae7bdf6ecc4fc0d8d8a7fa7159aae063d035f96ca5a06b6438b6562a4eee2b48d9024dbe0a54cfd075eac39b7a517f2b +a382e5205bfa21a6259f42e9ebc11406b5da2aad47f7a722212fdd6fef39117dd158a9991ff95e82efa0826625168a1c +862760c80bf44c2d41c2a9a15c887889eaeea32acc894f92167fb6f72593377c228499f445ccb59794415597f038ac9e +b4e96595a91a611c4563d09f29a136a4c04f07be74dd71a6bbabc836617ecb95494e48971a8229f980b2189fd108d2e5 +b5e7200357317c36244c2e902de660d3c86774f7da348aca126e2fc2e2ba765fa0facd29eebcb3db3d306260e91a6739 +a64c7133156afee0613701189c37c1362e2b4414f7e99408e66370680c554de67832c30c211c2c678dab5cfcdcecb3f7 +88f4cb67b1db497a91a0823ee3541378133eb98777842d73e43ab99efe8aa52fa02dfb611c1691be23684618394988d6 +89a9382a147d7387d0ff9516ee0c75cd1f8ee23333f4a2c9693d1a8cbe03680bc5b10c43c238c2190db746cac409bf39 +ad510bcc067373d40b05a830bf96fac5487de1ad5b708a13f62484c09b00fba6c5b00b981004e5ab3f28e55c9a5bce26 +8384156d7117675547279ad40dc6bf81e8f9a57b2d8cfebeea6b9cd1d8534dc0cf704068bc3ba0815010cd8731d93932 +a818fb76e53165b2f86c7f2317d64cf5e45f48405a34560983cd88bfbd48369e258ce2952233a8ce09c464e07afcade6 +ab19a4ed90527e30796064634b66cdc023bc5966e2c282468f5abef7879fc52986d5bb873a796b077d10e7b374b60309 +a17dafe2484d633fe295f8533662631b0bb93cdb4e7cd6115271f20336f602f7f8b073983cd23115093c7f9891c4eef5 +804acbc149d0334c0b505a8b04f99c455a01592a12f64d1ec3b82b2f053ccc4107e47f418f813d6f400940c7c8700a4a +965e097a825d8511d095b247554ec736bcb3701ead3ba785bd425cbabd56f4b989764e0965a437fa63e7e16efd991fc0 +b6701675ca27d7a4084f06f89bd61a250b4a292ee0521b2a857c88c32b75f2a70b97f98abce563a25d57555b631844e0 +abbdf65fcbdf7d6551ccd8d6e5edc556f1ecd275ccd87ee2bda8ea577c74615f725aa66e0911e76661a77f5278e0c2b9 +ab715ae372c900239a0758a3524e42063afc605b8fb72f884dc82ab9b0ff16715f3fb2fd06f20f15f9e454f73a34e668 +b45f41ea1d25a90af80a8a67c45dea881775fed000538a15edc72e64c7aa435a5e4375dcdedc5c652397c02b0bc61b16 
+86f7be9252f8ed9078e642c31a70a09639899f7ffcd7faaf1a039fec8f37e1fa318fba0ed1097f54fc55d79900265478 +a30e5ed4277dd94007d58d5a3dc2f8d3e729d14d33a83d23c44ddfc31c6eac3c6fe5eb13b5b4be81b6230cfd13517163 +87e723d916f5fcda13fab337af80354e8efe6b1c09ae5a8ceeb52df45bfca618eb4bec95fefef3404671fb21e80bf9db +a521b8a04dc3abd3e9e0454b9a395b3638e5394dc2d60e97fda61b0a1880d1d73a64a4633f3d7acbd379bde113240d03 +851686c79c5403d5f05fbaac4959fcbfdfb51151bec55e10481b3c16e3be019e449907ae782ca154f76a805543d5755d +8ec1929e746b6c62b0c3fdd8f4e255e5c707e6e0d8d57ff9e409ae2dd6e76fdb50af923749992cf92d1b5f2f770bafbc +9175f7b6820d47205c9e44f8c684833e1e81da46c1fdf918a4dcafbc3231173f68370d442a20e45f8902bcab76a4e259 +b4f66c698115333b5ac00c9fe09aa9e1e9c943fbb4cce09c7d8a6ed4f030e5d97b48e944fd6d3e69ac70f1ae49d35332 +b958878b875eead61a4416a4597b1c567ddbb1eaaa971033f4a656f01a277822c1f4ea3972045156c2a5a28d159f5ddf +8188de8ad5258024d0280137a40909d24748137ac7c045dddd2bc794eac8edd5850b9d38f568fa8174b2c0593bb57e96 +91152c7bafce7a0358152221081bc065796fa4736bfc7d78076a0a6845287cde2ee2a2c9b96f500297c0a00410634888 +a5328ab939a2d3bd4c21e5f3894c02986b6590ad551c7734be3f4e70380eb7bc19629e9031b886ce3b4074ee4edee63a +97c4d49db40e266bcedaacb55edca4e1ebf50294679b271f3a2332c841705089b5ba96ef2064040fa56c36bb1375a8d9 +85cf0514f340f9d865b32415710d7451b9d50342dbf2c99a91a502a9691c24cd3403cb20d84809101cd534408ddf74e8 +950c3d167f59f03f803dcba3f34fe841d40adc31e5be7eefff2103d84e77a7cbe4f14bd9c3dfa51cde71feb3468a9c00 +96a69624e29c0fde3b92caf75a63ac0f3921e483f52e398652f27a1ec4e3cc3202f17af1f66224731bc736a25638d3e4 +aeac4170cf4b967227f66212f25edc76157eb4fb44c84190b520ecc2946470c37da505790e225fd1b0682bef7fc12657 +a94146a04e3662c50c2580ae1dba969cbb3fb0f43a038729c9e8be6ed45860b2c7de74f248dfa50ccdbe2ecaf3f2b201 +917b8e2880e85b8db723631c539992ec42536146e7091d4a3f87d37f051b5da934d84393523814f19962c78e6cb12ef8 +931f140ff8f7de79e399f5cd8503558d566b5c2ab41671724dd38aed08dd378210f01ac8fa9911f3047993dbc10cf8c4 +859eb9b560bc36273694f8ae1a70d25e7f206013597c4855a11328162ba1254bb736f1ae41240c8ec8dea8db035e08f2 +b4ad2cb2c3a3e6ab1e174f2dbfb1787a8544f3c9109215aa6d33265ef269455e3cde9858738b4fe04711a9cf9050e7d4 +8a3b342b87b19c0cdb866afff60317e722013c02dee458ba71e7123edc8b5a9f308c533b9074c7dd0d684948467502d1 +89185ac5cc5ea8f10a1f2a3eb968bb5867376d3cff98ef7560b9a0060206c4046ff7001be10b9e4d7ad0836178eba7e4 +845f48301f25868f6d0f55b678eab1f8458e3321137dba02b4cfbb782cbc09f736a7585bf62f485e06a4e205b54a10b7 +931a6c523d4a66b51efadb7eefadba15bf639d52d1df5026d81fd1734e7f8d5b51b3f815f4370b618747e3e8eb19699c +8eb3a64fa83dcd8dd2258942aea3f11e9cf8207f2fdd7617507c6dae5ea603f9c89f19d1a75d56eaa74305a1284ce047 +912a5050ed6058221d780235fb0233207c546236316176a104a9761bc332323cf03786dbac196d80a9084790506e0a88 +945fe10ec8dc5e51aa6f8ba7dace6f489449810f664484e572bfe30c2fe6b64229f3c8801e2eb1a9cb92ff3c4428cdf7 +b62383bf99c7822efd659e3ef667efee67956c5150aea57e412cbd6cd470807dfaad65c857fada374c82fcfca2516ad1 +a727a31c45b2970d08a37e169ea578c21484dde15cb11f9c94eaaf3736652619ce9d3a44e7431d50b0e75b658ebbc1da +97bf54ea9b84b82e4616027bd903ef6152439f1c6a8e1bae6db1d10fdf016af2cac10ff539845833dfd1ddad1403aa8c +a08cf36437e010e59b2057aedb7192e04b16f1cc66382cdef3490b7ad1544ae51f03e87cba0fe43a275841c247a2a0cf +acafab9fa28c1a607df2246490b630ddda1ecf0885ad24c2ecb2c2c1b7b9c7de8066714bf5b9b25f61981d08576789ec +851f0375128d2782586223467d0a595f4c5baa79616622a32f7d6ce1f08af06f8a109bd6527f88d93367dba17be661e8 +a2f1187c2a7cbf776653ff834ed703dd32e68eaf36f0700709be929f4c0ce5fa1d9930d1e3ea2aa01c7a16239e66cb33 
+b3721f4a5d24ca112f020cb3f849543bf0e7f84b470fb00126ae80aaaa6f2c208d8359cd82ad9fbafd3ef2ac70656fb2 +98773ac3ce9528c73cfd8e7b95976ce597f67e146357642ac4fb6cb35046f3f39cf6c4a7b5af5c7740dda358aa0d2d08 +92c883a5d820541692af75be1b25dd4a50a4b91f39f367a551a7d5ad6065a26b60d68221a01e4950559717b559c2626a +b82e46dd25fd1234dad26fbcd8bb5177d7b87d79d362ffb9c2f6a5c16eb2ff324d135996fcd6274d919634597869d772 +82a53ed356ced5e94d77ee2a7f6e63f2ad8240aff2d17c5012cf5d1f18512c88c24793339b565dfbb659bd7c48dcbcd2 +84d20c7859b35a1cae1ff2b486d50822f9e6858b6a1f089ce4c598970e63e7c0f7dfbcb3337845e897a9dedf9d449dd3 +974892e5cf5ee809e9353d00e9cd5253d04826a8989d30cf488528c5dcdcad7650e23b4d228c3eb81f6647d2035a9e02 +b2327854910dbf3d97fe668da5fc507e179c4bc941f39bdd62e8b6035f004449c467240f656417e501f32dee109f0365 +88888f73475613d45d0b441276b1dd55835b69adfb27e26c4186936dae047b85478cca56be8dc06107b89a28f3bbb707 +836ba22e40511feff81a5dace3df54e2c822b55e66874dd1a73929994ec29909ffc2a8e39bfc2d16e316b621eb4a5ec6 +a754cedcccf4165a8d998f326f3f37d2989f92ca36d9da066a153c4aab5a62bb0011896bcbf90f14c18e00488d4123bd +86c26fa9584314292c4b7d6fe315f65dadd0f811c699e6e45c95a7a4ea4886c57dc5417b67edd78e597d037c7689568e +b205589648aa49ef56637712490e6867aa3b85b2b31e91437a249fb51bdb31401bff57b865c9e27293b30014b4604246 +afab0843ede582e5a1898ee266235066b94ea378884eaf34919ceaacc0e2738e1074b6ed41e0a1dd9711563e24f0215d +996ed65fbcab7611eada5bd0fd592d3e44705098b8b1dfba6dcdbdcfa1382fe893fa55270a0df0be0e1938bd71ab997c +881bc448a5ef8c3756b67ecb1a378a5792525d0a5adb26cc22a36c5df69e14925f67c9cb747a2f7e5f86ba1435509d7c +b219303c02c9015c6a9a737b35fb38578ab6b85194950a0695f7d521206e1e12956cd010d4d6c3bc3fafd6415845d5d1 +91748829bbd005d2ec37fc36fee97adaccb015208b74d2f89faa2e4295679f7685298f6a94b42d93c75ca9d256487427 +a41d6fd33b9864ebc404d10a07b82ba9d733e904875f75526d9a1f1c1c08b27160dcdb9023c5d99b8ff8a3461d57281f +b68978d39c97d34f2b2fea61174e05e05e6e49cde587e818b584201cf59b7096cf1807b68f315119c6db8d6110b28a9f +b64e66cec798022d64ce52477475d27ea7340817fe7f570617f58c3a9c74071d7ea6b54743d4f520b62aecad9a3a6620 +87b2b9e1c1786b7824f239a857024780a1457e51c64599b858118885833fb87a17d408bc09dcc0607d15ec1e53683a74 +9814799bac07dab4f0c934cc3c051676ca13abd49cf8d4739864e5bb9f2a8474897695113f49239f28832a8658332846 +806931a1526a843a9c2045943d616a8102b02b1f219535a1f1fbda659a1244f1bfead52ca7f1851ff8a97169b91c9ec0 +b8678249595a9641c6404c35f89745b93d8e7b34d9d44da933a1b2f1606972624c5108f1c04eb42e454d0509f441ed9e +81426714851741045a4332eb32b6dfe6422a4a2e75b094fb7c3f37da85648c47ee8af1e54ba26f4e1b57ebe32d0e8392 +b7a1875ea3f119fe0429fd9068548f65cf2869f8519dbbce0b143e66127cb618c81d7578e8391d676b2f3963e9d87f43 +872220a803ea0c6294cdc55aceea42cfacfd7a482982bcb90c0361c351a900c46736a890609cd78f02fb5c8cc21fa04b +974f0380197b68205ff4bb2c9efe5626add52c0ad9441d7b83e6e59ddb2ed93ad4e9bbdbf33b3e0a206ed97e114ea0f2 +a840f2d9a74fca343aedb32ac970a30cbb38991f010d015dc76eb38c5bb0bfe97dd8951de925a692057262e28f2b4e9d +b0913c3ce61f12f9fdc4be3366ed514c3efc438f82fc58c4de60fe76098fbc033a580ec6e4531b9799611c89a8063a66 +a0180d533eee93b070dac618be1496f653a9a0e4e3455b58752bf1703ec68d0be33ec0b786f9431ef4208574b0ad316e +a4a6b871bc95d3aa57bed90e14a0a1dda6e7b92b7ae50e364593ce6773fbf736672b1f4c44e383af4c3cc33e017a545a +a3f44cf19fe52bacc4f911cab435a9accbe137bdbe05d34bdd8951531eb20b41d17e3540e8d81e6b3eea92c744562ee5 +ae6b6d0ff3b30ff0b7f9984ef741cba27ffb70d558de78b897199d586cf60622ec2d8a9d841712fe719cf0f97628842c +87abf72f98c81d6d3a57ab1e224fe4b502ab0d8090d8abc71791271550b721c220d4e2e7da3be94a20c0e63d98e39a50 
+b2f73ebdfe7133af57353052f4599776e16862905e64d97e1020c4bb84132e476d1ab79a9fb71611410f3f9d56c95433
+ae1a928253af2b210d31e1b64c765fcbd20a96b8d53823a6b9b6e7fc62249abf4a66c6a6aedb0b687e7384af9a845e0d
+99c54398627833ca1435718154de171a47c709e4d5c58589fdabe62e72f2a7a11ae561bc31d7cbe92df4aff23e08cd0e
+8a1310bbf1a31fae18189479f470977d324dec6518a5d374ab2ffcc8f64412fb765df57d2ddf69b9a6efaeb2b4c723b8
+898312c6c0d3d3438229b19a8a233eca8f62f680c2897f4dd9bbcacde32c5996d56ac0e63e3e9360158761185491ce93
+81b3f965815b97bc6988d945496a51e4a4d8582679c22d138f3d3bd467ed1f59545da2d66e7b4c2e0373628ae2682686
+b9aca91c6e6f4199beb6976b28e0e35e36e8752618468d436b1cf00d8d23538d0747920e5b2c31f71e34dfe4d5c86a0d
+b908f4aa18293295b8cacfda8f3ea731bc791074902c554764c603ab9a1de1bbc72654fd826bffc632d95ce9f79c27d9
+a7316ae1baf4b1196961d53be7fe36535499287aba9bc5f3bed4323039b4121b65bb0bd15a14c1b9cd8b65ede3566da2
+815e39208f205c5fac25ac9988c14a62ab01657c7737a24472d17b0e765644bc2cbb7ff1e8ea169b8b0b17b6996c4704
+89a451d2b740cdaa83ccaa9efb4d0ff5822140783979a4fee89eda68329a08c018a75d58bd9325bdc648b0d08340b944
+8cd08f768438c76bae6bee1809dd7be38ec42e49eb6a4d6862db7698f338bf6b4b409088e4f3d1c5bee430295b12a71f
+a4bd8c312103a4bfeb25b0cfffec7a1c15e6e6513b35af685286333c1dce818ffeb52826f2f5bada6b67d109c4ab709e
+93afbef5382d89fa539ca527f3e9b4a8e27ab69fd5d5023962cc6d8932b33cb4dfc5f14343e1a3749bfd5e100c9924e5
+8d8e69d046992ec9ff14f21840809166cae8e0e9e7c8f14fb29daf163b05abe6611daa4010960e1141c5ab24373fb58e
+96f8e72e96ba673c9265e9cc312f6b9c3b931745fc62d2444d59404bb08e5fb02ddb60715181feb9971cbd954526a616
+8d444c2b8e4d0baadb79e3147a2ee20f1bfe30d72eb9a02f15d632185fb8f4e8c3116066f7de1ebfe38577aaccacb927
+971410c0b10e3698f4f64148b3d2148fc6a4a22217fcf4253583530a9d6fbec77e2cf6f7bb5e819120a29c44653de3fc
+99e7e1857bd5ee57007b7b99494b1f1c6bf1b0abd70c054770427d59a3c48eda71b7de7a0d7fcf6084a454469a439b41
+8c8a4cd864894f7a870f35b242b01d17133cb5dfdf2e8007cd5f1753decc0d1fd41be04e1e724df89f1d727e760fdb15
+890a24328bdeaaadf901b120497d1efa17d798f6f4406661e46ecdc64951f9d123d724ab1b2b49e0e9a10d532dd6f06c
+a7cbe1f42981c9518608569a133b0b449e9d67c742d62f0d3358112c97e65ee3f08ec0ff4894ce538b64e134d168e5c8
+87c976dea77b3b750c3a50847f25b851af95afbaad635f9bb9f7a6ba8f0c4faeb099dd777cf7eac41072a526474cb594
+9882aa5e9bcc4ea2dd3de4bb5a0878a672bea924b50c58ae077563b6df0268910a60e969d3da1694ae7394ad0d9acd3d
+90d35ce677327c461fb5dcb032202e851af1d205e9d21a34ed2b95635f13f8fb8dfa470ea202ccfa4b08140d0cf1d636
+b3b4cbb521cce2b681e45e30a4d22078267e97ccdbdc611b2c9719705650dd87e0ca6e80cf2e174f8f8160be94232c36
+95892b00478e6b27ed09efe23a2092c08e691b4120336109d51e24efbf8aba31d59abf3cf55c0cdab1c210670b9743ba
+8643018957fb8ef752673ad73102d0b928796c6496e22f47b6454c9ed5df784306f4908641ae23695db46ebfcfb0b62b
+b166ce57669bf0543019ecf832d85164c551c3a3a66c05b17874bccd5d0ae87245925d6f8edc62ac13dbd5db265823a2
+89fb4800ce4b6c5900d58f1a216ad77a170ea186f3aa0e355840aeedcf374e92a15ae442800c9d60334544be020b17a4
+8c65e586215a97bf11ffc591bce5147b4e20750e82486cc868070c7736c3de697debc1f335674aef24b7afdd41922d93
+90f68ce0c97d2661d3df1040ce9c4fa106661a719e97c7b2d7c96f0a958930c57d6b78d823a2d41910261ae1f10e7b0e
+adda85e1287371ccbe752aa2a3c1d5285595027ba4a47b67baf7b105a22fb8548fa2b5b3eb93ca6850ecc3995f76d3dd
+b26535d218f48d6c846828f028c5b733594ce01186e22e412dd4f4a45b3d87d2ac1bfe5d54c987e4e8aaddeb86366d7d
+a081bd86962ea3d4fd13df6481f3aeaabdd7ceae66f7bbb913e601131f95d016cf147d045253d28457a28b56f15643c8
+b3d852cef4c8b4c7a694edbf6f0e103f3ae7f046a45945c77a1a85ec8dad3423636a89058fafc6628aabff4dbb95c2ba
+b424ffc94e06e6addc90a6324e0482814229b5902e2a266d0c2d716e40651b952bc9f00d7dad9b6050377a70a72c7f24
+b2cafd908cae0ca22eaa2d9a96175744897a20eb7b0a6d43b0098cb1c69e3cb55373888201e4ed32816655eb7d8a3dd7
+b61177ecf1ae9d7e7852d98cbf6080d9f1e33c90f2436720b4ea4690437e8c7850c3754768fc1312cb4e838d855c5ccc
+81b486644e1ae22cf0ba3a37e1df34dc186c82a99ab35ad6f475c37babdea574ddfbe5811d4aa020581292a793d66bd2
+97ae848a823ea7a99f91834e537fb47208f616c08fe32c8f8fe06bd35c9b638698c513265d0b4de9e572a2f9692b98e2
+81b8fef4ea5d399c65e78f40e47c559ada86d890777c549ce362e7ab81b3bfb00d5ff4ae4ee30fd7bda7ee90d28f85d8
+aada6912cc748923ea40bf01922c06c84bc81b2ab0bb3664a0579b646f03d47ce88de733ac7f2cb9be4a8200584cdb71
+89b48b9c79332f8f58eac9100ada5bb7decdc4b1555c5d383e2c1ce447efb0ebdff9c50bb52bc3042107f33a61ab2520
+a32ecca8b870b2b6e9d10b5c1d8f925b3d629d271febad65abed316262bb283c60cade0e91047fbd0fac53ac6db372b9
+b829cd1f13409e3573a8e109c9541b0a9546e98b6c879a11152b5564477ada4d8cb4b3079040e05a5cb63d75ef11eaab
+91f3b100baa19e960b170fe9e03b799faac5b9c6f305c56115940bf81f6e64dcb9cda77e8de70ed73a21c0e8a74acc58
+b25b5e872c84065aee04822bbcb4f3bdff57fbd7cea314c383765cc387786c17de3d5bb3de3ae3314bdede14542bfac6
+a89bea9eca1f5a17a3efccfa4987d8e5366b0dba70ef1fef43aaea83c528428d1498c8b056ac27f16e8946ee93f7028e
+818a1f7b0b8b06ea0514d6b4a0296da4f69cb18ac8e48c5579e6ba2880b06215fcbe81672566b8b94fcc3c0cadecb191
+98dd6e6b4b4d63d9aa7464a2be08ae8babac4da7716a3f109340bc9187d59c6ca0c88e6877a67c65096f64a3ced22a4b
+a2069c5bac4f6590042aefb37570cc20908b0df9d0130180f565ed8a53b4ea476a274de993561fb4d009f529fe7aa1cd
+860b7ec2410f033a7b0c5ca08f88a0ad29f951a5ebd5383408a84367e92f1bd33bee3b87adef2466b7e33b47daabf30e
+a408855a8414102c3cb49f47dda104edf0887e414723da59b6b6537ada7433529f6a4d1a4ad4fe311c279213cdd59356
+8ca0d81dcb43b89a4c6742747d29598ede83a185a8301d78c6e7f1c02c938441360a1ab62a5e571e3eb16fe17131cbc0
+af7875a495cb4201cdb26e23b7c76492f47f8dd4c81251de2397d73d4c8d5f419cdbad69ba88ef0dc3552e460dbcd22e
+80e901e433dca34f3d386f39b975e97f7fc16c7f692808221fb2ee60c1aaa8db079cc48c7d72fd548aaf8dde8d0b8f05
+b6062319e13926416e57a0ffc65668bfa667e708a4e3f5cb26d8a6a32072f5b790d628052d5946c5068dd17cf4a81df8
+90094b569e8975f8799863798912dbf89b12d2c2d62b3e5fac7efc245436fcd33af23b8c509ae28c6591d3f020966e06
+a504f72d3d06a0c9b188a1035c7c6d80047451c378b6c5b2ffa1f8cecdb64871cb6440afb296974c0a528e5e563061a1
+959061c4924e133a419e76e000e7c62204093576ff733ce0b8ae656ec6045ef94c5a1f3c934fb76fa9188c5eb397a548
+a8b9d0b58de38cb86cb88fb039a7c4c0c79e9f07f03954af29013baa18fc2633883f8f9ca847209c61a8da378f9075d3
+b16d8341da4ff003ed6d1bbdb3be4e35654a77277341fe604b4c4e4a1cb95e61362094fb3d20ab8482ea14661c8b9852
+8ea4ca202e3aed58081a208a74b912d1a17f7b99a9aa836cfaa689a4a6aa9d9fbfe48425cad53b972000f23940db4c5c
+96a372f55e9a25652db144ec077f17acc1be6aa8b4891e408f1909100cd62644a1c0296a3ddc38cd63ef46bef4e08462
+87df40018ab3a47c3782e053dbd020f199fda791f3109253334a71be4159f893a197a494de8f94d6f09efa5811a99977
+aff82d2ea6b3ad28d0ca1999a4b390641d727689dc2df6829a53e57d4f6418196f63a18495caf19d31fc23fdff26d5e2
+9091053c4a18a22d13ad309313b6d2133a96df10fe167f96ec367f9b8c789ecca7667f47d486fc5ba8531323b9f035ac
+a4842090515a1faccc3d8cadbb234b7024254eba5fdfcef0d15265c7cec9dc8727c496ad4e46565d1f08504c77e511d2
+b1d8a37b1a97883d5804d0d2adaa8dbf0c2d334ef4b5095170b19613fb05e9c648484093d0c70d545cf9b043b449c707
+b1ea40f3dd1c3d437072f8adf02c32024f32488dd59389d1c3dfe78aca3df0bab7767f6ded5943cc10f50555da6092f5
+ad219c6a8149f10391452892b65a3268743baa7402736f810a35d56cdfed83d2172b03f15c205f0dc5446baf855907a5
+afe44c3e1373df9fc53a440807fa6af8ebc53f705e8ee44a162891684970b04fb55d60bc2595626b020532cb455ee868
+859ae154b017eae9be9da5c02d151de747cc23094d8f96d5db7d397e529b12fb55666f55e846e2bbe5e6f5b59c9d8b05
+8aa01354697de23e890fe54869cd3ec371f1be32064616ca3a556d3019541ba8e00d683f1396ca08e48988f7f7df5de4
+b8f682487460b9d825302c40a7d6dd0353ff43bf24cd8807cdfa46c043e3f5a7db182b27a8350b28e91888802a015af4
+b6d4d6c3ac40f8976b50be271cf64539eb66dc5d5b7cec06804dfe486d1e386037b01271cf81ef96dba5ea98a35a4b43
+9385a2fd1cd3549b0056af53f9e4a6c2dfcd229801ffda266610118ade9a568b33e75b6964e52fcc49c8e3b900e1e380
+98f4aa0e4ef039786cbd569536204e02b0b1338568d1d22bb5bc47b5e0633fb7ffe1da93eb9d825b40b9b7f291f84d51
+b7b3460cf706dc270a773c66d50b949dabad07075021d373c41fbb56228355324d120703e523ea3f345ef7249bfff99d
+81b826255f95201987513d7987cdc0ca0529524d0e043b315a47583136dbada23a114d50d885bb3f855fa8313eff801a
+afdc6c35161645a14b54f7b7a799910e2e07c8a5efe1827031a2eecd5d9263b3baa367fdd867360fabc41e85ab687e74
+817b361ce582153f2952f3042e235ee2d229e5a6b51c3d3da7bbe840b5c6ec2f01446125045848d15fd77dc46c8a8fe2
+aeb599265398af6e5613297d97d2b70222534590fcbd534d68b24a0289b6366ac8188b753f6fd1000ee73ef44f8fb7af
+a5a9e528b606557be64460c1ad302a43e741357827b92ddc50766a7e6287740fc23bd528d9faf23345ce8bff527d5bc7
+a8d3b3b438d5f75efaae6ce7b67c2212899ece5b5bdc9bac655e271fd1846ea8560e646fdbded3d9363eefe29473d80d
+984c7976d557e2f591e779c2885f5033da6f90d63a898d515b5da3adbffa526764cd8eb679b771573fdf7eed82c594ec
+8ac748689cc3280e064807e68e27e234609e3cc87cb011f172204e1865ad7fdc78bec1672bd6e6fddcf4e7902b0f38bf
+877bb392059540b1c8f45917254b8cc34fb7e423952bdc927e0a1622efec4113fa88988686b48134eb67ddebcb7c3ef4
+ac04b154ccd307ca20428091585e00121b61bae37b22d5d2a1565bc1134be3c81ccf3715fffebe90744164e5091b3d9a
+90745c04278c3a47ceea491d9dc70a21a99d52648149b1ab623b5396b7d968fd3c4d1a2d08fc5638e8790463e0cf934e
+80bf26ca7301e370f101cc69e7921e187cf5315b484fc80a872dec28bb65886569611a939958f4a3d2d3da4350011298
+87cbf4d6f0c06cc5f24e0f173a5f2f9bf2083a619dcce69a8347c1a6cd1d03325544610f2984eb87a13241e6ab9a22b7
+8909368817a515789ff4d19ed26afafa5729a24b303a368ea945a9287bc9facec9e1c8af19cbec8dab4acbb6a6ddf6c7
+ad8d2f82b08e0990dfd6b09fd54db3a30fd70aad218275550f173fd862347e1258a4716ca2bf4c40e4963850b2277eab
+a9467ceacf9337cae4f2c7eeb3e03752ac7d77692b07d5e5d75c438fbe7dc2029ff84f7759372a0ddfa953b4ec7e9e38
+a5feb7669e84b977cb1a50ff3a39c28f7ad1ecc33a893fdf1ddae7a0d8a4c5f6fbaff25cc56631b708af038a961f3b55
+8f2e1fa07963ba18db890b44c3b9ae7f8992b702a5148679df69e4d9d4b1c082b2bd2ae53f96a4fe24b54f3dc1588f17
+896778f35cbecb43f001277c306e38a9c637275101f1a09546f87378b10ccc025644bc650b3b6c36e4fd0c09fbb3df35
+91dc702778176a4d089dc65502d703752dd9a766f125ffef26bdc38fe4abcae07cdea14102c3448d10f8dd6c852ee720
+a5df3004cec6b68b937cadded0dd2f48bd3203a903a3e1c22498c1193f4567659ecaaf3deb7ed7cf43796da9188f5dc6
+b18b4c8ffcb8599c24d9851abf8ee43047cbd4c9074c9cfbf88376a170da4554978988f550afde8a45306ca32713c204
+8370bc38c84da04d236e3c5a6c063e1db6613dcc4b47239d23efdcb0cf86846955b60da3e50f17b17cd3f7e0c29302d9
+ab7d6bb6be10aa52ef43abbe90945e78e488561afb959dc2fe768f8fd660d267c7203a2b7bdfa1b44cd07898f4849e06
+965c96047d82d76ec2cfe5035fd58d483cd2cb7f65c728ab3049562c5d1943096d6a5014c05babc697d79c07907cf284
+9614f7006aef6f0478ebd37fbf17276fe48db877394590e348c724059f07c3d1da80d357120d3063cd2b2bc56c58d9d6
+819c7b2a1a4bb4915b434b40a4e86dd7863ea85177b47a759bc8ecd8017f78d643982e8a091ee9a9e582f2b0208725a5
+8e159a185b5790a3ed444b6daab45f430f72f4ac4026750cbd5c7cd7947b5e00f2b10eaaf5aadf8d23054c5b29245546
+b48cb6f6c0aaea04833e10d735b67607846158b6663da380ef01c5bca3c9d537611716867dc2259883e5bc9daed57473
+8b48ce8b5ab76b7d662c29d0f874f5eec178baf3f14221bffd5d20e952f54f3ed053182a486da1d1f400e0acef58f673
+b6fd3cba177bfbcb5e7ebb1e3c1967cad5848c09c615ba2a6c277908f8b1f4f1ac5f184c33f2a401e8bdafcaed48bb88
+abd8f44c4a447de8fde1c119f4fd43c75b4cc99de9c817a019d219d4b2ad2a73b60606c27e36e9856a86bf03e7fc861f
+af9f7e8b3e9e8599c7e355433c503a05171900a5754200520fd2afed072305be0e4aebb9764525d2c37a5a7eede72025
+a0960a58bd2681804edd7684793e3cbb0e20d1d4bd8721b192baf9aee97266be14c4ee8b3a3715845dca157ba2fb2c1d
+949a37213209adfbfa4e67c7bad591c128352efd9b881c1202cf526bf4f657140ef213acf0efeb827a0c51a1f18809c4
+9192fae84a2a256f69a5e4a968d673bebf14ea9a2c3953f69fe0416f7b0fafa5166f3e4588d281f00d6deac1b6ec08bc
+b1a249662f34a88d2798eae20c096268d19f1769d94879b8f1aa40a37b3764349b8e6ab970558436a88a5aa5c37e150d
+aea87086dcd6de0b92886b3da0813ff271a7107ab1a3cb7021b85172c1e816a84dbb1a8fdb47e8a8eb5e6fcddd5b919a
+a586b5078b3f113eec9f074430bcf9aabe4e82752e5b421c6e31d1c2a911512e34154bf8143b5197e820c5af42aa8ac7
+a6eda122e400a6600f025daa383685a10f72f62317a621698bd0106b331077b05ac1afc68ece7a2e285c54a366921a3c
+8875e9ba654ad7b1d57ede84e2b702600416d40f7475fe2df25dd1b95c0178a227ee187547898e5b9d1ce8ce9ebd15c9
+af2cb289f8c75f4ddae9e3ef9c1977fe4d4d513e411777b03b996f5baa372eb995b5ca96255fad9ace776168806ecc42
+8d24c465d26bd93290f45ef035bb6dde4530d9d7d051baf583b1f8b98e9886de262c88b5709084710cffa7c767b4c27d
+8cf35b1b28a7726645971805170392d522f5e7e6cb94157fe9c122a987051c1c90abe3c5bdb957ef97b1c45dd9bba05c
+93e2bbd82a3cb872cea663f9248b21d4541d981f3f8d5af80a43920db5194857f69e2884753f6ed03b6d748dbfb33620
+8b774b97657db654ebdafce3654d645f849203452e876e49dad7af562491cb6531bd056f51cb5b2e8f0a99e69bd8566b
+b5333c49d3e1c4c52f70f3a52f0ad77165bed6ad9dcbfaf1364e7a8a0f24570e85a218e4c2193f63d58a7dd975ceb7a5
+b4a34c443e4fdaab8e69fcda1fce5e72eaa50cf968f5d3d19084d049c5e005d63ab6e1d63dee038317da36f50ffb6b74
+824a224009c6848b92d6e1c96e77cb913fee098aaac810e2c39a0e64d5adb058e626d6a99be58593d921198edd48b19c
+a86f1fdd2e1ba11ebda82411b75536fc0c7d2cdb99424e0896d7db6cae0743ee9349ffa5bff8a8995e011337fa735a9d
+b406b5b89b8bed7221628b0b24eb23b91f548e9079a3abd18be2ed49baf38536a2c1ec61ab1ddc17928f14b006623e7b
+8a7ea88d1f7420e2aaf06ee90efa4af798e2ec7cd297aacd44141471ed500107fdd93bd43b6de540314ef576646a7535
+a7a8c071e68bbae9aca110394cf56daad89404dff3e91ea3440670cd3d0423b67905e32b1ba7218fd4f24d2f8bd86ce7
+b959830f152e4d31c357be1ded5782aed5d6970e823cf8809434cf4fddd364963bc7cfda15c8f6b53eda16ab20ca3451
+b59232c8396c418238807ce07e0d248ad2045289e032678b811cc52730f99b480eb76f6adf985e6d5e38331d4bb2b9d5
+a14092fddecc1df18847ab659f6cf7c8603769a4e96fbe386d8303b225cebbbe8f61d6ab3dca08e3ed027e7e39f2641f
+941cb0632acd395439f615c6b4b7da9ed5abf39700a8f6e6f3d3b87a58a1a7dbb2478a6c9ff1990637ada7f7d883f103
+951b8805ecb46c68101078847737e579206f2029e24b071bae6013e9dde8efa22bce28aa72c71708caf4e37f9789a803
+b2cbf22e53f6535fa950dd8de4aa6a85e72784dd1b800c7f31ec5030709d93595768748785ff2dd196fbedf3b53cd9d7
+8d84ea3a7eafb014b6bd6d57b02cab5ac3533aa7be4b86d2c5d53ce2d281304409071100d508ed276f09df81db9080ea
+a2204b60836cba8bf29acd33709e6424226ae4d789ef6b280df8a62e30d940bc9f958ff44b5590d12fa99fcde2a4a7a9
+86692c58214f326c70eb2aaf2d8b26eae66fb624f143a3c144fd00f0249e30e0c832733a7822fac05c8fe74293768ace
+b1cb3d64eb5b9ca0e01211128f990506fba602cd1417da02237205aa42879ae2a6457386da5f06434bcb757f745f701d
+b3eb4290a53d5ff9b4596e4854516f05283f2c9f616ec928a0934b81c61afc351835f7eca66704a18a8b6695571adb30
+b0bfb1d44b039d067d7e0e2621e7c4444a648bce4231a6245179a58cd99758ec8c9e3f261d0adb22f9f1551fceb13e4a
+a29320f71a9e23115672ea2b611764fe60df0374e0d3ff83237d78032e69c591a4bdec514e8b34f4b3aeb98181153081
+8a6abe9c8a048002b2ff34154a02c2f13fc6dbae928da47c77f3e5b553ea93d8f763821a6ead3c6069677870fdff7ff3
+b73ab66a62f427e1a5e315239a2e823e2a43550d245cff243c2799eb2e4701fabb7d5f9ce74a601b5ee65f6555dacf64
+b64858e98b9c10de8c9264b841b87e7396ba1da52f0f25029339ca1d13f7f9d97f4de008cfe12a1e27b0a6b0f2c9e1ab
+807d2440d1f79a03f7163f5669021f3518094881f190cb02922eb4e9b17312da5e729316fe7ba9bfffc21ed247b033cb
+a7f06458d47ebe932c2af053823433a8a06061c48f44314fad8c34846261c8c3f7f63d585a7930937327ad7d7ca31a6f
+82ac2215eba9352b37eb8980f03374f5e0a2f439c0508daa7a32cdce398dde2a600e65a36795a4f5cc95bbcf49b01936
+a1882c83a2f946d54d74a008eac4aed70664db969e6799b142e0d0465e5662ba0d224a1cc33be339438d69bdad446ff6
+8009776f7a34a3c8779e21511fa409b0c5a38e172d1331acc29a16114e002f5f2f001381adb5fb3427a100752d775114
+b24441019af4a0df2dc68e3a736f358da0fd930c288398a18bb5a8d9a1e98ea376395f19d8e03a5f020b83fcb709f1af
+ac72b4de3920c4f3c9b8ea90035cd7ed74d34b79e79aab392f057c3e992ebe79050cc1c6ccf87120e4162b29419147de
+973e75577cd2a131a0bd568fd44e43554ac5a9ea3bf10f02d1ad3ac6ce9dc7a8a7ea93aacf3325f7d252d094a0de1376
+98a114de2a86f62c86862de37c328bf6a7fccff4d45a124addbe0eb64debe365409fcb72ce763f2a75030e1ff4060c64
+aff753e1dd4707f1a359eaec06ebef1903242889a2cb705d59dd78a79eb5b894731f5a91547479506145ca5768877dec
+b856e4234858b5aa515de843e8bd4141c15a4cc02c51640e98a8aaa1e40344f1ff8ef7c3b913ea2ae7411713daa558d2
+863525eb2f8147a6d1d0d4304881795bfed348913cd7f38d815d929a426788b69e41f022dba5fdcaf56c85720e37fefe
+a14ad76b145a6de2e0f8d4f615288c1512701a7b3010eb8a95941a2171bc23561e9c643764a08c4599040a3b4f5e936a
+a18bfc66f6139dcb0485a193104fec2e7d52043837a4c0cadb95743e229712a05cf9ce4ccb482f36ff1ce021e04b574a
+991c8e6678077d6e5f5733267c1819d8f7594e3b2c468b86a5c6346495a50701b1b05967e9590c15cef2f72bc10a38f9
+a034e7f9b547b047c99b99a0dd45509b0ac520d09130519174611de5bcdb9998259e1543470b74dcd112d0305c058bad
+95ffe0d02317b5c6d5bfddbcec7f3fdfb257b26ad1783bb5634d983012e2ea1c6b9778009e1b6d10564198562f849ac0
+b3db442aa4adb33577583b2a4ad743f41efe0e1f87bfc66091d1d975333ffc00b4afc43057bcb88a7d68b0c9695d38dd
+ad2e97d10d7c53d231619e3f2e8155a27ea4f2fb3c0cecf5c7f14f4cfcdd21f62ea46d843b21df748b2892131633fed2
+905d7aad6d3b56bad48694b6b20b27e370ebca8b91d0821e48e2f9cad39910c26cc11c77c266894db3d470485a63ed11
+99bfadefca796ce6af04ede65ba5ef5bf683ff7e2852bb9c406fda77b95ef382289853dfe4d933525071e4cab8ce3936
+94d9905ed4ef92107d0adb9ea38f085a2a24b8f792108bec702d747c215b1f14aafd486ea0c07ed42602b12d8f602b93
+a78dce23ca09dda2d5e7fe923290062546825286d624de35ac5756b6c8ae030e211f4f9c9c8d18a924f5880e3b383d1f
+abce9e2128ff51fa17e73d93e63d7134859b2f328eedbcefb337c39e752d6750d9cffe6abfcd359c135dc5a12018827b
+a9ea7d91e8a3524acb3182bedd7e1614d37b48f8eb2d8f677eb682d38408b8d512786d8bb65811f4d96788b9378e59b3
+912c9f804fb57dd1928f8274be58b42618f589fc72a7e5b6cb4d4b5d78c547f80737cdd77ebe5d2b71eaf60b8fd2b663
+b7227ec9a62d5538974547f717fdd554ab522d8782667fc3e9962e9c79a21134ef168371bf3b67e28d0964e92cf44028
+89440a781c812a19c758172bf722139598023ed0425374fbb0d91f33be7b7f62a36d7aa34696c4fb0da533bd5dd41532
+b31e4a9792d6e9c625c95aa3c0cd3519410dec07940afab820ef9f63017415d237a47f957d0b591b6de399ffc2a8a893
+a66ec47393df2693be161daaa88be0cf07b430c709ca97246d10a6080ae79db55c9e206b69a61f52512b868ba543e96b
+90ca425dee74cc6a7e8eb1755cf9b7b76ba2a36ab851333b0fb7b35e8e6e189702456f2781ad87b4215993d62230ff4f
+88b64741f93a2ae5d7b90b22a5e83c9d56bcee5c6bfcedb86f212acc776cc3ebd0b62cc025f596cd8db4f4b6a7aeebab
+a1b6c7d2358bb201b42264f8fbebaa242ef105450bab21b4a2f16f368048c16ad1f3695841787eb33a0192f1f6b595eb
+8a932f1cd227ceb18389791ed9ea1ff26571715ed1ab56601a994795713a8f7f031d1e8472ec3eb665b7bfbbca8ca623
+8bb2e34a2bf77f9f657dfc51ff296a6279a4d7d15860924f72b184fb7d5680320c7769954b9dac73c4bfe9c698e65e58
+af54e7367891c09f2cea44cc7d908d37d058162ec40059d32ded3983a4cabfe5057953878cf23bfad5292dbd0e03c0e1
+8a202532b9205385cf79f0299ddcb3156fd9fab09f9197bce762b5623f75c72ab1d74334ee6f0d289007befe222bf588
+83bd0f5896eaad58cfa7c88fc5ed505cd223f815dcfe93881b7b696cdd08b8b5ede03ea5b98e195c1a99c74ac5394c1b
+b4a84d9940e58e3b4f804e4dd506f8c242579cfa19323c6e59047e5a1e35150699a2fab2f4862dba2f0ee4ed1d8970f8
+8c9ec477d057abebc2e2f6df5c4356a4f565bde09f499a131967d803d4bf36940ca2ed9d4a72adbe0a4a8b83fc686176
+8598f43c32623fd5b563d1ec8048ffc36db3d7f9b3a784299811687976f64b60585b2a2707050a3c36523b75d1e26716
+b55eb07014fe5ad3e5c9359259733945799e7429435d9bf5c72b2e0418776e329379433e17206f9f0a892d702a342917
+a5ed942eda7b36a3b0f516fafd43d9133986e4c623b14c0f6405db04e29c2d0f22f1c588150f670dbb501edda6e6dd4b
+92b6abb28cefab2e332c41c98bfa53d065b7d262638389603a43f4431e6caf837b986254c71f7cdacf4d6cc4064b0195
+b01806178a28cc00d1561db03721eef6f6539676d93dd1fa76a13b42a31d38797e99b1848de92fd11821a342b04f3f72
+a2f10303437acfbb5912e186bbff1c15b27ed194c02cbc1c5b482b0b732c41fa809136e8e314e26b5bfe57690fe3b250
+9990207fcc711102e7e941b3ac105547a3e7301390e84f03086c99c6d3e14efff3a2e2b06e26227f496d88d5cdaa3af1
+b903cdb0c2fd578612398c30fe76d435cd1c2bab755478761244abb1e18ba8506fd9c95b326422affbcaf237309959d7
+99e0c12cae23f244f551d649302aac29bfdeb2c7b95578c591f512ad7ac562bd47e7c7317ac9bac52c9ea246617bdb48
+b996d267ab5149c1c06168ee41e403be83f99c385be118928d6e2c042a782de0659d4d837f0c58b26df0ce22049a5836
+989001b8414743765282f7e9517e4b8983a929341b8971d7dd8a87d246f6c8ba5e550c983566ddd932c22948f4fa5402
+a0b006a2c9124375364b8fc5ddb543a7468fa6d321ea046d0fd2bfdaef79e5e3600b3d56190733491ca499add1298c7f
+80881d6f3ee507089b7dfb847fc53dd443d4384ef6fce878d07d9b4a1171eefea98242580e8a6a69664699f31e675cfb
+adc48ef53d88b9d70409ed89cc3be592c4bd5eb65d9b1b28f2167dc4b12406889c00f2465c554f3aff673debc2997ccf
+a62f5d9f167b9f4a4aab40d9cd8c8a48c519f64a1985823e20e233191b037c02e511b0280487112a9f8b1f1503b02db7
+b89aa2d4fb345a1d21133b0bd87f2326eb3285bd4da78b62174bf43d30a36340e4217dbe233afb925ab59e74c90fccf0
+932ba22acdd2f9d9494da90958bf39d8793af22417647d2082d2c3e6a5e17a2d14b0c096139fa8fa3f03967ca2f84963
+b67b107e71d96de1488b4154da83919d990502601c719e89feabe779049ddf7e4fb7e146eb05e754b70bbead4449efb1
+84509de1b8dc35aa2966d8a48501f725d59b4c65f3abf314b2009b9a573365ae3163c1f276708c66af17de180aae0868
+849153fe837a33fcb32c5fa6722c2db9753e984867c112a364eb880d87467782142d1c53a74b41df1dec7e900c877e1f
+903d05c73ae043b69b18e980a058ce2254d008647a8d951175b9c47984164b34fc857108dcc29ad9df0806d7e90405f4
+a6b05917ac32c0b0eeea18f1ef3af5343778c543592078fdf6a1b47165013e2676bfe6a592a24efab9d49c4bd92b8fc0
+8648482f6947a5a8d892a39f098160aae1a648cb93e7724ea9e91b0d1a4f4150b91481f6e67d3bf29ff9d65ba4fa61a8
+a6ecaabc38895013297ae020686f04ea739c4512d2e3d6f2d9caf3f54000fb031f202e804ee615eb3357714a18657bcf
+912f5935acc2dd20d5ef42b2ad5b307c925324a84a3c78ff66bc5885751934bd92f244e9636b60a744d750a2a7621198
+a0d6f261a776c5b114298f5de08d6e3372649b562051ea2470d3edfc376048793e18fc57ec84809b463dc72496d94329
+940744cd3118d1598c248b38503f6f1fbdbe7a147e683e5b3635140aa91679f8d6c1472600f8e9c36117a60203be6b4e
+ab81737c839fe340f6f1fb7275811cb0c0d5fe8bbc265f6a56c6c68d0291bc7234eaa581ff26f8929d9a5bed4aac7002
+8df47341160f1c728c3e31be17a32e42b54faaa1286ef2c7946882ca4dd46443b8428f3654616c6e4053f1cda2e11994
+a721067e75c3c791f4d9f58d4810ac9621606e29c6badb593d6bb78c39968b45be1777ddb9bf03696d4d4be95b2dc1bf
+a4e399213d3c4350c2d0cbe30757ba7e1f9680f58e214ff65433b36232323744c866a87d717851ba1dbd6769599f69a6
+b0be851d1e43dee27abe68f85e2330d94521b5f1c1a356ad83fcd09162c0ca9c2e88bccbcc5bacfa59661764361867a3
+86111bdd3dbfca232aa5802a6db41d639502e43a2e24cb06bb5d05c7f9b5ccac334d16b61d1c5eaac4fa0cab91113b46
+a4f805b11c174c34250748b9beebfb7c8c243198fb13463911906ee4effe7d331258a077e374b639a0c5cdcdff166b7f
+87e4cf2c6f46d2dbac726a121127502921decf0195d7165e7bbeec6f976adb2d1c375eaa57f419895a2c70193215dc4c
+8ff06de2c1c4d0744483bb4f7c5c80bf9c97b4df23e86c0bb17f1498ea70e0ee3af20827da5e8cb9d7f279dc50d7bd85
+ab112c0116471b4dc3fd1e6d918f99158eb7a08153e891ddbba2fe5bf0eeb188209e3019176e758231c3df937438136c
+a67f89194e99e028a5da57747268e5ef66fefb881144043429920d222d37aaf268ebf73ca1da659fcdac3b4e7a65092a
+b4da1dcc791566140d6abeaa2923cb6b21a6e6aaa30bb4cc70011e931eefa71f96b7e05358c0654bad7ce45191ab9fa8
+8283933231bca359db588c80e043ad6ea765fb0cba5ef233c5d514ba01ddd1b409efbadb368f26763402e4576dc4655f
+97f568ce3edacd06f3e31a15462f5f9818a8c3fdbcf92b1ac5840b0b6e73166a154013dd52e85a18e8ead3fc9e54aca0
+a9cd1601c41e5ab2018f986443914fb703ddb6b06a36c06fb58065f2fee8e1751071ef924ea3ad76f0c19baccb1b5f8b
+92aad71bb7e929cc35a48020d16a5822f4f106a7f59985005a5ae5ba8e8016ec33727610393498f56b4f353b3d5161b8
+89427780aa4e7ac894c681fbe2889153b94db883f17f109bc9caa93f0c259dda42aab502bbefaf572c56f70abbc42db8
+aa8cf76ff847dfe59534432ed8520bb48bf412c28497747dce04d2b2a54ba843c3be1564630cb49ec0217167847ba590
+a1570a6748a2303e74a31c2131d05ab372ec006ee92ef74c42f2e9a250663bebdfb3777e7ad91f50c954889a59c2d434
+a4c2b1bbc48199c31ea8d8196729eab00ce0200350d4aa9f23347a3289355e5828cb2f93036a14d2d9ec575fb3835239
+84819d0bedbaab5bf8afdf23f59a7ec5f50da3063cfdd1ef5fc4ca4c1fe68980b5c80e30a49f38e5816765e81dfc5a57
+a57cfb5e877b88202f589be777605deafbfc85ed1357af03a18709cfb4b668a271199899243cd3750f1cb77ebc40bba7
+8d95934bbb0efaf3339f27cb96de46e4486aa58a2c40dbc77c1c3ac7c27a228062824b9045c046631b2e286e8549603a
+b99a8356abeee69f40cb3bd8c87e8039a1e076897dde430bfbf989dc495c48609a7122bc6c1d1c32ccac687b47d5558a
+aac2edcf2fe5d3f1a84e8f1f27ece920eabe7793bf0ed5290cda380752e55d57a55a362c5253bebb71e4a55f2c437ff6
+af7c76876072c3b0091e22b9c5b27ce99bf1f0079ea1a7816ad9c06e9e5fc407595c7f4f9953e67d86fb2da656443dc3
+9175b64d104f78d3310c9c02f82e04c8e9878d2044ea5ee9c799846a3d23afa5fa2aa4af7350956136c69a0eed03cb2e
+b3328e953317494a3d976e7f7c3d264258a5d4b2c88e12d06786a9e7b2affd41086762ef6124c6a6e5b6b028db933c14
+a49d166065e19d39299ee870229e4a04be81acd6af3a2201f3a291a025dd5f8bc3e676ee123cd4b9d8455f6a330b395b
+85fa15bc8947ba03681d87b50bd2f8238b1c07849a7ed4e065053fad46aac9dd428186a6dd69dc61b5eba6ffec470831
+b6fcb2f694a47d3879b374b8b2967dcd59bd82a5d67ae6289a7326c18791b1b374e12571e8c8ea16a4bfc5525ced3ec4
+b6115f52566aa90ccac2aab6d2dbf46eca296d047db1eb29a1b8a2bc2eef7a24e90407f8dae528806aceb2a1e684d49e
+9707e66220233f6a48a93e8dec7b253d19075eaa79238e519b82ce1ac5562cca184f8a1c14f708a96c34ad234673d646
+a0822903fb3825eae07ee9d3482277c0b8fc811856dfe4a51cf24b373f603924166fc5485185f99c4547cd6476b62270
+88dac6366c439daaeee2532b2ddbe206132cf6e12befbb8e99870ac684e04e62de150cba0e22e395a0b858948f40808b
+a72dfba9caad3179f43fead0f75e33ba5342470d8c9cb7c86d30d2c7ce7244a8aafd1d558b0ec8e2a9436de2c2e95ccc
+8d696046defcc32cc19954c559213100f0ba273ea12abb55ca7c42818071d853846bd4213af2c41ecd4442f6b4b511b1
+89d6f2d52cf65414da15a2fb1911c53afbfb50bb5f2638844abfc325ff2651cd9130be4beff05dc4046adfc44394a182
+afb91abd7c2a9cfe62855ede3c6960ad037fe8778364a2746ff7c214c55f84e19a474a9a0062b52a380d3170456ee9c6
+87f724a16ec8fdae8c05788fa3f823ecc3613df46581a63fc79b58f7c0dc2519b6b23e3dd441a0ca6946dfe4bc6cd0ce
+86760f90f6bedfba404b234e90fbf981d26c29b87f2fa272c09540afa0f22e6682d08c21627b8a153c0feb27150458e2
+ad4d0342f255a232252450ce4209507ba619abfd1ffcb9c5707cfa45f89be41d88f1837acea993a1c47211b110250b4d
+ace54b5889bccdf1d46c4ca21ed97cca57f7d12648381411d1b64afdfc64532a12d49655776ea24cf5eabe34145705ad
+936dac693d0c1b1e5de1701f0bc46aef6e439e84bc368a23c0abe942eb539a2950e8929265786fcdb18d40a44bda14b9
+94fafbc544decec1d489b9ad6b23410b9de4779f9f44aabd093d7fab08340a4646a8cba31633e49c04d2690b8369a1d7
+98157e757f1a677c5d9d65c47759727a4dbc49fec2da4d9889c4ea90573fb42e2a8d72eaef92b782ac6f320970f09363
+8eaa0498c191c810c7e1ca7398f7c80dd0a7e7d7829ed07039490f60e7c2ae108843c06fe38fa36d45d63da46cba887c
+a0ae116e5b0d2dccf83f056ad876037225687904e0290fe513fdc6b2dbe4cbf5fac1d828352e64734895895840b3c57c
+b592b318dbbd7ec4872aae5e64bdf2305db2e5e8cfe0ad77b691f542ba5e066dd20b09b0b08ff0d798bd79ad946ddf7f
+879e50c8c3e7f414ad2b38632bc482b71759cd561aeb2215550186ebb4559e4cf744cdf980512d8321954b3458d21e11
+aed5c6c7ce0407d7b2c04785fcb9deadb9b9413e37cef5b1d918f474cccc7de012fe1fa6f5fa93cb7ef9ac974d9fbc20
+892274a9f0afc68fa74be276c2a16de5cec674193f96b27a80bbb9f3add163f85716b531f3c920b98577a0225f84e8ca
+938fb7a53266b997a7669596577af82f5289b160b7fcf06d76eee2a094696f6f12b28c2c65b833a52529a116c42e6c7e
+892083929b6067f5045b1208f3dc8f0ee25bd0533a8831f5c23bb4ff46a82d48f0a34523359df5061d84a86b718d5060
+99159ae9574df6c16273eda66b6d8b79a327940e335b28c75d647f4744a009f4b5f0f385e2017bd3e7fbf59e629cd215
+a03e5757ef7738eba32d396923ff7ef82db2c15bb6adc8770fcb37260b7bda3be62473bc352a9a2ef7ec8ebe0d7688bc
+ae3c24a85c9b1fa55158b2acd56d2016f70dca45a23f3ef7e0c6b096f4a7c54c14020d61bec7c7f87be4a595bf254209
+a920a6f9cc803fe31352fca39c13f8ac1e8d494fcf11b206092227c2af38469b1fbc068b8fe014800b70f137107aafc4
+b893853be57519ffa6410da605e7d3a746ebadec4788c7907f6e0dde9f20f5a6a01181148b874b3decf9b4814846a11a
+b46f43918c5195729f6532439f815d1eb519e91005bc641a4a30ae88700982bf4ed07a342e77945780317c297c903755
+8e431bf4497d0ef6538c93c4bdda520179301a0104eebcfd104efa1edea876818d7d31079656f01a5ff76c4f5fcd71df
+92e3dbcb580dfb9cc998f878052b0c3be1c5119e5249ae9bad3538ebb0f0c4ab5a959b04033b96d61836ef07784e6b64
+b712d9d63aa888156f4ec83e939c6bad53de18045f115f54fbf4261fb02f10a8a46a8d716ab43d4acbad3b02283c32fc
+b2334e776988b4f772446a47c87416b4f19f9b44164a5f828424d3f35ef10baa56afe810d49b0b86b786b9c0227681a6
+a3f25ad18e435ef585fa90e6cef65a8ba327e5e33701979e27e64ef7d8e09e2591e52bff9c5749d35643456d18625685
+adcfa48ae43cac6fa9866b4cce10a243969965942c891d5e6c0e5b03bd4763f9b63779fbf40d26ac674534fe7cc478d7
+a0eb3448e045038740e2ee666e88aa0f8b8e24b1b55d7d4964f01bfc0c581f7e9d4c0e79f8cfbfecfa8b024b216c8ea6
+8110aa1d82f11965af4f4eedb4de09ee9c353481b2d7ee7a2bc2f302d2a5ae6c31ebc6451309ba7c305da41070b0f666
+b074fdad419d42783ebda17f19863aa499eec71fda5aab6cdcc389276b7bf08053795d15890175ca3dc89f6d8d17758c
+a14665846d95d7d5f0b5381502080c822776ec0994ccb1ae1ffbb3f19205ce9c7c9bf9c2d2ca098807ce99f29e4f07a0
+b4884842670a333cb5548a842fa2971881e26b442dfab0b91d6bf3b4cbdf99adbbc9d14fe2bb46872cfcabedae85db30
+94549b01cb47ba16c0cf6f7522c833545397de0b3388c25d03e60132eddada6401682f9ffd8c50d1a61b4d2dde37461f
+a790c9b4cec96e4c54777f3e03cea5769b20382cdcaf1de494bac2b9425eaf453eff643c62ab284cc1af33bbd36013be
+b1b45fd298ed11609aa1ae6c5ac655e365bb451de1b9fc92aad40422ba85c6a454f33b8142acabe55171328c13d92edf
+a74cea9e7096e38327064f058a3cdaa34e6eafaa9c7d58f753c40be67998152380fbd612b9dc0751bda7befcdffcc749
+b18978dfc5efb07b7ef992c7b0cf5d1b4ca551578b1dd13057b7aced8b1deb9f2036e1e3116248a803e922659d206545
+8153c07603cdff6622835a9853b795274390abf7197d7a192193bec44acb43e8cd50b56c11a03f4a2a27124c36974f3d
+86b987f30bb9a37cc91d22dffffcd346ec5773e846a6c2b8f9e03b25ffcae859c470c901c4e29695d325dfe4eee927bd
+af5e980b9507d10d5269c1a5d02bc16f4f009b663e413ea6a7c655250f3a21c608c12f4002269a05d3779907e7be7d69
+a6f737fab2af9f27bfb8ca87f5fdab6ad51e73ccf074e90576db57b309dfa0a95f9624526dfa4feaef39c388802f2ae9
+b7ed51f699f615f58a7ff4f99d52c4ce7a8d662843c1f4d91f1620fa119b80a0f6848f9fb6c4b9822dc019830e7dfd11
+b71f27f291aa6ef0723ed79c13a1c7a1c40198ffb780a129d9d20e250406bc91f459705b2b6674c9bb412a7b5dd9ff07
+9698cf8f638c3d2916fefa5f28c6050784479f84c2ee76a8aeda7e562630a6ae135b445ec4e29af8588ca5ad94a67f49
+9270aa5030966a9990d8bc71b00b9a7a1d7c1ad8f4c7f78a31b3d7f86467332f21407c74a89ba4f574d723acaf0d2042
+b1b82faceed8e2297cd49cc355471d15ff8dc2ccc78f6944c8f7a75d3ad1629a2e2f1d0a2ff7fa2b3c38cd19839aa5e9
+8a8c4ed49dc9bd961773edf8d41d04385b11bbd3577024639a39319cc7068380236bf73fce0b83e6535bd3f95cef0e65
+8d04ec1e7d148b7e66910ab45a0e6bf409612a3b560bfa784e26f2963152821c646a655cf17a0ce3d4ba4c4ebeeb4a1e
+8e9d707f6186d93accb60813715ed1f6b3001ff6d2f87daf8b906bd0b988c1833b2ccd80dee9bdefb45901e81bb82971
+9762317ca6a5e6fe0b2991e0fa54b5fbf419dd0550d70074957d65cd7ebf79ceba607dd40d709ed635c822b3b4da2cac
+82b53cd9a1eca2f5d3256723dc4b6531ca422bd87bab36243c727d1952db58d7288ab11467305d875d172ce165b1e4a5
+b4dbeafa05c87029ae257bee1ed7603645fab41f6ba7ac8b57ced5b4774a72ba3e671c2433a93acc3c498795b5cccc42
+a916d3ab7f0e7cef294e11c97c910a19c338ad8e615406e6d1c8995b4a19c3b2527100cc6b97a950ec5a4f3f6db7d01a
+b9a785c7123609bdc96f8dd74500c6c77831d9d246f73244de964910b4045ce3242c881271bb1a4bc207d67de7b62e97
+b5f94084f695d0821c472e59c0b761e625b537c8ae3a09f11d9a57259e148cfadba1e43bf22c681b6b32390121cec208
+8f91b36d8570f19a90cf3ed6d5bb25f49a3315ddb566280c091fe2795c4e25ed2c6a1ef8d2669b83f2d7bb78fc8c40f5
+80f27359a73ed8fdd52762f0c7b9f676be2398b1f33c67877261480bf375f975f626c2ca3e7a9f59634db176ed672c98
+b96b91e3d5148ca793edefe4ca776b949c9305acb6f3a3cf87767a684014d2c8f2937c2c672eef8510f17d2da5d51385
+99c4e1ca2cabd4388ea2437dbdf809013d19be9bd09ff6088c8c0cfdb9ecf8fd514391a07b4288dd362434638b8834d9
+b6fdfb812e145f74853892c14f77c29b0c877d8b00055fd084b81360425b3660cd42236ecc853eadb25253e1cd8445c4
+a714af044ef500104576898b9409a9a326ef4286a45c3dae440bd9003fdf689c5f498f24a6f6d18502ce705c60a1cf14
+a9444e201be4a4d8c72119b3d3b13098afee6e5d13c5448fa2e9845cc9188239778f29b208749c960571dfa02b484f05
+91c826a6b8425f93ff395d9fdfa60dbfa655534c36c40a295906578540b9a0e6b94fd8d025b8b8611433022fbbc4fb0b
+a355d76bc3cc48ba07026197130f25a593ec730d2ef0d5d2642bfcad745ecbe5c391324bc2485944060ff3100c952557
+b5f9b5a289a6f9a7252cc1f381c892bdb6836a5998f323ee21ae387936148ad1ad7cc6eca37ecece36404b958ae01e8e
+a3c7ae04a6208851f6cc40ff270047283b95218905396c5dedc490e405061cbefd1251ecf77837d08c5ec1c77d2776ce
+aa02ee387dd2cc7a23cf5cd582da0bc84bb33a7158d76545cbd6e06b26a6f30565dc712d7a8594c29f0529a892138802
+8aff025c841f167fadaf77a68284c355ace41d6df3a9f1e41a6e91454b336f0b69ea34cce495839b642a7c43997a8fd9
+82eccf0b6b4b6460f676d677266451d50f775446df313fc89bdf4c96e082340f6811939d215a54ba0fe30c69b3e43e25
+af324d871b038ff45a04366817c31d2c1e810359776fb57ac44907c6157004e3705476574e676b405d48a48bfb596f59
+9411dcca93ef5620ce375f379fea5c1017a2dd299e288e77b1ab126273631a299d7436f3bf3c860bf795e5faaaefa804
+934fca809e66f582c690c3778ea49de2e7940c0aeb8d7edad68f2edccdfda853d2c4844abd366fbc2215348935e4b2e2
+a1b1fa4c088418f2609d4dea0656b02a8ee664db25f40d53d8f4b1be89a55e5abecbf2c44c0499874abeb3d3a80acf71
+ae6ed7a0ba6280c679b0bf86111afad76fc5d930e9fb199df08134ba807f781d7e0b8b9b2c8c03b02d8cc20dbe949a28
+937d200a72fe4ab8d52f6cb849e322bc5959632b85a93c89744b33e832e8dcf1dddd6ffac0c049b03c105afb8930f7f5
+b4b4a46ebe0c5db16004933c08ad039d365db600a13d68be5346b1c840cce154f56c858874e866de8c3711e755c6e5dd
+afcbcb7170c8caa2b77d2b3388dc2f640aeb9eff55798aeceb6eb6494438be05a2ae82f7034b2d439a45ad31d8c64b07
+a2c676273081b8761f58e0b11306ddb6a4cde3d90e7c47b434468700c5b749932819b01efd7637ca820e10fc28dfb427
+b445715162d834c9ee75ac2ff8932ace91c8242d67926b2a650217e4765e0531c2393c9438a52852d63dbbe2cceaafc5
+a0c0ebdc1480fb238a25fbfc77fae0db6e5e74b91809f0ff20a819e56b8c3141549615d1bd7b99829898f6028e8c86be
+b3d11933e9d1db8ca617934261ed26c6f5ca06ba16369e7541482bf99c4f86520d43fbb10f4effb2fdf3cc70a189fdb5
+888ac610f8fd87a36b5646e1016eaf6dbca04aa0cc43f53a1046d74a658c4d2794606e79fb07fae57cf9d71ed339f4b6
+979818dab00c58435dc0d0d21185943f95819d2a13531abd2d798e1773c4bbd90047f4eebe117868743db75604a50227
+a6fbcd2656e475065fe44e995e8e2b5309b286b787a7597117e7acc3bb159e591a3e7304ef26f567b5720799d8ae1836
+a03f0ac08d2101ec4d99ca1443eea0efa767a65448a8ecd73a7818a99e863a04392bec8c5b8e5192834e8f98d4683f13
+b3c4ea8c6c3ee8aab2873d446ad702000b0e927e0991c9e30d83c6fe62a604efdc3ac92453313ff0d5e0ac6952922366
+ab25c857f26830631113d50145e961441b5e35d47b9e57f92466654dffebde43e4f78b0867d20929f97c2888c2f06509
+98950aa5a70ef41f274775f021a284d4d801a2efe2dea38460db8a3a8c08c243836d176e69127c2cd17497b0ca393e9e
+a9698113febfb6d87fcb84bad82ce52d85a279d3a2933bdd179d53cfe8d6c6c68770e549a1e2947e7528a0e82c95d582
+832b504513266259db78478bd1b5a3b0f3bf2c6d25f1013e64bf0cfae9dc23da8ecd25f7f1047d2efb90e5f1d9b4b3cc
+b588bba7bcc0d268ab260d5c1db2122cee7fd01583c7cc27a8ae6b48b29f34c6ea8a6acbb71b9b09c6156ec0a0766142
+a73d2223c7afadc381951a2e9e7bcb7b5c232369f27108c9f3c2ced2dc173e0f49531d0ca527eb142fbb70285307433f
+9152cd6b97bd3278465348dde2095892f46342aed0e3d48675848c05b9aee6ef5ad7fe26e0dcd4ab176532289d40eedd
+a7812a95a43b020721f688dd726356dda8ebe4de79b4f0fdef78615795e29681bff7c6ff710ff5b2d6ae3fd81bdb8507
+83724c16049e9eaae3269ea8e65caa212f0592e0190b47159bb3346208ccb9af3cfe8f6c3176fa566377da1046044ab8
+877634ec37c7dcd3b83705b103c31013697012795f11e8abf88d54bc84f2c060f665f0c3b14ef8087d3c6a8a7982d64f
+b3e53aaacef7a20327bdbba8cd84513534d2e12fd5e1dcf2849f43146e098143b539ebd555623d0ecc46f5ebb4051fca
+952d58ecafca9b7ffc25768ee4f05ce138f0289d72978eb5e5d3b23a0daedcb17478890afdce42e30d924d680e13c561
+a10dcc725f9a261de53dd3133858c126f6aa684cf26d92bce63a70e0ff5fff9610ad00d2b87e598b0a7548cfd1ffe713
+b7bc5d0c6b665d5e6f4d0af1c539d8a636550a327e50a0915c898ac494c42b3100e5fae0074c282d1c5073bf4a5456fb
+8adc330d3b49ddf3ed210166afc944491aaedb28cb4e67472aeb496f66ce59184c842aa583bfb1a26d67d03b85065134
+b2df992a1310936394a1ebca94a7885b4c0a785638f92a7b567cfb4e68504ac5966a9e2b14891d0aa67d035a99e6583a
+96f5da525d140739d19cebb706e2e1e0211edea1f518e040d361d5aca4c80f15be797f58cb4cd3908e4c360c18821243
+b2c0d9173a3d4867c8842e9b58feb1fb47f139f25d1e2332d6b70a85a58811ef99324bf8e52e144e839a4fe2d484e37b
+ad95a7631ddb4846d9343d16533493524dfd22e8cbfc280a202343fccee86ab14446f6e7dad9bad9b4185c43fd5f862e
+97f38ab82a51a7a792d459a90e7ea71c5a2f02d58e7d542eb3776d82413932737d9431bd6b74ec2a6a8b980d22d55887
+ad4e4c57ec3def5350c37659e8c15bd76d4c13d6de5453493123198dda2c2f40df349f20190e84d740a6b05e0b8f3deb
+a691bc10810d11172a6662e46b6bbc48c351df32f325b319553377f525af44a50aaa02790c915b3a49824aa43f17fff0
+a80ccac79bb4014ee366dbf6e380beb61552bd30ef649d4ec39ab307e4139b7775e776fab30831517674ff3d673566f6
+b11e010b855d80e171705ab9e94364c45998e69d9120e4ca4127049b7a620c2eec1377356e7b877874e767f7c44afef4
+96bfab7777769a1e00ce16ada6667a0d21d709e71bd0371c03002427d138d9172640cdd5c529c710fea74bb9d19270c7
+a5bffd2c30e29633b4ecf637c1e792c0378252e2a99b385a093675940b48de2f262c275332ed4765f4a02467f98e3ddd
+8d11929d67a6bd8a835b80660a89496250c766e713bddb2cd7052d67b92c39a38ce49005d38b4877856c4bef30fb9af4
+8e704597a0dba1dbd1ff8c9755ddac3f334eeeb513fd1c6b78366603ebc1778231deb8e18f2889421f0091e2c24d3668
+904fbb3f78a49e391a0544cf1faa96ba9402cba818359582258d00aff5319e3c214156cff8c603fbc53a45ede22443e9
+af12ac61eaa9c636481a46fd91903c8a16e7647534fc6fd9baa58ae2998c38ffbd9f03182062311c8adfef0a338aa075
+87f2e544b2993349ab305ab8c3bf050e7764f47d3f3031e26e084e907523d49e1d46c63d0c97b790394f25868e12b932
+a279a7bef6de9d4e183e2bedaf8c553fadfc623a9af8785fe7577cadced02b86e3dab1e97b492d4680c060ea0126abeb
+8ece08667ed826f0a239cea72e11359f7e85d891826292b61d4edbdc672f8342e32c66bec3e6498016b8194168ba0e0d
+90a15162586e991b302427bc0307790a957b53ab0e83c8b2216f6e6302bc496cb256f0f054ff2cccdfe042763de00976
+9966c0413b086a983f031a39080efde41a9fedcaf8e92897ce92e0c573b37981f5ea266b39dc4f4fb926a1bce5e95ad7
+9515be2f65a57e6960d71bfb1917d33f3f6d8b06f8f31df30fc76622949770fea90ff20be525ae3294c56bc91efb7654
+86e71c9b4059dc4fd1ce7e28883e4f579a51449cab5899e371118cdb6afe2758b1485961ca637c299896dea7c732151b
+8695b4ff746d573f8d150f564e69fe51c0726c5d14aa1d72d944f4195e96165eca7eba8cac583fd19d26718b0ce3eb61
+813eecf402151c99c1a55b4c931716e95810fc4e6d117dfc44abbf5ef8dcdf3f971d90d7fa5e5def393681b9584637e0
+a9caf7219eed1db14b7b8f626f20294a3305ed1f6c22f6a26962772c2fa3e50b5234f6d9ba7fa5c3448824c2a15271b3
+b2b2ee20de9b334f2d82cbe0d2e426ca1f35f76218737d0069af9b727a1bfc12d40cf8b88d4afcbeaadf317b7f7ad418
+b853960749521a17ff45f16ac46813d249c4e26e3c08fd33d31ef1ed2b2e157c9cb18bd2454fb5c62690bdd090a48f60
+88772297d2972471b3db71f3ddbf5945a90154768ca49fa6729a5e2299f1795445fb3d4d969d1620e87dca618fbc8a6c
+a2bb783fd13aee993e3efd3a963ebc8a8eacfc8450042f018f2040353de88c71ac784b0898bdff27f606c60a3d5ef2c6
+9210903ac619edca0cb8c288ed6dcc93c472f45182cd6614a8e2390801ddea41d48a4ac04a40e2f0adfd48f91aabe2ea
+a621d00f83260c22db9fa28757ea81dabcc78b10eeaaf58b06b401db6cc7a7d9a6831a16f171ead4e8506d0c46a752ca
+b25c525bf6761a18bbd156ac141df2595940c7b011ed849dbb8ac3a2cd2da6b63ba4755324d70dc14c959deb29fb9ad3
+a35111d0db3e862e1b06249d289e0fc6b110877d254f2ae1604fb21292c227a8b6d87dd17a7b31166038d6860b1bd249
+90bf057309867d95f27637bd10ef15ceb788f07d38aca7ad7920042293d7c4a1a13d4ca1d6db202864d86d20a93e16cf
+a88510e110b268d15dcd163ba1e403e44b656771399ac3a049dcb672a1201e88bf60bdd1d303158888a3d30d616cc0bd
+b33b7e1f765e9cbd5eeb925e69c39b0a9ea3348ab17f1dbb84b66f4a4b3233e28cbdeb0903d6cfe49ec4fc2f27378ff9
+b777da64fa64d9bc3d2d81b088933fce0e5fcc29c15536159c82af3622a2604c2b968991edea7b6882c9e6f76b544203
+8ea598e402a056fd8031fbf3b9e392347999adc1bd5b68c5797a791a787d006e96918c799467af9ac7f5f57eb30b4f94
+b6901a389bf3b3045e679d015c714d24f8bbe6183349b7f6b42f43409a09f0d5bd4b794012257d735c5fdf6d1812554b
+b5866426336d1805447e6efc3f3deb629b945b2781f618df9a2cc48c96020846e9108f9d8507a42ba58d7617cb796c31
+a18ccc6ad1caa8462fa9bec79510689dd2a68d2e8b8e0ddbeb50be4d77728e1d6a18748a11e27edd8d3336c212689a4d
+abbd48c48a271b6b7c95518a9352d01a84fb165f7963b87cdc95d5891119a219571a920f0d9ceedc8f9f0de4ab9deb65
+94a4e5f4d7e49229e435530b12a1ff0e9259a44a4f183fb1fe5b7b59970436e19cf932625f83f7b75702fd2456c3b801
+af0a6f2a0d0af7fc72e8cb690f0c4b4b57b82e1034cca3d627e8ef85415adec8eb5df359932c570b1ee077c1d7a5a335
+9728025e03114b9e37ed43e9dcba54a2d67f1c99c34c6139e03d4f9c57c9e28b6b27941d9fca4051d32f9b89bec6537b
+941601742d1e1ec8426591733a4f1c13785b0a9b0a6b2275909301a6a3c6c1e2fb1ffa5fdcc08d7fb69f836ae641ced5
+b84b90480defd22f309e294379d1ca324a76b8f0ba13b8496b75a6657494e97d48b0ea5cfdb8e8ac7f2065360e4b1048
+95cc438ee8e370fc857fd36c3679c5660cf6a6c870f56ef8adf671e6bf4b25d1dbad78872cc3989fdfe39b29fc30486d
+8aafba32e4a30cad79c5800c8709241b4041b0c13185ea1aa9bc510858709870b931d70b5d9a629f47579b161f1d8af7
+865b0155d9013e80cba57f204c21910edbd4d15e53ae4fee79992cb854dc8b8a73f0a9be92f74893e30eb70f270511bc
+b9a49ce58d40b429ac7192cdbf76da31300efc88c827b1e441dd5bdb2f1c180d57808c48992492a2dc5231008629159f
+8d1438b10f6cd996494d4c7b5a0841617ec7cf237c9e0956eac04fda3f9ded5110ec99776b816e3c78abd24eb4a9c635
+af2dd18211bb8a3e77c0a49d5773da6e29e4e6fa6632a6eeb56c4be233f6afe81655d977932548de2be16567c54ffbd7
+92b92443f44464f2b48002a966664a4267eae559fa24051983bcf09d81bed5bcc15cb6ff95139d991707697a5d0cc1ab
+a1864a2bac0c0dd5b2fb1a79913dd675fe0a5ae08603a9f69d8ca33268239ac7f2fed4f6bf6182a4775683cb9ccd92a8
+948e8f1cf5bd594c5372845b940db4cb2cb5694f62f687952c73eb77532993de2e2d7d974a2ced58730d12c8255c30a2
+aa825c08284fa74a99fcfc473576e8a9788277f72f8c87f29be1dd41229c286c2753ff7444c753767bd8180226763dfc
+8384d8d51415e1a4d6fe4324504e958c1b86374cc0513ddf5bcbffabb3edcf4b7d401421e5d1aa9da9010f07ef502677
+8b8223a42585409041d8a6e3326342df02b2fe0bcc1758ff950288e8e4677e3dc17b0641286eaf759a68e005791c249c
+a98a98cc2fb14e71928da7f8ce53ab1fb339851c9f1f4bceb5f1d896c46906bd027ef5950ca53b3c8850407439efedd4
+866f44d2e35a4dbffe6cd539b6ef5901924061e37f9a0e7007696fb23526379c9b8d095b417effe1eecda698de744dcb
+91774f44bf15edafdf43957fdf254682a97e493eb49d0779c745cb5dbe5d313bf30b372edd343f6d2220475084430a2e
+ab52fc3766c499a5f5c838210aada2c3bcc1a2ec1a82f5227d4243df60809ee7be10026642010869cfbf53b335834608
+a0e613af98f92467339c1f3dc4450b7af396d30cefd35713388ccd600a3d7436620e433bf294285876a92f2e845b90d0
+8a1b5ca60a9ae7adc6999c2143c07a855042013d93b733595d7a78b2dc94a9daa8787e2e41b89197a0043343dbd7610f
+ae7e4557bc47b1a9af81667583d30d0da0d4a9bb0c922450c04ec2a4ae796c3f6b0ede7596a7a3d4e8a64c1f9ee8ff36
+8d4e7368b542f9f028309c296b4f84d4bde4837350cf71cfe2fa9d4a71bce7b860f48e556db5e72bc21cf994ffdf8e13
+af6ed1fbff52dd7d67d6a0edfa193aa0aab1536979d27dba36e348759d3649779f74b559194b56e9378b41e896c4886f
+a069ba90a349ac462cac0b44d02c52a4adf06f40428aef5a2ddff713de31f991f2247fc63426193a3ea1b1e50aa69ded
+8750f5f4baf49a5987470f5022921108abe0ead3829ddef00e61aedd71f11b1cdd4be8c958e169440b6a8f8140f4fbf9
+a0c53cefc08a8d125abd6e9731bd351d3d05f078117ff9c47ae6b71c8b8d8257f0d830481f941f0c349fc469f01c9368
+94eea18c5ed056900c8285b05ba47c940dff0a4593b627fdd8f952c7d0122b2c26200861ef3e5c9688511857535be823
+8e1b7bd80d13460787e5060064c65fbcdac000c989886d43c7244ccb5f62dcc771defc6eb9e00bae91b47e23aeb9a21f
+b4b23f9dd17d12e145e7c9d3c6c0b0665d1b180a7cfdf7f8d1ab40b501c4b103566570dca2d2f837431b4bf698984cad
+847a47c6b225a8eb5325af43026fb9ef737eede996257e63601f80302092516013fde27b93b40ff8a631887e654f7a54
+9582d7afb77429461bd8ebb5781e6390a4dde12a9e710e183581031ccfacd9067686cfaf47584efaafeb1936eae495cc
+8e4fd5dbd9002720202151608f49ef260b2af647bd618eb48ebeceeb903b5d855aa3e3f233632587a88dc4d12a482df9
+87b99fe6a9c1d8413a06a60d110d9e56bb06d9f0268dc12e4ab0f17dd6ca088a16ade8f4fb7f15d3322cbe7bfd319ae1
+b562d23002ed00386db1187f519018edd963a72fca7d2b9fcaab9a2213ac862803101b879d1d8ac28d1ccae3b4868a05
+b4cc8b2acacf2ce7219a17af5d42ce50530300029bc7e8e6e2a3c14ff02a5b33f0a7fecb0bb4a7900ea63befa854a840
+9789f0fe18d832ff72df45befa7cabf0a326b42ada3657d164c821c35ac7ed7b2e0eba3d67856e8c387626770059b0c3
+986c6fe6771418549fa3263fa8203e48552d5ecb4e619d35483cb4e348d849851f09692821c9233ae9f16f36979c30c2
+a9160182a9550c5756f35cea1fe752c647d1b64a12426a0b5b8d48af06a12896833ec5f5d9b90185764db0160905ca01
+82614dbd89d54c1e0af4f6ffe8710e6e871f57ef833cbcb3d3d7c617a75ec31e2a459a89ebb716b18fc77867ff8d5d47
+8fc298ffba280d903a7873d1b5232ce0d302201957226cddff120ffe8df9fee34e08420302c6b301d90e3d58f10beeb9
+898da9ac8494e31705bdf684545eee1c99b564b9601877d226d0def9ec67a20e06f8c8ba2a5202cc57a643487b94af19
+88218478d51c3ed2de35b310beedf2715e30208c18f046ee65e824f5e6fd9def921f6d5f75fd6dde47fa670c9520f91a
+89703ae7dff9b3bc2a93b44cdbab12c3d8496063a3c658e21a7c2078e4c00be0eecae6379ee8c400c67c879748f1d909
+a44d463477dece0d45abb0ebb5f130bfb9c0a3bbcd3be62adf84a47bbd6938568a89bc92a53ca638ff1a2118c1744738
+95df2b4d392143ee4c39ad72f636d0ed72922de492769c6264015776a652f394a688f1d2b5cf46077d01fda8319ba265
+aa989867375710ed07ad6789bfb32f85bdc71d207f6f838bd3bde9da5a169325481ac326076b72358808bd5c763ba5bb
+b859d97d0173920d16bc01eb7d3ddd47273daac72f86c4c30392f8de05fee643e8d6aa8bebdbc5c2d89037bc68a8a105
+b0249ec97411fa39aa06b3d9a6e04bbbcd5e99a7bc527273b6aa95e7ae5f437b495385adaefa4327231562d232c9f822
+8209e156fe525d67e1c83ec2340d50d45eba5363f617f2e5738117cdcc4a829c4cc37639afd7745cbe929c66754fd486
+99fd2728ceb4c62e5f0763337e6d28bf11fbe5df114217f002bc5cd3543c9f62a05a8a41b2e02295360d007eaab796a6
+902ebc68b8372feeaf2e0b40bd6998a0e17981db9cc9d23f932c34fbcc680292a0d8adcea2ad3fb2c9ed89e7019445c2
+8b5653f4770df67f87cb68970555b9131c3d01e597f514e0a399eec8056e4c5a7deed0371a27b3b2be426d8e860bf9f2
+8f5af27fdc98a29c647de60d01b9e9fd0039013003b44ba7aa75a4b9c42c91feb41c8ae06f39e22d3aed0932a137affa
+81babb9c1f5bcc0fd3b97d11dd871b1bbd9a56947794ff70ab4758ae9850122c2e78d53cb30db69ece23538dc4ee033e
+b8b65d972734f8ecae10dd4e072fa73c9a1bf37484abcfa87e0d2fcecac57294695765f63be87e1ba4ec0eb95688403a
+b0fe17d0e53060aef1947d776b06ab5b461a8ef41235b619ca477e3182fadaf9574f12ffc76420f074f82ac4a9aa7071
+ae265c0b90bf064d7a938e224cb1cd3b7eca3e348fbc4f50a29ac0930a803b96e0640992354aa14b303ea313cb523697
+8bc10ffde3224e8668700a3450463ab460ec6f198e1deb016e2c9d1643cc2fe1b377319223f41ffeb0b85afd35400d40
+8d5113b43aea2e0cc6f8ec740d6254698aff7881d72a6d77affd6e6b182909b4de8eb5f524714b5971b418627f15d218
+ae2ef0a401278b7b5d333f0588773ec62ead58807cdee679f72b1af343c1689c5f314989d9e6c9369f8da9ce76979db6
+b9c1cb996a78d4f7793956daaa8d8825dd43c4c37877bc04026db4866144b1bf37aa804d2fe0a63c374cf89e55e9069f
+a35f73851081f6540e536a24a28808d478a2bb1fd15ee7ff61b1562e44fbafc0004b9c92c9f96328d546b1287e523e48
+82007f34e3383c628c8f490654369744592aa95a63a72be6e90848ad54f8bc2d0434b62f92a7c802c93017214ecf326e
+9127db515b1ed3644c64eaf17a6656e6663838fed4c6612a444a6761636eaaeb6a27b72d0e6d438c863f67b0d3ec25c5
+984c9fcc3deccf83df3bbbb9844204c68f6331f0f8742119ba30634c8c5d786cd708aa99555196cf6563c953816aec44
+a0f9daf900112029474c56ddd9eb3b84af3ed2f52cd83b4eb34531cf5218e7c58b3cab4027b9fc17831e1b6078f3bf4a
+90adbcc921369023866a23f5cea7b0e587d129ad71cab0449e2e2137838cea759dec27b0b922c59ac4870ef6146ea283
+8c5650b6b9293c168af98cf60ad35c945a30f5545992a5a8c05d42e09f43b04d370c4d800f474b2323b4269281ca50f8
+868d95be8b34a337b5da5d886651e843c073f324f9f1b4fbd1db14f74aba6559449f94c599f387856c5f8a7bc83b52a1
+812df0401d299c9e95a8296f9c520ef12d9a3dd88749b51eab8c1b7cc97961608ab9fc241a7e2888a693141962c8fd6d
+abda319119d8a4d089393846830eee19d5d6e65059bf78713b307d0b4aad245673608b0880aa31c27e96c8d02eff39c0
+887f11ae9e488b99cb647506dcaa5e2518b169ee70a55cd49e45882fe5bfb35ffaf11feb2bf460c17d5e0490b7c1c14d
+b36b6e9f95ffff917ca472a38fa7028c38dc650e1e906e384c10fe38a6f55e9b84b56ffa3a429d3b0c3e2cf8169e66a9
+a0450514d20622b7c534f54be3260bab8309632ca21c6093aa0ccc975b8eed33a922cbcc30a730ccc506edf9b188a879
+87cfaf7bcd5d26875ca665ac45f9decd3854701b0443332da0f9b213e69d6f5521ae0217ec375489cd4fad7b4babf724
+842ad67c1baf7a9d4504c10c5c979ce0a4d1b86a263899e2b5757407c2adcdcf7ed58173ad9d156d84075ef8798cb1c4
+ac1a05755fe4d3fb2ab5b951bafe65cca7c7842022ca567b32cddf7741782cbf8c4990c1dd4ea05dc087a4712844aebb
+a000c8cecc4fddeb926dc8dd619952bc51d00d7c662e025f973387a3fc8b1ef5c7c10b6a62e963eb785e0ec04cb1ffbe
+8a573c9986dbeb469547dfd09f60078eab252d8ec17351fe373a38068af046b0037967f2b3722fa73ed73512afd038d2
+b8dff15dff931f58ba05b6010716c613631d7dd9562ae5138dbec966630bcdb0e72552e4eefc0351a6a6b7912d785094
+990e81fd459433522e8b475e67e847cb342c4742f0dbf71acc5754244ccd1d9ff75919168588d8f18b8aea17092dd2a4
+b012f8644da2113bef7dd6cdc622a55cfa0734bd267b847d11bba2e257a97a2a465c2bb616c240e197ff7b23e2ce8d8e
+a659bd590fde467766e2091c34a0b070772f79380be069eef1afecc470368a95afd9eed6520d542c09c0d1a9dca23bd0
+b9239f318b849079477d1cf0a60a3d530391adacd95c449373da1c9f83f03c496c42097c3f9aca10c1b9b3dbe5d98923
+851e9a6add6e4a0ee9994962178d06f6d4fbc0def97feef1ba4c86d3bcf027a59bafa0cf25876ca33e515a1e1696e5cc
+803b9c5276eed78092de2f340b2f0d0165349a24d546e495bd275fe16f89a291e4c74c22fdee5185f8fce0c7fbced201
+95915654ca4656d07575168fb7290f50dc5dcbbcdf55a44df9ec25a9754a6571ab8ca8a159bc27d9fa47c35ffd8f7ffd
+88f865919764e8e765948780c4fdd76f79af556cd95e56105d603c257d3bfb28f11efca1dfb2ce77162f9a5b1700bac8
+b1233131f666579b4cc8b37cfa160fc10551b1ec33b784b82685251464d3c095cdde53d0407c73f862520aa8667b1981
+a91115a15cf4a83bda1b46f9b9719cfba14ffb8b6e77add8d5a0b61bea2e4ea8ce208e3d4ed8ca1aab50802b800e763a
+93553b6c92b14546ae6011a34600a46021ce7d5b6fbfcda2a70335c232612205dbe6bfb1cc42db6d49bd4042c8919525
+8c2a498e5d102e80c93786f13ccf3c9cab7f4c538ccf0aee8d8191da0dbca5d07dff4448383e0cf5146f6d7e629d64f8
+a66ab92c0d2c07ea0c36787a86b63ee200499527c93b9048b4180fc77e0bb0aa919f4222c4bec46eeb3f93845ab2f657
+917e4fc34081a400fc413335fdf5a076495ae19705f8542c09db2f55fa913d6958fa6d711f49ad191aec107befc2f967
+940631a5118587291c48ac8576cdc7e4a904dd9272acb79407a7d3549c3742d9b3669338adbc1386724cc17ee0cc1ca3
+ae23ae3a531900550671fd10447a35d3653c5f03f65b0fdffe092844c1c95d0e67cab814d36e6388db5f8bd0667cd232
+ae545727fca94fd02f43e848f0fbbb1381fd0e568a1a082bf3929434cc73065bfbc9f2c840b270dda8cc2e08cd4d44b0
+8a9bc9b90e98f55007c3a830233c7e5dc3c4760e4e09091ff30ee484b54c5c269e1292ce4e05c303f6462a2a1bd5de33
+a5a2e7515ce5e5c1a05e5f4c42f99835f6fde14d47ecb4a4877b924246038f5bc1b91622e2ff97ed58737ed58319acfa
+8fa9f5edf9153618b72b413586e10aaa6c4b6e5d2d9c3e8693ca6b87804c58dc4bf23a480c0f80cb821ebc3cf20ea4fc
+925134501859a181913aadac9f07f73d82555058d55a7d5aaa305067fbd0c43017178702facc404e952ea5cfd39db59b
+8b5ab1d9b5127cb590d6bddbf698ffe08770b6fc6527023d6c381f39754aecc43f985c47a46be23fe29f6ca170249b44
+aa39c6b9626354c967d93943f4ef09d637e13c505e36352c385b66e996c19c5603b9f0488ad4014bb5fc2e051b2876cc
+8e77399c6e9cb8345002195feb7408eb571e6a81c0418590d2d775af7414fc17e61fe0cd37af8e737b59b89c849d3a28
+a0150aeca2ddc9627c7ea0af0dd4426726583389169bc8174fc1597cc8048299cc594b22d234a4e013dff7232b2d946c
+98659422ef91f193e6104b09ff607d1ed856bb6baed2a6386c9457efbc748bd1bf436573d80465ebc54f8c340b697ea5
+8d6fb015898d3672eb580e1ffdf623fc4b23076664623b66bfb18f450d29522e8cb9c90f00d28ccf00af34f730bff7ac
+996a8538efa9e2937c1caad58dc6564e5c185ada6cdcef07d5ec0056eb1259b0e4cef410252a1b5dbaee0da0b98dac91
+aa0ae2548149d462362a33f96c3ce9b5010ebf202602e81e0ef77e22cfc57ecf03946a3076b6171bea3d3dc9681187d7
+a5ce876b29f6b89050700df46d679bed85690daf7bad5c0df65e6f3bde5673e6055e6c29a4f4dcb82b93ccecf3bad9cc
+81d824bb283c2f55554340c3514e15f7f1db8e9e95dd60a912826b1cccb1096f993a6440834dad3f2a5de70071b4b4b5
+914e7291da286a89dfc923749da8f0bf61a04faa3803d6d10633261a717184065dcc4980114ad852e359f79794877dd9
+ae49dc760db497c8e834510fe89419cc81f33fd2a2d33de3e5e680d9a95a0e6a3ccbdf7c0953beeb3d1caf0a08b3e131
+b24f527d83e624d71700a4b238016835a2d06f905f3740f0005105f4b2e49fc62f7e800e33cdc900d805429267e42fc0
+b03471ecaa7a3bf54503347f470a6c611e44a3cee8218ad3fcad61d286cfb7bb6a1113dad18475ec3354a71fcc4ec1e2
+881289b82b30aff4c8f467c2a25fced6064e1eece97c0de083e224b21735da61c51592a60f2913e8c8ba4437801f1a83
+b4ce59c0fc1e0ecad88e79b056c2fd09542d53c40f41dea0f094b7f354ad88db92c560b9aeb3c0ef48137b1a0b1c3f95
+a1ffb30eb8ef0e3ea749b5f300241ebe748ed7cf480e283dfcda7380aa1c15347491be97e65bc96bdf3fe62d8b74b3ae
+b8954a826c59d18c6bfab24719f8730cc901868a95438838cd61dac468a2d79b1d42f77284e86e3382bf4f2a22044927
+818e7e7c59b6b5e22b3c2c19c163f2e787f2ff3758d395a4da02766948935eb44413c3ddd2bf45804a3c19744aa332f3
+a29556e49866e4e6f01d4f042eed803beeda781462884a603927791bd3750331a11bc013138f3270c216ab3aa5d39221
+b40885fa0287dc92859b8b030c7cca4497e96c387dcfe6ed13eb7f596b1eb18fb813e4ae139475d692f196431acb58fe
+89cd634682fd99ee74843ae619832780cf7cd717f230ea30f0b1821caf2f312b41c91f459bdba723f780c7e3eed15676
+b48c550db835750d45a7f3f06c58f8f3bf8766a441265ca80089ead0346f2e17cbb1a5e843557216f5611978235e0f83
+90936ee810039783c09392857164ab732334be3a3b9c6776b8b19f5685379c623b1997fb0cdd43af5061d042247bc72f
+a6258a6bae36525794432f058d4b3b7772ba6a37f74ef1c1106c80a380fc894cbeac4f340674b4e2f7a0f9213b001afd
+8f26943a32cf239c4e2976314e97f2309a1c775777710393c672a4aab042a8c6ee8aa9ac168aed7c408a436965a47aeb
+820f793573ca5cc3084fe5cef86894c5351b6078df9807d4e1b9341f9d5422dd29d19a73b0843a14ad63e8827a75d2da
+a3c4fca786603cd28f2282ba02afe7cf9287529e0e924ca90d6cdfd1a3912478ebb3076b370ee72e00df5517134fe17f
+8f3cdabd0b64a35b9ee9c6384d3a8426cc49ae6063632fb1a56a0ae94affa833955f458976ff309dafd0b2dd540786ae
+945a0630cd8fa111cfd776471075e5d2bbe8eb7512408b5c79c8999bfaeca6c097f988fb1c38fa9c1048bac2bca19f2e
+8a7f6c4e0ba1920c98d0b0235b4dda73b631f511e209b10c05c550f51e91b4ba3893996d1562f04ac7105a141464e0e9
+ab3c13d8b78203b4980412edc8a8f579e999bf79569e028993da9138058711d19417cf20b477ef7ed627fa4a234c727a
+82b00d9a3e29ed8d14c366f7bb25b8cfe953b7be275db9590373a7d8a86ea927d56dc3070a09ef7f265f6dd99a7c896e
+b6e48a282de57949821e0c06bc9ba686f79e76fb7cbf50ea8b4651ccd29bc4b6da67efea4662536ba9912d197b78d915
+a749e9edcba6b4f72880d3f84a493f4e8146c845637009f6ff227ff98521dbbe556a3446340483c705a87e40d07364bc
+b9b93c94bd0603ce5922e9c4c29a60066b64a767b3aed81d8f046f48539469f5886f14c09d83b5c4742f1b03f84bb619
+afa70b349988f85ed438faafa982df35f242dd7869bda95ae630b7fd48b5674ef0f2b4d7a1ca8d3a2041eff9523e9333
+a8e7e09b93010982f50bd0930842898c0dcd30cdb9b123923e9d5ef662b31468222fc50f559edc57fcfdc597151ebb6e
+8ce73be5ac29b0c2f5ab17cae32c715a91380288137d7f8474610d2f28d06d458495d42b9cb156fb1b2a7dfdcc437e1c
+85596c1d81f722826d778e62b604eb0867337b0204c9fae636399fa25bb81204b501e5a5912654d215ec28ff48b2cb07
+96ff380229393ea94d9d07e96d15233f76467b43a3e245ca100cbecbdbb6ad8852046ea91b95bb03d8c91750b1dfe6e1
+b7417d9860b09f788eb95ef89deb8e528befcfa24efddbc18deaf0b8b9867b92361662db49db8121aeea85a9396f64fd
+97b07705332a59cdba830cc8490da53624ab938e76869b2ce56452e696dcc18eb63c95da6dffa933fb5ffb7585070e2d
+971f757d08504b154f9fc1c5fd88e01396175b36acf7f7abcfed4fff0e421b859879ed268e2ac13424c043b96fbe99fc
+b9adb5d3605954943a7185bddf847d4dbe7bafe970e55dc0ec84d484967124c26dd60f57800d0a8d38833b91e4da476a
+b4856741667bb45cae466379d9d6e1e4191f319b5001b4f963128b0c4f01819785732d990b2f5db7a3452722a61cd8cc
+a81ec9f2ab890d099fb078a0c430d64e1d06cbbe00b1f140d75fc24c99fe35c13020af22de25bbe3acf6195869429ba5
+99dcea976c093a73c08e574d930d7b2ae49d7fe43064c3c52199307e54db9e048abe3a370b615798b05fe8425a260ba0
+a1f7437c0588f8958b06beb07498e55cd6553429a68cd807082aa4cc031ab2d998d16305a618b3d92221f446e6cd766d
+806e4e0958e0b5217996d6763293f39c4f4f77016b3373b9a88f7b1221728d14227fce01b885a43b916ff6c7a8bc2e06
+8e210b7d1aff606a6fc9e02898168d48ec39bc687086a7fe4be79622dd12284a5991eb53c4adfe848251f20d5bfe9de0
+82810111e10c654a6c07cbfd1aff66727039ebc3226eef8883d570f25117acf259b1683742f916ac287097223afc6343
+92f0e28cca06fd543f2f620cc975303b6e9a3d7c96a760e1d65b740514ccd713dc7a27a356a4be733570ca199edd17ba
+900810aa4f98a0d6e13baf5403761a0aeb6422249361380c52f98b2c79c651e3c72f7807b5b5e3a30d65d6ff7a2a9203
+b0740bfefea7470c4c94e85185dbe6e20685523d870ff3ef4eb2c97735cef41a6ab9d8f074a37a81c35f3f8a7d259f0e
+af022e98f2f418efbbe2de6fefb2aa133c726174f0f36925a4eafd2c6fd6c744edb91386bafb205ce13561de4294f3a6
+95e4592e21ba97e950abb463e1bc7b0d65f726e84c06a98eb200b1d8bfc75d4b8cff3f55924837009e88272542fd25ec
+b13bd6b18cd8a63f76c9831d547c39bbd553bda66562c3085999c4da5e95b26b74803d7847af86b613a2e80e2f08caae
+a5625658b474a95aba3e4888c57d82fb61c356859a170bc5022077aa6c1245022e94d3a800bf7bd5f2b9ab1348a8834e
+a097ee9e6f1d43e686df800c6ce8cfc1962e5a39bb6de3cf5222b220a41b3d608922dae499bce5c89675c286a98fdabd
+94230ba8e9a5e9749cd476257b3f14a6bf9683e534fb5c33ca21330617533c773cb80e508e96150763699ad6ecd5aee7
+b5fea7e1f4448449c4bc5f9cc01ac32333d05f464d0ed222bf20e113bab0ee7b1b778cd083ceae03fdfd43d73f690728
+a18a41a78a80a7db8860a6352642cdeef8a305714543b857ca53a0ee6bed70a69eeba8cfcf617b11586a5cc66af4fc4f
+85d7f4b3ff9054944ac80a51ef43c04189d491e61a58abed3f0283d041f0855612b714a8a0736d3d25c27239ab08f2ec
+b1da94f1e2aedd357cb35d152e265ccfc43120825d86733fa007fc1e291192e8ff8342306bef0c28183d1df0ccec99d0
+852893687532527d0fbeea7543ac89a37195eadab2f8f0312a77c73bdeed4ad09d0520f008d7611539425f3e1b542cfd
+99e3bd4d26df088fc9019a8c0b82611fd4769003b2a262be6b880651d687257ded4b4d18ccb102cba48c5e53891535e4
+98c407bc3bbc0e8f24bedf7a24510a5d16bce1df22940515a4fbdacd20d06d522ef9405f5f9b9b55964915dd474e2b5c
+80de0a12f917717c6fc9dc3ccc9732c28bae36cff4a9f229d5eaf0d3e43f0581a635ba2e38386442c973f7cb3f0fdfa7
+94f9615f51466ae4bb9c8478200634b9a3d762d63f2a16366849096f9fc57f56b2e68fe0ca5d4d1327a4f737b3c30154
+a3dcbe16499be5ccb822dfcd7c2c8848ba574f73f9912e9aa93d08d7f030b5076ca412ad4bf6225b6c67235e0ab6a748
+98f137bf2e1aea18289750978feb2e379054021e5d574f66ca7b062410dcfe7abb521fab428f5b293bbe2268a9af3aa4
+8f5021c8254ba426f646e2a15b6d96b337a588f4dfb8cbae2d593a4d49652ca2ada438878de5e7c2dbbd69b299506070
+8cc3f67dd0edcdb51dfd0c390586622e4538c7a179512f3a4f84dd7368153a28b1cf343afd848ac167cb3fcaa6aee811
+863690f09ac98484d6189c95bc0d9e8f3b01c489cb3f9f25bf7a13a9b6c1deaf8275ad74a95f519932149d9c2a41db42
+8494e70d629543de6f937b62beca44d10a04875bd782c9a457d510f82c85c52e6d34b9c3d4415dd7a461abbcc916c3c4
+925b5e1e38fbc7f20371b126d76522c0ea1649eb6f8af8efb389764ddcf2653775ef99a58a2dcf1812ce882964909798
+94d0494dcc44893c65152e7d42f4fb0dc46af5dc5674d3c607227160447939a56d9f9ea2b3d3736074eef255f7ec7566
+b0484d33f0ef80ff9b9d693c0721c77e518d0238918498ddf71f14133eb484defb9f9f7b9083d52bc6d6ba2012c7b036
+8979e41e0bb3b501a7ebbd024567ce7f0171acfea8403a530fe9e791e6e859dfbd60b742b3186d7cf5ab264b14d34d04
+af93185677d39e94a2b5d08867b44be2ba0bb50642edca906066d80facde22df4e6a7a2bd8b2460a22bdf6a6e59c5fdd
+90f0ef0d7e7ab878170a196da1b8523488d33e0fde7481f6351558b312d00fa2b6b725b38539063f035d2a56a0f5e8f1
+a9ca028ccb373f9886574c2d0ea5184bc5b94d519aa07978a4814d649e1b6c93168f77ae9c6aa3872dd0eea17968ec22
+82e7aa6e2b322f9f9c180af585b9213fb9d3ad153281f456a02056f2d31b20d0f1e8807ff0c85e71e7baca8283695403
+affce186f842c547e9db2dffc0f3567b175be754891f616214e8c341213cbf7345c9ecd2f704bb0f4b6eba8845c8d8a7
+ab119eb621fade27536e98c6d1bc596388bb8f5cad65194ea75c893edbe6b4d860006160f1a9053aea2946bd663e5653
+99cd2c1c38ead1676657059dc9b43d104e8bd00ae548600d5fc5094a4d875d5b2c529fac4af601a262045e1af3892b5e
+b531a43b0714cc638123487ef2f03dfb5272ff399ff1aa67e8bc6a307130d996910fb27075cbe53050c0f2902fc32ffe
+923b59ac752c77d16b64a2d0a5f824e718460ef78d732b70c4c776fecc43718ecfaf35f11afbb544016232f445ecab66
+a53439cd05e6e1633cdce4a14f01221efcd3f496ac1a38331365c3cadc30013e5a71600c097965927ee824b9983a79cb
+8af976ffab688d2d3f9e537e2829323dda9abf7f805f973b7e0a01e25c88425b881466dee37b25fda4ea683a0e7b2c03
+92e5f40230a9bfbb078fa965f58912abb753b236f6a5c28676fb35be9b7f525e25428160caeaf0e3645f2be01f1a6599
+8c4e7b04e2f968be527feba16f98428508a157b7b4687399df87666a86583b4446a9f4b86358b153e1660bb80bd92e8b
+97cd622d4d8e94dceb753c7a4d49ea7914f2eb7d70c9f56d1d9a6e5e5cc198a3e3e29809a1d07d563c67c1f8b8a5665a
+967bfa8f411e98bec142c7e379c21f5561f6fd503aaf3af1a0699db04c716c2795d1cb909cccbcb917794916fdb849f1
+b3c18a6caa5ca2be52dd500f083b02a4745e3bcaed47b6a000ce7149cee4ed7a78d2d7012bf3731b1c15c6f04cbd0bd1
+b3f651f1f84026f1936872956a88f39fcfe3e5a767233349123f52af160f6c59f2c908c2b5691255561f0e70620c8998
+ae23b59dc2d81cec2aebcaaf607d7d29cf588f0cbf7fa768c422be911985ca1f532bb39405f3653cc5bf0dcba4194298
+a1f4da396f2eec8a9b3252ea0e2d4ca205f7e003695621ae5571f62f5708d51ca3494ac09c824fca4f4d287a18beea9a
+a036fa15e929abed7aac95aa2718e9f912f31e3defd224e5ed379bf6e1b43a3ad75b4b41208c43d7b2c55e8a6fedca72
+80e8372d8a2979ee90afbdb842624ace72ab3803542365a9d1a778219d47f6b01531185f5a573db72213ab69e3ffa318
+af68b5cdc39e5c4587e491b2e858a728d79ae7e5817a93b1ea39d34aec23dea452687046c8feae4714def4d0ed71da16
+b36658dfb756e7e9eec175918d3fe1f45b398679f296119cd53be6c6792d765ef5c7d5afadc5f3886e3f165042f4667f
+ad831da03b759716f51099d7c046c1a8e7bf8bb45a52d2f2bfd769e171c8c6871741ef8474f06e2aca6d2b141cf2971f
+8bae1202dde053c2f59efc1b05cb8268ba9876e4bd3ff1140fa0cc5fa290b13529aede965f5efdff3f72e1a579efc9cc
+86344afbc9fe077021558e43d2a032fcc83b328f72948dba1a074bb1058e8a8faec85b1c019fc9836f0d11d2585d69c8
+831d1fc7aa28f069585d84c46bdc030d6cb12440cfaae28098365577fc911c4b8f566d88f80f3a3381be2ec8088bf119
+899de139797ac1c8f0135f0656f04ad4f9b0fa2c83a264d320eb855a3c0b9a4907fc3dc01521d33c07b5531e6a997064
+855bc752146d3e5b8ba7f382b198d7dc65321b93cdfc76250eabc28dba5bbf0ad1be8ccda1adf2024125107cb52c6a6e
+af0aeccab48eb35f8986cabf07253c5b876dd103933e1eee0d99dc0105936236b2a6c413228490ed3db4fa69aab51a80
+ae62e9d706fbf535319c909855909b3deba3e06eaf560803fa37bce3b5aab5ea6329f7609fea84298b9da48977c00c3b
+823a8d222e8282d653082d55a9508d9eaf9703ce54d0ab7e2b3c661af745a8b6571647ec5bd3809ae6dddae96a220ea7
+a4c87e0ea142fc287092bc994e013c85e884bc7c2dde771df30ca887a07f955325c387b548de3caa9efa97106da8176a
+b55d925e2f614f2495651502cf4c3f17f055041fa305bb20195146d896b7b542b1e45d37fa709ca4bfc6b0d49756af92
+b0ebe8947f8c68dc381d7bd460995340efcbb4a2b89f17077f5fde3a9e76aef4a9a430d1f85b2274993afc0f17fdbead
+8baaa640d654e2652808afd68772f6489df7cad37b7455b9cd9456bdddae80555a3f84b68906cc04185b8462273dcfc9
+add9aa08f827e7dc292ac80e374c593cd40ac5e34ad4391708b3db2fe89550f293181ea11b5c0a341b5e3f7813512739
+909e31846576c6bdd2c162f0f29eea819b6125098452caad42451491a7cde9fd257689858f815131194200bca54511f4
+abc4b34098db10d71ce7297658ef03edfa7377bd7ed36b2ffbab437f8fd47a60e2bcfbc93ff74c85cfce74ca9f93106c
+857dbecc5879c1b952f847139484ef207cecf80a3d879849080758ef7ac96acfe16a11afffb42daf160dc4b324279d9b
+aab0b49beecbcf3af7c08fbf38a6601c21061bed7c8875d6e3c2b557ecb47fd93e2114a3b09b522a114562467fcd2f7d
+94306dec35e7b93d43ed7f89468b15d3ce7d7723f5179cacc8781f0cf500f66f8c9f4e196607fd14d56257d7df7bf332
+9201784d571da4a96ef5b8764f776a0b86615500d74ec72bc89e49d1e63a3763b867deca07964e2f3914e576e2ca0ded
+aabe1260a638112f4280d3bdea3c84ce3c158b81266d5df480be02942cecf3de1ac1284b9964c93d2db33f3555373dcc
+8ef28607ca2e0075aa07de9af5a0f2d0a97f554897cab8827dfe3623a5e9d007d92755d114b7c390d29e988b40466db9
+87a9b1b097c3a7b5055cd9cb0c35ba6251c50e21c74f6a0bca1e87e6463efc38385d3acc9d839b4698dfa2eb4cb7a2ef
+aee277e90d2ffce9c090295c575e7cd3bafc214d1b5794dd145e6d02d987a015cb807bd89fd6268cd4c59350e7907ee2
+836ad3c9324eaa5e022e9835ff1418c8644a8f4cd8e4378bd4b7be5632b616bb6f6c53399752b96d77472f99ece123cd
+8ffffdb67faa5f56887c834f9d489bb5b4dab613b72eac8abf7e4bcb799ccd0dbd88a2e73077cadf7e761cb159fb5ec5
+9158f6cd4f5e88e6cdb700fddcbc5a99b2d31a7a1b37dce704bd9dd3385cca69607a615483350a2b1153345526c8e05d
+a7ff0958e9f0ccff76742fc6b60d2dd91c552e408c84172c3a736f64acb133633540b2b7f33bc7970220b35ce787cd4e
+8f196938892e2a79f23403e1b1fb4687a62e3a951f69a7874ec0081909eb4627973a7a983f741c65438aff004f03ba6f
+97e3c1981c5cdb0a388f1e4d50b9b5b5f3b86d83417831c27b143698b432bb5dba3f2e590d6d211931ed0f3d80780e77
+903a53430b87a7280d37816946245db03a49e38a789f866fe00469b7613ee7a22d455fb271d42825957282c8a4e159d9
+b78955f686254c3994f610e49f1c089717f5fb030da4f9b66e9a7f82d72381ba77e230764ab593335ff29a1874848a09
+938b6d04356b9d7c8c56be93b0049d0d0c61745af7790edf4ef04e64de2b4740b038069c95be5c91a0ba6a1bb38512a9
+a769073b9648fe21bc66893a9ef3b8848d06f4068805a43f1c180fdd0d37c176b4546f8e5e450f7b09223c2f735b006f
+863c30ebe92427cdd7e72d758f2c645ab422e51ecef6c402eb1a073fd7f715017cd58a2ad1afe7edccdf4ff01309e306
+a617b0213d161964eccfc68a7ad00a3ee4365223b479576e887c41ef658f846f69edf928bd8da8785b6e9887031f6a57
+a699834bf3b20d345082f13f360c5f8a86499e498e459b9e65b5a56ae8a65a9fcb5c1f93c949391b4795ef214c952e08
+9921f1da00130f22e38908dd2e44c5f662ead6c4526ebb50011bc2f2819e8e3fca64c9428b5106fa8924db76b7651f35
+98da928be52eb5b0287912fd1c648f8bbda00f5fd0289baf161b5a7dbda685db6ad6bdc121bc9ffa7ed6ae03a13dbee3
+927b91d95676ff3c99de1312c20f19251e21878bfb47ad9f19c9791bc7fb9d6f5c03e3e61575c0760180d3445be86125
+b8e4977a892100635310dfcb46d8b74931ac59ae687b06469b3cee060888a3b6b52d89de54e173d9e1641234754b32b1
+98f6fd5f81ca6e2184abd7a3a59b764d4953d408cec155b4e5cf87cd1f6245d8bdd58b52e1e024e22903e85ae15273f1
+909aaacbbfe30950cf7587faa190dc36c05e3c8131749cc21a0c92dc4afc4002275762ca7f66f91aa751b630ad3e324d
+91712141592758f0e43398c075aaa7180f245189e5308e6605a6305d01886d2b22d144976b30460d8ce17312bb819e8f
+947d85cb299b189f9116431f1c5449f0f8c3f1a70061aa9ebf962aa159ab76ee2e39b4706365d44a5dbf43120a0ac255
+b39eced3e9a2e293e04d236976e7ee11e2471fe59b43e7b6dd32ab74f51a3d372afee70be1d90af017452ec635574e0e
+8a4ba456491911fc17e1cadcbb3020500587c5b42cf6b538d1cb907f04c65c168add71275fbf21d3875e731404f3f529
+8f6858752363e2a94c295e0448078e9144bf033ccd4d74f4f6b95d582f3a7638b6d3f921e2d89fcd6afd878b12977a9d
+b7f349aa3e8feb844a56a42f82b6b00f2bfe42cab19f5a68579a6e8a57f5cf93e3cdb56cbbb9163ab4d6b599d6c0f6aa
+a4a24dc618a6b4a0857fb96338ac3e10b19336efc26986e801434c8fdde42ca8777420722f45dfe7b67b9ed9d7ce8fb1
+aafe4d415f939e0730512fc2e61e37d65c32e435991fb95fb73017493014e3f8278cd0d213379d2330b06902f21fe4e1
+845cc6f0f0a41cc6a010d5cb938c0ef8183ff5ed623b70f7ea65a8bdbc7b512ea33c0ee8b8f31fdf5f39ec88953f0c1e
+811173b4dd89d761c0bdffe224cd664ef303c4647e6cf5ef0ed665d843ed556b04882c2a4adfc77709e40af1cfdea40b
+93ba1db7c20bfba22da123b6813cb38c12933b680902cef3037f01f03ab003f76260acc12e01e364c0d0cf8d45fca694
+b41694db978b2cf0f4d2aa06fcfc4182d65fb7c9b5e909650705f779b28e47672c47707d0e5308cd680c5746c37e1bc7
+a0e92c4c5be56a4ccf1f94d289e453a5f80e172fc90786e5b03c1c14ce2f3c392c349f76e48a7df02c8ae535326ea8fe
+96cbeb1d0693f4f0b0b71ad30def5ccc7ad9ebe58dbe9d3b077f2ac16256cde10468875e4866d63e88ce82751aaf8ef6
+935b87fd336f0bf366046e10f7c2f7c2a2148fa6f53af5607ad66f91f850894527ecec7d23d81118d3b2ee23351ed6ed
+b7c2c1fa6295735f6b31510777b597bc8a7bfb014e71b4d1b5859be0d8d64f62a1587caafc669dfe865b365eb27bd94f
+b25d93af43d8704ffd53b1e5c16953fd45e57a9a4b7acfcfa6dd4bf30ee2a8e98d2a76f3c8eba8dc7d08d9012b9694c6
+b5a005cd9f891e33882f5884f6662479d5190b7e2aec1aa5a6d15a8cb60c9c983d1e7928e25e4cf43ec804eaea1d97b0
+93f9f0725a06e4a0fb83892102b7375cf5438b5ebc9e7be5a655f3478d18706cf7dbb1cd1adcee7444c575516378aa1b
+900d7cbf43fd6ac64961287fe593c08446874bfc1eb09231fc93de858ac7a8bca496c9c457bced5881f7bf245b6789e0
+90c198526b8b265d75160ef3ed787988e7632d5f3330e8c322b8faf2ac51eef6f0ce5a45f3b3a890b90aecf1244a3436
+b499707399009f9fe7617d8e73939cb1560037ad59ac9f343041201d7cc25379df250219fd73fa012b9ade0b04e92efa
+94415f6c3a0705a9be6a414be19d478181d82752b9af760dda0dbd24a8ff0f873c4d89e61ad2c13ebf01de55892d07fa
+90a9f0b9f1edb87751c696d390e5f253586aae6ebfc31eb3b2125d23877a497b4aa778de8b11ec85efe49969021eaa5a
+a9942c56506e5cd8f9289be8205823b403a2ea233ba211cf72c2b3827064fd34cd9b61ff698a4158e7379891ca4120d8
+83bb2ee8c07be1ab3a488ec06b0c85e10b83a531758a2a6741c17a3ccfa6774b34336926a50e11c8543d30b56a6ac570
+8a08a3e5ebe10353e0b7fff5f887e7e25d09bb65becf7c74a03c60c166132efaada27e5aea242c8b9f43b472561ae3ed
+957c7a24cefaa631fe8a28446bc44b09a3d8274591ade53ba489757b854db54820d98df47c8a0fbee0e094f8ad7a5dc4
+b63556e1f47ed3ee283777ed46b69be8585d5930960d973f8a5a43508fc56000009605662224daec2de54ea52a8dcd82
+abed2b3d16641f0f459113b105f884886d171519b1229758f846a488c7a474a718857323c3e239faa222c1ab24513766
+882d36eed6756d86335de2f7b13d753f91c0a4d42ef50e30195cc3e5e4f1441afa5ff863022434acb66854eda5de8715
+a65ea7f8745bb8a623b44e43f19158fd96e7d6b0a5406290f2c1348fc8674fbfc27beb4f724cc2b217c6042cb82bc178
+a038116a0c76af090a069ca289eb2c3a615b96093efacfe68ea1610890b291a274e26b445d34f414cfec00c333906148
+90294f452f8b80b0a47c3bcb6e30bdd6854e3b01deaf93f5e82a1889a4a1036d17ecb59b48efa7dc41412168d7a523dd
+88faf969c8978a756f48c6114f7f33a1ca3fd7b5865c688aa9cd32578b1f7ba7c06120502f8dc9aee174ecd41597f055
+8883763b2762dfff0d9be9ac19428d9fd00357ac8b805efda213993152b9b7eb7ba3b1b2623015d60778bffda07a724d
+a30a1a5a9213636aa9b0f8623345dc7cf5c563b906e11cc4feb97d530a1480f23211073dcb81105b55193dcde5a381d2
+b45ee93c58139a5f6be82572d6e14e937ef9fcbb6154a2d77cb4bf2e4b63c5aabc3277527ecf4e531fe3c58f521cc5e3
+ac5a73e4f686978e06131a333f089932adda6c7614217fcaf0e9423b96e16fd73e913e5e40bf8d7800bed4318b48d4b1
+b6c1e6cdd14a48a7fe27cd370d2e3f7a52a91f3e8d80fb405f142391479f6c6f31aa5c59a4a0fdc9e88247c42688e0cf
+ab1760530312380152d05c650826a16c26223960fc8e3bf813161d129c01bac77583eff04ce8678ff52987a69886526b
+a4252dffae7429d4f81dfaeeecc48ab922e60d6a50986cf063964f282e47407b7e9c64cf819da6f93735de000a70f0b2
+94c19f96d5ecf4a15c9c5a24598802d2d21acbbd9ee8780b1bc234b794b8442437c36badc0a24e8d2cff410e892bb1d2
+89fafe1799cf7b48a9ea24f707d912fccb99a8700d7287c6438a8879f3a3ca3e60a0f66640e31744722624139ba30396
+b0108405df25cf421c2f1873b20b28552f4d5d1b4a0bf1c202307673927931cbd59f5781e6b8748ddb1206a5ec332c0b
+aa0f0e7d09f12b48f1e44d55ec3904aa5707e263774126e0b30f912e2f83df9eb933ca073752e6b86876adaf822d14ba
+b0cbe8abb58876d055c8150d9fdbde4fea881a517a2499e7c2ea4d55c518a3c2d00b3494f6a8fd1a660bfca102f86d2a
+b1ef80ec903bac55f58b75933dc00f1751060690fd9dfb54cf448a7a4b779c2a80391f5fda65609274bd9e0d83f36141
+8b52e05b1845498c4879bb12816097be7fc268ce1cf747f83a479c8e08a44159fc7b244cf24d55aca06dccf0b97d11e1
+b632a2fc4fdb178687e983a2876ae23587fd5b7b5e0bb8c0eb4cfe6d921a2c99894762e2aaccdc5da6c48da3c3c72f6c
+953ef80ab5f74274ae70667e41363ae6e2e98ccbd6b7d21f7283f0c1cafb120338b7a8b64e7c189d935a4e5b87651587
+b929cfd311017c9731eed9d08d073f6cf7e9d4cd560cddd3fdcb1149ab20c6610a7674a66a3616785b13500f8f43ee86
+870fb0d02704b6a328e68721fb6a4b0f8647681bfcb0d92ec3e241e94b7a53aecc365ed384e721c747b13fbf251002f1
+979501159833a8ba5422ed9b86f87b5961711f5b474d8b0e891373fe2d0b98ff41a3a7a74a8b154615bb412b662a48be
+b20f9c13cdeceef67f877b3878839ef425f645b16a69c785fe38f687c87a03b9de9ae31ac2edb1e1dd3a9f2c0f09d35d
+8c7705ed93290731b1cf6f3bf87fc4d7159bb2c039d1a9f2246cda462d9cdf2beef62d9f658cfeea2e6aef7869a6fc00
+aa439eb15705ad729b9163daee2598d98a32a8a412777c0d12fd48dc7796d422227a014705e445cc9d66f115c96bbc24
+a32307e16f89749fe98b5df1effef0429801c067e0d8067794e56b01c4fef742ad5e7ab42a1a4cc4741808f47a0b7cb8
+b31e65c549003c1207258a2912a72f5bad9844e18f16b0773ea7af8ff124390eb33b2f715910fc156c104572d4866b91
+85608d918ed7b08a0dc03aee60ea5589713304d85eee7b4c8c762b6b34c9355d9d2e192575af0fd523318ae36e19ae1c
+a6497dbaf0e7035160b7a787150971b19cf5ba272c235b0113542288611ebecefa2b22f08008d3f17db6a70a542c258d
+87862adb1ac0510614ab909457c49f9ec86dc8bdf0e4682f76d2739df11f6ffcfb59975527f279e890d22964a1fba9b6
+8717ac3b483b3094c3b642f3fafe4fbafc52a5d4f2f5d43c29d9cfe02a569daee34c178ee081144494f3a2ca6e67d7b1
+855100ac1ec85c8b437fdd844abaa0ca4ac9830a5bdd065b68dafb37046fcf8625dd482dc0253476926e80a4c438c9ec
+ae74821bf265ca3c8702c557cf9ef0732ede7ef6ed658283af669d19c6f6b6055aca807cf2fa1a64785ec91c42b18ae5
+812a745b1419a306f7f20429103d6813cbdea68f82ff635ac59da08630cd61bda6e0fa9a3735bfd4378f58ad179c1332
+867dbbfe0d698f89451c37ca6d0585fd71ee07c3817e362ef6779b7b1d70b27c989cdd5f85ac33a0498db1c4d14521fe
+84db735d3eb4ff7f16502dccc3b604338c3a4a301220ad495991d6f507659db4b9f81bba9c528c5a6114bcdba0160252
+aadc83d1c4e5e32bf786cfb26f2f12a78c8024f1f5271427b086370cdef7a71d8a5bf7cd7690bae40df56c38b1ad2411
+a27860eb0caaea37298095507f54f7729d8930ac1929de3b7a968df9737f4c6da3173bda9d64ff797ed4c6f3a1718092
+a3cdcaa74235c0440a34171506ed03d1f72b150d55904ce60ec7b90fcd9a6f46f0e45feab0f9166708b533836686d909
+b209a30bdac5c62e95924928f9d0d0b4113ebb8b346d7f3a572c024821af7f036222a3bd38bd8efd2ee1dbf9ac9556cd
+83c93987eff8bc56506e7275b6bef0946672621ded641d09b28266657db08f75846dcbde80d8abc9470e1b24db4ca65b
+800c09b3ee5d0251bdaef4a82a7fe8173de997cc1603a2e8df020dd688a0c368ad1ebef016b35136db63e774b266c74c
+93fb52de00d9f799a9bce3e3e31aaf49e0a4fc865473feb728217bd70f1bc8a732ec37ac3582bf30ab60e8c7fdf3cb8d
+a1aff6b4a50d02f079a8895c74443539231bfdf474600910febf52c9151da7b31127242334ac63f3093e83a047769146
+8c4532d8e3abb5f0da851138bfa97599039bcd240d87bbdf4fd6553b2329abb4781074b63caf09bc724ceb4d36cb3952
+8bd9b0ae3da5acda9eb3881172d308b03beec55014cd73b15026299541c42fd38bab4983a85c06894ebb7a2af2a23d4c
+979441e7f5a0e6006812f21b0d236c5f505bb30f7d023cb4eb84ec2aa54a33ac91d87ece704b8069259d237f40901356
+a1c6d2d82e89957d6a3e9fef48deb112eb00519732d66d55aa0f8161e19a01e83b9f7c42ac2b94f337dcc9865f0da837
+97a0b8e04e889d18947d5bf77d06c25bbd62b19ce4be36aaa90ddbeafd93a07353308194199ba138efaadf1b928cd8d2
+822f7fbe9d966b8ec3db0fc8169ab39334e91bf027e35b8cc7e1fe3ead894d8982505c092f15ddfe5d8f726b360ac058
+a6e517eedd216949e3a10bf12c8c8ddbfde43cddcd2c0950565360a38444459191bdbc6c0af0e2e6e98bc6a813601c6d
+858b5f15c46c074adb879b6ba5520966549420cb58721273119f1f8bc335605aeb4aa6dbe64aae9e573ca7cc1c705cdc
+b5191bb105b60deb10466d8114d48fb95c4d72036164dd35939976e41406dff3ee3974c49f00391abfad51b695b3258c
+b1b375353ed33c734f4a366d4afad77168c4809aff1b972a078fd2257036fd6b7a7edad569533abf71bc141144a14d62
+a94c502a9cdd38c0a0e0187de1637178ad4fa0763887f97cc5bdd55cb6a840cb68a60d7dbb7e4e0e51231f7d92addcff
+8fe2082c1b410486a3e24481ae0630f28eb5b488e0bb2546af3492a3d9318c0d4c52db1407e8b9b1d1f23a7ffbaf260a
+b73fe7aa2b73f9cae6001af589bf8a9e73ea2bb3bb01b46743e39390c08d8e1be5e85a3d562857a9c9b802b780c78e6d
+8e347f51330ae62275441ccd60f5ac14e1a925a54ced8a51893d956acc26914df1bb8595385d240aa9b0e5ada7b520ea
+8dc573d6357c0113b026a0191a5807dbe42dcd2e19772d14b2ca735e1e67c70e319ef571db1f2a20e62254ed7fb5bcd6
+a5dacbe51549fe412e64af100b8b5eba5ec2258cc2a7c27a34bc10177d1894baf8707886d2f2ef438f077596a07681e9
+8349153c64961d637a5ff56f49003cb24106de19a5bbcf674016a466bfbe0877f5d1e74ccb7c2920665ef90a437b1b7e
+96ad35429d40a262fdc8f34b379f2e05a411057d7852c3d77b9c6c01359421c71ef8620f23854e0f5d231a1d037e3a0d
+b52385e40af0ed16e31c2154d73d1517e10a01435489fc801fbea65b92b3866ab46dab38d2c25e5fb603b029ae727317
+8e801c7a3e8fa91d9c22ebd3e14a999023a7b5beea13ec0456f7845425d28c92452922ca35ec64012276acb3bbc93515
+a8630870297d415e9b709c7f42aa4a32210b602f03a3015410123f0988aea2688d8bcfc6d07dc3602884abbf6199b23f
+8cd518392e09df2a3771a736f72c05af60efc030d62dbbb9cd68dc6cbbe1fb0854eb78b6ed38337010eb1bb44a5d5d30
+921aa4c66590f6c54bf2fa2b324f08cbe866329cc31f6e3477f97f73e1a1721d5eb50ed4eacc38051fe9eda76ba17632
+a37e595cb63524cb033c5540b6343c3a292569fc115e813979f63fe1a3c384b554cecc2cae76b510b640fe3a18800c81
+b0bb57e4e31ae3ce9f28cef158ed52dabfad5aa612f5fcc75b3f7f344b7cec56b989b5690dacd294e49c922d550ee36b
+a3c618ce4d091e768c7295d37e3f9b11c44c37507ae1f89867441f564bf0108f67bf64b4cf45d73c2afc17a4dc8b2c68
+999e6650eda5455e474c22a8c7a3fd5b547ec2875dc3043077ad70c332f1ccd02135e7b524fcbf3621d386dec9e614fa
+b018f080888dec3c2ca7fcfeb0d3d9984699b8435d8823079fc9e1af4ca44e257fbe8da2f6f641ee6152b5c7110e3e3c
+a2bcd4bcd9b40c341e9bba76b86481842f408166c9a7159205726f0776dcb7f15a033079e7589699e9e94ce24b2a77fd
+b03de48f024a520bb9c54985ca356fd087ca35ac1dd6e95168694d9dae653138c9755e18d5981946a080e32004e238fe
+a6c1a54973c0c32a410092441e20594aa9aa3700513ed90c8854956e98894552944b0b7ee9edf6e62e487dc4565baa2f
+845d7abf577c27c4c1fafc955dcad99a1f2b84b2c978cfe4bd3cd2a6185979491f3f3b0ec693818739ed9184aba52654
+9531bcfc0d3fcd4d7459484d15607d6e6181cee440ba6344b12a21daa62ff1153a4e9a0b5c3c33d373a0a56a7ad18025 +a0bbf49b2dd581be423a23e8939528ceaae7fb8c04b362066fe7d754ca2546304a2a90e6ac25cdf6396bf0096fae9781 +a1ec264c352e34ed2bf49681b4e294ffea7d763846be62b96b234d9a28905cdece4be310a56ec6a00fc0361d615b547c +87c575e85b5dfbfd215432cb355a86f69256fff5318e8fda457763ac513b53baa90499dc37574bdfad96b117f71cb45e +9972edfdeec56897bef4123385ee643a1b9dc24e522752b5a197ce6bd2e53d4b6b782b9d529ca50592ee65b60e4c9c3c +b8bcf8d4ab6ad37bdd6ad9913a1ba0aba160cb83d1d6f33a8524064a27ba74a33984cc64beeee9d834393c2636ff831a +83082b7ec5b224422d0ff036fbb89dc68918e6fde4077dfc0b8e2ee02595195ecadb60c9ab0ad69deb1bac9be75024fa +8b061fce6df6a0e5c486fd8d8809f6f3c93bd3378a537ff844970492384fb769d3845d0805edd7f0fcd19efabf32f197 +b9597e717bb53e6afae2278dbc45d98959c7a10c87c1001ed317414803b5f707f3c559be6784119d08f0c06547ec60b1 +b9d990fd7677dd80300714cfd09336e7748bbf26f4bb0597406fcb756d8828c33695743d7a3e3bd6ddf4f508149610ef +b45f7d2b00ceea3bf6131b230b5b401e13a6c63ba8d583a4795701226bf9eb5c88506f4a93219ac90ccbceef0bfd9d49 +a8ccaa13ca7986bc34e4a4f5e477b11ae91abb45c8f8bf44a1f5e839289681495aba3daa8fb987e321d439bbf00be789 +ae0f59f7a94288a0ead9a398fdd088c2f16cccb68624de4e77b70616a17ddf7406ca9dc88769dadeb5673ff9346d6006 +b28e965dcc08c07112ae3817e98f8d8b103a279ad7e1b7c3de59d9dbd14ab5a3e3266775a5b8bbf0868a14ae4ab110f1 +84751c1a945a6db3df997fcbde9d4fe824bc7ba51aa6cb572bb5a8f9561bef144c952198a783b0b5e06f9dd8aa421be8 +a83586db6d90ef7b4fa1cbda1de1df68ee0019f9328aded59b884329b616d888f300abb90e4964021334d6afdea058fd +8fcea1ce0abf212a56c145f0b8d47376730611e012b443b3d1563498299f55cbcbe8cbd02f10b78224818bb8cbbd9aaa +8d66c30a40c34f23bae0ea0999754d19c0eb84c6c0aa1b2cf7b0740a96f55dd44b8fee82b625e2dd6c3182c021340ac6 +92c9b35076e2998f1a0f720d5a507a602bd6bd9d44ffc29ede964044b17c710d24ce3c0b4a53c12195de93278f9ec83b +a37d213913aff0b792ee93da5d7e876f211e10a027883326d582ad7c41deebdfce52f86b57d07868918585908ebd070a +a03995b4c6863f80dd02ed0169b4f1609dc48174ec736de78be1cdff386648426d031f6d81d1d2a7f2c683b31e7628c0 +b08b628d481302aa68daf0fa31fd909064380d62d8ed23a49037cb38569058e4c16c80e600e84828d37a89a33c323d1f +a0ee2e2dd8e27661d7b607c61ac36f590909aa97f80bdfd5b42463ca147b610ac31a9f173cbecdd2260f0f9ea9e56033 +967162fba8b69ffce9679aac49214debb691c6d9f604effd6493ce551abacbe4c8cc2b0ccee6c9927c3d3cfbdcb0be11 +8deab0c5ed531ce99dadb98b8d37b3ff017f07438bc6d50840577f0f3b56be3e801181333b4e8a070135f9d82872b7f2 +b1bfa00ec8c9365b3d5b4d77a718cb3a66ed6b6cf1f5cf5c5565d3aa20f63d3c06bb13d47d2524e159debf81325ba623 +90109780e53aeacd540b9fe9fc9b88e83c73eaf3507e2b76edc67f97a656c06a8a9e1ec5bce58bfd98b59a6b9f81b89d +88a1009a39a40421fdcc0ffc3c78a4fbace96a4e53420b111218091223494e780a998ebecf5a0abd0243e1523df90b28 +90b77146711ee8d91b0346de40eca2823f4e4671a12dad486a8ec104c01ef5ee7ab9bd0398f35b02b8cb62917455f8b3 +b262c5e25f24ae7e0e321b66fdb73b3bf562ded566a2d6a0152cf8bafb56138d87b6a917a82f5ace65efc73cfc177d81 +ae65a438c7ea46c82925b5ec5f71314558ca5146f5d90311431d363cfeac0537223c02cbb50fa6535d72fc2d949f4482 +8984208bfc193a6ef4720cc9d40c17f4be2f14595ef887980f2e61fa6927f9d73c00220937013b46290963116cbe66ac +a8f33a580508f667fac866456dce5d9246562188ad0f568eb1a2f28cf9fd3452dd20dc613adb1d07a5542319a37ecf1a +aedadd705fc086d8d2b647c62e209e2d499624ab37c8b19af80229f85e64a6e608d9cd414cb95ae38cf147d80ec3f894 +ae28077a235cd959f37dc3daedc3706f7a7c2ffe324e695f2e65f454bf5a9fc27b10149a6268ebfaa961ad67bb9b75d7 +a234c7f5a5e0e30f2026d62657bd92d91a9907ec6a2177f91383f86abb919778121ff78afb8f52c473fe6fb731018b52 
+816a2ea7826b778f559a815267b6c6eb588558391c0a675d61bb19470d87489ba6c1e2486ea81dd5420a42ee7c35a8de +9218b61948c14234f549c438105ae98367ef6b727ad185f17ad69a6965c044bb857c585b84d72ef4c5fb46962974eed7 +a628031217a0b1330b497351758cf72d90fb87d8bdf542ea32092e14ff32d5ef4ca700653794bb78514d4b0edfd7a8d7 +ab4e977141be639a78eb9ed17366f9642f9335873aca87cce2bae0dddc161621d0e23264a54a7395ae706d748c690ee9 +b1538c4edff59bcf5668557d994bac77d508c757e382512c4368c1ded4242a41f6200b73fe8809fb528a7a0c1fc96feb +965caabe5590e2ff8c9f1048bbdda2817e7a2847e287944bfab40d94cb48389441ac42ff3a7b559760bfab42ff82e1e0 +a64b7484d22c4b8047c7a8ef54dc88cb8d110c61ef28ba853821b61e87d318b2b4226f7f0d1f3cdf086a0e1666d0212c +8915ab7e41d974eef9a651b01c2521392e8899e6ab91c22aeee61605c78fb2b052399ba1d03473aa9cfb52d1a8ba4257 +8dd26875d4a1716db2f75a621d01e971983267770e2da92399aecf08f74af1f7e73643ac6f0a9b610eda54e5460f70ed +83dabcb84c9cbce67e1a24ecbfa4473766b9519588b22288edbaa29aca34cefd9884f7310e7771f8f7a7cbced2e7eea0 +956be00c67987fb4971afca261065a7f6fcef9fb6b1fcb1939f664bbc5b704223253ebfda48565624a68fb249742c2cf +a374824a24db1ab298bee759cee8d8260e0ac92cd1c196f896600fd57484a9f9be1912ded01203976ac4fab66c0e5091 +a225f2ed0de4e06c500876e68e0c58be49535885378584a1442aae2140c38d3ca35c1bc41936a3baf8a78e7ab516f790 +8e79c8de591a6c70e2ef2de35971888ab0ca6fd926fdb6e845fb4b63eb3831c5839f084201b951984f6d66a214b946b8 +91babc849a9e67ab40192342c3d0d6ce58798101cb85c9bd7fc0ac4509ffc17b5ea19e58045cf1ca09ec0dee0e18c8f9 +8b4897fc2aef5bbe0fa3c3015ca09fc9414fdb2315f54dbecc03b9ae3099be6c0767b636b007a804d8b248c56e670713 +8f63ba42e7459ea191a8ad18de0b90b151d5acbf4751e2c790e7d8328e82c20de518132d6290ff3c23d2601f21c1558e +a1a035dc9b936587a16665ea25646d0bb2322f81960d9b6468c3234c9137f7c2b1e4f0b9dbe59e290a418007b0e7a138 +81c4904c08f7bb2ac7b6d4ac4577f10dd98c318f35aac92fc31bab05eceb80a0556a7fc82614b8d95357af8a9c85a829 +8c40e44e5e8e65f61e0a01f79057e1cb29966cc5074de790ea9c60454b25d7ea2b04c3e5decb9f27f02a7f3d3cb7014f +ad8709e357094076eb1eb601539b7bcc37247a25fbc6ada5f74bb88b1b371917c2a733522190f076c44e9b8e2ae127fb +92d43cd82c943fd71b8700977244436c696df808c34d4633f0624700a3445f3ecc15b426c850f9fb60b9aa4708f2c7c0 +b2cb8080697d1524a6dcb640b25e7255ae2e560613dbd27beaa8c5fc5c8d2524b7e6edd6db7ad0bb8a4e2e2735d4a6f7 +971ca6393d9e312bfb5c33955f0325f34946d341ff7077151f0bcafd2e6cbd23e2ad62979454f107edc6a756a443e888 +b6a563f42866afcee0df6c6c2961c800c851aa962d04543541a3cedeb3a6a2a608c1d8391cf405428cd40254e59138f3 +986bd17bad9a8596f372a0185f7f9e0fb8de587cd078ae40f3cd1048305ba00954aff886b18d0d04640b718ea1f0d5a3 +ae32dbccfb7be8e9165f4e663b26f57c407f96750e0f3a5e8e27a7c0ca36bc89e925f64ddd116263be90ace4a27872c4 +83725445ec8916c7c2dd46899241a03cf23568ac63ae2d34de3bce6d2db0bc1cfd00055d850b644a059fb26c62ed3585 +a83f7e61c05b1c6797a36ad5ded01bf857a838147f088d33eb19a5f7652b88e55734e8e884d1d1103a50d4393dfcd7a8 +aa010b4ec76260d88855347df9eaf036911d5d178302063d6fd7ecad009e353162177f92240fe5a239acd1704d188a9d +a88f4ba3cf4aff68ec1e3ded24622d4f1b9812350f6670d2909ea59928eb1d2e8d66935634d218aeac6d1a0fc6cae893 +b819112b310b8372be40b2752c6f08426ef154b53ef2814ae7d67d58586d7023ffa29d6427a044a3b288e0c779866791 +b5d1e728de5daf68e63b0bb1dee5275edae203e53614edeeeefff0f2f7ac4281191a33b7811de83b7f68111361ef42e1 +953fb3ddc6f78045e53eaacfd83c5c769d32608b29391e05612e4e75725e54e82ad4960fbef96da8b2f35ba862968a3e +936471136fb2c1b3bb986a5207a225a8bf3b206a1a9db54dc3029e408e78c95bfb7539b67006d269c09df6354d7254ac +ac353364b413cae799b13d7dc6fa09c322b47e60b9333e06499155e22d913929b92a45a0ad04ba90b29358f7b792d864 
+a0177419ead02ba3f0755a32eee3fd23ec81a13c01eab462f3b0af1e2dba42f81b47b2c8b1a90d8cec5a0afa371b7f11 +b009eeb5db80d4244c130e6e3280af120917bb6fcebac73255c09f3f0c9da3b2aa718cd92d3d40e6b50737dbd23461aa +b8a43426c3746c1a5445535338c6a10b65474b684a2c81cd2f4b8ebecc91a57e2e0687df4a40add015cd12e351bbb3eb +94ff3698a6ac6e7df222675a00279c0ea42925dc6b748e3e74a62ea5d1e3fd70d5ab2d0c20b83704d389dd3a6063cf1a +90e4142e7ce15266144153e21b9893d3e14b3b4d980e5c87ce615ecd27efac87d86fa90354307857f75d7ebaeffe79ef +a5fd82c3f509ec9a36d72ba204a16f905e1e329f75cfd18aaa14fb00a212d21f3fac17e1a8e3bc5691ab0d07f8ec3cd0 +962e6bfd75ea554f304a5fee1123e5bf2e048ccd3b401716b34c52740384579188ac98bc0d91269fc814de23f4b2dd34 +b50b4e45c180badf9cd842cd769f78f963e077a9a4c016098dc19b18210580ad271ae1ba86de7760dd2e1f299c13f6a0 +84cf08858d08eca6acc86158ffda3fbe920d1d5c04ac6f1fc677760e46e66599df697397373959acf319c31e47db115c +a697a38ba21caa66b7739ed0e74fe762a3da02144b67971fcad28c1132d7b83e0ac062cc71479f99e2219086d7d23374 +ad1f6d01dd7f0de814fe5fbb6f08c1190ff37f4a50754d7b6291fc547c0820506ea629aabacf749fec9c1bbfda22d2d0 +b11fd7f8c120d8a370a223a1adc053a31bef7454b5522b848dec82de5482308fc68fdaf479875b7a4bc3fc94e1ea30eb +93ecf90ebfc190f30086bcaeee18cda972073a8469cf42a3b19f8c1ec5419dff2d6a5cc8ef412ccd9725b0f0a5f38f88 +911f25aaa5260b56b3009fa5e1346a29f13a085cf8a61b36b2d851791f7bcf8456840eccbfc23797b63ecd312e2d5e12 +a52f17a8b2db66c98291020b1db44ab23827e1790e418e078d1316185df6aa9f78292f43a12cd47131bd4b521d134060 +9646fca10bf7401e91d9a49753c72f3ecb142f5ed13aba2c510a6c5ccb8d07b8e8d1581fc81321ad5e3996b6d81b5538 +aa1da4a5665b91b62dda7f71bb19c8e3f6f49cc079d94fcd07b3604a74547e8334efa5a202822d0078158056bbda2822 +a2432ae5feeaf38252c28aa491e92a68b47d5b4c6f44c1b3d7f3abc2f10b588f64a23c3357e742a0f5e4f216e7ca5827 +83c7b47735cd0ef80658a387f34f259940096ebb9464c67919b278db4109fea294d09ea01a371b79b332cff6777c116d +a740a2959e86e413c62d6bdd1bc27efe9596ee363c2460535eab89ba1715e808b658bd9581b894b5d5997132b0c9c85c +b76947237fa9d71c3bece0b4f7119d7f94d2162d0ced52f2eac4de92b41da5b72ad332db9f31ebb2df1c02f400a76481 +a20e1f2b7e9cc1443226d2b1a29696f627c83836116d64d2a5559d08b67e7e4efa9a849f5bb93a0dadb62450f5a9eaab +b44bff680fba52443a5b3bd25f69c5640006d544fca1d3dc11482ee8e03b4463aae59d1ec9d200aa6711ce72350580fb +a9490f5643bacec7e5adcda849ab3e7ff1f89026bf7597980b13a09887376f243158d0035e9d24fdee7cb6500e53ef29 +96081010b82c04ad0bfc3605df622db27c10a91494685ef2e6e1839c218b91cbb56e043e9a25c7b18c5ddee7c6769517 +a9522d59bcf887cbbbc130d8de3ff29a86df5d9343a918f5e52c65a28e4c33f6106ac4b48ecd849a33d39eeb2319d85b +aa5e0cea1a1db2283783788b4d77c09829563b75c503c154fdaa2247c9149918edac7737ef58c079e02dca7d8397b0eb +8c03f064e777d0c07c4f04c713a86bf581cc85155afe40e9065ead15139b47a50ead5c87ac032f01b142d63ff849758a +a34d672bf33def02ee7a63e6d6519676c052fa65ca91ed0fe5fdd785c231ba7af19f1e990fc33f5d1d17e75f6af270be +8680443393e8ac45a0b07c30a82ac18e67dcc8f20254bd5ede7bf99fc03e6123f2fcd64c0ca62f69d240f23acd777482 +a4e00ab43d8ae5b13a6190f8ef5395ec17fbac4aa7dfa25b33e81b7e7bf63a4c28910b3a7dc9204dbc4168b08575a75e +8249259066ee5672b422c1889ab5ed620bddd1297f70b4197c40bb736afba05d513b91d3a82ee030336c311d952cd60c +a0651d8cf34fa971bde1ec037158a229e8e9ad4b5ca6c4a41adedb6d306a7772634f703dcfac36f9daf17289f33c23fb +b02ff6e8abff19969e265395ceaf465f43e7f1c3c9cfc91f1748042d9c352b284e49515a58078c877a37ff6915ee8bf4 +927fb7351ac28254458a1a2ea7388e1fbd831fbc2feedb230818f73cc8c505b7ff61e150898ce1567fcb0d2c40881c7b +a9d3861f72090bc61382a81286bb71af93cdeefab9a83b3c59537ad21810104e0e054859eeafa13be10f8027b6fc33b8 
+a523306656730b1a31b9a370c45224b08baf45773d62952a0bf7d6c4684898ae78914cfafbd3e21406407cc39e12afdc +947a090e7703a3ea303a4a09b3ab6b6d3fda72912c9f42cc37627557028b4667f5398a6d64b9281fa2efbe16f6c61ed6 +b41d24d40c10239c85d5b9bf1a3886d514a7a06b31ca982ea983e37162293350b12428eabc9f6a460473ad811e61ba40 +b0bb9805724f4ca860e687985c0dc6b8f9017fe71147e5383cfbbbdcb2a42c93c7062ba42acdead9d992b6f48fc1d5ac +aec775aa97a78851893d3c5c209a91267f1daf4205bfb719c44a9ed2614d71854b95bb523cd04a7f818a4a70aa27d8fc +b53e52e32ca90b38987610585ad5b77ecd584bd22c55af7d7c9edf5fbcae9c9241b55200b51eaed0fbdb6f7be356368f +a2c5ac7822c2529f0201717b4922fb30fb037540ab222c97f0cdac341d09ccb1415e7908288fabef60177c0643ed21bf +92162fda0cbd1dafbed9419ae0837e470451403231ee086b49a21d20de2e3eed7ce64382153272b02cf099106688af70 +8452d5df66682396718a76f219a9333a3559231e5f7f109a1f25c1970eb7c3408a5e32a479357f148af63b7a1d352451 +831ea95d4feb520994bc4904017a557797e7ad455a431d94de03b873a57b24b127fcc9ff5b97c255c6c8d8e18c5c7e12 +93d451d5e0885ccdbb113a267c31701e7c3d9e823d735dc9dfd6cfdcd82767012dc71396af53d3bedd2e0d9210acf57f +a2126f75a768dcc7ebddf2452aebf20ad790c844442b78e4027c0b511a054c27efb987550fcab877c46f2c7be4883ae0 +aa4d2dcba2ccfc11a002639c30af6beb35e33745ecbab0627cf0f200fdae580e42d5a8569a9c971044405dfdafed4887 +ab13616069ef71d308e8bf6724e13737dc98b06a8f2d2631284429787d25d43c04b584793256ed358234e7cd9ad37d1f +9115ee0edc9f96a10edcafeb9771c74321106e7f74e48652df96e7ca5592a2f448659939291ff613dd41f42170b600ad +97b10a37243dc897ccc143da8c27e53ccc31f68220bffd344835729942bb5905ae16f71ccaed29ca189432d1c2cc09b1 +875cf9c71ae29c3bde8cdcb9af5c7aca468fbb9243718f2b946e49314221a664959140c1ebc8622e4ed0ba81526302fd +86b193afbb7ff135ce5fc7eb0ee838a22e04806ceec7e02b3fb010e938fff733fc8e3a1d4b6cba970852d6307018b738 +b3403a94f1483edce5d688e5ed4ab67933430ede39cd57e2cddb4b469479018757d37dd2687f7182b202967da12a6c16 +83edfa0a6f77974c4047b03d7930e10251e939624afa2dcafbd35a9523c6bf684e1bb7915fc2e5b3ded3e6dc78daacf2 +88ff3375fe33942e6d534f76ed0f1dfa35ae1d62c97c84e85f884da76092a83ecd08454096c83c3c67fac4cd966673d7 +af0726a2a92ee12a9411db66333c347e1a634c0ab8709cc0eab5043a2f4afac08a7ae3a15ce37f5042548c6764ae4cf6 +81cfa33bb702e2f26169a006af0af0dcaa849cec2faf0f4784a06aa3c232d85a85b8123d49a1555cca7498d65e0317e4 +910a16526176b6e01eb8fb2033ffbb8c9b48be6e65f4c52c582909681805b3d9e1c28e3b421be9b9829b32175b8d4d80 +93d23befa411ca1adbdba726f762f2403e1cc740e44c9af3e895962e4047c2782ca7f2f9878512c37afd5a5a0abbd259 +82fcf316027fedfe235905588b7651b41e703836f96cb7ac313b23b4e6c134bff39cd10b3bddb7458d418d2b9b3c471b +8febc47c5752c513c4e5573428ad0bb40e15a5e12dbfa4c1ef29453f0588f0b75c3591075fef698e5abcf4d50c818a27 +83dab521d58b976dcea1576a8e2808dfaea9fa3e545902d0e0ce184d02dca8245d549134a238ab757950ad8bc11f56eb +898cfb9bf83c1c424eca817e8d0b99f5e482865070167adab0ecf04f3deeb3c71363b9f155c67b84d5e286c28238bef8 +b845e388cc1a8e8b72a24d48219ac4fd7868ee5e30960f7074b27dada842aa206889122acfce9e28512038547b428225 +b1ce4720e07e6eecc2a652f9edbad6bd5d787fbaff2a72a5ca33fa5a054dd3b4d5952563bc6db6d1ce1757a578bba480 +8db6990dd10741cf5de36e47726d76a12ebe2235fdcb8957ab26dba9466e6707d4a795d4e12ec7400d961bd564bdee7e +a3ca7afd20e16c2a45f73fc36357763847ed0be11cb05bfd9722f92c7ba3fa708cf10d4e0ae726c3eccae23cc55fd2be +8701b085c45b36f3afb589207bbf245ef4c5c82aa967ecd0c334daa1f5a54093c5e0fcacd09be540801920f49766aa0f +84e3736727ba76191d9a6a2a3796f55bb3c3a8bbb6e41f58e892ea282c90530b53ab5490bbf1a066723399bb132160fb +87c02a01917333c7b8866f6b717b1e727b279894108f70574d1b6e9e8dc978eda8778342baf3c6464d6e0dd507163e76 
+b8da532dac81fafaed759e99c3ae011d75f3fda67a8c420c3b9747281fe32e31ac3c81e539940286440704c2c3e3b53e +a0cc63c3bef75a5c02942977a68a88cd3d103d829b6c0f070f64206da7e3638f10f42452788092de8fbbc626ce17b0d4 +b5c9317b3f6b1d7ee6871506c0430cdf73e28b02c001ba6ca11061c7e121c91152d2b80c4f80e1d8f51ff5653bc0db5b +b798fb572da977dd3ef2dce64042b012a470d6bd2cb61a16267abe2b8399f74540d7c70462a6b2278d73567447e31994 +b868eda58739effda68c834745cd2cf66a09f0f215607b65685bb5ca3eba71150f43a6e47b81a0c19fb58eeae3da56e8 +9041c93a7e8f2c34812fd6e9744b154e898e1ef69db72bf36242c71e2c251f3db7e86cbd802da603a92cd0b06b62ea63 +a834d648e974230582fc17b3a449f4f65b3297038a3a5401e975b9b60ff79b2006a33e1486d3428106580276993311e1 +a3ce874da6ade9f0f854d7ae7651fc3ff63cec748a847527539fe0d67e6c99eaa3011065a4627c2192af7f9569f7ab57 +ae78ad16de150cc0400d3b6b424c608cd2b2d01a7a38ea9c4e504d8463c0af09613774dbefdd5198415b29904e0fbb63 +b966db5a961067e743212d564595ef534e71dcd79b690a5a2c642d787059fc7959b9039b650372461a1f52910f7e857b +8069904f360af3edfd6cabd9b7f2adf5b61bd7feb0e9a040dc15c2a9d20052c3e5e0158f3065ec3200d19b91db603b71 +9600917dbcd80a47f81c02c3aafecfcef77f031bf612a0f1a8bdef09de9656f4bb0f8e3e95f72ece1c22bd2824f145b6 +834a0767b7b6199496c1faee0e3580c233cc0763e71eebc5d7c112a5a5e5bd95c0cf76a32ea5bb1b74f3cf00fbd2cfb4 +99469a893579ed5da7d34ec228854c4666c58115d3cae86d4fc2d03d38f10d8c5dc8fb693763a96ab6be2045cc8d518b +a52cc0aecda6594de57d8ca13b146e77212cc55854929c03f2a8a6cdfa46296791c336aebcc2610d98612d5b4c0452df +97864434d55aa8a7aad0415d36f9558ce6e6c00452923db68a1e738232d0cb2d47e3b0b8f340c709112838adeaee4695 +a4a7f2c45db3661b6af7ec759f9455ba043b0de6fd4787e3372cba215b9f7c641d5d817a0576e7aa28a46349d2fe0ae6 +864e857652d95e1d168c1b9c294777fc9251a4d5b4b00a346b1f1c9c898af9a9b5ec0ac1f3a66f18a370b721dbd77b23 +ab8eac458fa8e7eb5539da3964ccd297a216448c3af4e4af0dcfed0ce29e877a85e29b9601dc7508a060b97a05f37e15 +a6fd0782c5629c824fcd89ac80e81d95b97d8374c82010a1c69f30cef16ffc0f19e5da2d0648d2a36a636071cb4b69a7 +ad35a75fd8832643989d51d94ee6462d729e15f6444ffdf340dfb222af5d2b6b52e5df86082dbc7728fde7c1f28ac6b4 +8e06831cc8a0c34245732ea610ea6aae6d02950299aa071a1b3df43b474e5baee815648784718b63acfd02a6655e8ea7 +994ac097f913a4ce2a65236339fe523888ee43494499c5abf4ac3bce3e4b090f45d9abd750f4142a9f8f800a0115488c +a3e6a8e5e924f3a4f93e43f3f5aafb8b5831ce8169cddde7296c319d8964a0b6322a0aa69e1da1778fcc24b7de9d8b93 +81a9bd04f4c6e75517de4b5e2713f746bd7f3f78a81a2d95adc87ba0e266d1f5e89c9cfb04b5159c1ff813f7968a27a4 +b24de8f3a5b480981c6f29607b257ded665ecd8db73e2a69a32fcf44e926fdc7e6610598e10081cf270d2f879414b1ab +adc1b3f8ed1e7d5a26b0959ffe5afc19e235028b94cb7f364f6e57b6bf7f04742986f923fae9bf3802d163d4d0ebc519 +a9fa5092b6dd0b4e1a338a06900b790abbc25e2f867b9fb319fdcdfb58600315a45a49584c614f0f9f8b844aa59dd785 +b29c06b92b14215e7ef4120562893351ae8bf97cc5c3d64f4ecd0eb365b0e464cf27beec3f3ddac17ed5e725706b6343 +adc0d532ba4c1c033da92ba31aa83c64054de79508d06ee335dcab5cabae204a05e427f6f8c2a556870a8230b4115fd0 +9737150d439e6db2471d51e006891d9687593af4e38ee8e38bfa626abcefa768ca22d39133f865d0a25b8bbf7443d7db +a10d1e6a760f54d26c923c773b963534e5c2c0826c0a7462db2ea2c34d82890f9c58f0150db00aa2679aa0fdb1afcb08 +816947dc6c08ee779e9c2229d73dbfd42c2b3b6749b98ec76dbad017f4b4d4f77b5916600b576691978287208c025d6f +a2dc52b6056219d999f07b11869c254e8b3977113fd9ba1a7f322377a5d20e16c2adf46efb7d8149e94989b3f063334a +8153900aae9cf48ebc7438b75c16f5478960ef9170e251708f0c2457967b7b31521c889b5fe843d2694a07c0e804fa48 +a9e9d8d66c8774972cc1686809ce1fa5f0e16997ef2178b49bcd8654541b5b6e234cb55188f071477ba1cebcf770da45 
+b1fa775f9b2a9b05b4b1f0d6ad5635c7d7f4d3af8abaa01e28d32b62684f9921197ba040777711836bc78429bf339977 +b1afbbd522b30e1ae2adf9a22993ab28b72a86a3d68d67b1833115e513632db075d047e21dfe442d6facc7b0a1b856bf +8779b7d22f42845a06ae31ac434e0044f5f9b4e704847fb93943e118e642a8b21265505ad9d6e418405d0cb529e00691 +ab2c6cef1c4e7c410e9e8deb74c84bedeb3c454ae98e3bc228eb13f6b7081b57977b3e849ba66346250e37c86842c10c +908d6c781d7d96aa2048c83e865896c720a66fdec7b06ab4b172192fe82f9ff6167815ffb66549d72bfb540bb35c36c6 +b790440f205ece489e2703d5d1d01ba8921dd237c8814afb5cb521515ed4c3b0a6df45fd4bd65ba63592c2fe1d008df3 +aec346251f9c78336b388c4e9069a1c6c3afbbb6bfaffdad050a9e70e92fb3cae3609067b4903552936f904c804b0ea6 +a0e528cc2cb84b04cc91b4084e53ead4188682a6050b3857c34280899c8233aa8c1a9c6fa4fd6a7087acf1b36d67734a +aa8d7632be3e4340712a1461a0ad0ae90ba6d76e2916511c263f484c6c426939fa93ffbb702cd0341eea404d6ddffebb +a4ea871d8a1d4b925d890aefb9897847599b92e15ce14886b27ce5c879daa9edead26e02ccc33fcf37f40ff0783d4d9e +ab63e4dc0dbdaf2ada03b3733aafe17e719d028b30dc9a7e5783c80933a39935dbe1ef0320bb03f9564cafdf7a4b029b +8219761bbaa39b96b835f9c2b4cec0bf53801f8e4f4a4498d19638be2fa0a193b2c1fbf94e26c1058d90a9ac145a7a12 +a609ee5561828b0f634640c68a98da47cb872b714df7302ef6b24d253211e770acd0aa888802cd378e7fa036d829cd36 +90793ff0736f3c80b5e0c5098b56cda8b0b2bca5032bb153d7b3aa3def277f2fc6cea60ac03edc82e3a9d06aff7d1c56 +8760085283a479d15a72429971a0a5b885609fd61787a40adb3d3d7c139b97497aa6bcb11b08979e2354f1bc4dbf7a0d +b168ede8b9a528c60666057f746530fc52327546872dd03c8903f827d02c8313e58c38791fb46e154d4247ea4b859473 +842c1149ca212736ebe7b6b2cb9a7c3b81ae893393c20a2f1a8c8bfef16d0a473ff865a1c130d90cc3626045f9088100 +b41d0e2c7d55108a8526aa0b951a5c8d7e3734e22fe0a6a2dd25361a5d6dea45c4ab4a71440b582a2f9337940238fe20 +8380bd49677e61123506dd482cdf76a8f1877ea54ed023d1deabfc05846103cfd213de2aef331cdf1baf69cfc6767be9 +a026f92030666b723d937f507e5a40e3f3cfd414ad4b2712db0a7a245a31a46002504974ed8ba9d8e714f37353926a4e +b492e9e9917b29eb04cde0b012df15cbd04f3963d120b63c55dc4369e04f5ac7682b2c7dff8c03410936c26ca73ad34c +81fd9271b4ee36be0ba8f560d191e1b6616dd53c56d1d8deac8c1be7bc67bbc53d434cf70d04e7fa9de3e63415389693 +835c3711abe85683d2344a3ee5f70e68342fd1aec025ad248efe66aab3e3d5790fad2f45bae0d7a53a80998fde45f0aa +b46599be80b8f7dbad0b17808dd5ca91d787929c0bef96fbbcf6c767727d07ed6785bad164d733ecb015eb6c8469a16d +b36bf5c17271d39f5ccb3d82a5e002957207a0cdf9ae7108a4946e6f3ed21a5d353fa940b6fe949c39422b452339bae9 +a12f5444e602d6fb8be51a08b8bc4ec105dfd759d2afe98d51ff4edd673c92e4fc91ff32417ae8070e12169004f8aad3 +892ce3ca0a2961a01f7f0149b8a98fdc0f8871c2d85e76daf7c8aed2a18624b978a4d0a84213f81f9d2a81f7ca4826d0 +b1e6229ebd5b3d85e62d0474d1fed34564f1b5b9c5856fae36164dd0eff378d67d6717dda77536379006fb462bced9da +ac852921dcb81e54e1e315fd6226219932f7b785c2ceb2035710e814899784d7001101f1515d68e3fb74cdbb4baf9e26 +989a42d851123d708a213f3a02cfc926df15af058ec9b5a9df968fe16decbd781b5e65a4c17fbfedd2ac17126084700f +b1d0fc2f7c948e466445f307da7b64b3070057c79c07c7ebbbe6f8ed300a642b3567aed2e5f28988ac566ba62e0d2a79 +83057263b41775bc29f1d59868a05b0f76d3bdf8a13c1014496feb4c0ee379bfd0d4079785252f51fbeb641e47a89b69 +ac9e6a208aa9c557155cf82b389bb4227db5ac4b22a0c7c8d1c3d98946df8b82b0c49d093ba55c8255e024a6d67c14b4 +8294a11cd3f5111b1f8bd135be23b4de337ac45711db9566ebf6e162cd58e7859b1309eba8149b0f0a43e07f62a92411 +8c15f3388b196603c05adec195c1d2cc589e3466da3169e9afd37157fa55cd34bfafbfc5ff10ac0e04aa6a0d0b2ce3db +b8faf8ba89c3115576ab6b340f6cc09edfea8f7331f5a5e8003960c584e839fcecf401113dfbb9a5c11a13721b35c263 
+955c63b1166514c02847402d0e92dccfe3c0dee3bc70d2375669afb061594c85651e6569f471a6969759e5f373277da4 +963bd4f9ae7361d6936d209592a07d9a22cc9ef330cf0c5cb845cb4085d76e114aee66d7599bf5b9f11c6b1c05dade8d +85509b3c97e06e0db113b8b40022c8989a305cec39acab36ba3a73a4b4719573e5bdb82dc4795699c26d983465cd61b0 +b870cfd7f691f88db8d1dfbe809b7b402eabd3b3299606b7dfdb7ef49415411f01d2a7e4f7ebd919ac82c7094f628166 +a5533e7b58a6a9e5c25589134f501584163551247d36f50666eeb0a0745cf33e65bb8f7a9c2dc7fe7cb392414f1ece4a +b93d1ade01ff5678fcd5b5b4f06a32b706213748076cae3a375e20a97231133ec37c1c3202cbc4896b66c3410210f446 +86ed3a58000a46fe2c37d4de515430a57d8f54ab4300294685534372fed1d68e192dd43d43ea190accf3dc9b22e1548b +a8c7d8dc30057bb8ad66b9cfda5e223334407730aeb0f51705922c18e7a07d960c470d463d1781899203e1b1ed1df484 +8d86821d006e957e8544f95a98b110c89941bcc6985562e7a97285f5826b35b690963b2c141ff3f389d92ee18ec76d24 +a4e1108cd3cf01810e74dbbf94340487011b80013b9bfdc04f019188c0d4d077a54b71a3f97a036601aad42a268531e8 +a822cd61db07f64bea00de226102f5fc0adf8fa9f05a6c7478b0ff93e48f6cc3191302d22e1f369b571877d5eb96139c +b1ad4094d0bb4c325dfe072b17711962247dd7ff7e4bce4612e80a6f3c1bde04880ba1682f60d5f1451318afd4d3ba60 +88e7beb0cfd7361288ea27f6b2cb18870e621152ff47994440c18d45284d21bad80d9806ed7d9d392a5cd791d5150ce2 +aad3724a176cf4476595cdfb9e2c3261c37052324c0b5373a30b6cbeb481bccd303720840c49a84ddca916d470eb6929 +a57983370d159e7078a273746fb22468000a6448b1a31d277272e35c6f548f97928e9015f1daf577511bd9cfee165237 +a54136e9db381cdd6dfb3fff8bdec427d4dc1072f914f6fecfec13d7b8f95bb3b5f30ad7677288c008ce134edfb039a7 +a25dfc4019f165db552f769f9c8e94fa7dbbf5c54a9b7cde76629cc08808c1039ecbd916560c2b6307696dd9db87d030 +a917d25328b0754d70f36e795fe928e71ae77e93166c5e4788716c1ef431115c966f2aad0ce016f4bacc2649f7466647 +842ce5e4ad7d8d4b8c58430e97ff40a9fce1f1c65ecba75fed2e215e101d1b2d7ab32c18df38dab722c329ab724e8866 +a8eb2ed2986ff937a26a72699eb3b87ef88119179719ff1335f53094c690020123f27e44fc6b09f7a3874bf739b97629 +96753c1f9c226f626122dad6981e9810a3cf3bbee15cfc88e617cfd42753e34593610861be147a7b8966bcdec55bba8d +94119d31606098f5b129931b51b4b42c4e3513a128b9bfb03cfeee78b77b9909b1c2fcf0a292e49d63bc4e5fe823dfef +a869654f5880d9c21a0af1ff4cfa926e03ec1f2d80fe5524605e04f484e09dc80d6769249f31fd378ff3926ab4cebc69 +b2a539bdd8de4499c5f35cd8824974c2abb1933b3f50d0175dd044563ca829eaa0fc47bdac97eafa98434d1cd05d7c5d +85f53b2bfcde1986ce7279f3a2f5f841f87d75af5d197c897f261d4874bc6868c575ecf7556a32b7b33f7b2795454591 +964f087ed02228b30f401d8aea35c1a7f76698e4075e1bb343398be74c716884e9ca1a31b81566e1ff7513cf76a2f0cd +a1c9d9c9bfbc9c4e281a2953d5991e7b22ff1a32ddaace9e8d9a42e080efb802b853d3276973b5189a5745943c9b4389 +b0c45a9852663a427d7f50c608a6419fbd00f90e8452757a45269d25c0386ec29942f48a34aafc0187ef6020e581d290 +aa3ca7b01862d5d2aea714fa06724b7dda7062b6608605cb712588b2c49fc3c7d89a8799e6e7c31e7a9ef28b1ad4d1f7 +88f5e98ae8c5ae7add42f6d358a35667e590aa80e1869593cbf597d7ee466efa35b429f1836ba2199d8280fe7f60ce3a +8a3bff472e8008f7e50362acc1a0b53c09ac60430942544532722e938470376f0672662261992146765b7c75a380c318 +b9847be7f7aee7532282c279dde928698a892a183ca3047ceda521e9e0a50d96fd3ce59f8e58f31af49508ade6d4ba51 +98065dc23ea3df6d9f8459e81887d88d5752b7e7ba6050ec5c3f0dce93e463e0bf12be3c94ec74c16e2f7ba62e447845 +994aff677b97ee790894dbdb21b1f9210734e008cee2aa2200c8f2579ea650b872f39776a13a8c31e95cc817091bae1c +b292811674e18912ebe79df1af4a132b04ab702c125c039e0213f735f658fafd36c38e5bbd7cad35842576431f5f3630 +96520d750ec10bb10f75019f8f0e4a93ecbc6b678a710d76cd10aa27a6642ad1461bd58fc2aab8e0391b3f788339ed29 
+80d478da7fe246ad0e81a00141229e9d91ffb7fd1b29975c8ec358ed5e864e481bf01b927a9ba002c5ec4aa226d0cb57 +ae58049d93a11ae845dc5be2505e95657f83b95d83ff3591a3c565d587157be795ff4481f42d59eda95e6d523444e199 +85f1f5ad988b9f8a7e24b6d6a22b9de9fb3fe408f95711389c444d7ba2243987225b04318aa97a4cde2cb4c30c05508f +922092d0cb828e764ce62f86cbc55c04dce07233cff041888fae48cfe93818780b4aec9b4ff4718275bb2bfa6bd9e9ba +a85ba97125feff0590a05fb78f19a7338639ad1748802918af4d59307bc994536c0ad638b97b9acd26a08b6b4370dfbf +8c46fcaa8d13266d650bd9366180e5ebbfa002c339e4424a030de19ed922e2daa9a353ae54921a42299607ae53feb075 +b8549832230eb1ec6ee3c33c078deb47f556a0907d2a85fde7720391c82d2ed63dd753cf544a6a0a46eed4b8d1ecd9b8 +b7b96f24504c7f8fbed9c1c654a2550feeee068407b809c43f1082c9558c8665806d911d5d244308169d8a531373bf56 +81c483fd9d9ad7af7869d617ac592e7e951e39738da041d8c4110637689108eb29c8acadfc85366c70885cdf77b353c3 +acf33bcfd9080dfdba828727fe36803327a94e8a3ee5b6e445274f0e8267ad3c943994a8dd6d09b8072912b57e1e25b8 +b3475e7456ff96861bc11068198d51b69b899f5ff13022694b501d3adc8bac58a16204b12011d61e880c8459f4badbbb +8ceb9562026aa96d6e786ec2e5cd49200b5b424349a2214cd3ff5c8f1c2bf1b9872480428f5428e45cc61106cbfbd953 +af56f7e482c24a1367fd798201a20c464848ece431f2d8a31a6ef4f9bdbaa50991e748dcb4ef0c08fdac0ef8ddda3b80 +896dae8b12549909d512fd5c02a2f72dde4086aef6c8007ddb26bb04dff51a707ae94ff87e45191fc10339967fa28958 +8ed1c606840e07a2ac6ff16ac6e81ed3e1c90872ababfe68d56ed2dc50d9294579b9c3546dc63292874299a3162d59f9 +b4d7a5c0836e419a46942281ce77d0aade8e39eb1bf1190dd274ca5070898a1c02ad9d165855629d6e1c96df1a6bd5f3 +aebad8939ac117deb28b789d9846c2c80359dc260920ac8408dbae0b6228dbf496dac0023a3b4302bb9a53e8ada18e61 +812d07c74a8650dc3f318c9b2dbf265f181041fb432fee989cedabd44b933dc6590e36c71dcf9dbe7b4bbf74ea0d7c50 +87b131dd3489889e090839c392231e0ee198acac65bb2e9e63e7d6da322391d1685cfc8ba60699308054c4b0fd89c90c +8b12110ece0b99b2e653b4bc840a12bce5b85abf6fb953a2b23483b15b732a0068824f25fcaa100900e742886c7b4a0d +8765fc9b526a98512e5264c877bdca567e32fdcde95cdbcf4f4c88ce8501e1c7fab755f80b87b9b32d86d18856f1d005 +ac806a32a14019337dfdb5f781ecba5cdea8fb69b23e0e57a0f885e0082a9c330ba808621a48e24316604f6c6c550991 +a711970fa40cf067c73e3edee9a111bf00cd927112205e9d36a21897529be9a051be45c336d6b56725dca3aeea0aed15 +908adbc17fc18821f217d46c25656de811d4473779a41eacd70d2a0d7dd3010de4268a562378814e619e13ac594bb0c3 +894251b79be5ae763f44853f6999289b3a9abda64d52797c6c7d6d31ff2a79e9b3906da72f9ebb95b61d6b29479e076f +aadcf11ea15bcb6d979c3ea320cff8dfcc23c5118ed075f35e77f71459b2141253060e3a90839adbcd3d040ad3bdc5e2 +b4e55d7d2eeaaffb0267448ecce0b75166e4805dc0e261eb5634d4a3f3c08964a597302fd8f6b45ec48178619291dadc +a8e2a02c93d6bec7f42f9265269660b4b404940c3e3de9515b4d826ea7e71f18c6f90a71ce3fbe452d0713de73cb391e +8e2467accfe207cb1ba37d60662920f95338ee212927edb706228c25345734217740159310edf17687f58b333754cb65 +90376b88f653381b3bab673c48c2b84fa82a091e18f710a732fef836e0d39043fcd5527aa97a3a385c0a77cf53746993 +b16530e289198c235ab680f86851bcc177f0c16a58483d83a89213077b06d6840600b03834b6b7af0e22b1914f72de43 +8c4fc3854f938ef1c2b5df065e4e75e9f299798afae8205706439491bdf9784c756134922e77af007e349a790afa52b7 +a68aaec4341d29b92b35322f89b1ae3612e7b440c89a86135a07c261dc5799217a651460c92113d099b486817226d8cd +a653f965feefd2df24156478f0cf3755274ca395afb79e8c72d3b6e1d1f5ba7f3e4f9a4c5ee85355de6f3c81935ff579 +aaf6c8d2717b57f6b14e06c742a11a3bc736bfc0327ca4b8a005b6e924f06871141d231737698a9a59286e44f244a168 +8de32e3c104b4278e27aac695d224f134001c3619f15186466c57c0c46f67e2efe537501d0d9f52f4cdbc724a170b92d 
+8e9b5858b6d4ffe811f6498bd80e454f0d6b345d4729c946626c7cdc196c803a349a14515296aadb7258bb7a5b37e930 +82fc711043aaf1d7a9c712d00eafd816a710f82eb10818ba6af09f591447f36814dbff6e6a1cb2b5c7f16c73930dbbca +b2f0205327fc8ff687f751e7b97788732afaef4fcf51bb17fd7579ed07501915790b70fc36624371fe4fb87a0179d850 +add87d5b1288d30f3449d3ccfa11cba4dc7756d85cee1cb6171b493680a625a01f273d0bb8e6332d0410250036b3acdd +a411f75ef7dd8de8062331ea40929db989e4d65ae8f33d3fa6cc19c98fa8a8ec2b7c7534a5c5eee9e5051626a6a2e47c +89d40a647781e7f2e8ab3a0f7dc7133669944c0cf627376433687a2ea15c137be26f582a6b07ff94b266ac0910009f7c +b2b5f808c26b40ed507922ed119b0fb95e0d6d8b084bbbba58ca456b4354d03110c99989b93207998334ea5d1b70fe49 +8c8db028671969a1e80e595283ce5e678ee955d785043bb5fd39fdb68a00e4c15b462600a7ab1f41486b6883e725894e +958087ce0c75fe77b71770c2f645ef3360c1a9c98637693b988c5f6ce731f72b24ab8b734e8eb6258ee8b23914451f0d +aad6c00df131c1eec6c556bae642e6dcc031e70f63eee18682f711c7b2fcd9afbf1f18cf8a4af562759130add67bd4a3 +b6d23c567291f019cd9008e727704e7e6679b274feb29abba0d92e036f349b1f0fa8c5271ec7384e8d70a2c3977b1f8a +a942c770e903d4150b5684e4b94bb72d0e171df2c7cae6f46e002c41c6b04d774ac6e2753ba8dccdbba3ad1e297a9ae5 +aa542d1849390f86d797408ed7f6a31504aa65d583481a00e475028af20f8b69248a87a8ffab1dace0377db77fe5f9b2 +a1ed3f9564a97f7cabe7c67e018eaeaa42db73a2f3d2332041ca9a7bea57436d848784d6dc402862c22a47f0692b1286 +925c757750c91db8b1b3c220fcbdd80742b4a060abfb0a402071d215c780ef6b420132ec5a43043b9fd7a06bf1b323db +94e575daa7fa0bbb35b4386f510fc3877c9df57bcf15349c5923f30ad6a8df95372835cc078216b41a7192921c1e8973 +9346a41174865d9ab31c7fb9a5329f322bfce06002386d3f5a2e2193de9bfff12bd0bd93307928f7b85e1097b2aaddff +a6e54c9324baa1bff7e9bf39c94fdd308ec6f210aad937112ec727565f8a6141375c04196831873bf506294854f6a20e +98d47b662504f400f1a0e14e24b43829490d022ade02a56288aaf148d466b45d89b5fc146cef67c9ba548cd37ad5e354 +ab690dd59a69904b6b3a4d5a42d17ea4898d9b00c6753aec216d5d4ea564f9a1642697df44d5a62f2c2ab19aaabf1532 +8d0aa8d3c5ec944af49beb99e403cc0d6d1adc6003b960075358a4ff1cbfa02a83d6cb4d848d9e83b34882446a330883 +af9334b7300780c752f32eaa68f3dcecd07dc50d265083f37f9800b02c2595ba24dab89f5fc27c1ecfdbf5291b4d77bc +81c4a6aaf7d4ccee9925c512dae5da6d916a6dd59f7a4cc79d216a91201b4d300114a309e3ddb3291bb95f85bec2a8ea +8c804e810c0785789de26e12b1beff56a163769733be7a31f34f81093782d6410293768a166c9191ef8636fc8724a31e +a91222b48de238f6dfe79c84080cee618611bd0bdca15cfe44474829e42481f8511a82589e69964e19f8cba04e3f5f3f +b26a8885aa594b0c8ad4a1711d80bcf687df996442075dd1497db1b446d16c74e28bc6f0e92b2ecea9c3e15c9c7e828a +85940f45d324ad1d335bd1d7d6f81758f52213e63d5770d9fe0c0c9507d5550795e538b6a2dd463f73d789b5ce377aed +931a277c78082f416880620df3aeb6d0bff2103d19679dd092ea981f5323e438c50a0d094908034ff8a2cb47b1a44108 +88dd85e4e2aa349a757b98661fc00d4538ec1d3f53daf44b16ffcf7f943dd4f2bba5b8ba3b05c529251dfeed73f6f1e9 +b7fd7182cd33639710b8216c54a11bb02e199bbc54fe33492a809dbe17771a685d6238ea3ebcfc75e3b0d4ea5369bc9f +85d77194d910f8cdad7330e1bca9087529a40fece17492f1d17cc4790833891b6d01d24f036b6422175c732b438faeb5 +9845265892d672d9517fbd22f88be4f225711b4abafa8327cc059f000656e4737188506051565d97912a0c19c3d063c0 +90a81987aa841c7f640c298b816643a0ae00cd3609c3a31d0b01245283cc785d9bb27763131b31a4f21aeda4e50073e8 +8b1256eb41a600bda8a06ac08b98a220ebfd52f89a0e4fdce32425db7a0481e9b7873ba3b7a24ad9fb782ee217dfdbf6 +870548998deed85c59507cec7e69cc001c279bb2a99c45a4d030a35c107e69feb76afecb9e435e67965051d6d7a88220 +b1504d194a0dd8df48d431ce991f89d7a0f72f573d21bd5bb46474c5005e43820877a44e62db555f194427ac8a4b9168 
+a00d7423ec2cf0c9e9da07f3dae092d09e1ff4be852e07e531aa54d62ad937bfb52c8bf44683ac3a70f6dfc125575da1 +8019625ad3d218018803aacc2efcedba3a41c24aca8c5aab2005556e58fdf2ed614831277df7937aa594e97a2fc65e7d +8595596284f3add0155ecfee3fc0b66a6b6fc7923d82ca8302952e2ed906d119a1c053aed1123b51f73e1d30d93aba57 +a8ba033f5e7d06177e9ae2d99c40ed4e99e14e1c1b61795997f62e21ed8af1531c4720f23d6a39b0f75c6cd91c58c700 +a94f4167c0f6ae214bae75dd92c63299dd954b00b0d8b0416b8af929fe5aec6a259e44f83a183412d7ba4eb3a49728c0 +a73ee3c3a0fd2a369e0a279c3e214fb662d0378eea3c95cfb91412d7213a1f05958bd0de8f2a4f80f9f80d7eef943b41 +8ef6f3e241f6a761c9ab412629a49648c08b70b837c2cd8bea620bc93056ec73754e3e11f0df50f8e9fa67a9867501a9 +80b473ac4ba8cb82b4ae684206cde124d10fcf619f55a6c90d035981e1b08b9e141b4e5fa9a9af0b7f0c281b355dd593 +a566e2be0b41f01978dfffbb32f442b5e6706f5b9901110e645cf390f6a82869e3ca16887ffa35782a004d251d29c26e +a74e01eefa03546d00afdd24bf17015eee95d36de28c03c9b055e062cd5e8d8f20473c6d7ad21c94f9058fc5e84f9628 +acefc74de146911275dfd19bbe43d72729e89e96da04aff58e5fcb90962856c0b24eb13f43e30329f5477a1b65ae9400 +b5f113ef36e75de6d6d44130f38e460ad3ffc65cb9a5606828c4f7617981fecf76f5e862d7626ccb117aa757cc3c3e52 +96d3aeb1d3a66b136244062b891fc7f93ce745b776478d361a375ae57bdba9b4fcb257becbae228c1a3aff4a1c4fb5e2 +ab26c4a110877e5495b674569a32025dad599637b5dafedcfe32f205dfa68cd46f3ddf4f132a8e5765883b5c83214a07 +922a7a738066692193af32ccbab74edef067668ce3253e18a3275afcd5a6df7168deb2f5175c5fb413dc08fdaef63b17 +a47542f8e4a3a35ef6049280d1a9442c920887d5f1a1483149e143ca412318495a36decb804f81c9f5a7672a14965a4c +8fde57991e72a2aebd3376b4d9fdd795943ba3833431e52b136683567e6ee2cc1c1847dc49dc9534983060c54bf22f7e +addb041f01a99e7238ab2f9f2f94579861d0470b93b91cfb29f3a2e4c82386c868b2cfb6f3778b8a9cf908788acafe58 +a8c4e1df726431c43703739776e2cc51f5ebac57051244991baf53582538120133a44ca603d0722a4b5193e1be3c5ec0 +846379125968d1154376c5dc63100bdcd99b9403d182e3566fe48d79099099f51523cd81d21f0d1dcd622b715bdd851a +b828bf0d936d275abb40e3d73ef57fcd7ce97e9af35e194ae61463317bac6c1b0c3e4b40afe08a1061037bb7149108fc +abd07c71754973e698fa26c5019afd9551548f8369e2249b9902513f19a097057ee7065a1d88912e8f52e6e0fbfa6d82 +a9e36b6fcc9a3cc98e76d5751c76c50e1f92b7670f8076ab6ca8a30de4ec14c34669e049fd39bd293cde8789b1ca67f0 +8c060835496a04c7b51790790035862b20547e62fa8bb4e8857fb36891ec6309520af5c0f45d5ea46e3d228747d710a4 +8cc472ec62b8dce244373f40a821db585628989b6a7c4d394edffbc6346c8be455f4528d528fff41f91f2c875bd9fc0f +b4a75571f84f93451f15b3a86479063d7324d2789b6d2f2f4f8af68c66fac32743dc09b51df29608d62aaba78f6904af +916484984743b5ac16d40d0544faf9184819d92f779254b7fb892eb68cefbe59e75be8a6336a585e120f6ccae0a1eeac +b906ae585a73119764024e9eb87d92e53ee0c673474fec43fec4d344a3bbf471ce3976d25e37d197604689bbc944f1ab +8552708487305f16f95db3e01fbbfb969398f5b6d116844cbb000c9befd03f15c767584bf9541a42141949a4dc787a3a +a6025a2773f78c247f78c0d895ade8a6baa76e5499085f6175935d98a05fc41c1359f7843e0c6c323f1be256c45f45e6 +96dac695dd9288aeb6e32dce50e51ddf1fbd41de6146e3605c7a81f2253b17babf2bfda4f5a9d0c28352b9746c0dfa2c +a215b21f8eb2290f9d308278f2859a999eb3a31f4888f84a65f9ed05e1151c17777f91054d4d0de759ac5c3547d91929 +8fd7c9a279e9b619acf927d501b35dc551979731a89eab91d38b2356c0d73569baddacb9d1096d20a75c917ecaedadd6 +b985e8baa5195e2f1ea1091122d55aa321178d597f87b732b23eccb12b891638be1a992305a1ffcf5233af34339fa02c +ae1a9604b7f569aa48d2daa1889e76d3d103065fc8c3deb9ae127a6d94145695cab3bef640fa781612e8082c6d616c47 +a8fc67f9069f753360349eb874fa4dcadb2ec48d97c61abe568faee5f370ec3c87786c7faf0f73fc0ae7181a36eb89ca 
+a506d13acc3a9f80509fac936aef848cd30698631fff6130ed6217512ed9527d075f653cf6ef91f68e48a24c903eeb3a +a415093755cc012863043bf586b970bafdd87653ad14d1929672e04949bae4a753d16aa3eb5bd1afe3df3691b80f240f +ace3b792a1960580348b6fae8513149242378a18382741bbc2fb2f785cb8bf87550da4b5e0df2955970ab3a31f99f5d7 +a47d7fa7522664c8f9c404c18102f6f13a1db33ba8b0a56faa31a78a3decba3168c68f410115c5d9f240b3dc046dc9b4 +a9c930db3ea948cd2dd6ea9d0f9a465a5018bbaf6e9958013f151f89a3040cc03ae0b8eaf74b0ff96b4e7a6cd8aa5b4f +88abd235e3e760166cdedff4be82cf6ba02d68f51c6d53b1de326769f1f635215890f9a4c35b06dd16a9b93f30f3a471 +8f8d7b2fcdb70bfedde1ffd7f0b94108f0fa432f6ae81097988521dd2c4da928c10c5da3c7f33f11bd5331f2da8ec219 +b7abdbd48cece30d8f795a58a94913d76842cb006892485a9382a0502826538ca4ff951cc1ef4493e45de8571360d20d +b3e7b125f350c52695f7c5ec4a30916ea6c11744f1151a18ea0510e6cf6ed6f6dba4beaa4ca56988d306bd80ec360056 +9a004423c95e1f1714f98fb97ab798d6ab16cb5f6d6cad860635585d4d4b43ffcda63d8e931351189275e5a2cef28c2f +a8eab6ef917cacdc9b1932eb312309e1f85298d63e55ed9c89ab79da99d3eb60f1643d16be920e82d9285f60c7f7cab3 +934df955485113d10c4dde476ec14a98771145aadf3c8b61af26b09b9948757fa1abcc945ac91466a18c18c2fdce40d0 +99ed9146561597cff8add2196ff3a0f161dd5302685ceb846afca6efb5225f642e8f4a0970eecb01cdf18694fa697095 +b37062dd12a81267bbbf89bc9d6e30784c0e11e713cc49c6c96440f800f2a6a2a7e7f6c7f6c9eed4bc3c8890f2787342 +83a3d70055b6044e0207b3ece4da849755ab5798317b36b20c3555a392c27982f811e1c5007697554eeedc737b37f3ef +a85392c07ff8658935fbc52acec7221cd916c5fde8537a8444eefd507220e76f600350ae8f5dc3353911087b88b91045 +b1ea23558ad805dde9cc1eade995cd8e7f46d9afa230908b5fbaaa09f48547f49c2bd277bff8ab176f1c240beedd2b09 +8a16a48b9105d94700e8e5706b8d8a1ed14cffda5558a596974ea3191c5c3449da6e7efe2059e7baf4530a15f175ce16 +ac5fa54381fc565842417558e131df26e9505027759416165035357816a7e1859a7c14c228c79b4e5ba2ef6758e12ad8 +8475e290c399cc9322c05264a516cf766bf5fdb6b9dec7283961da0b99012d499b244b33fc0eaf94b461ab777f2a9537 +a7922f3c70e6857652805af7d435646c66d94eec174be997c4fe973d8f019990c4f757eeb730b2cfdf8154e6e97f7d5b +b90deb797fba3150cf265a23ea6bd49a382855cd4efe171cbcb1664683a9f1687cfcadfdca4e39cd971ec13aa5cdc296 +91ca761dd9659007d2fe8970bbd336c19ed0d2845d0d8aaab397116affcc793de2da73d89e6625cf4dae5983cceffa56 +9121ae9b60323ab1301e97555bcc74ddba0f5b1e62bfe9eaa2c239e1d685c4a614d397b32a59febed4db9968db44f38a +8477b07da4bbfe9087975f30d2c2333fccfcd7149f90e0e6fabecee627eee3ea324df31cf6a680393f5dedf68a35c9de +946a9c0f02fa6bf9f9d4933e7fc691749f4ac2f82a9b880666b5185189d4f3432da9096d0ea4d6baacbc079e19c887ce +b24663332914ea519435874d4c42d11842ea84dd3dc55292d5b0f27f64587848d095bacaec235a37003bdb5185daa6f2 +b980f46f84ac21dea75b4650f9412f6123325842758589a9b47caa68545905061f03fcad23cc102e2ce8ffeb1ae634a8 +90e9ebb060182d3043ea4210a2d934858559522a19eab9f0ff81a367484a05ec7cce78ee6a91dfff96145869db6a4e80 +b04228a009c91847693eab29c9ea71d1d6ba07060bc2b0b3bb81c46a125baecb3e1412f6ce4305076a97d316d14e4665 +8d3268370dbf38d378c7228c7b54e91f90f43cbfddc0d8468de11a4312616ca6372619209b89114152b16f334f4d2780 +964a63ffae653e0249685e227d937937b079ec3da9c977dad2b2e052af5eb560ce7d175941f2ae0df90e3d0a20b77e75 +855604c2910be885b14b27896e16d8dc339236b975398c771d29ac74e4278a2305fcf85203050a8faffddf64ea19cf78 +8e0b1d61a4349411eec77cf3490555843187a25a93e1f45bf66ad3982b9cc141b07805f8cb252b0fcc125e0052a7c450 +a03bc9588f971a1257cd0cfd2ca406c76aaeb634001864b0e4dda91e009d3361b33fc39f34922835031a423a13619a82 +b703fa855c2c4e1641d2687717fe8c5061acab71cd2dab55cdb069a6865464c3080f7936ddfd320516b6791b36c64b8c 
+aad1cfa7295e463fc3d5374ea4b952020010d67a77c7a86fe2c351a5959cd50df6a0045ad588257567a99bfd0e9400b3 +97906fb82abf5c1d9be8f72add8e6f175a6a5a4300b40295cb5ec8527cc7ec700fa03a7a494122d9605d212457452e41 +a83366cf93ad9a07f617e4002a10b624270f60083559b045ab5a805aaa592ac37b90c1e8b5437158f3bd942cf33bb633 +a585168e157e111bfa329d0ed6651a96509b20b30f6bb0691c6a5875d134d4a284867ab52511cdc19e360d10638e58a1 +b17d480a0b39f2487b7f3878714658fda82f2147c5ecbccd4004eb92d267c4663b42c93bafb95ce24e2f2f0a9ea14b8f +9362297a1a3951d92db4fd8ea6b48c403d6d8d2f7e7b6310b9cf9b4e4ba9e84cfe1ae025830aab9466c32fd659144474 +b1a62fbadfd4ea4909d8d0714c1e3ee9f95237fde20720f88d5ad25c274a6792158b99966d7b93151f769c832b6a132b +8d9af736949a33fe929548abe72384281365385862821a584f5198eed63bc5388f89fc574cda35a9eaabed0d336b86b6 +90ee2235f4ec2c6089b5cb7b8a41c9bc39e4a57935022ef28bed490e2ab12680922af7395bda4f708809e2bfc62192c9 +91f3a123d420bca34d3d751119bbebc435630c6605fb59a8d80d16a4895972e56cfe4cf1998e0a527c18ee38c2796617 +a2c4fbb20e7fbaae103b86ca9d8dbc2828e6bf33d1d7ce153bd98e8880fe7ac62abbf7059194b1eee64f4526a36c63a9 +91a7f93310ac74f385f11509f4bea9a4d74f2ce91cf2024fee32a4a44d5e636a73339c6b4027ee4d014a24b90de41ecb +914a6d405fee0a15e99704efb93fd240105572335f418d95e1f2de9afeb97f5f4b80aaf20bd5bf150b9da9abc2b6d6a5 +9462cf2c7e57e224389269b9fdddc593b31e1b72ab5389346aa9759fad5d218039a4a5bc496f4bf7982481bc0086292a +b7596132d972e15dc24f2cd0cf55ee4a6cc3f5a0e66dff33021a95e5a742889e811afd1dc0cd465cee6336ad96f25162 +99409bba2548f4ece04751308f815ecee71222869d8548fa142788fb19df5366d093a5131e57560237471bbd5279bbe5 +8e7560988a844b5b844ad460b19c452a5a04346d8c51ca20d3b144a3670ecc60c064b2415c2eeebf140d6ae4ba5c5360 +8cd9e18d311e178e00eb81ca839cfaa8e64e50a197de8461f07135fca28c1d895dd9c2401b923a4175ff711853497317 +91ebf99c95e8f653402b3079ecbd533ed7cd3b6c857a710142354ce8330cebdee7cf0fd0400417883b66055bec9d0552 +a9d0cf8cc6bbdc44426dcb716df667826426b4559056d73738bf3eaa6df373403861b6bbd6fa0454b1d2730e3b0015c4 +928320b452ef21d2443dee360110550f531d7a4275b2cb227814150f3e9e360e05a884d6e3bc4415f202120ea5ac333e +b9551f2b2e7bb984618f2e7467e33b5b5303b8707f503f2e696e49c2990ea760c31e0944d52257c7a38b553a67cf621c +b2ec34126fe61345e5c6361fe55b8fb3218cdcc9103bba5b200252d50b758153cd549226b7aabedd265906401e755190 +a8cf814926082a96a921d471036a9919a58e68d02ee671c215ea304759cd92a7c2c9ccebdd5e9ec5572164ad2abb22ad +8c0563c28c261bbe9a1ec4986f8b277324bf05b4fe5e2b79a862168e646bbea50ce7c4622b2aa7ca899c1a728c226d24 +b558cdc334ea894d3a13347ea9e30f78a0a20621903d6c009c54feceba3ba81d2445a43572e088ae691f65489702e963 +a62ba0b20f46c367cfd409beb300e39f1a6cd5be95e63457b6ad3cb66374aed754fd037b8e4215d651a7d8e1a442f762 +8543e2c6135df471bd7a5c09f1313674c7f6847cb88f15eabf40b2bc9535d0ec606725b97103334a0c162a20d9f5bb53 +8c0367d7058d63b425450f8ee9252e64234c0c2e61878c7c2d4b17bab22a72f40c75ac3bf8b64f264c00d9c5963af041 +acb7207445993d563f1b6e7b179bbd6e87044399f80e6d15980acf7aaccb9d85071fecb22250afb3aba850712fbda240 +b93725e66184bb03f0ab4078c737a7fb2b10294a3a09995958de3dcf5316b476ce9b5cd8d180017196d9482abdfcab88 +afcb52bb7b8f45a945299da6fc6a877ba9f69f7f23d5f94b5f5d9a04c3cf3089333bbd50fc305e3907825003da73b9f6 +961de781cb238cef52d43bc0dc7d8e3a75bca4c27ab37a2e9353137a9aa9403444a5841b595adeca75a3de5485ab97f6 +9408c828d3ed6df40cc167d72ca9882a9c9cf8e765d6f9125e02e0d66ee0ac94f449803afb50bf1b92176feae92473d6 +a85480591e7e033b9087fd0efe5cf3c88c10a75de4a5d7da4443df1cc1fa1aa59b6cde3ce7453fcabe555495e49ef6f7 +a2611bd82344bc5d70d7e6cf3f0d25866b9f709ac4bf6f75d1006da2a11e2cd07a4c0ac71505e5062a04f71db7a3063b 
+ac466aaa96febb5b810ba350c7a874797ce4bd6c9585f6b9d114d646894a67c9af9526ade4f7ec834d3a69e18ab643af +b73fc98a79fe77cdbc524c76a09cb9f2d5f8b0a5508846bed1ba5ea9ae3bb62120e01d3b8fb544d90ac9ae0c3d4ccefe +aed333c3403adc899a870082f70aadc770c9f880dc057f05a46d7400be9d893354121a0a31e5475898f437bf722eefcf +97f02133c72187178a8c48db26031f0b2c0317a6648d2be5f7450f00c37391cec935bea46b8144ec9fea5327ee959f27 +940b582b41f1d0f09f0c5f51bab471e4eb143e91b1e96dde83e94650421d51f9c9baec10cc802fb83cd63b56d0b907c0 +b1286a55a74a88a75da47671994916be428be1ca3f42783e497d6478eaa6aca69d50a421b210e9ed3283d578b651b8cf +97cd4e87e21c71d11f1df1c0b6518c00e1610661be4b13cdbdbb026d60fc3f4a2b8549326a648b3fdecb7de8f6aa9fb7 +8f36bbcccee986c35328633bf6ee8f70b5dbf42d0f677c0f4e009d2289976e512af6af91a6ddcd87dc0df93bc4ecd02d +9253ad44ad182e67ab574d718733a69c05cd5bcc43e6292ef0519a9430460aa6a233fe26269da7298ea88cf406e733c0 +b616b5ea74db0dcf8f10a2db79df6ec3566c06410f68a933eff150194608c591b2b175908d4b4ccaef1018b0fefc5693 +80a712ba89394381cbb83fedcaae914cc4f21ab024b8da8a7bbad7762a22f82940451427b1a3f5d84c246d5ba0c7ccc7 +a806909a5517a970879143ad789c6cb6256b82553b649f6865cdafbbc050b1f86528241b3cb600e784186e1a672b588f +b6ae801d1f0e4adf3ce57659d7c61f94abd3c8d1635ad28133a79eff0586fc48bdc195615335449e9bfee39e8a955eb2 +b8a000561211844bef72adf3413f3b438a8789fcddf6676402ca6a1c2c63b9deed322030de2ae3a0aeb3cedbb89406c3 +8bc3615b28e33fc24a7c989f8b4f719c914c4c65b35ad3d4cf15e2196e37c62e42ca34e8b1275e0f32589b969bdfc21b +b2f9637f370a79e7591e5056dac004f56b375f33645ae9f5a192cc6b7b6b3d8a1105cc00f10d8bc8ef250ecc2ac63c39 +b51899978b9c5b737999fee1935a5b0944261e7005bea411b5903d2c16ea045a3b0bcd69395b6733752caed43bc4e343 +873c71a01009dddb9885c48658f83aa6320e74bc152e09de8b631c763c2b4e2e8cbac921418a0d9085ff5c53a2b52d39 +96470f48efd7d2ac2daea8753ef097c09c6fc128a54cc7ef758ff07e32c0b0ac7d122f97b53e88a29cc26874dfee5e0d +8dd2decbd3504b7961d65edb8d51b96377f4edd2e0d2cd8a4d98333f373c79a8d7ca8f8408718d0e7b5e48255857c339 +b536ae387bdd0f6e40850c71fcaecb1051b2c8f7bf5cf92c6bda030de72a03e9212d00390c53a72a08e9fb2bff1249c0 +b1566076f59064e3545adef74fd1acadc1bee0ae23543c30caf9e1ad1fc20ebe84ee25004c612525b26857253f5345b7 +afd180e25444cb720342923b8897d38a6537bc33a0ca1fc9c6e4d524b280193618f19e2bcfbd07606b78b734fe6114ed +89b2a6c8811e5a6d07aa74c79dd854bdfc292cc104b525bc37e4c7c1f9485e19d759c8e27cd7cd73c46346f56ce3b189 +8234196e196898b2501b79d0dc016f6df3d5878952cdb8a93735e4ce2ecf77d07924c701e084533a20f0c50a7d1ee376 +adea7ce2efc77711f50138691ef1a2b946aaba08e7e3b21378708dd5a10bae933ed121e71834b43b14e2ea30a7b306e8 +a566d406a35fae703b3d1ea1791d9207116002e5ee008d01e053a1ea4fe5af2feb63605b011ae6a14414028aa054b861 +b83bbb063682386456719179b6f6bbc8cf6f791229600b7d402167737492f99437b45886695b26a28731e952e56f1ee1 +a8f5fffc2c335d3ad5c7593e81f0862351413cc348392afa86d50921dabb929a5a1de20d604666af9e17a13bbc30bc3b +8d5dcdc1335f01847f6ef650ff64b26e7c4cecb934a7bbce11254e8ced9fa9e4fc87eec55248f69bf499180101c63f5a +83fec30b8bc62f9fc28301a03ef18158d6364738f1c42de311bbfba2e62b25d4c9ea9d6097698b24c84fff956a6748b9 +96394fbe0c2d03cdaa56e13326aeb62344238ad3043ee2fb4f18ebf0a6f7f090f410032a2d15bfbeca9449202d59f2a0 +94880f5928fe71a797362a37d05849d23e118742697f75bc87173a777e7b9d4383b8796a8a2bbee27fb781f363301dfe +af229535896ab86fdf6d2ae676a0dbf44f868f6c7f17bd9a65567631c7aa2e29758f41de050ca5311bd1528bcc811532 +8d4fa4968575b483b3ac16345e7f1ea3f81e8dad72c945a48b7b982054fe1030584be2f89b2f53af84d2490cda551b84 +8052aeb115e4d242078c8726d376a13156cc832705243f14adaa3ef3889e1f2fcdfd46e087acab6fa85a74afde5f5eef 
+a1349c8a22788a1937a837fceecfaada9e93a63e582a09c56b53da52c9db1600254dc85f63f5eadfa30b89b31dcbdb30 +a10178cdb263ff1a5e0cc034b6deaa160d00c3c3fe1fd1ff0c55fdf1ecb83d771070c10930f88832b75fef39a10024ea +938b17e4405934ea5ef29c2187d6787c5ff5d8c9a02665efb453117d462dbc50ef2c202cbc884305cd807a70b5cc177b +84f01f0da6b58c71788616be71fb3c259ceea7f8bd131a5661c5c03d0205feaff6dac2915919347b0559c381477b3d89 +98787f0a2fac2b04bb7aa247ac77236bbe690aae64203e553be328a2c3bffb772e7a0244e585d27558cc64b089a5ee11 +a14501d8b6b3a84b13b9006d521667e8d168f642ebf154c4e90ec8c75d11985fd0c9d86fc2efa6c7077dafecfdf0ab13 +8215dee75eed04de83a3e910129bee8c48ce01cf1317ea477ff35c09a6f9e9771a8b05aa79e6b0f3e71b9874695e7a2a +85763c3072c7400a2c5668ef5cc53e6f4b8dff474146028a8be370ca9d8af9bf9ee10cd7d23d33eb6d6e257dd3af38d6 +91bf62245c5a59d514d39bfb74db7f72ca7160c1c5d5be3844fff37e53e99d451e18a6747c65e33f98f48a55f38962c6 +8c68817c6a6ea348d9aedce99929371c440fbad72718c2d239ffcaebb26ecc8a4e8c38c2819d945fdb7f02ffda70a5e0 +a96ce2745866a22267a49faa7ea00ebf009ea8d0b0ca2c233c62759b9d5514306b5822dd2eee0124c9e28380e2f97aa4 +8b18d5757c73843dcd55f0f0dc894bcd17e0ecf4c9fd901eacd38480844a15b4ce5e9598ccee039f9d93185137630cdb +a5b45c403b6735aaae14389bcee23ca10571f5437f1f5ab0c2b4e573dfd3341c638fff2cc780166af96b118d47ff2299 +ac849a0ccd354dd46bf55ea837d509b4ae3eefcbd5b8eb2582d301fd56c27b89950c6eefdd4e98e608ef4a6b75251311 +89f13ac14bb064e9c6b49a482831ecea6344faec490bd18bb44028b83a0f22e21145861558029bd172ba7c5247c2cba7 +aa57b057a2ac32c101e442c33831630c81b2e061a542e3e1d6897b2b7ca8a7241ef717a548b3f751d60d89be384ba5da +8a43db4e12682b98230364f25c75b49002f5002bd72a1674cf2a9d53197b5ef1b95e48429af98af503b0d5c3e0e017b2 +a10cd7b8e1574d78c4e917cf833d3d845b878e8e8b60312e6a994bd4f391a5e8c38dcd774087b93c9241238f43f80937 +8b61ccb949088286216cd628811df1a362a7f5c333654ce823e63ebd04b069d5b0f627fb6c96d54c7b853de8aab05472 +887b902020ad45f70f2d5bcfa7324fcbe7be09fd2b1bd40f9ae43a89d487986e89867aee0945ea6a0fe8dfd051ffec56 +822fcd260a7876cad31f54987053aab06108de336878b91b7a15d35013d6d4d6de2d4b30397bb6f1d5c1a7b48e9d1ced +80b89ff95d725858b50e84d825ea99fb6a8866f10b91a5d364671ccbb89cb292bada9537c30dbde56b989c8bdc355baa +b53cab156006c3a1766a57dd8013f4563a2e8250995dbeda99c5286a447618e8ac33ebf25704b9245266e009a0712dc5 +b6e2da9c1156e68c15861a05cd572976b21773e60fc5f2f58c93f3e19c73ad6c2ee3239e6cb4654040c8e15df75a505d +8b7e187d473a0bd0b493adcdb91ca07c9310fd915dec46c2c9f36a5144eb7425dd35dfa50feb0e9ef747caed9f199944 +9743ec3917e953e0a420406b53f4daa433adf4ad686207e9f296e7c83d1ffdbf81191b920ba635c85416e580178c16ff +98d1476fd4504a347c5261012298ca69c8593fec91919d37ddfdf84155b6f1c600cd8dbb92b93f3262da16cf40a0b3c6 +94f50d52982a3c81ac47a7b3032dad505b4e556804f8606d63d821f2c1a4830917614630d943642ba375b30409546385 +b5c0eb5f4cf3f719be1a9ad0103349269e8b798dbffe1b5b132370b9de1188a6d71dcbc3635dfdb4b888400f790b6ea4 +b47fb45ec73392598866d27994c2feb0b0f3d7fc54303a2090757a64b6426d183ae41af16794ced349ede98b9b3fd48c +b5f45fd0aee6194dd207e11881694191e7538b830bfe10a9666493ae8b971d65bc72214a4d483de17c2530d24687d666 +a50c149ea189387740d717290064a776e2af277deafcf5f0115bbbdc73c0840d630965a4e0214b738d1cb0d75737e822 +b941afc772043928c62e5dbe5aa563fa29882bff9b5811673f72286ac04fddf9a9ed0f9faf348268fa593a57bc00ba6b +839051a7838937270bdf2f8990fd9aa7d72bfc86cffe0b057aa8eca7393abf16b70d71a6470d877f8ec6771efa5a8f26 +835bc9d049418ab24dd1cbf76ed5811381e2f0b04035f15943327771f574f723b07c2b61a67a6f9ddc1a6a20b01f990d +8935cf5634d6ae7b21c797a7d56675e50f9d50240cb2461056632420f7f466fdcd944a777437dcb3342841ad4c3834bf 
+b5698fe3da1f9d1e176c9919fddd0d4d7376106774aa23a7a699f631566318d59b74ae8c033eba04d06f8cdcb4edbbed +ad11421ba75d74c600e220f4bce2ca7eacb28e082b993b4368d91218e7b96029acfbdf15a2ab0b8133b7c8027b3c785b +886ef813644599051dafdaa65363795cf34a3009933c469bd66a676fdd47fc0d590c401cc2686d1ba61fce0f693426d4 +8858fdf3e98e36d644257ab6076f7956f2e7eacc8530ec1da7f3e9001036cba7a0855fb5011925cdc95a69600de58b2d +b59eca7085a2f6dfeaa6a414b5216ff0160fbea28c0e2ad4f4ffd3d388e1cc2c23a32dbe517648221b75a92500af85e3 +abec62d259bcd65b31892badad4ac8d2088366d9591cd0dab408a9b70ad517db39c2ef5df52348ba4334dce06a4e3ba5 +a9acfe8f5a310779509621ed2946166ffb6168e68ecf6d5a3b2f6008df1728c8fceb811636c50d2e419b642a848a9ca9 +9929bb1a3537362848fac3f1bcb7cfb503dac0a0b1bebbfd6ddf14c9a73731e2248cbaf0fbb16c7d9c40cc6737c3a555 +981d06c7431e6f4654e32f1c5b27e7be89e7c38d59c4e2a872a0f0934cb852c6aeff2d2eaee8302131795590b8913f5e +a6ba9dd43354320f65fd5cdd5446cfa40080bcf3ef4a083a76ad4e6a609b0b088bcf26c4957bfab829dca6064410ca5f +9367ef28def311c79adfd87e617651fcc41ad8caf047d73ce9a1f327e8871e9b35d5b203fd0c0138e32e2ef91e20ba62 +855d1bb508a9036f42116c8bbb830c576189798baee27c7c3477ef1b1fc5d7b0c2c7203457f1eb48d4b029dd6f646be2 +8539a5d0528d3d601083e162b34cb33b5bf6736b4feeeab4941f10eea127c56b7e0b8d57f34b72f8f674d89c10bf302c +a3b71a9a9ac2dfcd681bfd8f6a5d9abf5df6950821705bdfb19db25f80d9b8a89fac7a922541cc681325679c629743d2 +8e95929dfd4e5b56e5a8882aad6b7e783337e39055a228b36022646a13a853d574603de5fed12b6c1f2585621ead7afd +8b05c885575d6894cb67ba737db5915639a6f281bf249480df444ff9f02724e28ed7371ee7ec26d50d25f3966010f763 +90f1a45de0cc0641181d54ee86630b5d182d24e7c30c2615803f16de90ec7c982a00b21f250ccebc2e94ef53a13e77e6 +90f0e97a132092e51a4521c2ecaaa47e4e4f319e67a3cdbd00ed85c2f10dfb69c339bc9498e2abbffcd54b1fdc509a20 +a9995234520cab9d1bdec1897b0b67571b718d5021c0fcf913140206b50ab515273b5f8a77e88fe96f718c80dd9be048 +aebc6495d54d0e45a3c74388891dbcfab767f574fed0581566415af872dc5b3bd5d808c44f6e1fbdde7aa9ffd260b035 +ae757f8f4b1000a623a7d8e337a50c3681544520683207e09d05e08a6f39384b7aaadf72018e88b401e4a7bb636f6483 +a626a28d5ce144cc0c6a30b90ec2c1412cbbc464ee96ac49035e5b3a37bb3e4ed74e8934c489b4563f2f7db1caf8b2ad +8c994e81dfd7a5c2f9d4425636611d5dd72d0b091a5862f8bec609d0cdd3c423eb95b0c999c48faa5dbb31e510c22b61 +a1c0e59e076b908de760d9becff24883c6eb9f968eac356e719c75cce481f2f7bcb1a41ed983a00c1a3b9369a7ff18f9 +8d7e199044fe2e552bc514668fe8171c3416515f7a5019f239c0384f0ade349e88df26cd30f6b67d02b83bf005d85de8 +80190f2255199be690fb502d02ed159aa568c390a684f7840512efc3d2a62f28a49d5d1928ad99a5f975ad81a245acd5 +889d84cefef33f5714e14d558f41d406072ba66b427bf27918b669c5be46261c3de0139610a2c2eadef8e6508e937bcb +a480a686d5085b854ccf9e261e7f1f2d40d978fc30b62b1a8fa9561127745529405820df21a680ee2258b8cefa5f0201 +ae6243400d416a8c13b80b6637726959ef07b8d9b6aff2bd3bb23aaaf97337c7a6b466c5db617bf2798e01d4ccc68e4d +85e0ff143657e465f3d934ee781de5cbd2bfd24f2fbbe6d65c698cdd93204a845f6ef1fa8941c2578463a06a8a418481 +8f4f8b45f1a9f6c2a711776db70f20149dd6d0e28d125906ba9893c5e74e31c195b0906f04c922c8b556ced7cd3d611d +877b852c33483b25c4cd8da74b6b589d8aa96e217c3c4d813466c77ef83af95a94a47364aa8421f0396ce631ad87d543 +852cb06bc4222ce125287a7a55a79ad0bf55596f26830dd6d79da3c60f80e3ba7b9a9b42b126dcb99d2cb9ce142783ef +810cd64c1dfce85d509eeb57a5c84efafe1d671454ef601a040de8d46fb33bc419577f6a6c404e28ffdfe315ffec558a +b60ff8bc804d101a32079b8ed52285fdbb47fd60c3c15cef17cfe7f6b0567de6b50128b9dbc49a1d9811b62b22c99143 +a9df7068b26a6a58f7a499e67b17d34f2a2e8e5029c6e51e2b4c0d19324fb5cd9734c4c4d5034e1bfc274cd0c74a82d0 
+ad93c50802ded1e21217a58b874c074ea52322492d589820691572084d8edaede8c2ce8021c6df8c0060f395f3c25ee8 +a17b98e090f7ef5800477132b436c1fccc1802f34956711bfc176e36890c7df95a108e03f34659142434cbd8aee9dccd +acb14aea5575c293dc0a2b58c5350390801d57e9bcda876d87c56565043ddde1a544a88b48ad0d8ec3d41f690aef801e +88b8e26cbc83faa053fa247e26c95d1bbb77955b336e1b0e41d080633248238de8adc9b98688c98fdfc67e7286bc5be4 +899f69823cf1b2204c8da91bb4f943c04d943137b08b1c46e160919e3378bd22a666a079a66e63d81c05336c742efdd2 +8d7ffbc0b47a32408c9e88676ac4f87683cf37c37d214163ca630aec2d3cc014d88caff35022ff3b6d036eb8343d52a3 +b7760f27db0704a6742855998a0c31333bb34d60ddebc95588e25b72445ae2030427aab088ec023f94563118980f3b74 +ad06ecc0f3745861c266bf93f00b30d41ed89d41e99ab63fedd795c970d3ad40560e57ab7333883a72e5575a059df39c +8687d28b1cbc8aa34a0e5dbdb540a517da9bda36160daaa7801fce99754f5d16eda3bc8e1df6b0722cfb49e177e9bcb6 +a38332c3ebbd7f734c8e6ab23ae9756f47afbf7d1786fe45daebc8d7d005d6d8fd22f5dbd0fa8741e1bfb2014d3f9df7 +b86f84426dee88188be9c5cc10a41599e53b7733ba6f2402392b0ea985effc7525756ca1b7b92041ae323337618b238f +958731a6f1881f652d340832728bc7fadd1acebd8daebd772b5acea634e9f7b7254b76d38a7065ea1b2cdea83b18a54f +adb90bff1f0d7d45b8ba28b536c0e0f7f4dc4b9a0354692ecf29539631d7a57d308db3e438e0f907810234c490b42153 +a5188c775ad76617d3bb6e7f1f3b2449f48b7bb7a84035c316284396529564a227e3b9762a89c7114fa47b3ca7ba418a +a3826ef63c98793a5c8c5d5159e2e00cc85fb5e5124f06421b165de68c9495e93c2f23cd446adf6e6528967aa3ed3909 +80eab97de89f3824ace5565b540b229adcc6ef9d2940e90de185af309234cd8aa4ae9c7ce1b409b3898c8fd10c8c2896 +8824f5acd4c2330c459fdb9ece9313263a8b20419f50f8d49958dc21754c21a77bcf7fbf3e0041f78d8fb667a3342188 +95091cf06911a997a09b643326c2fadbbe302555ab2521db806a762a5f4492636507ca71d7a093840236ac3c096614f7 +a392c81a546196d7e78b61f3ceaadfb2771d09fe43f862c0af65f5e55ce490a0293b9ab754cb5ab03ff642a9a8213a23 +afd76cce1dfa2c9e4af4f840376674f090af37d8c6541824963373f97b9dd1f405c50b2ff56165e1d4dde760e590738a +8fc4f513d3b40c10872603e1c29a4b2cf4c99320962644ce89f69ffb57f844344e1d472b2d43559119bdfb5a2c21749a +9951ca8e13b9a2b4a789e851c04c4f030470772da62f101074ef304612e9653b43b37d2c081b5d0a09196b3a167f5871 +b4f16fc2a113403ab5fc1b6a9afddec77be7406413b70ee126f0e84796168a572940550d61e443e5635591d4b6c46ca9 +8d71452cf39e7345c7298d514b9638a5cbe78af7652f0286d42632c5c6d7953ed284551fb40c77569a7721413cdbf79c +953625b58d52a308cb00ad87c44a3fd936786ada44000d45bb609ea9db6b156a0d0f9475e13ee5e053eaded19a09990a +a0983a3baa278ad5f5de734eb1b65a04f668408994e396fb0b054991ad2e56e27ac522b04fe37c9583b754e344f795b3 +8eaa454257f77a6754b2c1c5ff0036fa5b03e214576fabc657902c737fcbf298b1795b43c5006e18894f951f5f7cd203 +90183fdeae2ce2a295a567fa61b997b1f975d1be7b03d0101728cd707bb2a7111c222588ab22e573518fa1ef03719f54 +8abec7f31f6b897a1d497368a42733a6bd14ffbb8b21d3e49fc4cd3c802da70e8886827c1aea0b18d1b44635f81ec461 +a6d1e6fd24b0878ff264b725662e489451c590b2aadaf357d64210a3701fe763f529826fa6e0555267c1f5ecc2c52c05 +8fe6d2a4ea0d91702cb2a8a1d802f5598f26d892f1a929ff056d2b928821e4b172c1c1c0505aa245813fe67074cf9834 +82a026a408003583036f16268113ca6067ce13e89c6e9af0a760f4b2481851c62fadeeef0d361f51dcd9fa5674ec5750 +a489a574b862d4056091ef630e089c163c16c2f104d95eb79a27ae1e898b26d6c1adc23edc1490f73bb545d3a6e3b348 +939d85148547fc7b9894497841bd4430bc670bb670f0efeac424b529a9aebf2c02ac18a9d1402a12e4e590d623de09f0 +a3ab52cf911a2ba7fb0cd242d7778ec0d4fa382960c9bd5b476bb1cd44ff1430a3871bbbcea0a0db2630c39ee639fd1e +b7629509d8c3a3b88b31f1af137a25c38f536284f11a5bbbe0d05b86a86bc92ebbf70f17c256dc8b0d48374e1985e6f3 
+8a8647ff33e0747dd6c6ceddcf7938a542656174a08a31b08337ea49b08d814e75f8363fb51676a2cd2746569e3bc14e +a7a7f8d94d32b7cee00b3ff272d644b8dca86b8da38c726f632c2bcdfa0afb13fd0a9a5685ddaeb6073df4d9cfa3d878 +b7136eea8d05bfee2265b0e9addb4bdf060270894de30d593627891584b9446b363973de334b6105e0495cf8cb98e8f7 +a9fcd33ea59315ad7611a3e87e8d1fd6730c8cbeeaebd254e4d59ed7d92c97670303a2d22e881ab16c58779331837529 +965fd41741a0d898c2f2048945b2aefc49c735228c25deaf17fed82c4d52cf3f8e93b3fb8825ade632dc4940311b1542 +b9f400a2c7ca7da8b36470ee5d26c672b529b98e6582012cbfc2a3c24b72e73f5633de4265c417c0d47c474155a603c6 +85f333b0b1630a688a385f48bf0175cd13ecdd92fa5499494f4ad5aea0ef1b9d180fad8f936018538d842630ff72884c +8da95a735a1a98ed8e563099bd87d13a237dd7ec6880cfac56c6416b001e983a56f3d72dda7f68684bb33e4f64cadd30 +a29b66a2095e1acce751f6aec8dfeae1e5b24187dfedb5d1635ca8deae19b580ef09329a18b3385ebb117cd71671f4dd +b001deeeaf5eaf99ac558c60677b667b9f3d57cf43a2c4d57fd74b125a6da72ea6c9dc81b110655e0df01ca7b8a7a7ed +912e11dfff77c778969836d5029747b494dd81d9f965f8be2c9db9e8b08f53858eface81862c3ee6a9aa10993d0d23f3 +ac166a00e9793cf86753aa002ca274cb6f62328869fe920f5632a69a5d30d8d3ce3f0c5487cb354165763ca41d83495a +b74df519ae1a8faeff2ccd29892886b327c7434360ab5c5355752667069a77d466a48cb57b1950d10b6c47c88b2a8538 +8751679aeffa39da55f2c2a668f7b26fb8258f70c5454b13e2483e3ad452f3ac7cc4fa075783e72b4a121cd69936c176 +ae0cc16848b8bf8fffbb44047d6f1d32b52b19d3551d443a39fb25976a89d1a5d2909a4fc42ee81a98ad09d896bd90a9 +a0c8acd6a2f0d4ab0e0a680fa4a67b076bbbf42b9ec512eb04be05fb2625f6d2ed7b4349eebe61eb9f7bd4f85e9de7fa +85c629ce0deeb75c18a3b1b4e14577b5666cf25453a89d27f1029a2984133a2b8e7766597e2ff9ee26a65649b816b650 +938dbb477840d3ed27f903d09fd9959f6fec443fbc93324bc28300dd29e602bd3861fd29508da0dfdbb0fff7f09c5a6c +a7c76cd4a42ab7904d036fe6637471d9836ad15d0d26a07b1803b7fb8988b8c9edf522e0d337a1852131d0f658565ae7 +838a30260cf341ae0cd7a9df84cbc36354c6bc7b8f50c95d154453c9e8ec5435d5f9b23de2a5d91b55adde3dbdb755b9 +8f870b1f798c0516b679273c583c266c2020b8dea7e68be4b0628b85059d49e5a680709c3d6caabe767a0f03975c4626 +89bad0b6499d671b362ae898fee34ad285aa8c77d33ca1d66e8f85b5d637bbd7ae2145caae7d9f47e94c25e9d16b8c4f +af963d3dd3d983864c54b0ed1429c52b466383f07a1504215bbf998c071a099a3a1deb08d94b54630ac76d1d40cfc3da +b5686de207c3d60d4dcfe6a109c0b2f343ed1eb785941301b827b8c07a8f1311e481a56a4baab88edb3ddc4dace6a66a +95e5978739a3e875e76d927f7c68bdf7ab20966db9fa8859f46a837760dfe529afa9a371a184dfb89d2962c95d5fcf3b +96d2855e20c37ed7bd7f736e11cfba5f61bb78a68303a7ced418c4c29a889a4798c5680be721a46d548d63525637e6b0 +b134bceb776cd5866e911f8e96016704c9a3caeadcabd7c0f37204497d789bc949e41b93e4c2d597e4c924853f1b21e3 +a1949ff397013acde0303e5d64432bf6dd7f01caa03c5fc38e7c8ae705b9d5c2646b4b02d013004e5eb58e344703260c +8036a5f79d8aeb6df4810974cf8dbd0ac778906d2f82b969ac9dcfbe7ece832a7e8aad08a4dc520f7abeb24b1610ae84 +982b6b0af8602a992c389232b525d4239edc3ae6ceea77d7729d1fffc829664dd647ff91c4cb9c7f7c25cea507f03167 +b34c7d24fa56ab6acdb8af5b4fa694a1985a1741cc53a2b0c5833611e8ed6fb3b663a4d9a126bb4a1a469f2072199d66 +8166366fec4ee2b3eda097dc200cdfa0533a742dfbe7082dfa14c1c1ecafc9d9fa71f518476634f29d06430869bd5e02 +86c0251ac00b8200618c8b7ce696d1e88c587f91e38580b2d6ae48a3ef904e0ba1b20b7f432719ca40e7995f2281a696 +afd89f3bc7843a1e45ac961e49c1971114c5238d9e21647804b1852b8f476a89c12d1edfb97fff71445e879d6bfd3b70 +911d8bec4d4c3e73a2c35469b2167569f59705404425bd95440408fb788e122f96e9b1bd695f35c6b090f10135b20cd3 +b3f6350ff7afaa0660f9dddd9559db7f164e89351a743fc695d987c88f89fc29136e3c5eb81963edabf2b6f2057120be 
+a371229680d1468777862e9c0e864156f9cd7c12ce7313a8de67b7bd34e3d1b6fa45ce891a81f8316f4afcbdecf3b6ca +a6a9a875ef9efe8ba72523e645b5773aa62c4fb41efd23da3fa38105472308b8d293be766342ee0a2f00758825bd3b6a +a840d495a184f4499b944ee08f07193a1e1bb8ab21f8ce7aa51d03bd8643f2bc2616c17b68d3fe7c0fb364136926a166 +b55200ae7d6ebb0b04b748051c5907293184b126cf8a1c2f357e024f1a63220b573e2875df83d9b5e0c6e2ace9300c40 +b1e0870f2e3719f42a48256ee58cc27f613308680f2d3645c0f6db0187042dddcfed0cb545423a1a0b851b3a16146d70 +b43a22ff3f838ad43786dc120b7f89a399ed432c7d3aa4e2062ad4152021b6fa01d41b7698da596d6452570c49a62062 +88b1dc50873564560affaa277b1c9d955aebdcdd4117dab1973306893b0e3f090899210102e7e1eef6f7cdf2f4e0e5db +9223c6246aa320b1b36eb1e28b5f9ccc2977e847850964f9762c7559da9546e508503050e5566ccb67262d570162b7a3 +aeeed21b932752709f43dc0c2c7d27d20263b96a54175dd675677a40a093f02bba80e2e65afe3eb22732a7617bf4ff9d +b47cae580ae84f4e4303db8f684f559382f075ef6e95698b9a629e92b67bf004f64e7cf47e401768fa170c4259efbda1 +849821e1ead81fe2dc49cd59f2bba305578c4ea0e8f4b8ae8fc275a1c4a6192f8819d5b6d7da786c94dfc16aacf3e236 +8c60d9a8baefc72a3d3f9dd2e24cca40fb5ce36b19d075122391d9b371c904a0a15d2196c0f2ac9da3acf188d15b0fe8 +946edfe168bbe5ddb0fa6c2890bb227d8418bfbebe2bafab84909825484f799407b610d8aab6a900c5ff9eb796cdc4bf +ae7bf8ae71de5d7ea644d9541e49da1ec31eca6ff4c3fbec5480d30e07ef2c2046cc0a486af7b3615a6a908846341e99 +b4d31a6f578463c9a5ccde0ea526c95b1981eb79468665395c0e550829abfdfa86689699d57830856e324092a423f231 +93415ad3a732417cca9771b056ed42db7ce50879aca7c6f71883ad297eaf5a37fd4641d44a0b7e28b90c168834141340 +98960617a413a3ba86d8257a7386355a69258943aa71834166bd624ea93b0af06178e86538e237f88fd039eacf7cb04a +881335200a487545e38d5b1ffda3080caf5729e1b980603bcdf9ea652cea7848335b83aeeaa321d3476ae4a8d9073582 +b39e84c14666d51895b7a8341fd8319f9e0a58b2a50fc3d7925cce3037f7c75367b5fb5bf25ff4720c9992cab7b8b9f4 +8ea4bab42ee3f0772d6bd24dff3643d8b61147b46ada374414d8d35c0c340e458e449d31023d96e66decf9c58e30cc34 +a5198f6759a045b6a4ba28e4bc3bb638fad44c5a139064327580e285adf38ea82a7570acebf925e81a39d9025f3a6f2e +80267097e2d27c1b19ecf95d184dcff822d34e03326b9fc139a4f8b75b3f80777bb97a9dd284d9b755f14dd401d63c0e +946f346220bd3b6f733e94b61a1ad0b44e45c356fa6036dde5882d93b5613c98e23b20e91eddc6b3c5acea38085705af +a5f559e110cad99bbcae2d9362434aee7db0f3b6d72311291649dbda3f84c10e9760b66b988db3d30067bf18ae2e5238 +8433b38e5c7b293ef532f8c70cef1ed9be7f31f60d5b532e65df7d2885203be78b7ad78ab3011bc54cd9f64c789bf837 +a5a4c0a9b0e0b6bb912cf6ecd30738b0acc0146d77442449b486c3f32d7e60244f643a5cf9cc6da2de5408d0c5f17691 +a81feb329fb51b72464bddcfcf4e02149d995b548d88c64ba143144ce16b652c9913c8ee948ee837596ec97cc43d8cc9 +88e5a7e93a738d61330425bc21ade88d33d7160d124bf174eb3e12a00283654431036977c4f1a47a1bbbf2ef8449ac89 +ac75ad7c099383069e662bfd3624b92b64b5838246902e167fc31b9411efda89b2c6bbd1d61b9eb7d304faacf438d70b +8583bcd1c7cb9bb4bb6bcff803b0a991912b8403a63c0d997761ff77295ccc357d0292318601a8c61329ab28fed7bb83 +a1f9aa0523f1dff00023a44a6c3a9e4e123be0f6722a1c6682ac3c6047efe9e62f4773daf4767e854e1fcbf8ee7339e2 +85f65ebcf5c7e574174b7c4c4166a9a5368e7986b8c0ef846c2e13b75dea7311a87483503149ebfb3cb839b3ef35c82d +abc55eeb72699031a367b9675a2b91a8434e1f01467660903ced43a0b2a11a85ebdf48f95c13ff67e4e2958065a50ff3 +a4ff77c9b86939a15647499b9412417b984bfb051e5bf27b35392a258a5dac297bbdbcf753a4be6729ffb16be924a2ff +af0d41c15b5172efa801cc85ed101b76844dcd06712d0d21160893235a2dbedd15d187a9b31cf0d0ca6c14de6ab2b707 +92661339199f18e5dd9a210783c1d173a26dfa315bd99a33d6f04bf506c871a2b47745c1909faa209d5e6c5c645124a4 
+b35813dafb52df709dfa47982bfb44e1bf704f9f46085b2a0e92511dff90e5597110f614f8915830821fc5ed69ae0083 +934a05aa713fa276a4d47f1a28ef06591e5a9a69293c1651c223174df0af4927fc9cd43d374d89c1b4f7c8dc91abe44b +8f83a0ef05202c0b7170ac96f880135e2256fdf8964dae5aed5dd0f6452a6d8e123321e8c182b3aa6f1f8ab767caa735 +b92db10c21c321cf1349fd34129d7180e5088daf2bbe570de6427299aab68992c011c2e2939a44247396f5427c1d914a +95ce1892d1ce25ef2bc88a23880055a4d829a3b31f3806635fd49bec32cca4e965b129b6dd3e90f7e3a2eb293ffc548d +970cf816ee7501ade36b0b59f87c7e352957f67f1f75bbacd8ed52893f9fc40572c76f49c23db44866af7e34a63cd3f9 +a2fcd08581d3569fff699fd7ed1ede5f98f2b95956ecdf975a29af053d9f4f42600b3616ad6161e958c3ce60139c20a4 +b032688b6cc8a7e63dcb82694f71f087b1ee74c4d5fa27323b1ead3ba21722d7fc49eda765725b5553db5260005049c3 +b0b79e4329f1ad25ef6a603390baf889757cab5af10bfa6953a61f89aaace0442b9ef08e57ba778f1e97bf22f16f0ace +a2e6ac06f8973266cd0df447f82cec16614df65174c756e07f513e2c19aa82c10d8670047860960cfba3c5e4c42768c8 +811e66df0f3721a1ae0293549a0e3cd789f93fb6be2cab8e16015a6d52482af9057b1b75e9456322a5a9e87235e024cd +8744a80b3d9e37da4c50c536007981a4958d7e531cb93916dbf985cdc22f4ff482a5cc4fe50915c049d2de66530f1881 +b20b6e8c7be654c23c8ca440be2c37cf9cc9f4e81feedfd0cd7c56f37eda8f295fe5d415e9bac93d5f0a237edd8bc465 +b33fd84377f31f7819150d464b5eb3ef66e06cb8712665cf0587d61e1b1c121d11cc647f3753bbc18604941c77edbc1f +83acb8a3ec5f477b6d44cd49f9e091bc2bf7c9dfee876cde12075a7db9262314cb66ad2e7557114e0c19373e31c6eff1 +acfe4172327832ee207eb07da9cd37da3b009c776f7a8290529f0249f58da213254baddc7c3074fbaa1d226ba1e52b7c +81911b4dea863424b9d77a981987732382702e0294d8c8e1ec48e89678ecb0e64836b45205a120885fa8f8a3a4b9d4b0 +b11f61b1302579a11077bb2f1f0db371ab943573b261be288dc76172eee8a5102b992a5b526092d160ffd20aac2d4856 +ab491f7f1e002a44944c02537f365e525ebb6d5614bba8e5e8e8bd12064c702a1759571ddbeee592a0ba8b73cfce8810 +89211da3d92aed6b111de001b8b5a9231a1c2d09fb1cd2618ec457b635a6c8590fe119acca42fce76dce791c35b889c7 +a5f076c8f7164bcab8af59021ef97a0afa93d0877e52241c3ff5a9a9f81227a55c119ed6a84d34b196e94ec851ca5ca0 +80d91417d0d6c1adb5a3708165da1d54a83caaff482a4f65abf3fb335cbbc738c74ed19a8c451ca98befdf9b2d8b5f90 +aecba33a67f66401614eec5fa945e763da284edb9dc713bad4ac03972630781a09a3e2a291aac0605a9560c5f3444de5 +8a0aa1320bf5217a049b02ad02a4f892bfd6a3f5b48f472041d12f3aaab8dd197307f144f9de5f9e762c6b4971a121b4 +a4120a569e446fe4129f998e51f09c1cc7b29dc2b353d6f6f05daad1a4ef99acfcbaa4950a58aacf7ee1b3fde0af33d0 +aff71370d58b145758a5f24cf3c0c6667d22a1f950b8137c369fa845a5265cd645b422f24fa95e1cd7db1d68686120b6 +a839f075a8a702809a51fbc94595eab4f269a2e7a027aa1f4fc472e77f586138bf5aa4e5570a560e139eb6cda4cca161 +9484f1caa3e35cda0e3d36e43aff3dd8cf45a5a51fc34aafa3a63ed3543047ba9d6af2a9bc7c201c028499e6b4c41b28 +84ddb374c5c9170903bb3e1054fad071b0a147a9ca2ebe2fdb491ebb2431d53b398872a39cc385f973e38579d8e60158 +acaad8babaeaeb52c5b5a16ae689fa5ae15846f2d1f3596a52371bd8681819603822ee8d32ab8cda1bd5290d601e483f +946b69ca5361b60c3dc31db13669b05e5c0452f3c80e7e185f9667a36f351e9ed83bcb5c6dd2439ecd4490e3a87d260a +99f457221ac40df86f9b4bef0bf8812720b2f7218273a0aab08c4d4d4fb18a0fb0ef6ba9bf7fa53c116cc6f16742e44f +8bc0e812d8b718dbe48ead74a6bc7bac68897d01d097422be04110a25589bacd50d336d2c8b70d0dfde6c1b8bc372dc3 +895d118dae2fb35a4b0de22be0d000ec0f0f317b9494db7c12f10d7db81b6f3eaf6d6f3fdfe952f86ec4143d7469368d +893bf3d7e579e800526bc317438a69590d33759931830daf965cec721baa793ea335e9624a86b84b8fed5effc3e2bbac +a112d30dda88c749ca15d6dc65bcbc7fe838b2d25329d44410a9a96db195c7ce6a6921196a61ba7c9d40efdb101a164d 
+b88b5340af052fc3b8e1a8cf7532206801e79d878f1fb02b32ac4f8e91b64e0ec9252d808b87c4579de15886a20aaef1 +865f76475bb5da18c6a078c720c7b718e55d310876c98017c30ac31882ae347258b508ec34001918324250241d2df5b7 +b6d8a15913eb1714061d5cacbd0bb05edd83ecdb848a89b864e7411598e9f7814d0c039ebe4735437c8370d2ff183751 +a95fedce8351ae9c24d7fa06ebc5cd4e3aef87afaf04a7150e561a6a7f2347bdcec1e56b82d6e5f597fe7124f6cc503b +8526004ca0c802b073d50b0902ea69975949e7567b2e59ca2cf420bc53d91951d26096f2abb07a2955a51506e86488dd +99ccecaab68b6e5adadb9c848cb577de7e7ff4afc48d3b6b73bc0872730245b8a1c68cebf467074af6756d6226f4f4a7 +b5497d5c0cd79b7e6022e295642e1f2161254379eb78ef45e47f02c84ef5a3f6b6297718e4fac8093bf017287e456917 +b6943f30012b2093c351413c2b1b648afc14a5c4c0c338179d497e908451d2779919fe806181452ed386c1e8f8e8c25c +afdb56ce89bcd3247876c918cad68aad8da65d03c7c73ccbee0c4c39f3ad615aab87ffa0db5b3b63b4cc915d0b66deb7 +a44659d7be2f11d4d4949571d7bf84a6f27f874d3281edc34ef1098d321a4dcad9a42632b39633f8f9d20a39f54a2464 +a3e489b4db5832280dd58c62120262471b6fb4355c2ad307bd17c5c246b3f1e1b00f925930f5f5f6987de234fcbb7d16 +87a4e3a190340ed4949597703083d338e9c17263ba8a39b67100589f0dddbc420d9557f9522c17c71ae04b76876f8db0 +a35a3978e928eaac8c182a0a613c611ae7b4827c5e999f938eed06921c0294befdc21d02e68d035a2fc8d03c82641126 +a6898d90265dcf0fb215629f04b07c7918e022667583efe0bfe02f258b446954876c6ca9e369ffe1bb079e2314ebda32 +922fc52e648b6b2b6768c079c67ab425da72907a46add801715f8a2537280869d7071d527b833aa63ef562ce059a392b +8acbb7c4297196d8d1c131040c34cc7064656a148c2110b19c672abb094b1d084fafe967f7122ba9dd1523a4eaec3b42 +82dbf2cdd581fe3b81b156792228eae2485710e6c21dd5fd14614dc341bb0afbebbc0f32340eda9f094b630afcfc17e8 +907a095dca885da219e4558e9251ec765cf616e995c61546bc010963bf26f2d8adbd9b2ef61f2036e1740a627c20fbed +a7a83f849691d04640137989a2d0c90a7ed42a42b0ad328435d7e1fba557a27a58eec9170ab3d0099ec97da0c950765a +b7d435a801c2a5652cb479027f2c172eafa3df8ca0d896bbb9d49a42c42660fb382a8439bfed09ddf7e0214cb6066761 +8bc6b5e79af5512589f90de8e69bc858277055cf7243f592cc4edd193f03f71d16c9300097ddafb79752c63f135c884c +913264fca800467bee58a429e1f245ef303f5dbeea90f0ce6bb3c7ae6d1bd0f99ea75d3d309634684d2178642c81b5d8 +83ba558f9c23b785a123027c52924a1d7334c853a6165d4f5afd093b0b41951a36860ba0a20fa68f73d7db9df0e3ef38 +875b2df7cb54ecdf7ba31181b9dc7dbe02761ab8ffb61757d42a735c8e20d44bad5b904e76dcec6bb44883fdb9f4ad84 +af3dc5d2dd29565de8f4c700d5f1ab71dadb4351f06e9ee2eb5ee7a9b5da827d0c6726c6dc780748a26aa3b4d10e6c2d +a113ff09296b25f550f6d0d3f37dd4517b14cf6d5517293bd3068aa3aea765a8640fcd4bf0ba96db5c00167267fbd574 +a138c5cca485b9180ef091c9e327982bea203c165cb83564f416c36e813bea1ef1f6345f57c8a591df360541b7b758f5 +85793441e917ed520d41dda6e762269fb9f9702e5ef83cee3e90652d324536bf4233425cd05b54a383609076ab84ea13 +b422ac9de53d329e6321a8544c264d63cffc37965d627d7e180a999c3332644e21fedf10cd2f43cf6ba4fc542db91155 +a85d31d4bfa583a493681e57bfccca677ec5b85870a53de37f7be7833b573f8c8dcf029cea4ae548d83048030d77d56d +ab8a0702a371db496715a4ee8fcb6d430641b0f666d7fe3ef80c09df0bf570293cec1aa1675381c6bbd9ecc1f7cdccf9 +b308ef2b87438d35957191294782e9f5014a3394fadad3e2ccaf6ebf20fd889a36dbb8ddb3634baa8e2e131618aa4e70 +919e972e5b67cd65f377e937d67c27b4dd6fd42cfe394a34a70e8c253a1922f62ff36b9dcc7fbbc29b0960ad6a7fde88 +a0e4d4be28301af38a910971c8391ef3ec822ce35757226a7fd96955cd79afa14accba484ef4e7073e46b4b240a5863f +9422f6d424c1736b4b9bb9762aa62944085e8662c4460319dac4877b1e705aa5cd8b6b3a91268363ec3857c185685f4b +b7cf9f2053119d284a37df4e4489b632594df64e5dc846652ee26b4715e352e6333118b125021481138e4ec3e9f9987b 
+aea983e81c823472df8652654be8a60a8bf40147d599f87e323397f06bf88c98e9c6db0f28414f6ea4091f3eb0f6a96d +aa20bf03cd8b6ffda09fe0ef693fc0aaa3bb372603e786700e52063a4f7ee742771c41cf5e67e6248f99b7fc73f68dbf +8748a4978198071d7d5ddc08f8c8f0675d895dc19df0889e70bd86d44c469c719b93f6526c7e7e916c7bfeb9a1379aaf +b8fcd863d55dab2f7b1c93844306e00056ba17338ddfa3f02689a0b58b30239beb687b64c79b8420ecea8d0d082d9ffa +abb1a35952dc8a74dd1cdbc8ae7294c6bfd1910edab6f05c879e9ed06c636a949fe0017ec67f8f6f73effcb5817cccae +8bef43422b1c59e354b7f46c08a8eb78e26c4d01c236a4fe781cefb7465293a4444f2bdc68c6a221cd585a2494d9a1d7 +93527258940feff61befa18fcd6626fcff019d34a3ac8c6886599cbef75b15c15d689e8c1bd2177cc93c4c1792dee8d7 +b7f114eea99c8278841180ec8886ad2bab1826554a1657b9eeb17aa815f31b59c3931913ddec40aa9923bc92f8975635 +91a96446158b194a0a6ada2e37c8a45f3017c34034f757245f6f3b98c65d39d084e74d2a9dc271e5918faa53990ec63f +aea4ada0a853753db03f9790e20bab80d106f9b09e950f09aeaba5d869f0173bed673b866a96d6b0dd8123a539caac9a +b8e3e98ff0d3e512441e008a4a6783233045a4639e0c215c81984846b43ff98de99d7925cf717b1ca644f6229b6d16a2 +8987ef81a75213894e11e0310e8ba60fe06e2b264cc61655e5b51bf41cc8c3d6c10696642ea3517770f93be360207621 +8d4eff7335252f74af4a619c78625fd245df640f2086338dbb6c26b059f83fe70f3e81f5b6c12d62c0f784e572d56865 +a56f6389b0bac338f20c615d7d11e16045a76cbea23ced0a9d9067f538421c378200bfd4523b7c96094ab67f47f98d42 +83f5ab0727fd6ce8b3370ce3fac1f3a9c1930ea7ebbd16be61cc26f34aa1291ba4b5f16729d7d4f5924eaa4a1e31a04e +8cc62366874bf8751067a526ea32927584cef41174e2ec5a53079ee557067bc282f372b831cb2547c5e21a2f178c91b4 +b609e141006dc8d8649457efc03f8710d49abb34bc26a33ed4e173e51b85d7acdf18d74aed161b074f679d88f5aa2bf3 +873c7aa784c17b678443320950e494250baff8766db42619b9fc7ec4c3afa4eee290cd1f822b925d5b9e55c9cdd1af2f +859ba787f052d3665481c3dd58159ec8c238d918fb6d2787ebe275ef9acd377cb7aaa03a69820c78247bf51afee3d5bf +8eb1e6d2b0f51a3275b4a8be96957cb2d518b32c815dc0dfd5f75340c7dee73e5edc45db7c7d375c4ffaf8c59767d0c1 +85f3876ff5edbb826a9592e68db3dcc975725bfdda4fcac197758a8b27e4f493e6c531b1342ba0f5a75f965273720345 +8a1272f2678d4ba57e76c8758818965e6849971e8296b60ff85a522feeaaa3d23d3696c040d8bdaf1b380db392e988aa +85002b31ce31be7cc8757141a59a7cf9228b83144993d325b2241f5bfac09a02aca0c336307257f1a978c0bbf79fa4fe +b96bd26a6bbbc705c640285fd561943ef659fca73f25e8bf28cfcd21195752b40359d0edca0adc252d6e1784da267197 +936cfe367b83a798ab495b220f19cfe2e5bde1b879c8a130f84516ac07e3e3addcc791dc0e83a69c3afc225bed008542 +b1302f36190e204efd9b1d720bfaec162fcbba1b30400669dbcdd6e302c8c28f8b58b8bbde10f4512467dd78ed70d5e0 +8291b49f56259c8d6b4fd71525725dd1f35b87858606fc3fe7e048ac48b8a23ba3f0b1907b7c0d0c5ef6fa76cddc23f0 +97aca69d8e88ed8d468d538f863e624f6aed86424c6b7a861e3f45c8bf47c03e7b15d35e01f7add0a4157af171d9360c +b590d896e6b6f2e4dcffebfa67fc087fa518a9c8cb0834a5668cabe44e5c2b6f248f309b9cd74779030e172dba5d9e29 +97e7099bff654bcb37b051a3e8a5a7672d6ab7e93747a97b062fc7ae00c95deef51f5ced2966499217147058e00da4be +83435b739426f1b57f54ebad423939a68ad3d520db8ca5b7e28d1142ebfb4df93f418b180a6c226c0ca28fa0651163a0 +946c9144d982837c4dbc0b59544bdbc9f57e7c9ef0c82a7ad8cfddea78dedc379dbc97af54ba3ac751d844842a2990a4 +90ba1eff9c25adba8c3e6ef5b0d46c13de304632fec0646ee3a7bee69da2bc29e162dd3fb98a37ed1184ae5da359cf0a +b17b7a5c0a48eb9784efb5ff8499230b45efeb801cf68e13fe16d0d308511af5aa60e3b9a5610f96d7c2242ae57d455b +9991245e5617c4ea71575e5b2efe444f09cbbed13b130da08f8e9809d62512e8298a88d41f6aa3dbf3bcbc90654ceb18 +a1190c4cbccf2898a7fe025afd03f8652973a11cef59775fb47d69a6b4dcb9a5a0c554070421a5e10a75e43b63d37b79 
+857c0a5f291eb35a76be11543a8c3d798187bd0717e2cdee50d390b66322d0d9529520fd3377136cdc93cfee99b6403f +944d11e5f9a3493c67786df94f129352d892fbdc43e98206b8dbf83cce240f65305e1768b38e5576048a31dca5c18f31 +818f361c5dae709e067a82b81beffbd9674de8df2bc1bfc3a27ddf326260e124e46b1e36697fb8de539b7736db093e9e +b07f5b737735a0d628e7ac2d335080b769bdb3acea38ad121e247a6e4307916ba1d029da5d341f079ea61eeaf7d8554e +a69e338803f3ee0fbbddc7ee481a13f6b64d25d71bae0d76f4b5145b54923cf1616c77ba0fd9ca37a3ae47208f490423 +acaee66b94e226622e28a144f93f6b1b442b9c79d7a8a1740c4d53044d0675a661e7453509b9e716e469fe11ce45ee31 +9402ca799d2e1cce0317ed49453ee0b2669b05e68ff101b89306db215c3941b3786ad3402d00369cb1dee020b56d3142 +849440c539fc0df3c8d06e23e271e6faa50234d5c057b8561e9376415f4396e548351cc677b0abeafe4f51b855a3dc83 +865b99587eb3dbc17e412647673f22b2e89185d1df1ec8ea04515585ad2edfb731be458123118dcd7b41b475026477b9 +9390618833b5adbaf24bd38cf9fc6f25104717f314259bb4da5c7a1f6963ecdc04d07bed391d8cd765c3d53567b2b6b1 +95383e8b1d0a629cec238b5ae2bda236a027f4e3b5f99ceace05f1d5a781ec1e7a43058f44ef0a5aee6b0db5697a0d89 +91739b8946d90db3a5244f7485295cc58143ba0449c9e539df1ba3c166ecf85ff914c9941192963c32d35033ae2f0980 +b5d88848d856d882db5947b9182025f0abf2bc4335b650fa0a48a578e2c87f32cc86d42d3b665ee2eab46d072bf1eccd +91f4c754549f5a53b1902ef84274ce9acf0bfd2e824e62eb127d67e3214ce05fc2430c05ea51e94dc6e8978f5d076bab +91fff8c75f8ad86afe78ec301de05e4ca71421d731419a17c747a9a0bf81129422c9499e4749107b168d1695dc90292f +99fbd7bede9cc1e2974c2a21c70788960c2dbf45a89552da8d73bb1d398b8399590707f2f4ba4b43cb356e703eb01b5e +80a51cd83e3d748c07b9ac82de1a697b09031e3edc7bf585f06cd0ffa8ea319517fcc2b735614b656677b54b4910814e +886b27de1f93311d1a31b6d698aa28b54fbd800decd8e25243d89e352ee38cb252d5648b5134a3e1ed021bae46e9da48 +976e70c94db905f83b4ef72188d840874bf005814c0c772f3832aa65b1f21927403125eea7a07b6d3305b1a781b36ab7 +b4adb9d1c49eb31462583580e3ffa625bea4f8b2a7d4927e4ff925c1759d4b3c1e43283d635b54fb0eabfbe1f4c12992 +b66b466bd48485ebeedd47e749d86cbaa3deffbbee2e69cfaa5e9f3bd28b143d7c1c0255a7a1393a2cc1490b2c485571 +8bded5bc0794513947ddb00ff6b780c5cc63a74e2a0b0284153c346a31c82e1eff07c073939da39e6f87a06c14ff1a80 +aceea8c6f799589f6b7070abf69fec724e6679514e60f1eaf9a52c37e9cebb72abcc833a81d8da1a4f5194c1a7eeff63 +89a9f76d053379687fd221ebcaf02c15c2c241bb673ef5298e32640a115d9e0f2331c3e185572cd65946dd6c5bd42412 +a57b6f1e3fdd92eadc6220760f22d0685a82cada1c7a1bda96d36e48e2852f74f3a83c757dd8857e0aee59e978da4919 +9106cf0891bb39ce87433c5f06a5c97a071d08ad44a7cbcd6918c0729c66bb317fbbee8aa45591cee332ad1234c7257d +96c18cca4a0f0299e0027ff697798085f9f698a7237052c5f191b1dba914e5a015ae356b80c17f0fdd31d08c5a939ebb +a892103c93df126c024825c07d8769bdac5f1d26ea9509ee26530dc594384b2a5095cc34e0b41ab3db0392a29792c9e8 +b7c2dbc95edb6fc25802ea051803b7bea682f87a99f8a9fdcc3091c81d914b9493dfb18a8894c964805298a6c22b07f2 +8e40948927d560a6840d7fb99802989ce72b43693e9dc7ed9dcda4bca7daedf75271cf656bcc22b3f999a550faad8648 +b354de1c6f0603df3ed9036c610281e55b51a48950ee3ce57a00b4692232de7ca57d19722700e15cbe67a91fcec2f786 +adf987b90737b933436d8036c1d3f0c9104f26c540052e22e703964f72739ac1261e4289b8f27dec47281a0f3f51378a +8ed5248e9c836fffa7c924178db593e1aaeb54bcf2e93c1983c1f3899cad538deeb2b836430fddc9b2f283e0797ea11e +907e5410e3bd5d7f55340e2f497bd1ca10bfcb4abed2c66a3cdf94dc40bbd7c43ac98754e0b4b223ea4c61eebf2f27f5 +8e81b441ea0397db28840fb4b3c3bfe6d8e31418816f7bda36f9c1cfe4556daee30c43639d90a2dc9b02a3d65e5f4ab2 +897085c477f5030f9fed06e181b05953a8cd2001d959dd6139738d40f1d673b2c7120b5348f678547acfdc90ffc9fcc6 
+b0bf2784c4b3808a04be5a00a0593035ce162b3886e1500247b48365eac8ec3d27c7e5e6372e030c779c75fb79772d0d +af3fe6c75f2a1241ac885d5091ff3882cf01695d957d882e940f0c31f7a5b5e269c1a2bae7336e9a7cda2b1d23c03bd1 +a6d94e065f85736d77080a4f775885ccb0dd5efdbe747e4595280bca0ebe12450257c1beadcbec77566ef57508c5d4df +a5c50fe56b5532bf391da639a2f2b6cbb2634fc6637416fea7c29a522dea024d4adaaa29b6d472b4d2cc3e3b85c72e2a +afc35f5a03b245a6286318ef489db05d397bbd16c17b4e92eeb56509f875246c0176c01804139eb67dc4247c2a36ff9e +99ba14ab5a9612c078f9bbaa0e68fd1d52ecceb2ed19bd9abf8f98dd4ed1f9c4fa6e4d41bcef69be2ff020b291749ca8 +8018cdd3d96f331b4c470a4c3904bed44cadecbeec2544ca10e4352cf4ae1a856cf55f6383d666bf997ad3e16816006e +a9964790c318bb07b8fe61d230dd2161dd3160e186004647a925cfec4c583b4e33530bf5d93d8a14338b090055085b05 +ab89d8401df722101c2785cb3ef833017f58376ee82cedd3e9405b2534f259bb76063434a247652c7615a6de5194de65 +a72c3d320a0d40936dee8edfb36703be633aefbb8f89530df04eb6aebe0305ef4f4b6709436f8036d417272a7e47e22a +b3457661ad62634cc25e2918921a97b0bf5c59ccc7063bc8eb53194783f07659f42f8978c589228af5b12696588d8b2f +926fa35cd3ed4c8ad78af6284b87ae53b2e25a1ff50398034142a2bbed5b989ba3181ff116838931742c0fbcd8b8a56c +ae57fe506626432f27ae4f8791421c2df9efd9aaabe4b840ccf65fc3d0dd2f83e19eb63ae87bfa6898d37b5da869ddb2 +99c0a26ac74211db77918156d7ae9bea6ecf48da3ce9e53829a9ad5ed41321227c94fbd7449ae2e44aae801811552b1b +abdd2635b61cb948e51b762a256cf9d159b9fcb39b2fb11ba2fed1cb53475a03fc6e024a6a824a67a689396119a36a7b +a5ca98b98da8bb8eb07b1e5e3c85a854db42addefacd141771a0c63a8e198421dccc55ef1d94662ca99a7d83b9173fc3 +a821bb5cf1eb3aeae6318c8d554e2ea3137d73bb29d2e4450c9a33f441355ea77bb0e0e0ce7c819abc3ed119110a3a92 +95cdfb19b3f7196c26d60586e2c1efaa93352a712f8c8ef6209f6f318cecd52d7bebdfbfee4be1f5903a1595f73bc985 +aef6e6a400106e217f9888afcef0a1e1299b59017e77dc5453317dec0c32ae96873608bef3f1b504a7e4f45b06edc9c6 +96399ad093299ba26dc09ae85dbec9a1801dea4a338dd5d578bcdcb91246db0059e54098ba8a56cbb24600a40095cf79 +ad8b018ac99857ad4b38bdf6d110bbef64029a4d9f08df85a278c6ddc362a5f64e1f3a919f798ccb2f85a7f4ca1260b4 +b211f3b5dd91941d119c4fe05e2b4c7bb0ce0a8d7ef05932a96e850f549a78cd20cded0b3adb3f9f8b7058889ae2cb4e +ab780dd363671765c9c9ab0f4e7096aacf5894e042b75f40a92df8eb272a6229078cd6eadcc500eead3650860aa82177 +a4d96b16ab3abe77ead9b4477c81957e66a028f95557e390352743da53d1a7ba0c81d928a7ea8bc03b9900135ac36a6a +b4d4e028099bf0f28ac32141cd8de4ee7c3d62d4f519fad6abbb4ba39592750812220a4167d1da4c4f46df965f7cf43d +aa929c5f0bd8cb44a861bfb3d18340a58c61d82afa642447b71b1470a7b99fe3d5796bdd016b121838cb3594f5a92967 +a038e66f0a28aba19d7079643788db3eed8e412fb9ab4c0f6cacf438af4657cc386a7c22ae97ccc8c33f19a572d6431c +89c1ff879faa80428910e00b632d31c0cebb0c67e8f5ded333d41f918032282fb59fbcbe26d3156592f9692213667560 +8d899072c9d30e27065d73c79ce3130a09b6a4a4c7d9c4e4488fda4d52ad72bd5f1fd80f3a8936ef79cf362a60817453 +8ffb84a897df9031f9a8e7af06855180562f7ca796489b51bb7cca8d0ca1d9766a4de197a3eb7e298b1dfb39bc6e9778 +836ebd0b37e7ef4ff7b4fc5af157b75fa07a2244045c3852702eaafa119ca1260c654a872f1b3708b65671a2ece66ad2 +9292dfd6d5bfc95f043f4eb9855c10cbcf90fbd03e7a256c163749b23a307b46a331bdbd202236dca0e8ea29e24906de +8bc37eaa720e293e32b7986061d2ffcbd654d8143e661aabe5602adc832ab535cffbe12a7b571d423675636a74b956e4 +887455f368515340eb6f9b535f16a1cf3e22f0ceda2ead08c5caefccef4087e9f4b5d61c5b110ff3e28e4ab2ad9e97c5 +a6e5ec36e7712056fec00de15b8696952b17891e48ebe2fa90c6f782c7d927b430917b36b4a25b3d8466da3ca2a4985d +895cae36ba786104ec45740c5dc4f2416b2adce6e806815e3994e98d9e1be372eaec50094fbb7089015684874631ab7e 
+9687444fe6250c246b1711a8f73992f15c3cac801e79c54ffd5e243ad539fdd98727043e4f62d36daf866750de1ba926 +b17f75044c8e9ce311bb421a5427006b6fa1428706d04613bd31328f4549decd133e62f4b1917016e36eb02ea316a0ca +8538a84d2f9079dd272a7383ff03b7674f50b9c220e0399c794a2bcb825d643d0fc8095d972d5186b6f0fe9db0f7084f +af07b37644cc216e7083bac1c4e6095fa898f3417699df172c1f6e55d6c13c11f5279edd4c7714d65360b5e4c3c6731e +87eed8fe7486c0794884c344c07d3964f8fc065aebb0bb3426506ab879b2e0dfaefa5cece213ec16c7b20e6f946c0bd2 +8a4bf42f141d8bc47c9702779d692a72752510ef38e290d36f529f545a2295082a936c8420f59d74b200a8fff55167c4 +a7170e5e00a504a3b37cb19facf399c227497a0b1e9c8a161d541cb553eb8211449c6ac26fe79a7ff7b1c17f33591d74 +a9a2cc7232f07ef9f6d451680648f6b4985ecab5db0125787ac37280e4c07c8210bab254d0b758fd5e8c6bcf2ee2b9ff +8908d82ebfa78a3de5c56e052d9b5d442af67a510e88a76ba89e4919ae1620c5d15655f663810cfc0ee56c256a420737 +a9d47f3d14047ca86c5db9b71f99568768eaa8a6eb327981203fdb594bdb0a8df2a4a307f22dcea19d74801f4648ea89 +a7c287e0e202ebfc5be261c1279af71f7a2096614ee6526cd8b70e38bb5b0b7aca21a17140d0eddea2f2b849c251656a +97807451e61557d122f638c3f736ab4dab603538396dca0fcdf99f434a6e1f9def0521816b819b1c57ecdfa93bd077eb +a8486d60742446396c9d8bc0d4bed868171de4127e9a5a227f24cbf4efbbe5689bbd38f2105498706a6179340b00aed5 +a03b97c2a543dfefa1deb316db9316191ab14e3dd58255ce1027b4e65060d02fb5cb0d6ac1a2bf45bfeac72537b26429 +a7d25060f6861873410c296a4959a058174e9a1681ac41770788191df67fc1391545dab09de06b56cd73a811b676aa1b +96bb9c9aa85d205e085434d44f5021d8bbafc52cd2727b44e2a66094a4e5467b6294d24146b54c0d964c711e74a258d4 +b07b17f11267e577191e920fa5966880f85ff7089ac59d5d550e46f3a5cdadd94f438a547cd1ec66f20a447e421f96c6 +964e33e1571c97088fe7c8ca3430db60a8119f743a47aa0827e6e2fb9bae5ff3bf6cecd17b11dd34628546b6eb938372 +82a0513a05870b96509a559164e6ff26988ea8a2227ac6da9adc96fe793485a9eb6bdcab09afac7be4aef9a5ae358199 +b1185bc679623e7a37a873d90a2a6393fb5ccc86e74ba4ba6f71277df3623cde632feae4414d6429db6b4babde16dee0 +b3d77504b7032b5593a674d3c0cd2efbf56b2b44ed7fe8669f752828045e4e68202a37bf441f674b9c134886d4cee1df +95ab31749ff1f7b3f165ce45af943c6ed1f1071448c37009643a5f0281875695c16c28fc8d8011a71a108a2d8758e57d +b234dee9c56c582084af6546d1853f58e158549b28670b6783b4b5d7d52f00e805e73044a8b8bd44f3d5e10816c57ecc +86da5d2343f652715c1df58a4581e4010cf4cbe27a8c72bb92e322152000d14e44cc36e37ff6a55db890b29096c599b9 +8b7be904c50f36453eff8c6267edcb4086a2f4803777d4414c5c70c45b97541753def16833e691d6b68d9ef19a15cb23 +b1f4e81b2cdb08bd73404a4095255fa5d28bcd1992a5fd7e5d929cfd5f35645793462805a092ec621946aaf5607ef471 +a7f2ca8dacb03825ef537669baff512baf1ea39a1a0333f6af93505f37ed2e4bbd56cb9c3b246810feee7bacdf4c2759 +996d0c6c0530c44c1599ffdf7042c42698e5e9efee4feb92f2674431bbddf8cf26d109f5d54208071079dfa801e01052 +b99647e7d428f3baa450841f10e2dc704ce8125634cc5e7e72a8aa149bf1b6035adce8979a116a97c58c93e5774f72b7 +95960a7f95ad47b4a917920f1a82fbbecd17a4050e443f7f85b325929c1e1f803cf3d812d2cedeab724d11b135dde7a3 +8f9cd1efdf176b80e961c54090e114324616b2764a147a0d7538efe6b0c406ec09fd6f04a011ff40e0fa0b774dd98888 +b99431d2e946ac4be383b38a49b26e92139b17e6e0f0b0dc0481b59f1ff029fb73a0fc7e6fff3e28d7c3678d6479f5a3 +a888887a4241ce156bedf74f5e72bfa2c6d580a438e206932aefc020678d3d0eb7df4c9fe8142a7c27191837f46a6af6 +ab62224ea33b9a66722eb73cfd1434b85b63c121d92e3eebb1dff8b80dd861238acf2003f80f9341bfea6bde0bfcd38c +9115df3026971dd3efe7e33618449ff94e8fd8c165de0b08d4a9593a906bbed67ec3ed925b921752700f9e54cd00b983 +95de78c37e354decd2b80f8f5a817d153309a6a8e2f0c82a9586a32051a9af03e437a1fb03d1b147f0be489ef76b578b 
+a7b8a6e383de7739063f24772460e36209be9e1d367fe42153ffe1bccb788a699e1c8b27336435cd7bf85d51ba6bfdd6 +937a8af7ed18d1a55bf3bbe21e24363ae2cb4c8f000418047bf696501aaeec41f2ddf952fd80ef3373f61566faa276a9 +ab5e4931771aeb41c10fa1796d6002b06e512620e9d1c1649c282f296853c913f44e06e377a02f57192b8f09937282eb +893d88009754c84ec1c523a381d2a443cb6d3879e98a1965e41759420a088a7582e4d0456067b2f90d9d56af4ea94bba +91b2388a4146ebaaa977fec28ffbfb88ac2a1089a8a258f0451c4152877065f50402a9397ba045b896997208b46f3ebf +8ce0523192e4cc8348cd0c79354a4930137f6f08063de4a940ea66c0b31d5ea315ce9d9c5c2ec4fa6ee79d4df83840dd +b72f75c4ab77aca8df1a1b691b6ef1a3ff1c343dd9ed48212542e447d2ed3af3017c9ad6826991e9ef472348c21b72a4 +af0fa5a960f185326877daf735ad96c6bd8f8f99ab0ab22e0119c22a0939976ece5c6a878c40380497570dc397844dba +adf9f41393e1196e59b39499623da81be9f76df047ae2472ce5a45f83871bb2a0233e00233b52c5c2fa97a6870fbab0a +8d9fc3aecd8b9a9fca8951753eea8b3e6b9eb8819a31cca8c85a9606ce1bd3885edb4d8cdbc6f0c54449c12927285996 +901969c1d6cac2adcdc83818d91b41dc29ef39c3d84a6f68740b262657ec9bd7871e09b0a9b156b39fa62065c61dacb1 +9536a48ccd2c98f2dcbff3d81578bbb8f828bf94d8d846d985f575059cd7fb28dfa138b481d305a07b42fcb92bacfa11 +8d336654833833558e01b7213dc0217d7943544d36d25b46ecc1e31a2992439679205b5b3ab36a8410311109daa5aa00 +95113547163e969240701e7414bf38212140db073f90a65708c5970a6aaf3aba029590a94839618fc3f7dd4f23306734 +a959d77a159b07b0d3d41a107c24a39f7514f8ce24efa046cfcf6ace852a1d948747f59c80eb06277dce1a2ba2ec8ea9 +8d2cb52dd7f5c56ef479c0937b83b8519fa49eb19b13ea2ec67266a7b3d227fb8d0c2454c4618d63da1c8e5d4171ac7b +9941698c5078936d2c402d7db6756cc60c542682977f7e0497906a45df6b8d0ffe540f09a023c9593188ba1b8ce6dfcb +9631d9b7ec0fc2de8051c0a7b68c831ba5271c17644b815e8428e81bad056abb51b9ca2424d41819e09125baf7aaf2d4 +a0f3d27b29a63f9626e1925eec38047c92c9ab3f72504bf1d45700a612682ad4bf4a4de41d2432e27b745b1613ff22f9 +80e3701acfd01fc5b16ecfa0c6c6fd4c50fe60643c77de513f0ad7a1a2201e49479aa59056fd6c331e44292f820a6a2c +a758c81743ab68b8895db3d75030c5dd4b2ccc9f4a26e69eb54635378a2abfc21cba6ca431afb3f00be66cffba6ab616 +a397acb2e119d667f1ab5f13796fd611e1813f98f554112c4c478956c6a0ebaceef3afae7ee71f279277df19e8e4543a +a95df7d52b535044a7c3cf3b95a03bafd4466bdb905f9b5f5290a6e5c2ac0f0e295136da2625df6161ab49abcdacb40f +8639fc0c48211135909d9e999459568dbdbbc7439933bab43d503e07e796a1f008930e8a8450e8346ab110ec558bcbb9 +a837bcc0524614af9e7b677532fabfb48a50d8bec662578ba22f72462caabda93c35750eed6d77b936636bf165c6f14e +97d51535c469c867666e0e0d9ed8c2472aa27916370e6c3de7d6b2351a022e2a5330de6d23c112880b0dc5a4e90f2438 +aadb093c06bd86bd450e3eb5aa20f542d450f9f62b4510e196f2659f2e3667b0fe026517c33e268af75a9c1b2bc45619 +860cef2e0310d1a49a9dd6bc18d1ca3841ed1121d96a4f51008799b6e99eb65f48838cd1e0c134f7358a3346332f3c73 +b11c4f9e7ef56db46636474a91d6416bcb4954e34b93abf509f8c3f790b98f04bd0853104ec4a1ff5401a66f27475fce +87cb52e90a96c5ee581dc8ab241e2fd5df976fe57cc08d9ffda3925a04398e7cffaf5a74c90a7319927f27c8a1f3cef5 +b03831449f658a418a27fd91da32024fdf2b904baf1ba3b17bbf9400eaddc16c3d09ad62cc18a92b780c10b0543c9013 +94e228af11cb38532e7256fa4a293a39ffa8f3920ed1c5ad6f39ce532e789bb262b354273af062add4ca04841f99d3aa +99eb3aeb61ec15f3719145cf80501f1336f357cc79fca6981ea14320faed1d04ebe0dbce91d710d25c4e4dc5b6461ebf +920a3c4b0d0fbe379a675e8938047ea3ec8d47b94430399b69dd4f46315ee44bd62089c9a25e7fa5a13a989612fe3d09 +b6414a9a9650100a4c0960c129fa67e765fe42489e50868dd94e315e68d5471e11bfbc86faffb90670e0bec6f4542869 +94b85e0b06580a85d45e57dae1cfd9d967d35bdfcd84169ef48b333c9321f2902278c2594c2e51fecd8dbcd221951e29 
+b2c0a0dd75e04a85def2a886ee1fda51f530e33b56f3c2cf61d1605d40217aa549eef3361d05975d565519c6079cc2ac +abb0ea261116c3f395360d5ac731a7514a3c290f29346dc82bacb024d5455d61c442fefe99cc94dddcae47e30c0e031f +a32d95ae590baa7956497eddf4c56bff5dfdc08c5817168196c794516610fcc4dbcd82cf9061716d880e151b455b01e0 +8bd589fb6e3041f3ef9b8c50d29aed1a39e90719681f61b75a27489256a73c78c50c09dd9d994c83f0e75dfe40b4de84 +82d01cdaf949d2c7f4db7bfadbf47e80ff9d9374c91512b5a77762488308e013689416c684528a1b16423c6b48406baf +b23e20deb7e1bbbc328cbe6e11874d6bdbb675704a55af1039b630a2866b53d4b48419db834a89b31ebed2cfc41278dd +a371559d29262abd4b13df5a6a5c23adab5a483f9a33a8d043163fcb659263322ee94f872f55b67447b0a488f88672d6 +85b33ddf4a6472cacc0ed9b5ec75ed54b3157e73a2d88986c9afa8cb542e662a74797a9a4fec9111c67e5a81c54c82b3 +af1248bc47a6426c69011694f369dc0ec445f1810b3914a2ff7b830b69c7e4eaa4bafec8b10ed00b5372b0c78655a59b +94b261ed52d5637fd4c81187000bd0e5c5398ce25797b91c61b30d7b18d614ab9a2ca83d66a51faf4c3f98714e5b0ea5 +953d4571c1b83279f6c5958727aaf9285d8b8cbdbfbaff51527b4a8cfdd73d3439ba862cdb0e2356e74987ff66d2c4d9 +b765dae55d0651aca3b3eaef4ca477f0b0fda8d25c89dccd53a5573dd0c4be7faaadaa4e90029cdd7c09a76d4ce51b91 +b6d7b7c41556c85c3894d0d350510b512a0e22089d3d1dd240ad14c2c2b0ce1f003388100f3154ad80ec50892a033294 +a64561dc4b42289c2edf121f934bc6a6e283d7dce128a703f9a9555e0df7dda2825525dbd3679cd6ba7716de230a3142 +a46c574721e8be4a3b10d41c71057270cca42eec94ca2268ee4ab5426c7ce894efa9fa525623252a6a1b97bcf855a0a5 +a66d37f1999c9c6e071d2a961074c3d9fdcf9c94bf3e6c6ed82693095538dd445f45496e4c83b5333b9c8e0e64233adc +ab13814b227a0043e7d1ff6365360e292aca65d39602d8e0a574d22d25d99ccb94417c9b73095632ff302e3d9a09d067 +b2c445b69cff70d913143b722440d2564a05558d418c8ef847483b5196d7e581c094bae1dbb91c4499501cfa2c027759 +87cbde089962d5f093324b71e2976edbe6ad54fb8834dd6e73da9585b8935fca1c597b4d525949699fdfa79686721616 +a2c7e60966acb09c56cf9ad5bdcc820dcabf21ef7784970d10353048cf3b7df7790a40395561d1064e03109eaac0df98 +8ea7b8af208678178553946b2ee9e68c0e751b34f3652409a5e66c40d3aee3a40ba6ffe2175ce16c6a81b78ecc597d02 +960234239e1e3ea262e53d256ad41b2fe73f506b3d130732d0ee48819eb8a9c85bb5106a304874d8625afae682c34015 +858459694c4e8fdafa6cdaee1184e1305ca6e102222b99b8e283dd9bb3ebf80e55d6c4d8831a072b813c8eceb8124d95 +a30a8ce0f44aeb5590dc618c81c7cac441470ce79fd7881a8f2ea4ca5f9d848ebde762fcaee985cbd3d5990367403351 +a83867643672248b07d3705813b56489453e7bc546cdba570468152d9a1bd04f0656034e7d03736ea156fc97c88dc37f +a7bb52e0fc58b940dc47ea4d0a583012ee41fad285aba1a60a6c54fa32cfe819146888c5d63222c93f90de15745efb2b +8627bcc853bdeaad37f1d0f7d6b30ada9b481ccdf79b618803673de8a142e8a4ce3e7e16caed1170a7332119bcdc10a9 +8903d9dc3716b59e8e99e469bd9fde6f4bca857ce24f3a23db817012f1ea415c2b4656c7aeca31d810582bb3e1c08cc6 +875169863a325b16f892ad8a7385be94d35e398408138bd0a8468923c05123d53dba4ce0e572ea48fcdadd9bd9faa47a +b255b98d46d6cc44235e6ce794cc0c1d3bd074c51d58436a7796ce6dc0ae69f4edaa3771b35d3b8a2a9acd2f6736fab3 +9740c4d0ee40e79715a70890efda3455633ce3a715cbfc26a53e314ebbe61937b0346b4859df5b72eb20bcba96983870 +a44ce22ab5ddc23953b02ec187a0f419db134522306a9078e1e13d5bf45d536450d48016a5e1885a346997003d024db0 +90af81c08afdccd83a33f21d0dc0305898347f8bd77cc29385b9de9d2408434857044aec3b74cb72585338c122e83bb4 +80e162a7656c9ae38efa91ae93e5bd6cb903f921f9f50874694b9a9e0e2d2595411963d0e3f0c2d536b86f83b6e4d6ef +8b49fa6babe47291f9d290df35e94e83be1946784b9c7867efd8bc97a12be453013939667164b24aeb53d8950288a442 +a1df6435d718915df3da6dda61da1532a86e196dc7632703508679630f5f14d4cb44ce89eff489d7ff3fe599cc193940 
+afd44c143dbb94c71acc2a309c9c88b8847ef45d98479fccce9920db9b268e8e36f8db9f02ff4ee3cff01e548f719627 +b2cf33d65d205e944b691292c2d9b0b124c9de546076dd80630742989f1ffd07102813c64d69ba2a902a928a08bce801 +b9f295e9f9eca432b2d5c77d6316186027caca40a6d6713f41356497a507b6e8716fb471faf973aaa4e856983183c269 +b3bd50c4b034473edce4b9be1171376a522899cb0c1a1ae7dc22dd2b52d20537cf4129797235084648ac4a3afc1fa854 +8ef37683d7ca37c950ba4df72564888bedaf681931d942d0ea88ead5cc90f4cbef07985a3c55686a225f76f7d90e137d +82107855b330bc9d644129cebecf2efbfab90f81792c3928279f110250e727ce12790fd5117501c895057fa76a484fc0 +816a5474c3b545fb0b58d3118cc3088a6d83aad790dbf93025ad8b94a2659cceba4fa6a6b994cb66603cc9aef683a5e3 +8f633f9b31f3bb9b0b01ea1a8830f897ecd79c28f257a6417af6a5f64e6c78b66c586cf8d26586830bd007fb6279cd35 +acb69d55a732b51693d4b11f7d14d21258d3a3af0936385a7ce61e9d7028a8fe0dd902bda09b33fb728bc8a1bc542035 +8d099582ac1f46768c17bf5a39c13015cfe145958d7fc6ddfd2876ad3b1a55a383fbe940e797db2b2b3dc8a232f545dc +97a4dd488b70bf772348ececaca4cf87bc2875d3846f29fe6ef01190c5b030219b9e4f8137d49ea0cc50ca418024c488 +b4d81148f93fa8ec0656bbfb5f9d96bbf5879fa533004a960faac9fd9f0fe541481935fdf1f9b5dd08dff38469ef81c5 +8e9b2ae4fc57b817f9465610a77966caaff013229018f6c90fa695bd734cb713b78a345b2e9254b1aff87df58c1cd512 +99eb7126e347c636e9a906e6bfdc7c8ca0c1d08580c08e6609889a5d515848c7ca0f32ab3a90c0e346f976a7883611f7 +8ca87944aa3e398492b268bda0d97917f598bc0b28584aa629dfec1c3f5729d2874db422727d82219880577267641baa +88ab0e290dc9a6878d6b4e98891ff6bfc090e8f621d966493fcbe1336cc6848fcbb958d15abcfa77091d337da4e70e74 +8956a2e1dc3ec5eb21f4f93a5e8f0600a06e409bb5ec54e062a1290dff9ce339b53fbbfc4d42b4eed21accea07b724d6 +8d22220da9dc477af2bddb85c7073c742c4d43b7afee4761eba9346cadbcd522106ed8294281a7ef2e69883c28da0685 +90dafd9a96db7e1d6bde424245305c94251d5d07e682198ae129cd77bd2907a86d34722cbde06683cc2ca67cebe54033 +b5202e62cf8ea8e145b12394bd52fd09bda9145a5f78285b52fda4628c4e2ccfc2c208ecde4951bd0a59ac03fa8bc202 +8959856793ba4acf680fb36438c9722da74d835a9fe25a08cf9e32d7800c890a8299c7d350141d2e6b9feceb2ebb636f +ab0aa23c1cd2d095825a3456861871d298043b615ae03fcd9283f388f0deef3cc76899e7fde15899e3edf362b4b4657f +9603b333cc48fe39bea8d9824cfee6ac6c4e21668c162c196ecd1ff08ef4052ace96a785c36b8f7906fdcb6bc8802ddd +93bfecbc3c7cc03c563240e109850a74948f9fa078eb903b322368cda0b50888663a17953579578ba060b14dbf053024 +b01f843b808cf7939a474de155a45462e159eb5044f00c6d77e0f7ec812720a3153209e971a971ccbf5ebee76ec4074f +b009e0567c3c75ed767247d06fa39049a4d95df3392d35a9808cb114accf934e78f765cd18a2290efef016f1918c7aeb +ad35631df8331da3a12f059813dfa343d831225a392f9c7e641c7d23a6c1ad8df8e021201c9f6afb27c1575948d6bf68 +a89c2a631d84128471c8ef3d24b6c35c97b4b9b5dad905c1a092fb9396ae0370e215a82308e13e90e7bb6ebcc455eb2a +b59c7f5fbfeb02f8f69e6cedef7ff104982551f842c890a14834f5e834b32de1148cf4b414a11809d53dd3f002b15d6a +aa6f267305b55fede2f3547bc751ba844ce189d0b4852022712b0aee474de54a257d4abcd95efe7854e33a912c774eba +afddd668f30cce70904577f49071432c49386ec27389f30a8223b5273b37e6de9db243aceb461a7dc8f1f231517463a9 +b902a09da9157b3efa1d98f644371904397019d0c84915880628a646a3ad464a9d130fdc651315098179e11da643ad2e +b05f31957364b016c6f299ae4c62eede54cab8ea3871d49534828c8bdc6adbc6a04a708df268f50107d81d1384d983ae +b4c3f7284802e614ddf1f51640f29e7139aae891467d5f62778310372071793e56fbd770837b97d501191edd0da06572 +b4eddb7c3775fb14fac7f63bb73b3cde0efa2f9a3b70e6a65d200765f6c4b466d3d76fcd4d329baee88e2aba183b8e69 +a83e7dbae5a279f0cfd1c94e9849c58a3d4cecc6d6d44bb9b17508576ca347fca52c2c81371d946b11a09d4ed76ec846 
+8018ea17e2381c0233867670f9e04c8a47ace1207fdcf72dce61b6c280ba42d0a65f4b4e0b1070cc19c7bb00734974d9 +af90b541dfed22e181ff3ef4cf11f5e385fd215c1e99d988e4d247bc9dcee9f04f2182b961797c0bcc5f2aaa05c901a9 +a37046e44cf35944e8b66df80c985b8a1aa7004a2fd0b81ac251638977d2ff1465f23f93ac0ce56296f88fdc591bbdd7 +a735bd94d3be9d41fcd764ec0d8d7e732c9fc5038463f7728fd9d59321277e2c73a45990223bd571dab831545d46e7aa +94b32dcb86f5d7e83d70a5b48fe42c50f419be2f848f2d3d32ee78bf4181ab18077a7666eedb08607eece4de90f51a46 +a7f0804cafbf513293485afc1b53117f0cbfaea10919e96d9e4eb06f0c96535e87065d93f3def1bbc42044dbb00eb523 +aaaad1166d7f19f08583dd713275a71a856ab89312f84ca8078957664924bb31994b5c9a1210d0c41b085be4058ed52e +a1757aac9f64f953e68e680985a8d97c5aac8688b7d90f4db860166dd3d6119e8fca7d700a9530a2b9ba3932c5e74e33 +98cada5db4a1430c272bfc1065fb685872e664ed200d84060ee9f797d0a00864f23943e0fb84ba122a961996a73dfb14 +a5e609f716dc7729d1247f40f9368a2e4a15067e1dd6a231fece85eeefb7e7d4a5ac8918fb376debd79d95088750b2ca +b5365eb8caab8b1118619a626ff18ce6b2e717763f04f6fa8158cdca530c5779204efa440d088083f1a3685454aa0555 +a6e01b8da5f008b3d09e51a5375d3c87c1da82dff337a212223e4d0cdb2d02576d59f4eef0652d6b5f2fc806d8c8149c +ae310f613d81477d413d19084f117248ad756572c22a85b9e4c86b432e6c602c4a6db5edf2976e11f7353743d679e82a +a1f219c0b8e8bb8a9df2c6c030acbb9bbfa17ba3db0366f547da925a6abb74e1d7eb852bd5a34bae6ac61d033c37e9dc +a2087fa121c0cdd5ea495e911b4bc0e29f1d5c725aadfb497d84434d2291c350cdaa3dc8c85285f65a7d91b163789b7a +929c63c266da73d726435fa89d47041cfe39d4efa0edce7fc6eca43638740fbc82532fd44d24c7e7dd3a208536025027 +91c1051dcc5f52ad89720a368dddd2621f470e184e746f5985908ba34e1d3e8078a32e47ab7132be780bea5277afecb0 +ae089b90ba99894d5a21016b1ea0b72a6e303d87e59fb0223f12e4bb92262e4d7e64bfdbdb71055d23344bc76e7794b2 +8b69aa29a6970f9e66243494223bad07ac8f7a12845f60c19b1963e55a337171a67bdc27622153016fce9828473a3056 +95ca6b08680f951f6f05fd0d180d5805d25caf7e5bda21c218c1344e661d0c723a4dfc2493642be153793c1b3b2caaa4 +a4789dc0f2a07c794dab7708510d3c893d82ddbd1d7e7e4bbbeca7684d9e6f4520fb019b923a06c7efab0735f94aa471 +93c4f57a3cf75085f5656b08040f4cd49c40f1aab6384a1def4c5c48a9fe4c03514f8e61aabe2cfa399ff1ccac06f869 +b6c37f92c76a96b852cd41445aa46a9c371836dd40176cc92d06666f767695d2284a2780fdfd5efc34cf6b18bcfb5430 +9113e4575e4b363479daa7203be662c13d7de2debcda1c142137228aeead2c1c9bc2d06d93a226302fa63cc75b7353ec +b70addeb5b842ac78c70272137f6a1cef6b1d3a551d3dd906d9a0e023c8f49f9b6a13029010f3309d0b4c8623a329faf +b976a5132b7eb42d5b759c2d06f87927ef66ecd6c94b1a08e4c9e02a4ce7feca3ac91f9479daa1f18da3d4a168c2ba77 +8fdab795af64b16a7ddf3fad11ab7a85d10f4057cf7716784184960013baa54e7ba2050b0e036dc978ff8c9a25dc5832 +b2c982ad13be67d5cdc1b8fac555d4d1ec5d25f84e58b0553a9836f8f9e1c37582d69ad52c086a880a08b4efcccd552e +810661d9075ae6942735215f2ab46d60763412e1f6334e4e00564b6e5f479fc48cf37225512abbccf249c0ca225fc935 +a0c4bf00a20f19feff4004004f08231b4c6c86ac4ed57921eea28d7dea32034f3f4ab5b7ded7184f6c7ffbf5847232ad +b2bb5a9eea80bf067f3686a488529d9c2abd63fc9e1d4d921b1247ef86d40cd99e0a8b74f750e85c962af84e84e163a6 +887ee493c96d50f619ba190ce23acddc5f31913e7a8f1895e6339d03794ecefd29da5f177d1d25bc8df8337ae963fc7b +b7966fb07029d040f2228efa2cfcd04341e4666c4cf0b653e6e5708631aa2dd0e8c2ac1a62b50c5a1219a2737b82f4f7 +92234cfd6b07f210b82db868f585953aafbcbc9b07b02ded73ff57295104c6f44a16e2775ca7d7d8ee79babb20160626 +8d3cd7f09c6fd1072bc326ff329e19d856e552ac2a9f20274bc9752527cd3274142aa2e32b65f285fb84bc3adaaea3cc +8caed1cb90d8cd61e7f66edc132672172f4fa315e594273bb0a7f58a75c30647ec7d52eda0394c86e6477fbc352f4fe8 
+ae192194b09e9e17f35d8537f947b56f905766c31224e41c632c11cd73764d22496827859c72f4c1ab5fd73e26175a5d +8b7be56aac76d053969e46882d80a254e89f55c5ab434883cbafc634a2c882375898074a57bc24be3c7b2c56401a7842 +98bc4a7a9b05ba19f6b85f3ee82b08bed0640fd7d24d4542eb7a7f7fde443e880bdb6f5499bd8cb64e1ddd7c5f529b19 +a5a41eaa5e9c1d52b00d64ab72bc9def6b9d41972d80703e9bfe080199d4e476e8833a51079c6b0155b78c3ab195a2a7 +a0823f6f66465fd9be3769c164183f8470c74e56af617f8afd99b742909d1a51f2e0f96a84397597afbd8eeaabb51996 +801da41d47207bdd280cc4c4c9753a0f0e9d655e09e0be5f89aeed4ce875a904f3da952464399bf8efc2398940d5fba2 +a719314085fd8c9beac4706c24875833d59a9a59b55bca5da339037c0a5fc03df46dbecb2b4efcfed67830942e3c4ea1 +a75dde0a56070bb7e9237b144ea79f578d413a1cbbd1821cee04f14f533638b24f46d88a7001e92831843b37ed7a709f +a6b4ef8847a4b980146e1849e1d8ab38695635e0394ca074589f900ce41fa1bb255938dc5f37027523bac6a291779bef +b26d84dfd0b7bd60bcfdbea667350462a93dca8ff5a53d6fc226214dcb765fada0f39e446a1a87f18e4e4f4a7133155f +ae7bd66cc0b72f14ac631ff329a5ca4958a80ba7597d6da049b4eb16ac3decde919ca5f6f9083e6e541b303fb336dc2f +a69306e6bfbbc10de0621cffb13c586e2fcfd1a80935e07c746c95651289aec99066126a6c33cb8eb93e87d843fc631f +a47e4815585865218d73c68ba47139568ea7ae23bfa863cb914a68454242dd79beaec760616b48eea74ceab6df2298dd +b2da3cfb07d0721cd226c9513e5f3ace98ed2bc0b198f6626b8d8582268e441fa839f5834f650e2db797655ca2afa013 +b615d0819554f1a301a704d3fc4742bd259d04ad75d50bccee3a949b6226655f7d623301703506253cca464208a56232 +85e06ed5797207f0e7ae85909e31776eb9dae8af2ec39cc7f6a42843d94ea1de8be2a3cdadfcbe779da59394d4ffeb45 +8c3529475b5fdbc636ee21d763f5ec11b8cb040a592116fb609f8e89ca9f032b4fa158dd6e9ceab9aceb28e067419544 +accddb9c341f32be82b6fa2ef258802c9ae77cd8085c16ec6a5a83db4ab88255231b73a0e100c75b7369a330bfc82e78 +93b8e4c6e7480948fa17444b59545a5b28538b8484a75ad6bc6044a1d2dbd76e7c44970757ca53188d951dc7347d6a37 +90111721d68b29209f4dc4cfb2f75ab31d15c55701922e50a5d786fb01707ab53fcec08567cd366362c898df2d6e0e93 +b60a349767df04bd15881c60be2e5cc5864d00075150d0be3ef8f6b778715bebca8be3be2aa9dbdc49f1a485aeb76cda +b8d5a967fdd3a9bcf89a774077db39ef72ca9316242f3e5f2a350202102d494b2952e4c22badecd56b72ba1eea25e64b +8499ebd860f31f44167183b29574447b37a7ee11efcc9e086d56e107b826b64646b1454f40f748ccac93883918c89a91 +99c35e529782db30f7ccab7f31c225858cf2393571690b229ece838ec421a628f678854a1ddbd83fa57103ccebd92c7f +99817660d8b00cbe03ec363bcdc5a77885586c9e8da9e01a862aca0fc69bf900c09b4e929171bc6681681eae10450541 +8055e130964c3c2ebd980d3dc327a40a416bcdbf29f480480a89a087677a1fb51c823b57392c1db72f4093597100b8d3 +877eaddef845215f8e6f9ed24060c87e3ab6b1b8fbb8037d1a57e6a1e8ed34d00e64abb98d4bf75edb5c9788cbdccbef +b5432bbff60aeae47f2438b68b123196dfb4a65cc875b8e080501a4a44f834b739e121bec58d39ac36f908881e4aa8ab +b3c3f859b7d03ff269228c0f9a023b12e1231c73aba71ad1e6d86700b92adc28dfa3757c052bbc0ba2a1d11b7fda4643 +ab8a29f7519a465f394ef4a5b3d4924d5419ca1489e4c89455b66a63ac430c8c9d121d9d2e2ed8aa1964e02cd4ebac8c +866ae1f5c2a6e159f2e9106221402d84c059f40d166fab355d970773189241cd5ee996540d7c6fc4faf6f7bcff967dce +973a63939e8f1142a82b95e699853c1e78d6e05536782b9bb178c799b884f1bc60177163a79a9d200b5ff4628beeb9e7 +a5fc84798d3e2d7632e91673e89e968f5a67b7c8bb557ea467650d6e05e7fe370e18d9f2bdd44c244978295cf312dc27 +b328fe036bcd0645b0e6a15e79d1dd8a4e2eda128401a4e0a213d9f92d07c88201416fc76193bb5b1fe4cb4203bab194 +99239606b3725695a570ae9b6fb0fb0a34ad2f468460031cfa87aa09a0d555ff606ff204be42c1596c4b3b9e124b8bd6 +af3432337ca9d6cce3574e23e5b7e4aa8eda11d306dc612918e970cc7e5c756836605a3391f090a630bac0e2c6c42e61 
+8a545b3cb962ce5f494f2de3301de99286c4d551eaa93a9a1d6fef86647321834c95bf754c62ec6c77116a21494f380d +8f9b8ea4c25469c93556f1d91be583a5f0531ac828449b793ba03c0a841c9c73f251f49dd05cbb415f5d26e6f6802c99 +a87199e33628eeffd3aff114e81f53dd54fba61ba9a9a4d7efdbff64503f25bc418969ab76ef1cf9016dd344d556bb29 +a2fda05a566480602274d7ffcaefdd9e94171286e307581142974f57e1db1fa21c30be9e3c1ac4c9f2b167f92e7c7768 +a6235d6a23304b5c797efb2b476ed02cb0f93b6021a719ae5389eb1e1d032944ae4d69aec2f29fcd6cbc71a6d789a3ba +a7f4a73215f7e99e2182c6157dd0f22e71b288e696a8cff2450689a3998f540cfb82f16b143e90add01b386cb60d8a33 +922d8f9cd55423f5f6a60d26de2f8a396ac4070a6e2dc956e50c2a911906aa364d4718aea29c5b61c12603534e331e7e +96d7fdf5465f028fc28f21fbfe14c2db2061197baf26849e6a0989a4ea7d5e09ab49a15ba43a5377b9354d01e30ce860 +8f94c4255a0fc1bd0fa60e8178c17f2a8e927cac7941c5547d2f8f539e7c6ed0653cab07e9fb1f2c56cdd03bb876512a +95984c10a2917bfa6647ebce69bf5252d9e72d9d15921f79b2c6d7c15ee61342b4fb8a6d34838e07132b904f024ded04 +93e65e765a574277d3a4d1d08ca2f2ff46e9921a7806ca8ca3d8055f22d6507744a649db7c78117d9168a1cbdb3bbc61 +8d453b7364662dc6f36faf099aa7cbbe61151d79da7e432deba7c3ed8775cfe51eaf1ba7789779713829dde6828e189a +acffa3ee6c75160286090162df0a32a123afb1f9b21e17fd8b808c2c4d51a4270cab18fba06c91ef9d22e98a8dc26cdd +a5597cc458186efa1b3545a3926f6ecaaa6664784190e50eed1feac8de56631bee645c3bac1589fa9d0e85feb2be79d4 +87ba9a898df9dfa7dabc4ab7b28450e4daf6013340e329408d1a305de959415ab7315251bad40511f917dfc43974e5f0 +a598778cf01d6eef2c6aabc2678e1b5194ee8a284ebd18a2a51a3c28a64110d5117bcbf68869147934e600572a9e4c8a +84c69a4ad95861d48709f93ade5ac3800f811b177feb852ebcd056e35f5af5201f1d8a34ab318da8fe214812d0a7d964 +9638a237e4aed623d80980d91eda45e24ebf48c57a25e389c57bd5f62fa6ffa7ca3fb7ae9887faf46d3e1288af2c153b +800f975721a942a4b259d913f25404d5b7b4c5bf14d1d7e30eee106a49cb833b92058dab851a32ee41faf4ef9cb0dea4 +b9127a34a59fed9b5b56b6d912a29b0c7d3cb9581afc9bd174fc308b86fdb076f7d436f2abc8f61cef04c4e80cd47f59 +8004eda83f3263a1ccfc8617bc4f76305325c405160fb4f8efeff0662d605e98ba2510155c74840b6fe4323704e903c4 +aa857b771660d6799ff03ccad1ab8479e7f585a1624260418fc66dc3e2b8730cfa491d9e249505141103f9c52f935463 +98b21083942400f34cde9adbe1977dee45ba52743dc54d99404ad9da5d48691ddea4946f08470a2faad347e9535690c7 +a4b766b2faec600a6305d9b2f7317b46f425442da0dc407321fc5a63d4571c26336d2bccedf61097f0172ec90fb01f5f +b9736619578276f43583de1e4ed8632322ea8a351f3e1506c5977b5031d1c8ad0646fb464010e97c4ddb30499ddc3fb0 +973444ffaff75f84c17f9a4f294a13affd10e2bceed6b4b327e4a32c07595ff891b887a9f1af34d19766d8e6cb42bfd1 +b09ce4964278eff81a976fbc552488cb84fc4a102f004c87179cb912f49904d1e785ecaf5d184522a58e9035875440ef +b80c2aa3d0e52b4d8b02c0b706e54b70c3dbca80e5e5c6a354976721166ea0ca9f59c490b3e74272ef669179f53cb50d +8e52fa5096ff960c0d7da1aa4bce80e89527cdc3883eba0c21cb9a531088b9d027aa22e210d58cf7cbc82f1ec71eb44f +969f85db95f455b03114e4d3dc1f62a58996d19036513e56bee795d57bf4ed18da555722cd77a4f6e6c1a8e5efe2f5d7 +ab84b29b04a117e53caea394a9b452338364c45a0c4444e72c44132a71820b96a6754828e7c8b52282ad8dca612d7b6a +83e97e9ab3d9e453a139c9e856392f4cef3ec1c43bce0a879b49b27a0ce16f9c69063fd8e0debbe8fabafc0621bc200c +8c138ebdf3914a50be41be8aa8e2530088fb38af087fa5e873b58b4df8e8fd560e8090c7a337a5e36ef65566409ad8f3 +a56da9db2f053516a2141c1a8ed368ae278ab33a572122450249056857376d1dffc76d1b34daf89c86b6fe1ead812a0c +a3233ea249f07531f5bc6e94e08cea085fd2b2765636d75ff5851f224f41a63085510db26f3419b031eb6b5143735914 +b034bb6767ce818371c719b84066d3583087979ba405d8fbb2090b824633241e1c001b0cb0a7856b1af7a70e9a7b397e 
+8722803fe88877d14a4716e59b070dd2c5956bb66b7038f6b331b650e0c31230c8639c0d87ddc3c21efc005d74a4b5cc +8afe664cb202aacf3bd4810ebf820c2179c11c997f8c396692a93656aa249a0df01207c680157e851a30330a73e386b9 +a999e86319395351d2b73ff3820f49c6516285e459224f82174df57deb3c4d11822fd92cbbed4fc5a0a977d01d241b19 +9619408e1b58b6610d746b058d7b336d178e850065ba73906e08e748651e852f5e3aab17dcadcb47cc21ff61d1f02fcf +947cf9c2ed3417cd53ea498d3f8ae891efe1f1b5cd777e64cec05aba3d97526b8322b4558749f2d8a8f17836fb6e07aa +aec2fdae2009fda6852decb6f2ff24e4f8d8ca67c59f92f4b0cf7184be72602f23753ed781cf04495c3c72c5d1056ffe +8dba3d8c09df49fbfc9506f7a71579348c51c6024430121d1c181cad7c9f7e5e9313c1d151d46d4aa85fb0f68dd45573 +b6334cb2580ae33720ebf91bb616294532a1d1640568745dcda756a3a096786e004c6375728a9c2c0fb320441e7d297a +9429224c1205d5ecd115c052b701c84c390f4e3915275bb8ce6504e08c2e9b4dd67b764dd2ea99f317b4c714f345b6ff +abe421db293f0e425cfd1b806686bdfd8fdbac67a33f4490a2dc601e0ddbf69899aa9a119360dad75de78c8c688ca08b +95c78bffed9ae3fff0f12754e2bd66eb6a9b6d66a9b7faaeb7a1c112015347374c9fe6ce14bf588f8b06a78e9a98f44c +ac08f8b96b52c77d6b48999a32b337c5ad377adf197cda18dbdf6e2a50260b4ee23ca6b983f95e33f639363e11229ee4 +911a0e85815b3b9f3ba417da064f760e84af94712184faeb9957ddd2991dee71c3f17e82a1a8fbeec192b0d73f0ebce7 +aa640bd5cb9f050568a0ad37168f53b2f2b13a91e12b6980ca47ae40289cf14b5b89ddd0b4ca452ce9b1629da0ce4b5d +907486f31b4ecea0125c1827007ea0ecb1c55cadb638e65adc9810ca331e82bb2fd87e3064045f8d2c5d93dc6c2f5368 +8cbfaf4ce0bbbf89208c980ff8b7bc8f3cfef90f0fe910f463cb1c0f8e17cce18db120142d267045a00ba6b5368f0dd3 +9286f08f4e315df470d4759dec6c9f8eacef345fc0c0b533ad487bb6cfefa8c6c3821a22265c9e77d34170e0bc0d078b +94a3c088bc1a7301579a092b8ece2cefc9633671bc941904488115cd5cb01bd0e1d2deef7bdccb44553fd123201a7a53 +8f3d0114fbf85e4828f34abb6d6fddfa12789d7029d9f1bb5e28bc161c37509afdab16c32c90ec346bc6a64a0b75726f +a8ed2d774414e590ec49cb9a3a726fafd674e9595dd8a1678484f2897d6ea0eea1a2ee8525afac097b1f35e5f8b16077 +9878789ff33b11527355a317343f34f70c7c1aa9dc1eca16ca4a21e2e15960be8a050ec616ffb97c76d756ce4bce2e90 +854e47719dae1fe5673cacf583935122139cf71a1e7936cf23e4384fbf546d48e9a7f6b65c3b7bf60028e5aa1234ba85 +af74bdda2c6772fe9a02d1b95e437787effad834c91c8174720cc6e2ea1f1f6c32a9d73094fc494c0d03eef60b1a0f05 +80a3e22139029b8be32cb167d3bc9e62d16ca446a588b644e53b5846d9d8b7ab1ad921057d99179e41515df22470fb26 +86c393afd9bd3c7f42008bba5fe433ec66c790ebd7aa15d4aeaf9bb39a42af3cfaf8c677f3580932bbd7ada47f406c8c +90433c95c9bb86a2c2ddcf10adccb521532ebd93db9e072671a4220f00df014e20cd9ce70c4397567a439b24893808dc +95b2c170f08c51d187270ddc4f619300b5f079bbc89dbca0656eae23eecc6339bf27fa5bf5fd0f5565d4021105e967d2 +8e5eced897e2535199951d4cff8383be81703bca3818837333dd41a130aa8760156af60426ceadb436f5dea32af2814c +a254a460ebefbe91d6e32394e1c8f9075f3e7a2bb078430ac6922ab14d795b7f2df1397cb8062e667d809b506b0e28d4 +ac2062e8ca7b1c6afb68af0ebab31aebd56fc0a0f949ef4ea3e36baf148681619b7a908facf962441905782d26ecbdb5 +8b96af45b283b3d7ffeec0a7585fc6b077ea5fd9e208e18e9f8997221b303ab0ce3b5bafa516666591f412109ce71aa5 +afd73baada5a27e4fa3659f70083bf728d4dc5c882540638f85ea53bf2b1a45ddf50abc2458c79f91fb36d13998c7604 +a5d2fff226e80cb2e9f456099812293333d6be31dd1899546e3ad0cd72b2a8bcb45ec5986e20faa77c2564b93983210c +a8c9b8de303328fbdaccf60f4de439cf28f5360cf4104581dc2d126bc2e706f49b7281723487ff0eaf92b4cc684bc167 +a5d0d5849102bf1451f40e8261cb71fc57a49e032773cb6cd7b137f71ee32438d9e958077ffafce080a116ccc788a2d4 +80716596f502d1c727d5d2f1469ce35f15e2dbd048d2713aa4975ee757d09c38d20665326bd63303cfe7e820b6de393d 
+97baf29b20f3719323cc1d5de23eaa4899dc4f4e58f6c356ec4c3ad3896a89317c612d74e0d3ab623fe73370c5972e2f +b58bdc9aa5061bf6e5add99a7443d7a8c7ba8f6875b8667d1acbe96fc3ecafbdcc2b4010cb6970a3b849fff84660e588 +b6be68728776d30c8541d743b05a9affc191ad64918fdbd991d2ddd4b32b975c4d3377f9242defef3805c0bfb80fbac7 +b0cddace33333b8a358acad84b9c83382f0569d3854b4b34450fd6f757d63c5bdab090e330b0f86e578f22c934d09c36 +854bd205d6051b87f9914c8c2494075d7620e3d61421cc80f06b13cea64fd1e16c62c01f107a5987d10b8a95a8416ad9 +80351254a353132300ba73a3d23a966f4d10ce9bf6eae82aedb6cdc30d71f9d08a9dd73cb6441e02a7b2ad93ad43159c +937aae24fb1b636929453fc308f23326b74c810f5755d9a0290652c9c2932ad52cc272b1c83bd3d758ef7da257897eae +b84d51ef758058d5694ffeac6d8ce70cef8d680a7902f867269c33717f55dd2e57b25347841d3c0872ae5f0d64f64281 +a4b31bb7c878d5585193535b51f04135108134eff860f4eac941053155f053d8f85ff47f16268a986b2853480a6e75e6 +93543f0828835186a4af1c27bdf97b5dd72b6dfa91b4bf5e759ff5327eaf93b0cb55d9797149e465a6b842c02635ffe5 +afdac9e07652bf1668183664f1dd6818ef5109ee9b91827b3d7d5970f6a03e716adcc191e3e78b0c474442a18ad3fc65 +9314077b965aa2977636ae914d4a2d3ce192641a976ffa1624c116828668edbfbe5a09e3a81cb3eed0694566c62a9757 +b395ddcf5082de6e3536825a1c352802c557b3a5118b25c29f4c4e3565ecaaf4bdd543a3794d05156f91fc4ceadc0a11 +b71f774aad394c36609b8730e5be244aaebfff22e0e849acc7ee9d33bedc3ec2e787e0b8b2ffe535560fcd9e15a0897e +92e9409fa430f943a49bce3371b35ac2efb5bc09c88f70ff7120f5e7da3258a4387dfc45c8b127f2ef2668679aeb314e +8ef55bef7b71952f05e20864b10f62be45c46e2dca0ef880a092d11069b8a4aa05f2e0251726aca1d5933d7dea98f3f8 +aad3fba9e09fae885cdeef45dfafa901419f5156fb673818f92a4acc59d0e2e9870b025e711de590a63fd481164f3aa8 +b444d52af545dd3a2d3dd94e6613816b154afea0c42b96468aceb0c721395de89e53e81a25db857ca2e692dcb24ba971 +88b279fe173007e64fe58f2c4adba68a1f538dbd3d32d175aa0d026bbb05b72a0c9f5d02b8201a94adb75fe01f6aa8b2 +88494cea4260741c198640a079e584cabfea9fcfb8bcf2520c9becd2419cde469b79021e5578a00d0f7dbc25844d2683 +94f3cce58837c76584b26426b9abdb45f05fee34dd9e5914b6eae08e78b7262ed51c4317031dab1ad716f28b287f9fc2 +b8c7ed564f54df01c0fbd5a0c741beed8183ce0d7842dc3a862a1b335de518810077314aa9d6054bb939663362f496da +81c153320d85210394d48340619d5eb41304daea65e927266f0262c8a7598321aba82ad6c3f78e5104db2afd2823baca +ab6695a8d48a179e9cd32f205608359cf8f6a9aead016252a35b74287836aa395e76572f21a3839bec6a244aa49573e5 +920ed571539b3002a9cd358095b8360400e7304e9a0717cc8c85ab4a0514a8ad3b9bf5c30cb997647066f93a7e683da9 +a7ec7c194d1e5103bc976e072bf1732d9cb995984d9a8c70a8ee55ce23007f21b8549ad693f118aa974f693ed6da0291 +87a042d6e40c2951a68afc3ccf9646baf031286377f37f6ac47e37a0ec04d5ac69043757d7dff7959e7cd57742017a8d +b9f054dd8117dd41b6e5b9d3af32ee4a9eebef8e4a5c6daa9b99c30a9024eabeae850ab90dbdb188ca32fd31fd071445 +a8386da875799a84dc519af010eaf47cdbc4a511fe7e0808da844a95a3569ce94054efd32a4d3a371f6aba72c5993902 +8b3343a7cf4ffb261d5f2dbd217fb43590e00feac82510bdf73b34595b10ee51acae878a09efebc5a597465777ef4c05 +8312a5f1ea4f9e93578e0f50169286e97884a5ed17f1780275ab2b36f0a8aa1ab2e45c1de4c8bce87e99e3896af1fa45 +b461198cb7572ac04c484a9454954e157bdd4db457816698b7290f93a10268d75a7e1211e757c6190df6144bbb605d91 +9139764a099580d6f1d462c8bf7d339c537167be92c780e76acb6e638f94d3c54b40ed0892843f6532366861e85a515a +8bb70acb3c9e041b4fc20e92ba0f3f28f0d5c677bcb017af26f9171e07d28c3c0729bef72457231e3512f909455a13a2 +93301a18e5064c55fcfe8e860fab72da1b89a824ca77c8932023b7c79e4a51df93a89665d308a8d3aa145e46ebe6a0ad +ae3bca496fbd70ce44f916e2db875b2ce2e1ded84edd2cebc0503bdfdec40ec30e1d9afb4eb58c8fa23f7b44e71d88f8 
+93cb3a918c95c5d973c0cb7621b66081ed81fba109b09a5e71e81ca01ec6a8bb5657410fdec453585309ef5bf10d6263 +95a50b9b85bb0fc8ff6d5f800d683f0f645e7c2404f7f63228a15b95ce85a1f8100e2e56c0acee19c36ed3346f190e87 +816cc4d9337461caca888809b746ab3713054f5b0eac823b795a1a9de9417c58e32a9f020fef807908fa530cbf35dee8 +a9c2890c2dd0d5d7aedc4cca7f92764086c50f92f0efd2642c59920d807086031bfe2d3ba574318db236c61a8f5f69c2 +ad0d5c8c80bddfe14bdaf507da96dc01dc9941aecc8ad3b64513d0a00d67c3f4b4659defb6839b8b18d8775e5344c107 +9047c9fad6ef452e0219e58e52c686b620e2eb769571021e3524bd7eac504f03b84834b16b849d42b3d75c601fd36bb7 +a04dd988fed91fb09cb747a3ac84efe639d7d355524cd7dee5477ecbcdec44d8ac1cec2c181755dcfdb77e9594fb3c5b +b0ea0c725debd1cec496ced9ce48f456f19af36e8b027094bf38fa37de9b9b2d10282363ea211a93a34a0a5387cace5d +b5fc46e2bb3e4653ea5e6884dcb3c14e401a6005685ee5a3983644b5b92300b7066289159923118df4332aac52045b8c +841fc5b26b23226e725e29802da86b35e4f5e3babc8b394f74e30fd5dec6d3840b19a9a096625ce79a4f1edae6369700 +8fd2bbbeea452451def3659bbe0ceb396120ebe8f81eee1ea848691614422c81d7c3e6a7a38032b4120b25c5ffa8f0c2 +9131ce3d25c3d418f50c0ab99e229d4190027ee162b8ba7c6670420ea821831dec1294ac00d66c50fac61c275a9e2c71 +99ec6eafe0eb869d128158cee97b984fb589e1af07699247946e4a85db772289dff3084d224a6f208005c342f32bbd73 +ac100fbbe7c2bf00cc56fcd5aa1f27181f82c150c53bbb1e15d2c18a51ed13dcfa7bccab85821b8ddddf493603e38809 +affd73a458d70c0d9d221e0c2da4348fed731f6b34c0b3e2d5711ba432e85a1ec92e40b83b246a9031b61f5bc824be47 +8ed30ed817816a817e9e07374ef1f94405a7e22dd0096aeaae54504382fc50e7d07b4f1186c1792fc25ea442cd7edc6b +a52370cfe99a35fa1405aeca9f922ad8d31905e41f390e514ea8d22ee66469637d6c2d4d3a7ee350d59af019ae5a10a4 +8d0b439741c57b82c8e4b994cf3956b5aeaee048b17e0a1edb98253a8d7256f436d8b2f36b7e12504132dbf91f3376b1 +8caac7e1a4486c35109cff63557a0f77d0e4ca94de0817e100678098a72b3787a1c5afc7244991cebcd1f468e18d91d4 +a729a8e64b7405db5ebfb478bb83b51741569331b88de80680e9e283cc8299ba0de07fcf252127750f507e273dc4c576 +a30545a050dad030db5583c768a6e593a7d832145b669ad6c01235813da749d38094a46ac3b965700230b8deacd91f82 +9207e059a9d696c46fa95bd0925983cd8e42aefd6b3fb9d5f05420a413cbc9e7c91213648554228f76f2dd757bde0492 +a83fa862ae3a8d98c1e854a8b17181c1025f4f445fbc3af265dc99e44bbd74cfa5cc25497fb63ee9a7e1f4a624c3202c +84cdfc490343b3f26b5ad9e1d4dcf2a2d373e05eb9e9c36b6b7b5de1ce29fda51383761a47dbd96deca593a441ccb28e +881a1aa0c60bb0284a58b0a44d3f9ca914d6d8fa1437315b9ad2a4351c4da3ee3e01068aa128284a8926787ea2a618d1 +aace78e497b32fbff4df81b1b2de69dbc650645e790953d543282cb8d004a59caf17d9d385673a146a9be70bf08a2279 +aa2da4760f1261615bffd1c3771c506965c17e6c8270c0f7c636d90428c0054e092247c3373eca2fb858211fdb17f143 +acb79f291b19e0aa8edb4c4476a172834009c57e0dcc544c7ce95084488c3ad0c63ffd51c2b48855e429b6e1a9555433 +814b58773a18d50a716c40317f8b80362b6c746a531776a9251c831d34fb63e9473197c899c0277838668babc4aa0ecb +b1f69522b0f7657d78bd1ee3020bcce3447116bf62c146d20684537d36cafb5a7a1531b86932b51a70e6d3ce0808a17e +8549712c251ef382f7abe5798534f8c8394aa8bcecdca9e7aa1a688dc19dc689dcd017a78b118f3bd585673514832fe4 +912a04463e3240e0293cfc5234842a88513ff930c47bd6b60f22d6bc2d8404e10270d46bf6900fee338d8ac873ebb771 +a327cb7c3fada842e5dd05c2eeedd6fcd8cf2bfb2f90c71c6a8819fb5783c97dd01bd2169018312d33078b2bc57e19f7 +b4794f71d3eceed331024a4cee246cc427a31859c257e0287f5a3507bfbd4d3486cb7781c5c9c5537af3488d389fe03e +82ffcb418d354ed01688e2e8373a8db07197a2de702272a9f589aed08468eab0c8f14e6d0b3146e2eb8908e40e8389c5 +910b73421298f1315257f19d0dfd47e79d7d2a98310fb293f704e387a4dc84909657f0f236b70b309910271b2f2b5d46 
+a15466397302ea22f240eb7316e14d88376677b060c0b0ae9a1c936eb8c62af8530732fc2359cfd64a339a1c564f749b +a8091975a0d94cdc82fbaff8091d5230a70d6ea461532050abbdfee324c0743d14445cfe6efe6959c89a7c844feaa435 +a677d1af454c7b7731840326589a22c9e81efbbf2baf3fdeaf8ea3f263a522584fbca4405032c4cdf4a2a6109344dfc8 +894e6ffa897b6e0b37237e6587a42bbc7f2dd34fb09c2e8ac79e2b25b18180e158c6dc2dd26761dba0cfed1fb4eb4080 +928d31b87f4fe8fe599d2c9889b0ff837910427ba9132d2fba311685635458041321ae178a6331ed0c398efe9d7912f0 +afc1c4a31f0db24b53ee71946c3c1e1a0884bd46f66b063a238e6b65f4e8a675faa844e4270892035ef0dae1b1442aa0 +a294fcb23d87cf5b1e4237d478cac82ba570649d425b43b1e4feead6da1f031e3af0e4df115ca46689b9315268c92336 +85d12fd4a8fcfd0d61cbf09b22a9325f0b3f41fb5eb4285b327384c9056b05422d535f74d7dc804fb4bab8fb53d556bd +91b107d9b0ea65c48128e09072acd7c5949a02dd2a68a42ff1d63cf528666966f221005c2e5ca0a4f85df28459cdede6 +89aa5dc255c910f439732fcd4e21341707e8dd6689c67c60551a8b6685bd3547e3f47db4df9dfadd212405f644c4440b +8c307d6b827fa1adcf0843537f12121d68087d686e9cc283a3907b9f9f36b7b4d05625c33dab2b8e206c7f5aabd0c1e5 +843f48dadf8523d2b4b0db4e01f3c0ea721a54d821098b578fcaa6433e8557cadfea50d16e85133fa78f044a3e8c1e5b +9942eb8bd88a8afa9c0e3154b3c16554428309624169f66606bfb2814e8bac1c93825780cf68607f3e7cffe7bf9be737 +b7edb0c7637a5beb2332f2ae242ba4732837f9da0a83f00f9e9a77cf35516e6236eb013133ddc2f958ea09218fe260d3 +9655fe4910bc1e0208afbcf0ff977a2e23faded393671218fba0d9927a70d76514a0c45d473a97ecb00cf9031b9d527c +8434bc8b4c5839d9e4404ff17865ded8dd76af56ef2a24ea194c579d41b40ed3450c4e7d52219807db93e8e6f001f8da +b6c6d844860353dab49818bed2c80536dbc932425fdaa29915405324a6368277cf94d5f4ab45ea074072fc593318edff +b2887e04047660aa5c83aad3fa29b79c5555dd4d0628832c84ba7bf1f8619df4c9591fcde122c174de16ca7e5a95d5e3 +953ba5221360444b32911c8b24689078df3fbf58b53f3eec90923f53a22c0fc934db04dd9294e9ec724056076229cf42 +926917529157063e4aade647990577394c34075d1cb682da1acf600639d53a350b33df6a569d5ebb753687374b86b227 +b37894a918d6354dd28f850d723c1c5b839f2456e2a220f64ecadac88ae5c9e9cf9ab64b53aac7d77bf3c6dfa09632dc +b9d28148c2c15d50d1d13153071d1f6e83c7bb5cb5614adf3eb9edede6f707a36c0fa0eadb6a6135ead3c605dfb75bd1 +9738d73ea0b9154ed38da9e6bd3a741be789ea882d909af93e58aa097edf0df534849f3b1ba03099a61ceb6a11f34c4d +afabbecbbf73705851382902ec5f1da88b84a06b3abfb4df8d33df6a60993867f853d0d9bd324d49a808503615c7858a +a9e395ddd855b12c87ba8fdb0ea93c5bd045e4f6f57611b27a2ee1b8129efe111e484abc27cb256ed9dcace58975d311 +b501c2f3d8898934e45e456d36a8a5b0258aeea6ff7ac46f951f36da1ec01bd6d0914c4d83305eb517545f1f35e033cc +86f79688315241fe619b727b7f426dbd27bcc8f33aef043438c95c0751ada6f4cd0831b25ae3d53bcf61324d69ea01eb +83237e42fa773a4ccaa811489964f3fab100b9eea48c98bdef05fa119a61bde9efe7d0399369f87c775f4488120b4f2e +b89f437552cab77d0cd5f87aca52dd827fb6648c033351c00ab6d40ac0b1829b4fcdf8a7dad467d4408c691223987fbe +8e21061698cb1a233792976c2d8ab2eeb6e84925d59bb34434fff688be2b5b2973d737d9dda164bd407be852d48ef43f +b17a9e43aa4580f542e00c3212fbf974f1363f433c5502f034dfd5ed8c05ac88b901729d3b822bec391cca24cc9f5348 +aac6d6cda3e207006c042a4d0823770632fc677e312255b4aff5ad1598dc1022cab871234ad3aa40b61dc033a5b0930b +b25e69f17b36a30dada96a39bc75c0d5b79d63e5088da62be9fcbddfd1230d11654890caa8206711d59836d6abbc3e03 +af59fe667dd9e7e4a9863c994fc4212de4714d01149a2072e97197f311be1f39e7ad3d472e446dcc439786bf21359ede +957952988f8c777516527b63e0c717fc637d89b0fd590bcb8c72d0e8a40901598930c5b2506ff7fea371c73a1b12a9be +a46becd9b541fc37d0857811062ca1c42c96181c7d285291aa48dc2f6d115fcff5f3dfdf4490d8c619da9b5ce7878440 
+87168fbd32c01a4e0be2b46fe58b74d6e6586e66bbb4a74ad94d5975ac09aa6fa48fd9d87f1919bd0d37b8ebe02c180c +895c4aa29de9601fc01298d54cfb62dd7b137e6f4f6c69b15dc3769778bfba5fc9cbd2fc57fd3fad78d6c5a3087f6576 +b9cf19416228230319265557285f8da5b3ca503de586180f68cf055407d1588ecec2e13fc38817064425134f1c92b4d5 +9302aaef005b22f7b41a0527b36d60801ff6e8aa26fe8be74685b5f3545f902012fcade71edca7aaa0560296dac5fca5 +a0ccda9883027f6b29da1aaa359d8f2890ce1063492c875d34ff6bf2e7efea917e7369d0a2b35716e5afd68278e1a93a +a086ac36beeba9c0e5921f5a8afea87167f59670e72f98e788f72f4546af1e1b581b29fbdd9a83f24f44bd3ec14aee91 +8be471bf799cab98edf179d0718c66bbc2507d3a4dac4b271c2799113ce65645082dc49b3a02a8c490e0ef69d7edbcb1 +8a7f5b50a18baf9e9121e952b65979bda5f1c32e779117e21238fb9e7f49e15008d5c878581ac9660f6f79c73358934a +b3520a194d42b45cbab66388bee79aad895a7c2503b8d65e6483867036497d3e2e905d4d51f76871d0114ec13280d82f +8e6ca8342ec64f6dbe6523dc6d87c48065cd044ea45fa74b05fff548539fd2868eb6dd038d38d19c09d81d5a96364053 +b126a0e8263a948ba8813bf5fb95d786ae7d1aa0069a63f3e847957822b5fe79a3a1afa0ce2318b9ba1025f229a92eb7 +8e4461d6708cac53441a3d23ac4b5ff2b9a835b05008c26d7d9c0562a29403847cf760b7e9d0bcb24a6f498d2a8a9dd2 +b280a761bab256dfe7a8d617863999e3b4255ddbdc11fe7fe5b3bb9633fc8f0cb4f28e594d3b5b0b649c8e7082c4666a +a3e3043bfd7461e38088ee6a165d2ca015de98350f1cb0efc8e39ed4fcdb12a717f0ede7fbf9dadb90496c47652cc0ce +a4c1f5b1b88ae3c397d171e64395afe0cd13c717677775a01dd0461d44a04ee30ec3da58a54c89a3ca77b19b5e51062c +a268638e0655b6d5a037061808619b9ae276bb883999d60c33a9f7f872c46d83d795d1f302b4820030c57604fa3686e7 +ac20176111c5c6db065668987227658c00a1572ce21fe15f25e62d816b56472c5d847dd9c781fb293c6d49cc33b1f98f +acc0e22d9b6b45c968c22fd16b4ece85e82a1b0ab72369bdd467857fee1a12b9635f5b339a9236cbd1acc791811d0e29 +b56066e522bee1f31480ff8450f4d469ace8eb32730c55b7c9e8fa160070bdec618454e665b8cbc5483bc30b6cebbfb9 +8c1772bdfacff85f174d35c36f2d2182ae7897ad5e06097511968bbb136b626c0c7e462b08a21aca70f8e456b0204bf8 +b4de3cf4a064bf589be92513b8727df58f2da4cd891580ef79635ac8c195f15a6199327bb41864e2f614c8589b24f67e +8f3c534125613f2d17bf3e5b667c203cb3eab0dbca0638e222fe552fddf24783965aa111de844e8c3595304bfc41c33b +8e445b2711987fe0bf260521cb21a5b71db41f19396822059912743bf6ca146100c755c8b6e0e74f1bf2e34c03b19db9 +87ff9adf319adb78c9393003b5bdda08421f95551d81b37520b413fe439e42acf82d47fa3b61476b53166bf4f8544f0e +83f3c00c55632e1937dcdc1857de4eccd072efa319b3953d737e1d37382b3cf8343d54a435588eb75aa05bf413b4caa0 +b4d8ee1004bac0307030b8605a2e949ca2f8d237e9c1dcf1553bd1eb9b4156e2deb8c79331e84d2936ec5f1224b8b655 +93b2812b6377622e67bf9a624898227b56ebe3c7a1d917487fc9e4941f735f83679f7ac137065eb4098ad1a4cfbc3892 +81943d9eab6dcea8a120dde5356a0a665b1466709ebb18d1cbfa5f213a31819cb3cf2634e6d293b5b13caa158a9bb30b +a9042aae02efd4535681119e67a60211fc46851319eb389b42ebadcab1229c94199091fb1652beba3434f7b98c90785f +91db52b27fd9b1715df202106b373c4e63ce8ec7db8c818c9016ace5b08ef5f8c27e67f093395937ba4ce2f16edf9aef +83cb9b7b94bd6ead3ff2a7d40394f54612c9cb80c4e0adadffea39e301d1052305eb1fe0f7467268b5aba3b423a87246 +8720fd6712a99d92dd3fdaae922743ab53fad50d183e119a59dae47cdac6fbea6064c732d02cb341eaea10723db048fa +8d40022c1254462a2ac2380a85381c370b1221e5a202d95c75bccba6d1e52972dd5585a1294a1e487bf6ae6651867167 +b7bc06e08d8c72daba143627582f4b4f34cc2234b5cb5cd83536f2ef2e058631a3920468ea4d550aea01cad221d6a8a6 +a6e1a6f70fba42d3b9ce5f04ffdcfca46fc94041840c0066a204030cf75ea9f9856113fea3a9f69ea0037d9a68e3a9d4 +8b064c350083fce9a52da2e2e17bf44c4c9643d2d83667cbd9ad650bbeba55e2c408e746ccf693e56d08826e8a6d57fc 
+8d304a5405a0c0696917fcddc6795dd654567ca427f007d9b16be5de98febbf8692374e93f40822f63cf6f143c4d9499 +b968db239efec353a44f20a7cf4c0d0fca4c4c2dc21e6cbb5d669e4fe624356a8341e1eec0955b70afb893f55e9a9e32 +98971f745ce4ce5f1f398b1cd25d1697ada0cc7b329cee11d34b2d171e384b07aeb06ac7896c8283664a06d6dd82ec6b +881f5a20a80f728354fad9d0a32a79ffe0ba9bed644ed9d6a2d85444cda9821018159a3fa3d3d6b4fadbf6ea97e6aff6 +b7c76cbb82919ec08cf0bd7430b868a74cb4021e43b5e291caa0495ca579798fab1b64855e2d301f3461dd4d153adeb6 +b44c8c69b3df9b4e933fe6550982a6f76e18046e050229bd2456337e02efb75efa0dfe1b297ed9f5d7fa37fec69c8374 +a5bd7781820ba857aee07e38406538b07ab5180317689a58676f77514747672dd525ea64512a0e4958896f8df85e9d4d +a8443d1dc91b4faa20a2626505b5b4ad49cc5c1fd7a240a0e65d12f52d31df1585ba52c21e604dcec65ec00b81ae21fe +a157ae42fc6302c54bcdd774e8b8bafc4f5d221717f7bf49668c620e47051b930dce262d55668e546272dd07ca7c8d3f +8732c10448b63e907ff95f53cd746f970c946fd84fcbfe4cf9ede63afbbfc66b293bbc7c470d691bbd149bb3c78bb351 +a82192f4fd9a0c33489a0486b79d0f6c797c7eccb45f91f7f1e8e1dd1924ca9944b983951025b99ab5861d31841451fe +839efc6d199ddd43f34f6729b6b63f9ee05f18859bf8fd3f181fa71f4399a48bff7dde89b36e9dc1c572f1b9b6127cca +992ef084abe57adfd5eb65f880b411d5f4ed34c1aeb0d2cfac84fff4f92a9a855c521a965ba81b5eef2268e9a9e73048 +a2518ab712fa652e6e0bd0840307ef3831094e9a18723fb8ec052adacbb87f488d33778c6ec3fd845003af62e75125d1 +b630ac3c9e71b85dd9e9f2984bb5b762e8491d8edb99cad82c541faf5a22dd96f0fddb49d9a837b1955dea2d91284f28 +8d886d1b7f818391b473deba4a9a01acce1fe2abe9152955e17ba39adc55400590c61582c4fef37a286e2151566576ed +884f100dc437639247f85e5d638fcc7583d21bf37a66ce11e05bfc12f5dbe78685b0e51b4594e10549c92bb980512e12 +806d7bac2d24cfff6090ba9513698292d411cdea02976daa3c91c352b09f5a80a092cfa31304dcfcd9356eaf5164c81b +934ed65f8579ee458b9959295f69e4c7333775eb77084db69ad7096f07ad50ad88f65e31818b1942380f5b89e8d12f1b +aaf50ca5df249f0a7caf493334b6dca1700f34bd0c33fe8844fadd4afedbb87a09673426741ac7cbbb3bf4ab73f2d0f3 +b2868642cfa0a4a8a2553691c2bef41dab9dff87a94d100eaa41645614ab4d0e839ec2f465cc998c50cd203f0c65df22 +a326513112e0b46600d52be9aa04d8e47fe84e57b3b7263e2f3cf1a2c0e73269acb9636a99eb84417f3ae374c56e99b0 +97b93efc047896ddf381e8a3003b9e1229c438cc93a6dbef174bb74be30fac47c2d7e7dc250830459bed61d950e9c924 +b45e4f0a9806e44db75dbb80edc369be45f6e305352293bcae086f2193e3f55e6a75068de08d751151fdf9ebc6094fa1 +87f2161c130e57e8b4bb15616e63fa1f20a1b44d3e1683967a285f0d4f0b810f9202e75af2efa9fc472687c007a163f7 +8f6400a45666142752580a2dce55ef974f59235a209d32d2036c229c33a6189d51435b7ea184db36f765b0db574a9c52 +a0ee079462805f91b2200417da4900227acde0d48c98e92c8011a05b01c9db78fc5c0157d15cb084b947a68588f146f4 +ab0612d9bb228b30366b48e8d6ae11026230695f6f0607c7fa7a6e427e520121ff0edea55d1f0880a7478c4a8060872d +ad65dfde48f914de69f255bb58fa095a75afe9624fc8b7b586d23eb6cf34a4905e61186bc978e71ccb2b26b0381778a6 +8c8a4847d138d221c0b6d3194879fd462fb42ed5bd99f34ebe5f5b1e1d7902903ec55e4b52c90217b8b6e65379f005a4 +a41dca4449584353337aef1496b70e751502aeed9d51202de6d9723e155ca13be2d0db059748704653685a98eaa72a07 +ae40e5450fd994d1be245a7cd176a98dd26332b78da080159295f38802a7e7c9c17cc95da78d56558d84948cf48242cd +863878fda80ad64244b7493e3578908d4a804887ad1ad2c26f84404dcad69ea2851846ad2c6f2080e1ed64fe93bbec31 +b262fb990535f162dc2b039057a1d744409a3f41dd4b70f93ff29ba41c264c11cb78a3579aad82f3fa2163b33a8ce0e1 +a7f6eb552b9a1bb7c9cb50bc93d0dda4c7ecf2d4805535f10de0b6f2b3316688c5e19199d5c9ec2968e2d9e2bd0c6205 +a50aa5869412dc7081c8d827299237910ecec3154587692548da73e71fa398ff035656972777950ba84e472f267ba475 
+924c3af750afc5dfad99d5f3ed3d6bdd359492cff81abcb6505696bb4c2b4664926cb1078a55851809f630e199955eb3 +a1acffa31323ce6b9c2135fb9b5705664de8949f8235b4889803fbd1b27eb80eb3f6a81e5b7cc44e3a67b288b747cf2f +8dec9fd48db028c33c03d4d96c5eecea2b27201f2b33d22e08529e1ae06da89449fe260703ac7bb6d794be4c0c6ea432 +aa6642922ccf912d60d678612fffe22ef4f77368a3c53a206c072ed07c024aa9dcde2df068c9821b4c12e5606cfe9be2 +a16ddf02609038fcb9655031b1cb94afe30b801739e02a5743c6cd2f79b04b2524c2085ca32ec3a39df53de0280f555d +b067d48589e9d3428c6d6129e104c681e4af376a351f502840bbea6c3e11fcbfdf54dadf6f1729621720a75ff89786c3 +b14a24079de311c729750bb4dd318590df1cd7ffc544a0a4b79432c9a2903d36a0d50ecd452b923730ade6d76a75c02c +97437bac649f70464ace93e9bec49659a7f01651bba762c4e626b5b6aa5746a3f0a8c55b555b1d0dc356d1e81f84c503 +a6f4cb2ffc83564b1170e7a9a34460a58a4d6129bd514ff23371a9e38b7da6a214ac47f23181df104c1619c57dff8fe2 +896d0f31dfc440cc6c8fde8831a2181f7257ffb73e1057fd39f1b7583ea35edf942ad67502cd895a1ad6091991eabc5e +9838007f920559af0de9c07e348939dfd9afe661b3c42053b4d9f11d79768cba268a2ee83bb07a655f8c970c0ee6844b +b41b8a47e3a19cadec18bff250068e1b543434ce94a414750852709cd603fc2e57cd9e840609890c8ff69217ea1f7593 +a0fb4396646c0a2272059b5aeb95b513e84265b89e58c87d6103229f489e2e900f4414133ed2458ddf9528461cfa8342 +ae026cfa49babc1006a3e8905d6f237a56a3db9ddf7559b0e4de8d47d08c3f172bde117cdf28dfdfd7627bd47d6a3c85 +a6a3f3e7006bc67290c0c40c1680bf9367982eb8aaf17ecb484a58c8e9c2a7c24932e2caa9aacc9b4fbf4c0abd087a46 +9093e05bd814177a01a3b8d7b733db66294e1c688c56def6e1827c0f2d9a97cf202721641bf81fb837f8581ae68cb5ce +87feef4de24942044f47d193d4efc44e39a8c0f4042fba582f2491a063e3a4640cb81f69579b6f353b9208884a4f7ce6 +975f9b94e78aac55bd4755f475e171e04f6fbddb6fd3d20a89a64a6346754a3ff64ecff8c04b612a1250e1d8d8a9e048 +87cde4d0164922d654cf2dc08df009e923c62f1a2e3b905dfde30f958e9e4dd6070d9f889712acd6c658804f48f3edb1 +ae8e22e158dda90a185eec92602831b5d826e5a19aab8c6400dba38b024c7d31c4cf265eb7b206dd45834f020b3f53cd +a4475807adc28aa086e977b65bbd7c8512119318c89d2619ea03a6739a72c3fb90c9622451896c7113ad4d12a3004de6 +97f1ae1e0d258a94532c7b73fa8ebdbbd53349a4d2d0a217fe56dfdd084dd879960bc6ff45ebb61b5dbf2054642800a4 +b3c832bd3691332a658b0caaa7717db13f5b5df2b5776b38131ac334b5fd80d0b90b6993701e5d74d2b7f6b2fd1f6b9d +a4b6af590187eb1b2cb5ae2b8cffa45c5e76abdb37cec56fc9b07a457730f5af0706d9ce0a17da792bbece5056d05670 +97b99a73a0e3145bf91f9dd611a67f894d608c954e9b8f5a4c77e07574064b3db47353eba8038062cebaad06a2500bab +8e5ca5a675de6e6d3916bd9ce5898bb379372afe3f310e70ff031bc8cc8fabfb7f3bfb784f409bb7eb06fdb4511ee477 +aabbbee4da1f16b5bbe001c19debe04745932d36dfbbf023fbf1010a2b1d54eb92fa5e266ac1e9337e26e2ddba752f40 +b13447c77496825f48e35c14f9b501c5056e6d5519f397a2580cea9a383a56a96994d88926aa681142fe2f1589c03185 +b89c55db39ff0e73dde7435b61e8a4d3e10f51dd8096cbc7f678661962e6de3d16f2f17a0e729cc699234cb847f55378 +82c36b7de53698a1bafbb311fefc6007fcefa47a806ebe33a4e7e0fc1c7b6b92a40a1860702cf9295a16c6b1433e3323 +8daeec8c88543d09c494a15cc9a83c0b918d544311fd2a7d09e06cf39cdebfa0cfc0e8fc0e3b5954960b92332f98697c +b18e55a1a7ae16be3a453d2bfa7659a7ec2d283dd46bdc82decef6d3751eeafc4f86f2416a22955c7e750c0582d4f3eb +b50c743462e2915bf773848669e50a3bcdb5a9ac5f664e97eaccf568c7d64a6493d321be0225de16142ce82ce1e24f66 +af69c9643805fb860434424b1608aababc593aaebc6a75fc017f7f62bb2b1da932b0b9bd5e6dcbba328422dafc06efd8 +b5947db4f809fd0d27af838b82eef8ab4fe78687a23ebc61c09c67eb7e8d0e6a310ecb907fd257859d5a2759a07c21cc +92c7960e163ca5bdf9196c7215102f8e9d88efc718843321c6e2a6170137b8ecec4ea5d5a5ce4c28012b6cdbd777dd01 
+b63f9509ed5e798add4db43b562e8f57df50d5844af6e5c7acf6c3b71637c0a2d2433f4a0627b944f0af584892208bb8 +8ef28304a9bfe5220af6a9a6a942d2589606f5dc970d708ef18bc7ed08e433161020d36fb327c525398cd8ecb57002f9 +b722e0410f896c4462d630a84a5a14e94289fc38ed6d513ca88a09005935cec334c480028efa1943c7a5e202ae8c8379 +b56b6672b488e64d4dde43571f9ceaa7e61e336b0fd55bb769a57cd894a6300e724e5f88bad39a68bc307eb7406cb832 +8bf493da411fd41502b61a47827731193652e6ce3810709e70869d9aae49e4b17a40437a7a0dcc0547dbac21f355c0da +9613b60a144c01f6a0e7d46ddde07402e2133a1fe005c049a56415ff90401765040b2fc55971d24b94c5fd69fec58941 +85e2f02b291563d8eea3768cf6a4602c0ca36568ffcf3d93795d642044196ca6b0b28991ea5898e7974ee02831a0ec70 +b08ef66703dd9ac46e0208487566fbf8d8654d08c00f03e46f112c204782ccc02a880a3f9dffd849088693cee33b7b6d +a0b19eeda6c71b0e83b1f95dffef4d370318bdea6ea31d0845695e6b48d5c428c3dbba1a0ded80964992c4a0695f12ee +b052642e5772d2ef6f49dd35c5e765c5f305006b2add3b4bee5909ca572161edf0e9c2bc3bc3bc7f56fd596360ef2201 +8261af164c768fec80d63fca6cd07d1c0449e9ca665fe60c29babdbd8a2b20cf1f556a4b24cd7341712468a731c21b32 +8a17016a1b2fc0fa0d9e3610ea80548fcf514e0a35e327f6b5f8069b425c0f0829af7e206013eab552be92b241be5ac5 +8eea25c680172696f5600271761d27ef4c8cec9ab22f01f72b2c7c313a142fafaec39e6920b96fcace858883e02eff7a +b8e0c590106e125c5bca7e7a071cc408b93629da0d8d6381f1b73fbdf17024a0cf13f679f5203a99bbbcb664b4a94e88 +b9943b29395258b7afdf1781cfaf131297a4f325540755df73401b2ec4a549f962952e9907413c39a95585c4aff38157 +8286eab4a04f8113fb3f738a9bc9c2deaf3a22bf247151515568703da4efe6450ab3970f5c74e978a2db7e8d795331b7 +a10cf383c8a7e3f0a0a5556b57532170ff46dabdcbb6a31c4617271634b99540aa575786c636d3809207cbf1d2f364d3 +a5af7eb998140d01ba24baa0e8c71625aee6bd37db4c5ff607518f907892219ba8c9a03c326b273bfd7068232809b73c +aed5f461e38fccc8b3936f1328a9747efcbceb66312f6d6eddce57c59570852767159f1a7d9998f63342515fef4ba9bf +aec3e94b029aa692bfe2b8dbc6c3b0d132b504242e5ebe0cad79c065085e2fc05550e5cdaa2353892a40ff1a062dd9eb +87c23703960129396018d0347f5dd034abdbd57232b74195b6a29af34b6197b3cd63c60ac774d525add96ae54d5c0fb4 +97964a7768216e1c84dece71ce9202cc64b6d483650aa6f6d67215f655f66cda14df0a0f251db55832c77bfd9b6316e2 +8167aaf24c8a023d0aea16b8c24d993618b9d0c63619e11a28feab8f14952bafcb0918ed322cbc0ae1b2e1786071819b +b58318bd62852ffb712fc58f368c21b641dde7b3fa7d7269974c7a7b5b3e1641569fc7b5f32ca49de22f4f993506d92d +b172e7911d5cd3f53af388af847b928947c711185aebd3328f8e6ed1106c161ae0c1b67d3d9eb237e9e66eb0672edec0 +a6834cf69b2c4433cf6e779bfbb736b12e73e71e149c38101d13dbacf6c5048db53994a6a039381df40bbd67de40fcd0 +882604aa3bb19fffd6db744b5cf4a2431b157dac06d0617e0703684a118ca90b2d22a7758a1de7732a7144e68b11b7f7 +addc128ba52bf7553b9ba49eff42004d388a02c6b6e9809abe1c0d88f467e5ff6cb0c82a8fd901b80dfc9a001f7b9997 +abf19604a3f0cffefa7a9ced81627f6aacb8d7267b52b825f25d813d9afa24af6d70da21450ed93eaff8b4d2a9b905a9 +a3c67e7bf02dbca183d86924611a7149556ee17cb3469793624da496b6c25617a9071925dd02aab9cb028739cb79043d +b1cea4284a3ac4d5b1c6f0947c6ec8365b3281ed15495bf328a907a9a02cdd186e7cb1ef080385b3399df786855985a9 +a6edb126314559e6129caf1111dc3c82ff914efce658b11f2c9b48081be1cf3f46bde482469d493379025a158d95ab1b +9843fd7dd424da1acc6f92f87fac364a8b0d4097d74b6b451386384966c85145d43fc6ecedf04271b0f963ac731fd93f +83852bedca03a97a2e63053cb102387866cbefe6707ebb6dae2d32a59c343079f1a863f299fd64d0ecbe024d0a1247d5 +a570e645a0679ebc6f0ca03cc8f7367b03c3886f3d9c787992de7f3e93360a170d3ac9ae7720999c727a887b1dc762bb +ad644c40555238f28844eed632c8972b63d2602098031d53b5599d1a874903e0d0c428e0ab12a209ea3fb31225578f1c 
+b64e9f92a14812ed31075f9fdd3324659a036ef2f293ef9ca6f6feb87d0c138e1ba74bc36a910afd22ff9b3c8ec7cfa5 +8f2d75a86d517dafac09b65596f4b89c4a9c0a7003632407504153fa297c9e3228e236948a5d5224b8df49a087c8e0e3 +b02d6ab9292ae336c8a74115f33765af2c9f62c331d70c087cf4c2979792bb3c2666f6699c017f8d4c6b378fd4bda86a +a923d660d2e55228b8bc74f87d966069bd77c34a776fa96f37b48539c85634482e514e2cb76cb8eb20efd85eb9c83fae +81d7ffb53090a6d512055ecfd582ca92805525a05654e39bb12653a6a8902a16e651ba7b687b36b8bea7186632c7e9e3 +83e9b33e29b57ae53d9f72bd4622ff388252333b4fa32ad360a5b00f3ffc8813b9cb8a1361454d3bb7156c01b94b6a08 +ad7d6bffe4d67eb53b58daa3fc8a5a60790c54fa42226ae12847e94c6de3b4365b3be39855a4f6a5f12e4803cdaed96b +a7709fed85abbee5a2fa49c5238582ec565da08c132d4912821491985bf83b681eb4823634bfe826abd63a6c41a64ea7 +b8fb6ed55741132a1053b6ca77bdf892e96b048488373ba4aa2f2225fae6d578724124eb6975e7518e2bf3d25d215763 +85e0c53089529a09b5bce50f5760af6aeafef9395388aa4b6144ca59953169101783347ee46264ec0163713a25fe7c63 +8f9e47a9c37b678e56c92b38d5b4dab05defc6b9c35b05e28431d54b1d69ac31878c82c1357d016f3e57ca07d82d9c16 +a81f508136ee6ec9122c48584df51637f768ccfe8a0b812af02b122a0fafa9abcc24778bf54143abb79eccebbdde2aac +931a96d2257a4714d1ef20ac0704438481632647b993467e806b1acc4a381cc5a9dec257e63239ba285deb79f92122dd +99fb0ff747bcd44b512bf8a963b3183ce3f0e825a7b92ddd179253e65942a79494a515c0c0bc9345db136b774b0a76b0 +a9dbb940b5f8ab92f2d85fc5999e982e3d990fe9df247cfc6f3a3f8934fb7b70e2d0362ba3a71edc5d0b039db2a5f705 +99011a1e2670b1b142ec68b276ff6b38c1687eed310a79e2b902065bc798618c0cdee7b2009ad49623ed7ae0aa2b5219 +9361e9f3aa859c07924c49f3d6e9b5d39a3df2fc1c10769202ec812955d7d3814c9e6982f4df3a8f3bdbfb4550cd1819 +a8aa23f177ddc1e7a7856da3eac559791d8b3f188c0b3ae7021bcb35dfb72b0f043c3699597a9188200408bc3daf6ab7 +a5a502ff673f6dab7ae4591a7b550c04ede22a45a960c6b5499644f721c62b12b9e08248e7f8b8a59a740b058d2a67e6 +ad374f80f0b52bc5a9491f79a547ce5e4a3ce4468a35d7dbca8a64083af35ab38eff9aa774ccba2e2e1e006e45cb0b85 +ab6851827125e3f869e2b7671a80e2dff3d2d01ce5bfbeb36cbaf30c3d974a2d36fd9f7c3d331bb96d24b33dbd21f307 +96658f6a2d225a82f7ccee7f7a7e476967e31a0cd6c62859d3b13ee89702bb821547f70ffd31cb46a6a0d26a93158883 +878f59ff2590bc3d44fdc674717589800748b78d543d3c0dbb50125b1d6011d6a083f10ab396e36b79f2d89b7cf51cdd +b8bdfb97829c5d973a15172bfe4cb39620af148d496900969bd7ca35de9b0e98eec87af4e20bef1022e5fb6c73952aa0 +a292a78b452743998aee099f5a0b075e88762222da7a10398761030ffcc01128138d0f32fccf3296fcbea4f07b398b5f +85da44fdd7b852a766f66ba8804ed53e1fc54d282f9a6410106c45626df5a4380cbea2b76677fdfde32446a4d313742a +84bebf036073d121e11abc6180cba440465c6eaadc9a0c0853a5f1418f534d21cccf0cfc62533eaeae4653c7b4988046 +923dec006a6af04ef675f5351afffffd2c62a17a98f4144221927c69f4553dd105e4fcc2227b5f493653d758cd7d0352 +a51eda64f4a4410a1cfa080d1f8598e23b59856436eb20a241e11106989fbbb48f14c2251f608cbf9531c7c442b30bf7 +ac6d26ae7bab22d49b7fba7fe4b8cf6d70617977008c8290787c9da1a4759c17c5e441efb3dee706d5d64d9d2ace1de5 +ab5138b94d23c1bf920b2fb54039e8a3c41960a0fe6173261a5503da11ff7b3afdb43204f84a99e99888618a017aac1b +8c85647a91e652190eee4e98a1eec13a09a33f6532926427bf09e038f487e483f7930fbe6ff7a2126ccde989690dc668 +a6026ab87cffec3e47b4c9673957d670cb48c9b968d2ad0e3d624d81c1082dcebbc70d0815cbd0325e0a900d703a6909 +ac4f6ff6baf8374a3c62bdd5a8d207d184ff993f6055bcee1e6dcc54173d756c37c24570d6462395add6f7871d60b1ae +a0dd6bc93930d0016557588f2598b7462ca48cbed637c8190be0fb4811e4576217ca9fc3c669c2a4db82e3f8bb24acaf +a67c1d79f7e7193a23e42928a5cc6a6e8e0c48b6b286607dbcfaaa0f10a7ba29ad62d1d57ca28c486794f0908bece29c 
+822f411bab4882202ed24e67c84e0c9a8da5b3389804ed9dfba0f672e3e1457ea76cad0cb935dbb3d7a39500fba5fe12 +8a1198572323689300a9d7db2e2bcb7c519392e5d3d33e83cd64bcf1517a7dde52318a98203727b186597702c0eed258 +8a84141b02f1d037c68d92567d71cda3a0b805d1e200b1d3fff3caf9902457cbfbaac33157b87ab0bb9e4fe3bac882c3 +8070ace16d9eef8658fdcf21bed0d6938f948f31ca9d40b8bdb97fc20432cd2a7ef78eeefc991a87eae7f8c81adf9b19 +9522e7123b733ce9ca58ab364509f308a1ead0915421ccede48071a983fd102e81e1634ffa07a9e03766f167f5c7cb5e +82cbdf97a755e952304f5a933fd4d74a3038009f242dac149595439130a815e9cc0065597c0b362130183a4c4a444173 +81e904f9b65cd7049c75f64c7261e0cbb0cc15961ffcac063d09399d0d2b0553b19e7c233aca0f209f90cf50c7f5e0b2 +8f5f6ea87429542ea04ad3eb5fc7eeb28fcf69c01c1a5d29b0de219524f6fba90c26069bfc9092379fe18cb46274393a +a4e5815481eb33b7990d2de1a3a591c1ab545b64fbeb4cff8c71b6bcb04d28940097899062bf43b27c5a8f899616703e +a7afe6066681e312882b3b181f462a1af2139d9bd2aefffae7976f3fc357bfd8fbd6ddd4e5e321412f107736e77f0cb6 +b8ab102d7ff8d46b055095d8fb0ec2f658c9e18eee523c295b148b37f8342c120798113553b8bfebf2a11f27bc704cc4 +862175ecc7e0e294c304a0352cd0f1d11b2603d326bb0e54e02b6cc8d04d01ac31c8864e9395aa1f3b90b76bc4397f5b +a4ea51ef3d82509f0e4eb6af705fa7530921cf9512cb5bf030571e69f4504a299297219a0a7e40db1b45165a5ea3a3f2 +a6fb8b573e2ba6db0e8aba53a489e99bebe533c0fcd947dbfa732e00594f03f4e8609ccc44d8215986d38bc3d4e55d48 +93fe8e0bdd5d66df2bd18be5963e864bddfcdcd3298590e7c3b11d99a070a4948fecef46453f19960bbfeada37979613 +acbc45bc55c7080b45c69a3db80cbfc0267006dcf49c47330975aeff2a8ac07b206e1b1c3a515e50866ff510739b92c0 +94a577df0983e4ee3d6b80c73d7e8e3bb78bd8390ff56fea350e51bdf5e0176b8494e7e81dc7b1d842ada961089cd1eb +81eb1fbe9e9c89f5818d0ef98e694da86e88625f0a37cfe88e6de69f90e58297e67f1d5c9d71263b523b63e42685975a +a81a2391ea4d0f65ab4325196559d67e2648b3f1e464509430b40d9948d5b0fc01c337d9b51048a93c4d62e6b73e1e8c +849a026e55ed77135138836c9df67883763e4602357d8566da2ee2505d135d44061de0c070cf333ffb9ac2e55a0894b2 +8e272cc5734374c003c7b2e6ba833eb99b6be608da04e576df471c24705b6b2a790549c53e7971df2d9f0b88d0f570c6 +b0f9e6d985064aa311d4a147f41007fdc576b7b9194aa4b8712bf59a76a71543fec2ee3db21bd3d30d4096f25babc543 +96331837f0d74e2ba6cb1bfaddf4b1fb359bf46cb6c3c664938eb030e56bc85a5ce17bcd60b7fa7b72cb0ba1f3af0b5b +a0eaab6de4b5a551896e7d26153fb5df4bc22a37833ec864090b57b5115b0f8f1279e855cea456bb844802b294b0dbb7 +955e87d3b966edff34f28137f871881c59bbbc6d69986b739867807680ca22b5e3272ced1d25854ed9700d87f133848b +9270a6db157a8ce78a1af6bfe2b5bbe7b621d56cc8f9940a03b5a5f600848b87b05d83595b2a3a315d4b7f4687c46085 +9043328f2dd4dd85e14c91237a3478dc1eed239164924b53d1de9364d76c81315afa9639b58eedb1ab2122e2ae2e7cfb +857fe9f7d00b03bce367de7f789d755911a5f85d78044f18311ecd9b955e821b4a50228347260ba1205aef61219001fe +a0f878050367a7103fddf380908da66058ef4430eae1758335c46c24f5c22fefb0753991b3a47dba5c7eaafa4d598178 +ab5959296b1af14d2878816c7da9926484cbf8896b7eeac8a99dc255013319a67a0209025e1f8266ffd8cd7d960bdc87 +abe53abc57ea46419dbe0ac1f39eee39a4feae265e58b50928eb0695e25938a16a8b00e65c1313837dc3367297e2c258 +93e3e42ed6ba9c45d4e7a4bf21c1e469efafded1f3be9931a683dbb780db2494742fd76c9ad29fd7d12da2b778ede543 +ab3e64035c488a6e63496ddb2de9648cc63a670c5d4b610c187d8ceb144fcc50b016046f50b10e93b82937ebe932ac08 +a3a8fa898f489b313d31838ad9f0c7ffe62ef7155de5da9ffe6ecd49a984fac3c6763e8cb64e675e1c4a0e45e7daf078 +8356b26aa7c9fc9734b511480dad07b164cfec1324ad98eec9839a7943f2889d37c188d465515ad4e47c23df641c18c3 +83c4476f829e0fe91da2353d5b58091e9335157941e89ca60ccab1d7fdd014bcf21bd55249805780ddc655c5c8c2536e 
+814f6e66505b2cb36de92c0de8004d6d094476522e66b9537787beff8f71a1381ed9f2b7d86778979ad016a7dae6cbac +b1cd7f6da4a625b82bea475442f65d1caa881b0f7ce0d37d4b12134d3f1beb3ad4c2f25f352811e618c446185486adb6 +a71b918481b9bda667de0533292d81396853a3b7e2504edd63904400511f1a29891564d0091409f1de61276d2aebc12a +a2cd3d4104ec5fb6d75f5f34762d5e7d2ff0b261bea5f40a00deec08fbdab730721231a214e4df9b47685d5bacfe37c6 +807f2d9de1399093bf284814bc4093f448f56a9bde0169407cdc0e7d2a34ff45052aef18bcb92f0ac7a0a5e54bd843e9 +abeb03010c3ac38587be2547890a8476dd166ac7b2a92c50d442f031eaf273ad97114c38e57fe76d662c3e615334ac0b +b90a688da4b0bf65ff01bcf8699f0cba995b3397fcbe472e876ae1091a294463e4b94350ae8bd5c63b8441089e0884fd +ad88db4afb177931788fb08eff187e15ad739edc7e1a14c8b777b6bf668aec69ca4749773f94250c1fdda3b59f705f7c +9886809f9ae952797c6527c6db297d2aa3d5209b360efe6a19970575a9f78aee3c21daadb8e8dfcbeeea5290238d16d9 +930f486e95d7c053c9742e6f0b31e6d4fa2187e41229e46a074b469aafb87880aa8e972719b363049fc9fe2db8f03ce2 +8d229af4fa08bd8aeb5fd9acfee47571eb03fcd2f19073b94cd27e2a6735029d31f123249d557f8d20c32ac881eae3aa +84576ed5aebe3a9c3449243a25247628993fdb2cc327072418ea2f1d11342756e56e9a82449bc3ea6e8eaecabc62e9b5 +b775cb86cbec9c46a4a93d426379c62872c85dd08bccda39b21cb471222b85b93afd34a53337b6d258f4891c6458e502 +8be1540e6b535b416b8d21e3ecf67dfb27a10fd4010f9f19426422edaeb0a4961d43ff3afd1db0994170056ce4d77aec +b9c7438e90a5501a4d05bbb8ab68d6db7e9baa8927231a5c58715ee2ab76ca1da0e94910a076958654869148d813d0e9 +aa9bed1c4d2e7cbc2e1a884c8998773f7cc6fa9d6493c8abe8b425114a48305c3a43a1abda2292177ffd39ef02db4163 +897b395356047cd86f576cfc050f7e4546ecd4df30b2c31ed8945797b81dd4ed9b9106cfbe6d7dd8bf91882e3cf1f42e +949a37e1037d9464b2ccd3ad23eda7089570d6b5ffa18025d2548a9df8829de8d62960f04a603f21eecbca5893d45284 +b8a0642f68ff169ffbcd8cd684fae75d96f9bd76949472775bf155edc55a3d9c3e6f0299ee73a6cfb96289361fdbe9ee +a1273141510fcddd89b9b92c19a268dadd1528ad85744b8174684c9b56668e6b35dabb05f2b4cc6ef5611eaea6052f27 +97c7415c82de83ecc066eb922268b8205ad7266c65b2b8f7e0aadac87f076c738cea72f9b0f069b8d28cf9d5438b8287 +b32c7005380c848f71092a74297555dc6022369fc2a4f285e586ac8f53f6bd354fbe4b1f8a4cfb406a101103bf87bb64 +91b48eeba52f02d04f536d32112038f8ba70bb34284fbb39e0f7bae2e08b3f45ad32e2f55d1beae94b949c15652d06a1 +99e24f5ea378cb816a4436af2ee7891ac78a2e37c72590be0abd619244a190fee51fc701b6c1c073611b412cb76332c9 +9465d1e73a1a0a5f7b1cd85f4fa4f5dee008b622b14d228d5cd5baeec174451e7ae93c5de688393d37cc24ce15df4139 +a6ac3986ee01debdacb5ddc1e2550cb4f039156df15c7d5752b79f333175b840bdca89c4959a523e58cf97bbd6b2039e +b7f7a5cc1b1b6145988170d619c170c130231abbe0b5143a9bccaaebeef9ceb1c16e26749bc9dc5650fe91f92fa1b79b +854cb04f1557457383a401d79a655adfd0a4b706ea2bbc6262949c8d657efcfdc9c7960cbe1a50b5eebb361c5e378f80 +8dd199dccbdc85aeca9ddcb5a78dd741a452f7a0d3ceb6546d76624bad2fce0e7e6c47ee30d60bf773f18d98503e7f9c +889e1ca9f0582be9bf5f1aede6a7312b30ea9bed45ab02d87182a013430f16007ae477ee6a823ae86c7fef7da016a0ec +892a60e63edfb3e7a6cf2d0be184413d214401fc1e6c004ca2902c3f1423728bf759a136e6e715d26d5bb229c75cc20a +a2287cd092261b39d22dcb1fa19512590b244771bb69fb62eda72f12be37d48e408b3e37a47608f68d743834edee7f15 +b3b6afb950bbec0ff631bdf18af433e68adc63d02cb479704f24329ca6b6edd9a3d1d606563dbdce6038b676b85130b9 +847da90f37b294509de51ab6521fdff12d5a1ec3cccaf730aa744da7e54b85fd9c70618787e87c0ba9947ce6c81387fb +ad872153c00bccac75bdb30d1ab7044d814f4f8655ff26421d48fea04fb21d4dc82c1900620a57d13adc45c1062a1817 +90fa5ee98fd7ec719f2a8543bbd0ff45ac69296c2416fc8666d05de3deea1017079a68aba55540a19585925803c8335d 
+962ba6d029e9176d0e8c80a21f2413f7322f22a9e9a32c933697a8b0e995ce25bea5264736a75718b3d330e215a58a05 +a446f9530db30c5e9c1b3844d635e5c2cd311cc4537ff277fe83dd1a0382bcfa73beb07aaa0cf5a97d24c67e688086a4 +8766b2053f16c72db387abe18b43d7b357a542916c9b8d530ee264e921c999494d6eb1e491352ecdf53758640c7a246d +83f32f511f7b0233662acfc14f30df345af99d2d6c777ce0b4bcdc4dd110533f30b45071df17230aaec392cc482355e1 +82e3521bc9519b36f0cc020225586b263e4feb57b533b38d8e89ccf8d03f301d94da90efb4902002732fbf3876697f38 +b5d1ea69c97ceaa34a720bb67af3fcf0c24293df37a5f6d06268b1eabe441531606954ac2598a1513f64231af722b3a3 +956842696b411e6221c5064e6f16739e731497e074326ef9517b095671f52a19e792d93fe1b99b5a99a5dc29782a5deb +b19b5658e55c279eb4b0c19a0807865858cbec1255acd621f6d60c7e9c50e5d3ee57da76b133580899a97c09f1dd8dac +89e6a8b916d3fcc8607790e5da7e391f6bc9eae44cc7665eb326a230b02bc4eb4ef66e608ccc6031048fc682529833d0 +b1a210bc8070ed68b79debd0ec8f24ec5241457b2d79fd651e5d12ceb7920e0136c3e0260bc75c7ff23a470da90d8de9 +85b1954278e2c69007ad3ab9be663ad23ae37c8e7fa9bc8bd64143184d51aea913a25b954471b8badc9e49078146f5ac +98bf63c7a4b200f3ce6bf99e98543925bc02659dc76dfedebe91ec5c8877d1271973a6e75dad1d56c54d5844617313e1 +b7404b6e0f320889e2a0a9c3c8238b918b5eb37bcdab6925c9c8865e22192ba9be2b7d408e1ea921a71af3f4d46806d0 +b73cbbebf1d89801aa838475be27c15b901f27d1052072d8317dcae630ab2af0986e56e755431f1c93f96cd249f2c564 +95b2027302f7f536e009f8a63018da6c91ec2b2733c07f526cc34cbcfa2f895ccfd3cc70be89f4e92c63c7ddc2a93370 +9201d9ff5d0b1222bfa2345394f88ddf4fe9282acf51bee9b18b96bb724fdf8e736d7101acc2795a34e72f9e0545c9a8 +acbff7eb160f427d8de6f29feeddfa8994674e033a0ccdc8e8c73f9243968f1a6379da670a7340f422892d50c97113c7 +97ae8d03352c3729e1623e680dd9664f303b3bcfb844ef80d21e9c773a247967d27b86c9326af29db5eefd0bd3d4fac8 +8e53ae5c22f5bfa5fe4c414dad6a10b28a3e5b82a22e24a94e50ce3b2bf41af31e7ba017d2968811c179017b78741ef0 +b5ac7dd150247eb63dfb7dd28f64b1bf14426dc3c95c941e8e92750c206c4c7f4ad1a6b89e777cfe26ecb680dbf0acb6 +99ae2e4652ea1c1c695e7ea2022fd35bd72b1a0d145c0b050da1be48ad781a413dc20fbda1b0b538881d4421e7609286 +b8abe1fb3a7443f19cd8b687a45e68364842fc8c23d5af5ec85da41d73afb6840ef4b160d022b2dad1a75456d809e80b +842619c3547e44db805127c462f5964551f296a270ed2b922e271f9dc1074fdf1c5e45bb31686cec55cb816d77853c01 +902dff769391de4e241a98c3ed759436e018e82b2c50b57147552bb94baddd1f66530915555e45404df9e7101b20e607 +82e4f2ee7c7ca1ee8f38afa295d884e0629a509c909a5464eb9ea6b2d089205478120eed7b6049b077b2df685ec8ba48 +aa21a68b0888e4a98b919002a7e71e6876b4eb42227858bf48c82daf664c3870df49e4d5f6363c05878a9a00a0bcf178 +a8420cd71b1d8edd11ebc6a52ba7fc82da87dd0a1af386d5471b8b5362c4f42718338bcbc302d53794204a0a06b0671d +98c686bd3a994668fbbd80c472eed8aedd3ab5aa730c8d3ce72e63fb70742e58525437be1f260b7ecc6d9d18a43356a0 +aca0b2df9ec8ede0b72f03b121cded5387d9f472b8c1f3a5f1badd5879fb2d5d0bbb6af1a2dd6bdebf758cfceadbe61d +93b1abd9cb41da1422d171b4dbf6fbcb5421189c48e85c9b8492d0597838f5845198494c13032e631c32456054598e1d +a246ab3a47f7dc5caedc26c6c2f0f3f303ed24188844ab67a3da1e793d64c7c7fe3e5cc46efafbd791b751e71de0614c +b9b52095ca98f1f07f3b0f568dd8462b4056c7350c449aa6ce10e5e8e313c2516ac4b303a4fc521fe51faf9bf7766ce9 +8e2e9d26036e847c2a2e4ba25706a465ac9fbb27804a243e3f1da15dd4084f184e37808661ec929479d3c735555085ee +8b8c4f4ad5c8e57e6a7c55d70ef643083d4b8dac02716ea476d02dbbb16c702a2f2d5dd5efe3aec7704d2b8cdafe3959 +a800afea30d0df333805d295bac25419b7049d70044be00c7c85a92a0503ca471001bc1e6552323f1a719eb96616fc20 +868bced4560e1495b8527058ebc82a538b7cf806f8d8fe8eeed6981aba771de4d5e9f03cbfc7157d38b9f99cdea87b96 
+86b86258b0c1feb988cc79f6c4d4b458ff39428eda292f9608a5fc4c3765782c8c23c66f82d7538e78e092cd81d69a56 +9370eac15de2555824c7d48520a678316a7bb672e66f8115ad7dbc7c7b1f35a7718e8fa0c35f37e3ef2df32dfa7ca8d1 +ae200bc5be0c1c8c6ec8e9fd28b4d256c6f806c0f270766099e191e256d67b9cceda2cc2fed46dfa2d410971a7408993 +af2428c77b2b9887ecde1ea835ed53c04891547fb79fe92e92f9c6009cdfffa0cb14de390532ad0ef81348b91798bd47 +a9069eef0316a5d13d1aa4cef0cf9431518f99b916c8d734bd27b789828ae03e5870837163ea6ad0be67c69184b31e8d +b1b1ce6d529f5a8f80728173b2f873c8357f29644b00f619c15111224377ae31a2efb98f7e0c06f5f868030aab78ed52 +b89c98beef19ee7f300e1c332a91569618ef8bf2c1d3de284fc393d45f036e2335d54917c762f7c2874a03fe4f0f6926 +8264f993dceb202f8426339183157e9e0e026d4e935efe4cf957eb14cd53edcdc866305fb1334cdf0e819b69eafbaccf +aebd113f73210b11f5ac75b474f70a2005e5c349345003989175dffa19f168abd7f0e28125b18907502fff6fcc6f769b +9993ad061066ca6c2bb29fe258a645089184c5a5a2ef22c811352749a199be3a3af3a0d5ce963febf20b7d9e63508139 +97952105000c6fc6c2dcae1ebdb2feae64f578d26a5523807d88e6caf1fe944b8185e49222d06a4553b3bdb48c3267a2 +82dd955f208957d74693bed78d479c9663f7d911f68ff033929418eb4a5c5dc467589ca210c1ba3c2e37d18f04afe887 +b816fc4763d4c8a1d64a549c4ef22918e045ea25fa394272c7e8a46dcb0c84d843d323a68cc3b2ef47a5bbb11b3913bc +a7a87ba4d12a60ee459aad306309b66b935d0c6115a5d62a8738482f89e4f80d533c7bba8503e0d53e9e11a7fd5fe72b +92b36d8fa2fdee71b7eea62a5cc739be518d0ecf5056f93e30b8169c3729a6a7ed3aa44c329aa1990809142e0e5e2b15 +8835b6cf207b4499529a9034997d2d3bc2054e35937038deb9c3e2f729ebd97125f111c12816d30b716b397016133c52 +acf14cd6d978ba905cf33b9839b386958b7a262b41cbd15e0d3a9d4ef191fcc598c5ab5681cf63bc722fe8acfda25ce6 +b31302881969c5b283c6df90971f4fb2cc8b9a5da8073662da4029f7977fbb4aaa57dd95b003a9e509c817b739f964e7 +b74669e1c3fa7f435e15b5e81f40de6cfb4ad252fcdfb29862724b0a540f373d6e26c3d600471c7421b60a1d43dbeb0f +861d01615cba6ca4e4ef86b8b90f37fa9a4cc65cef25d12370f7e3313b33bb75de0953c8e69972b3c2a54fe110f2a520 +a58a56820efaf9572fd0f487542aaff37171d5db4a5d25bfb1a5c36ca975eb5df3cb3f427589e1101494abb96b5e4031 +af13d0a6869ef95cb8025367c0a12350800c6bc4ae5b5856dcb0a3ca495211d4139f30a8682d848cb7c05c14ae9f48cb +8c385767d49ba85b25a3a00026dd6a3052e09cd28809d5a1374edf4f02dc1beed367055b0dee09102c85985492b90333 +b5129fc2fec76711449f0fcb057f9cf65add01b254900c425e89b593b8d395fc53bb0a83ddbd3166acc6d2c17f7fc2a4 +86bd01b3417d192341518ad4abf1b59190d9c1829041e6f621068bce0bef77ec3b86875b7803cf84ff93c053c2e9aad1 +a74fc276f6af05348b5fabccb03179540858e55594eb8d42417788438c574784919fb6297460f698bd0da31ce84cebfc +967ed3ec9f1fc51f76f07b956e1568d597f59840ef899472a3138f8af4b4c90861e23690c56b7db536f4dd477f23add6 +b9e678206de4fc1437c62d63814d65f3496be25a7a452e53d719981d09c7e3cae75e6475f00474e7c8a589e2e0c6bfa3 +b028eaffaa4ff2b1b508886ff13c522d0b6881998e60e06b83abe2ac1b69f036eece3ded0f95e9ae721aea02efff17b6 +935f82de9be578c12de99707af6905c04c30a993a70e20c7e9dd2088c05660e361942fa3099db14f55a73097bfd32a44 +96a1cc133997d6420a45555611af8bcd09a4c7dbddf11dbe65aab7688cc5a397485596c21d67d1c60aae9d840f2d8e48 +80d117b25aa1a78e5d92ea50e8f1e932d632d8b37bebf444dcc76cc409322fb8eface74a5dddab101e793ff0a31f0a53 +893229136d5ab555dc3217fb4e8c6d785b5e97a306cdaa62f98c95bad7b5558ed43e9a62a87af39630a1563abd56ec54 +b7ec1973ec60bd61d34201a7f8f7d89d2bc468c8edc772a0ba4b886785f4dadc979e23d37b9f7ef3ff7d2101d3aa8947 +b6080ca201d99205a90953b50fc0d1bd5efd5eadbfe5014db2aeb2e1874d645ab152fb4b0ff836f691b013b98ce7c010 +b546e66ec0c39037bbaa66b2b3f4704a6a72cf1924a561550564b6fcf41fbc2930e708cd5cac1d05e12a4b8ec93ff7eb 
+8abeed90a01477260f4b09fff8fa00e93afe727e8eed6f111d225c872a67e6ab61d0472ab6add3fe987744e16f7c5268 +8e02342d5cc1836ed21834b9ef81686172cc730f0412479db5f590b0ff7a729a0e986ffed16d6ecafd6b83d65922ca5e +b05660605cf8e8a10c8d3c77cccbe4e7179fa27cc829571f6b722a58e65e4e44d7fe977446118e9da2d2f40af146cc2d +942a00e006baba6d025cbd99297bdb0cbf3d84cddf849b1b5a9fe9ef1745352fad81313cce5d7622d6652096a8fa065c +aace8212b3d8dbe44ac97460a5938a3b803aca9bd00d8a643a859351daf391b22d1fd2a6b3e0ff83cc9ee272a1ad7686 +965a9885a5259197a75a19707a2f040e0fd62505e00e35ebe5041d8467596752aedf0b7ec12111689eceb3e2e01ecfc8 +81d58270a4e7ee0137cb2bf559c78c4fd5b3a613468a8157b6a9c5c0b6ca20a071b87c127d59cecc3d0359237a66d890 +af92b6354fbf35674abf005cb109edc5d95845e3d84b968e6001c4b83d548715dffc6723ac754c45a5ace8cd7dd30a24 +b112caa707f9be48fdde27f1649149d9456857f928ea73e05b64bb62d597801daac0b89165fea76074f8b5770043f673 +b6e7380746da358fc429f676b3d800341e7ab3f9072c271310626ae7f67b62562ff76c63bc9f5a1dbc0e0af87752408a +a45e9e8d0931207ebc75199aa0c983134aa97f771ff546a94a3367bcedf14486f761e7f572cf112e8c412018995fdaf4 +854381128de5bfb79c67b3820f3005555f3ee6f1200046ebbfaee4b61b3b80a9cebf059c363a76b601ff574b8dbf0e6b +aa1b828a8b015d7c879669d5b729709f20a2614be6af6ff43b9c09b031f725f15b30cde63521edda6cd4cf9e4ab4b840 +8f28f6b62c744084eeddcb756eced786c33725f0f255e5999af32b81d6c6506a3f83b99a46c68fc822643339fe1b91c5 +ac584e76a74cafe4298ca4954c5189ccc0cc92840c42f557c40e65a173ea2a5cd4ae9d9f9b4211c9e3dfd6471fc03a1b +a413365df01db91e6a9933d52ab3e5ed22d7f36a5585ad6054e96753b832e363484fb388c82d808d1e4dfb77f836eab9 +8a68c51006d45bf1454a6c48a2923a6dbeb04bd78b720bb6921a3ca64c007043937498557f0a157262aac906f84f9bf8 +b93ff8b6c8c569cc90ee00cfe2fc3c23cccea2d69cbca98a4007554878311635cb3b6582f91636006c47b97e989fe53d +b9a8a44d54592511d74c92f6a64d4a8c539a1d8949916ef3773e544f6f72c19a79577de9878433bd35bb5f14d92f411d +94f066a7e49ae88d497893e4ce6d34edc2dc0b42fe03934da5d4ed264d1620d506fcc0661faa90a6cf5083e1720beaaf +b42b102adef8f42c1059b5ca90fe3524dcd633cf49893b04b4a97a1b932ca4c7f305cebd89f466d5c79e246bad9c5ced +86b560d78d3c5fb24a81317c32912b92f6ea644e9bedfdea224a2f0e069f87d59e6680b36c18b3b955c43c52f0a9d040 +a3829fa7e017c934fa999779c50618c6fb5eafb5e6dec0183f7254708a275c94ba6d2226c5ca0c0c357b2f2b053eea93 +9337dda730076da88798fd50faed1efa062f7936a8879ea4658c41d4fcf18cee7120366100d574536e71f2f11271b574 +853d09a30f4342f5a84c4758e4f55517a9c878b9b3f8f19e1362be9ae85ca0d79c2d4a1c0c14f5eff86010ad21476a7a +b0bc74cb69bdd8fdffca647979e693ad5cbf12a9f4ead139162fa3263bfebef3d085aab424ed8c6220b655228c63c6b1 +88d8dc8faf3aab12ba7180550e6a047f00d63798775b038e4a43a3b40a421a3f5f152a7e09f28ccd7198bb8cefc40c07 +88db2e3b8746415d0c3e9f5706eda69a29d0b9ee5135ad006060be7787f4f1f7069e2e2e693c5e10b7c3d5a949085ae0 +b5bd830d2f1c722188dba2690d21b7b84b92cbdd873a55aaa966f1d08d217bfc8cffe8caea68868f3850b90b4ab68439 +b5ad4be0c9626a33fce6c8501297bdde21b07b88531451912ed41971a4c48fdd1036d8a4994a99a7fbba4a5901a7095e +b0e1337a2a1772191faa91302f1e562e7cdc69ba5b25139e7728ce778a68a7fa9817f852ec8e04a159122cff62992ec6 +b4fd4a4c1be8bc7e4e2bfd45404c35d65b75f45fb19ce55c213a8035b41f1ccbce9766f3df687c0d7cd6cdfc1abb00a5 +814bf565ece6e9e2a094ffbd101f0b9fea7f315a2f4917abe2bf7d070ed8c64a2987bd288385a42fd336ed0a70a9d132 +af860af861dc80894ed69f29c8601d986917ec4add3d3f7c933a5e9d540bc8ff8e4e79d0bb01bbc08fa19ef062f2890c +b66d33fcf3cd28f15111960ffc6ed032c3b33d4bb53d035ab460cc5fa7ce78872f0476d0bb13f1d38f2672347d2d6c4d +89603ae1a5dd7c526936b86a3b69b7b1d0bdf79ba3cc9cc2e542ec801a6126d1514c075d6ad119fe6b6e95544ffe7fbe 
+8a1b097f46a62d85cff354d1e38df19a9619875aad055cc6313fdb17e2866d8f837a369a9ee56d4f57995e2b0a94310e +8dc165d86c7f80b0fcd4b6f90d96cd11dc62e61d4aae27594e661d5b08ef6c91156c749de8948adfaf3265b1d13e21cf +98e3173772c3b083b728040b8e0ee01dc717b74c48b79669dd9d2f7da207af64ccd7e9244bc21438a5d4ac79b88e9822 +924d168099b6952d6fe615355851f2b474f6edfcd6a4bd3ad2972e6e45c31bf0a7fb6f7fca5879a0de3ea99830cfb5bc +95452f0b7efda93c9e7a99348e13f356bad4350f60fcd246a8f2aa5f595a9505d05ec9f88b1fe01b90ecd781027b9856 +b95e8af516bb0941fc0767ecd651ada2bc64cc3e5c67a1f70048c634260c0f2c0e55ed22948e1870c54590b36683a977 +82f7feb71e746d5ca24455e3f3e57e4eade92669ab043e877b836612efd3de82009f0555e5d8811bff9f2b75fc57a01d +87623c02caf590ea84cf4a84d1be501f89262e26eb463f2f94a2d3042889c051b058823c3367a989498e46ff25edab16 +b88da847b1ef74c66f923773ce8c920ca89751335fde17b3a98c0603862069a2afbf35b1552b43ad64dccea69f040ff8 +96b734758c823e5ce5b44625c252957e16fa09f87f869baac195956052dc92f933f377b288c7f63b8028751cbbdca609 +a23cc5fbbe5cb7c1d33d433cec4e502f6548412e2374e285d307f75e98280b0c0af4f46bba18015be88cdf7db8b1239c +8bd5bbe04bc929ca8f546e673803ec79602f66ec24298d3e3b6bf6f2c25180fc0032ea6f86c38a6e0ec20ff4eaafc7a1 +b95768ca113e5d57ad887a1cb5ef84ce89007ce34c3156cd80b9aa891f3ebaa52b74c0cb42919cfbcf0cb8bafa8085f9 +a117f99045f65e88acc5a14fc944f8363f466e4a64057eb8fc64569da5dd022a01f2860c8e21b16aff98aebdf89461b7 +895cda6503907c98c43477eaf71dfd26759032523691659f13662ca3a967d93bbc5be342d168223cef7e8a333987d6a0 +a084d77d913d3ec0586ad5df2647610c7ed1f592e06a4993a5914f41994a29c4a8492d9dce2e14d8130c872d20722920 +84a328b73c64137bb97a0a289b56b12060fa186ce178f46fe96648402f1b6a97d1c6c7b75321e4b546046c726add5a08 +b7c35087b2c95127ce1470d97bceb8d873a7ad11a8034cc1cba7b60d56f7e882fc06796048435a9586eab25880787804 +ab05e3394375ee617c39c25c0ec76e8a7f2381954650c94fbcd11063ea6772c1823c693d2d9dd18bd540a130d7b92855 +82ba5907051d84b37fd9d28f8b9abebc41fc4aaa334570516ca2e848846644016356d40fa9314543017d4f710d193901 +9170517b6e23ee2b87ff7c930cb02b3e6bd8e2ae446107b5b19e269bf88f08de5ded3d81a2ff71b632ca8b8f933253a0 +93dc0e3f6234b756cdbb3fe473b9214e970972e6bf70803f4e2bf25b195b60075177a1a16382f1dee612a4758aa076ee +b4b49fac49cdfccda33db991994a8e26ab97366545166cc7140aef3d965529f96a5dac14d038191af4fb9beb020ff6d5 +b826537670acdf7a8a45ef4a422d5ae5a1b5416ad0b938307518d103cc7ba78e495ea200adc5941414a70158a366e8a2 +8ae3588b1fbecbc769c761f0390d888e34773cf521d976ee335f6c813bf06dad38850871ac8a8e16528684f1e093d0c1 +ad9c00b8dccdb545315fbf26849135699c6aa3735f89581244281154c906aba80d20c1e7f18f41acc61e0565f8015a33 +954ce68146c05fc1c9e536add3d4f702335d93c1650b8c1fad893722a81f915eee2d38275dad00ce87f3f5bc90ef7341 +8243feaeff9a12f5aeb782e3dd68609ce04ecde897c90fd8a19c9c5dace3cf43bd5bc0f1624bf7fd2607ca0d71adbba8 +a8a1be55259cd27898d9d60a61998d8da2bf2d439ba6eedb61d6d16dacc4a81ec706b9196dfa080ba20701d2cd9fa1f4 +b0eac6212c7a62ef6062c30875fbe24b8e1a9d88854c035686f849a9eed4d17fbc9af27429eb7c3fd60b47a5e29f6783 +878561a88412e95f19f1cb8894be9d0ea4a2cdd44f343387f87dd37445e5777bceb643cebc68c910acb5e588c509cd2e +a57b6c347955d8b0057a87494223148ff9ff12b88e79dbd9d0aae352fe55e15ea57fcfb9add3d5d269ee0001d8660f20 +a07fa66340d4082585e4d72c77510c59b272e7a3345f4b1de6be7ff4a11ea95d712d035a7355fc8d2e571fa65fe8236f +b9d84a627462438e8ede6c453e3367bfaf81cff199d3e5157ef2bc582d358b28b5ccc3bc27bb73af98ef45179ea79caf +b14f26ea7ca558761cb19508e5940fbf5dcf2ad8555c5a03e8ff92481994072f523b1ab6b7176f698e2cfd83d4f8caad +800cca1cbb14e1fc230c7b420ff06864a934b082321bbf5b71f37340383923f23183d4fdc8fa2913928722b8892db28e 
+94790c950b92e971ec39e9396c3f32dee32a8275d78e6ea28a47130651bddc86a189ef404c5e8c210bd291186dee0df4 +ad7b3b3e377df64023b8726d43a7b6ec81e5a5e8c0943c5bebe5ab5ddd6597255f434a205c14ba90e9e5e3c462a1fe0c +86ff8156cc857a416e735009cf656b89da59b766b4c4e5a0c0165282b530c10657cc28cf5cb847696725c37ac48b69d7 +89cb64cf9294f68f01533660a2af2aec0ec34cc0b4a0cc36a128f2e0efb3da244981f69aede962f50590faeeb9a5da01 +a2ea5a94a524bb8e6f767017246cd1af9d87c9abb9894e91c4e90c34c5161be6179b49dafcab9cff877a522c76beb145 +b5d9abf29ed6030a1e0f9dc19be416c45ba8cb5ed21aff5492233e114035715d77405d574cd62f2716285e49f79b9c99 +ac441cf6104473420babdfb74c76459cbea901f56938723de7ad3c2d3fadb0c47f19c8d9cb15a3ff374e01480b78a813 +abea34bd2d36c5c15f6f1cdd906eb887f0dd89726279925dbe20546609178afd7c37676c1db9687bc7c7ea794516af03 +8140abfd0ec5ca60ef21ad1f9aabbb41c4198bac0198cb4d220e8d26864eedb77af438349a89ca4c3ff0f732709d41a9 +a5a25abf69f3acd7745facb275d85df23e0f1f4104e7a3d2d533c0b98af80477a26ac3cf5a73117db8954d08f9c67222 +b45ac8d221a7e726ad2233ba66f46e83ed7d84dbe68182a00a0cf10020b6d4872f3707d90a6da85f6440c093914c4efa +80f586dfd0ceaa8844441c3337195ba5392c1c655403a1d6375f441e89d86ce678b207be5698c120166999576611b157 +b8ce52089e687d77408d69f2d1e4f160a640778466489d93b0ec4281db68564b544ec1228b5ab03e518a12a365915e49 +8990f80bae5f61542cc07cb625d988800954aa6d3b2af1997415f35bd12d3602071503b9483c27db4197f0f1f84a97ac +8329858a37285249d37225b44b68e4e70efeef45f889d2d62de4e60bd89dde32e98e40e2422f7908e244f5bd4ffc9fe2 +8d70c66ea780c68735283ed8832dc10b99d3daeb18329c8a44a99611a3f49542e215bf4066ff4232d36ad72f1a17ccc3 +a3b2676cc8cdf4cc9e38c6cb8482c088e5e422163357da3b7586a3768030f851ad2a138eeb31584845be9ffb8067fc00 +95b1fa74e9f429c26d84a8e3c500c943c585ad8df3ce3aea1f6ab3d6c5d0ed8bb8fa5c2e50dd395fa8d4d40e30f26947 +b1185f2ac7ada67b63a06d2aa42c4970ca8ef4233d4f87c8ffa14a712a211b1ffde0752916bfafdfa739be30e39af15d +8705a8f86db7c4ecd3fd8cc42dd8c9844eab06b27d66809dc1e893ece07186c57b615eab957a623a7cf3283ddc880107 +af6356b372f0280658744c355051f38ff086f5563491fc1b3b1c22cfec41d5c42b47762baeb9ee6c2d9be59efd21d2b7 +86bdd4527b6fe79872740d399bc2ebf6c92c423f629cdfcd5ece58e8ed86e797378a2485ead87cbb5e2f91ba7b3fbda1 +a900f0be1785b7f1fda90b8aedd17172d389c55907f01c2dfb9da07c4dc4743cb385e94f1b0fc907dd0fedb6c52e0979 +a9f59f79829a9e3d9a591e4408eaec68782c30bc148d16eb6ae2efccb0e5478830bbdaa4ae6eac1f1088e7de2a60f542 +99cf54a69ad5e8c8ec2c67880900e0202bcc90c9815531d66de8866c0a06489ea750745cc3e3aa1c4d5cb55dcd1e88f7 +8676246a4710d6d73066f23078e09b3fa19411af067258e0b8790456525c02081727b585d6f428c8be285da4aa775a4b +b596c7014fe9214529c8e6b7602f501f796b545b8c70dbf3d47acc88e2f5afd65dccef2ef01010df31f03653566b16df +a12205c6c1780fc8aebdd98611e12180005b57750d40210b9eff0396d06023bd4ff7e45f36777123ff8bed7c5f52e7a3 +ae7dbd435bba81685d5eab9abc806e620253da83e56b4170952852d442648a5d8743f494a4b0fc9d606574f87895b0d6 +9786257b1726b7cdc85219ca9eec415f98f5a11e78027c67c7b38f36f29fe7a56443570fdfedc1d9293a50e4c89d89f6 +aaf0515070d1ca92aacdf5fac84193d98473d8eb2592381f391b8599bcd7503dbf23055324399d84f75b4278a601c8b2 +b31654dbf62fbbe24db4055f750f43b47f199a2f03c4d5b7155645276b2e456a218ca133743fb29d6f1a711977323f6e +8f4d39106ecdca55c1122346bdaaac7f3589d0cf0897a6b4b69e14b4d60550fd017876399401ce7c5d35f27da95f50be +8a7bfdb48cd47afe94aff705fac65f260b3a3359223cff159b4135565c04b544dd889f6c9a6686f417e6081ad01e0685 +967ba91111e5e08f9befcbaad031c4fb193776320989f8ede4018254be0e94586254432d3dbae1455014f3a2f2549d01 +a9db52352feeb76715a35c8bed49fb3a8774c9c8e58838febf800285fd6c4938ec162eb8457029e6984d8397dc79ea19 
+811794e6bfe2539e8f6d5397c6058876e9e30763ad20dad942bb5dbcab2f16d51718ce52bfb4de17889ba91da1b85bcd +a6db0f65a6dc8b8cc2312a3e0146d8daf520255bb12f74874c05693914e64e92be0cd53d479c72cb2591e7725dfaf8b0 +918d21bfa06d166e9eb5b7875c600663a0f19cc88c8e14412319d7aa982e3365f2dff79c09c915fc45013f6b3a21200d +9894852b7d5d7f8d335dd5f0f3d455b98f1525ad896fdd54c020eeaf52824cc0277ecbfa242001070dc83368e219b76d +ad00acc47080c31fcc17566b29b9f1f19ccaae9e85a312a8dcc0340965c4db17e6c8bd085b327eaf867f72966bf61452 +965e74649e35696744ecc8bed1589700bae9ca83978966f602cf4d9518074a9aa7c29bc81d36e868a0161293f5a96e95 +961e29a239c2e0e0999b834e430b8edfe481eb024cc54ffaffd14edaf4b8522e6350dc32039465badfff90dcb2ba31cc +943dda8fa8237418a07e311efde8353c56dd8ec0bfa04889ccdd7faa3dee527e316fdc60d433a3b75a3e36ca2aa9d441 +a0ed4c102e3f1d6ebf52e85a2bc863c1af2f55dc48eb94e40066f96964e4d37fff86db2cff55a8d43d517e47d49b5bd7 +9045770ad4e81345bc6d9a10853ee131232bf5634ef4931b0e4ba56161585b4286876bc8a49b7b1f458d768718cb8ebf +b0dd430295ff28f81895fde7e96809630d1360009bbe555e3ac10962de217d93ead55a99fd4f84d8cadd1e8d86d7b7ef +95ced48419b870ea4d478a2c8db699b94292f03303f1bf4560b5b1e49ca9b47e7008514fe0a9cf785717f3824567e1b2 +a7986e0e389e8aef6aac4a7a95e2440a9af877ae2bc5ad4c5f29d198ec66aa0db1d58c451e76ae70275a2e44c3d3fa68 +85a8490faf32d15de12d6794c47cc48e02428af1e32205e0742f8299ea96b64bcd6d3b4655272afa595eec74ecbb047c +b790d7fb1307aacc2d303d9b6753a9773252b66c6b67763cf8841c690cbccc4866ffb5fec1c068b97601a7953fe0f7e8 +afcc4011f8c53f10d63c29b74d9779cd75c861e01974c28a4ec2cbb909b67a1b2287ead175231343c936ad75dfa416ff +918058bffdecc1ae8779dccf1d874bb9e28edbe34c8b5954a8da64a848858d2f0776437b423baf4e731f3f5fa05a2841 +ab554db549aa36dfa9f966a5ed6be8267e3aa9ced348695f3dafc96333c6dbb48ef031693aafd59d1b746ecd11a89c51 +ac4ecf746b46b26a7af49cc9cc1d381e1e49b538dbd7fb773ce6b1df63ae31c916693cca8a90fb89f1e7ec5e0e8dd467 +a8de66d48f16b016f780a25ba25bd6338fd8895a1909aabcfb6e70f04ff66f9866e6e2a339bcbfa4bfba4070a6a8db26 +b4b49374eff6dac622e49b0e9c0e334ecbec513a96297f6369696ad39e5ec0de81a1417f6544be866c9f60957a9ba09a +b8023968549ebab6c1e7a8e82954a5b213bec50bbf35b36697a8d4fd75f9e12d510b365962aace4c9978c5b04da974a7 +8d4bc016026dd19e4059d1c5784897cefa47f7ae2ed6dfa2b3c14a852fff2b64abc09549d106584e0daed861a2d6d6c2 +85e26f433d0b657a53da4c1353485e0c2efa092484c5b8adb3f63dc72ee00be79197ebef7937b37a6a006571641cd6af +abb37a917301e68328032ff4715abc0fee32e5f5be68232ca8bf7ffb8732bc47504e75b40bcc0a7c7720b71496fa80af +9837c8d2660522c0357f5222777559d40321a1377f89ca1717215195bad4a348a14764bd87fa75f08e1f6263e9d08982 +97e06f971b4c56408ed5f1de621d233e6a91c797f96ec912737be29352760a58831aaf1f64e377c3ed9f2f4dc8ad1adb +a12d211304da7b91101513d57a557b2504069b4383db8ecb88aa91e9e66e46e8139dadc1270620c0982103bc89666215 +aab74ba48991c728ba65213e8c769e6824c594a31a9b73804e53d0fda9429403ff3d9f6ea5ef60884585d46356c87390 +92f19be2b7adf031f73611282ad33e462852f778c5e072f689dd0e9458fa6ebccfae02f2b2dc021802c9225035862468 +953bb843c48d722604576cef297123755cef8daa648c30c3a678eada8718dfdb16e71cc3e042a51fedc80577235c2563 +86f509e3c1b9ee9a3b95e6da8516b47feb8c8a83403984228f4903c7ee1ee4f03addcb8fe86283af1196a54b36b9470c +903d793a377e98e2562c49de33e3fbf84bf99211925e7002a4f688470db655884e1efe92782bf970ffa55d9c418ef3b5 +a41b65681ed7f10987a7bfdf9e56b010d53683819d845d880fc21b2d525540605c5823e75c434f17b5a0d08a091c1564 +971be802de51cfc0d10a96be7977c037873f19334ed4ed4904b7675aec8bfa1f8956cd0150b07064caf18229ffd1ccd9 +b253ebe4f82cdbefbc3ef816d40c497fe426a9f0f0f170e783fa4a05ae6dabdfa8c448817a24e723a314b43e76a7c422 
+86f397c95025489929ce9230b1466b5c330ec7c58a3c7e3153d6d05bcb8348a13398908e192590b8812f5c5ff09c133a +a0713983a3dc9f10b3833687cd2575de2fc63c4ad8d2f54ff85c6db23dd308daefef1bd1e51eec26732f77c1f37ba793 +8249a1d53ec92f311f4fa77e777800d777f3e9d4d452df740fc767fa7b0f36c8dce603d6e6e25f464c0399b8d0b93c30 +a73d0a206a62922f07b928501940d415e5a95716ee23bf6625b01ff2cd303f777adfa373d70279ba8a30fbb4c99a6f1f +b1106b407ecf234e73b95ff58ac9fdf6709ad2e763b58f0aacc5d41790226d441b5d41405ac03a0641f577848a4f5e8e +b009963ccc7b2d42792f09ab7cb0e929503dd1438f33b953104b4de43274ca3ce051554d10d7b37041b6f47d7a2dab6f +b744512a1b3c7ef9180b095c6a0c5bc16086a50020cf20dc2216bbff24d91ca99b95cb73070444dafc3ab45c3598960d +a0209669ffeddc074d35cc6aa2dac53acac8e870f8a8a5118e734482245b70c3175f760652e792118fdddac028642259 +8ddd3e0d313da17292fdcc1bbc6e9d81189bb1d768411c6fe99801975eddb48dbf76699dcf785cac20ab2d48e392c8fd +8392aa285b8b734aa7a6e0f5a1850b631ddf6315922e39314916e627e7078065d705ff63adbc85e281d214ec7567863e +b655a1fff4dba544a068bf944e9de35eaaa6c9a0672d193c23926776c82bebed8aa6c07c074b352882136b17abdab04b +af5095f40d1e345b3d37bebee3eb48c5d7b0547f12c030d5bfe8c0285943e0a7a53a186f33f791decba6a416cba0c5c9 +8223527f9eb3c8ff52708613cd2ee47e64c0da039cea3a0189b211dc25e9bfa3d5367a137f024abe94f98722e5c14b67 +afdb106d279273edc1ee43b4eead697f73cb0d291388f7e3fc70f0dd06513e20cc88b32056567dcc9d05364cb9ca8c58 +9319eac79ff22a2d538dcd451d69bca8aa8e639979b0d1b60d494809dbd184a60e92ad03b889037a1ac29a5547423070 +b79191ce22dbd356044e1777b6373b2d9d55d02b2cc23167642bc26d5f29fd9e2fb67dce5bd5cf81a602c3243bedd55c +988e0da1e96188ffd7c5460ecdf2321f07bc539d61c74a3292c34cb8c56dbafbca23eb4471a61e8e64e9a771a49fd967 +b0792b6cf4b10f8af89d3401c91c9833736616bb9fe1367b5f561c09d8911fb5a43b7a4fd808927b33ab06e82dd37a28 +862f68ea55206023ca470dbd08b69f0f785fcbabb575a1306ff3453c98ffcad5fd6ead42e8a1f9edf14c6fd165ffd63a +815ff0898b1330ac70610180c0f909561877888ff10def749a1e65edf9f4f7cea710a757c85241dfb13d0031efb5e54b +aa6e6ce21776ea4507d452ccdaf43a161a63687aae1cb009d340c9200e5646e9c2de4104dfd66b8e55dfa6de6ee83e4a +8e8f3d3403e0256ecc254b9b1464edca199cad3f3348002d744721c345a1a3c7f257c3587d2229774cd395e26693d1ba +90483e28985e4a0f7a3cb4bc5e865b9d408b94cd2146c04aed00b48a7ab80a28deb05efec320817d63578d4f953bd137 +84fb2a762ba29193b07f1dd84b3f69153cedb679b66ad04f8a4adf01c14f115163a107e6db23aaf0f0c9687824ded197 +b4a23922bf4302cc9a6583f252a1afa026c87c132b9ae44cc1f75a972cb6ae473447c500827906f9b677617ddd6fb473 +809bb9edbbe3a2769165f029f2a48b6e10e833eb55d8f9107c4a09ca71f0986dc28f3bf4ead9cab498086eb54c626bbf +a0459dbb08db4155d16301933ec03df77c4f835db2aa3f9697eeb2bb6fcd03337fab45fa43372a469fecc9a8be2e3119 +a638eaace7f21854de49f4db6e4ea83d2983751645e0fb200c5e56561f599fd37dac70bdbd36566fdd10d4114fbb9c2f +a3a27bc2728390643a524521bf8ef3b6437cfba6febfd8bb54f2b6ecbafafb96196d3dea279ce782efd97b212f364ef5 +b86693b3ea23ea6b2c4d52554f61ef39c0ef57e514ff6da80c6e54395df8376e2e96b9d50e4ec301c59e022c5c5610db +af4d7cd678d79e67ae19789d43331dff99346cd18efff7bab68f6170c111598d32837372e3afe3e881fd1e984648483e +b8735a555ba7fe294e7adc471145276b6525de31cda8c75aae39182915129025fb572ed10c51392e93c114f3a71bd0be +b1dfb6dbda4e0faaa90fe0154f4ddaf68ee7da19b03daad1356a8550fca78a7354a58e00adeecb364e2fd475f8242c24 +9044b73c1bd19cd8bb46d778214d047f5dd89b99b42466431b661279220af5c50c0cffecebd2b64c3d0847a9c7a8b1ec +891f0d162651a0aa0d68fb1cc39fa8d77fd9f41ff98b5d6c056c969c4bac05ba8c52cbfa7fbb6ef9adfe44543a6ec416 +8920ae1d5ac05bf4be6aba843e9fc1bc5b109817381cdd9aa13df53cabea319a34ee122dcb32086d880b20900ff28239 
+abb14023142876cbc9301336dced18c7878daa830070b5515ff4ac87b7bf358aa7ff129ebbf6fb78e827570a4142661f +a74b15e178cf91cde56eab0332e62d5ff84c05fcc849b86f45f94d7978bf9c0fc72a04f24d092a9d795ca3d976467f46 +806829621a908ca9b6433f04557a305814a95d91c13152dca221e4c56bfaa3473d8bb1bacd66e5095a53070f85954278 +b09a3c185e93869aa266a0593456a5d70587712bca81983dbc9eebbb0bd4b9108a38ae1643020ecf60c39c55bb3ac062 +b2bbe8f5361a3ecdb19598dd02e85a4c4c87e009f66fee980b4819a75d61f0a5c5e0bdc882830606cb89554ef1f90ead +825e16cb54fc2e378187aedae84a037e32903467ac022deb302cf4142da3eda3ead5b9f3e188d44f004824a3b5d94fbe +8b39d4a11d9b8ba885d36bcdb6446b41da12cfd66cb22705be05ab86936464716954360cc403f8a0fd3db6d8b301cb59 +ac19d453106c9121b856c4b327ddb3e3112b3af04793df13f02d760842b93d1b1fbdff5734edc38e53103a6e429a1d1f +b1cacbb965ec563f9e07d669ffc5e84d4149f1fb9fcfbc505788c073578c8f67956fb8f603e0b9a9d65e2d41803038ce +b7612d9e7dc930bff29191d1503feb2d6451b368b69fa8ecb06353c959967daccdc262a963f01c7fb95496f1bd50d92e +93f8fceb65ea9ef2052fa8113fb6720c94f0fed3432d89014ee5ad16260aeb428aadea0d1f1e002d2f670612ba565da3 +b3eb9213752156ed1fced3bca151fd0c630554215c808b9a0938b55fed42b6b89f9b76bc698f3e37c3c348d2395dbed1 +b46ab3553ef172ae40fc21c51d1d7eab8599a67f2f89a32a971aa52c2f031664e268b976dd2f7dc2195458fcf4bf3860 +8fb66f2c67ca5b6fb371c7d04592385a15df0c343857ba8037fe2aa9f2a5d4abc1058323ff9652653261b1c7db0edc24 +a7dfdbbf0b14e4af70fdb017875cdc36ad2108f90deb30bfca49301c92cbf821645a00ade1d1ee59a1a55a346675c904 +856199cad25ec80ee0327869077f272e33d59bf2af66c972e4a5839ec3b2a689e16f7fd0a03a3138bec458fcff8edbea +a2842ac5a715c2f48394988c6f84a6644c567673806feaa575838e906138c1b25d699e1b6ffdfc9be850b15da34077e4 +814b448ada88f769de33054c3c19f988226317797acacdbe55ed2485b52cd259ac5bcbee13f9de057eee33930a7fa0c0 +b49de8dd90da916ed374ca42665464b6abe89ff4453168921f5a7e5ddd3dcfa69422782e389e586e531fd78a1f236a8b +851f9d942b4c8ffc020c02c7fbee0f65ef42b1ab210ab4668a3db6aa0f8ab9eedb16f6fd739a542cc7e3cc03172b565b +a5128c155b8062d7fa0117412f43a6fdc2de98fa5628e1f5fc1175de0fa49fc52d015ec0aff228f060628268359e299c +b0765849127cc4ce1a1668011556367d22ce46027aa3056f741c7869287abcaccf0da726a5781a03964a9ded1febf67d +984562c64f3338ffe82f840c6a98a3dc958113f7ed28ee085af6890bbc0cd025723543a126df86f379e9c4771bb69c17 +8087fe60a9a22a4333f6fbe7d070b372c428d8c5df3804bb874b6035e5602c0693757fb30a9cd5a86684b5bca6737106 +a15e195b5850f7d45674cdc3bd74f972768b46fe9473182498263edc401745a8716fc532df8fc8c1375e39e391019226 +858ec10208c14a67c4156ea9c147f36d36c4fa0a232195b647e976ba82c8e16262b2b68d31e3b4702070c3dc701bccb5 +84bf3fb83c003380ee1158e2d6b1dca75cd14c7b2a32aec89d901f0d79e1475aa0827cb07cba1784a6bb0d37f6ca5cd4 +91e69f5392648e7f7c698059a0fc4b8478ab8af166d3842fb382ec5c396daa082ee3b2cb0192da3c9d90f6523c4c039d +8f7299f451c5e641d6fd961946b7a6ba4755685b2a40164e6276c25aefc66715b92492097a191813d39bb4405dc5da36 +ade2cf04ff6c94c1019bfa1e0e8f580696230fa6ee9695c4772e5a44501b2fffdd765ec7cc71ba14b83559ad62cc0fc5 +85fc98ecf469d6f98c8b3e441680816f764de39001a249bc7162f990c5a5354683e849164d4fc9287ee516780cdcd436 +928d118188120d038c37abdbe66c05adaa87f1cf9957dee2783b09fa91c4c43a7b0d0b2b6c5f4dea57e3ec8af230e84f +8025f71cf8d3085d6ea5104dddea8fa66cdb8527e40db01472469be021632daf22721f4acf1a8698a53439fe2f82596c +83266fffb12b3c795a6b551ac2aa7d9a29c183f861e78768c11286a04e22bd423bba05a68775bd77273e3ca316a4318e +95fd0c69c2d9df4e795c7ba71ed71a9d9f2878cd7e3a64be7b671d9611649fd41d29f8bdab642ba19cbd3db660d6a7e7 +92a912cb4d5ef4b639876daf4289500c4ebdbd80aff07fd93dc3eea645f084f910e5c02c10492a37f16acaa7e646d073 
+b3d2622c987189a0873932aaea8b92ebb6e9e67ff46e91a96bf733c3b84175fffe950f8f4622cc4fa50f116321c5537f +a98f9a40054b31023a8f7549a44cae853b379bbfe673c815b8726e43ecd11a96db40b20369d712cbf72ffab064ecfac5 +b4e9a38e371fc21f4b8a3d7ad173c9ffad0554530dc053365d9555ddb60f5c9063c72ff4c65d78b091af631a9e1ee430 +875a31aee4ba19e09f8c2754fab0b5530ec283c7861a4858b239a12432f09ef155a35fedb0bc33eac2117c7e62f1c7ee +95edd0d1a6e94af718590756b5c5f5492f1c3441ecc7fa22f4e37f4ec256b9fffd2fda4c11fc1a7c220daee096eb1ff8 +b35fdc435adc73e15c5aaf4e2eea795f9e590d3e3ee4066cafa9c489ee5917466c2a4c897a186b2d27b848c8a65fa8a8 +94a5ce56f8d72ec4d0f480cb8f03e52b22f7d43f949a4b50d4a688a928ffd2c9074ecbab37733c0c30759204a54f9a6a +987562d78ef42228c56074193f80de1b5a9ed625dd7c4c7df3bf5096e7d7b08e2ee864bd12d2ea563e24fa20ad4d30ef +95a8218405038c991ace2f45980dbb1efa9e4ad0d8153486b0213a89e4d7e3cac6d607100660784627c74f90a8e55482 +b6a29d566f5a924355b7f7912f55140e1b5f99f983c614b8a92814ce261f2750e8db178866651ea3b461fb8f92890b14 +afdacc0a13da0446a92455f57a42b3ba27ba707f24171727aa974d05143fae219de9e2eb7c857235dd9c7568f43be5a8 +862a7dc25f7cfa4a09aeca0ed2c9c5ee66189e119e226720b19344e231981504e37bca179aa7cad238ee3ab1386aa722 +a336364e76635f188e544613a47a85978073f1686e4ee7a8987f54da91c4193540ac448b91d07d1fc5c7a8538b1f1688 +8f1ddca9638decd8247c1ce49c1e6cf494d03d91c4f33e48a84452d12b6736e8bd18c157068dfeff3a90977af19e5b1e +96ae91b9aaf00e437c18ddfc1aef2113ee278153ba090aedeb3f48f1e66feb8897bb1ac7f5ffeffc3be29376dd51e498 +8230b5bd9067efb6089e50213f1cc84da892e6faf0b79d5e4768c29303a80b1b754cb09d17a21933aba4c5f32070878a +a79dfe217faec7b4d3cf97d8363949efbc6f3d2c6bbc25df2c7bb8b7fd2521e6d3fa76672bfc06de6f426290d0b3cc45 +8290bd36552609d6b3ac9ccb57ff8668fc8290548eecdcee9a231f1125298c20bd8e60f033214dfbd42cd3c8642c699b +8945db9e8ec437c5145add028d25936ee8823ceb300a959402d262232ae0cbd9a64c1f0a1be6aed15ff152202ea9a70c +949e232b48adeaf57bd38eacb035267d3e78333c6b4524cab86651a428a730baf9c27ff42cb172526d925de863132e82 +98917e7a5073a9c93a526399bb74af71c76958a74619caccf47949f8fd25962810a19e399b4efcba0c550c371bea3676 +b5b144e0707aefc853ea5570bd78dedc4e690cf29edc9413080f28335ac78022139bfe7f7d6986eb1f76872bb91e82ad +949945072a08de6fd5838e9d2c3dc3200d048b5d21183020240fa13e71a1a8d30e6bfee4e6895e91d87b92f1444d0589 +b351a03c7c98506ee92d7fb9476065839baa8ed8ac1dc250f5a095c0d4c8abcfab62690d29d001f0862672da29721f16 +a82d81c136bc5e418d1fba614cb40a11f39dc526e66a8b1d7609f42fea4c02b63196315014400084f31f62c24b177cbd +87d51c907fdcdf528d01291b28adfee1e5b6221c6da68fd92ab66126247cd8086a6bcffff0ea17e7b57b0ba8d01bb95d +a2a9a1a91dfd918f36c1bfeeca705ea8e926ee012f8c18d633e50ec6e50f68f3380ef2ee839e5a43cf80fbb75bfb5304 +86f22616caed13c9e9cd5568175b6b0a6a463f9a15c301b8766feca593efa6e5ee4c7066e1cd61b407c0be12b3d8236a +b57e0a2c42790d2fd0207ef6476a433fca0cf213d70840c4af1ad45833f23fca082d21a484f78af447a19a0b068ea55c +8ae9bda5d85e6e3600dde26379b7270abd088678098506b72196ac8f9ce5b0173bc9c7ff245c95cbab5b5b967bcb043b +95c7d11f6c874f59ba632b63ce07a7a9d917a74d0b89cefa043f52aa1a7fe2e81c38dea0b20378264b5b4f64039932bc +ac7dee7479f50722526ea1c9d4e2f1a4578d1b5cce2092a07722069c96bb4da295de1c4f16e21005276e3b3f1624ac5a +89b8aaa49bd18b09f78fc5a1f3dd85d69b5dfcff28fc6d5a92b1520bc54107b8b71bb71fd6e0bde10e0a5809c633e5d2 +8982cb43fe4d3488c55e8c08b935e6c8d31bb57e4f2aeb76d6319470cce99ebf7dc2f116ac15b9d845ab1bc16aa6a583 +a12c63f48e27b1a1c83a32992642f37fb5b89851a35e80f6d1f9bc483cb25acd0e12b1dcf68781ae0cc861f002368bcb +aa6da92a4b4fa229afc8007abca257ce0ff5fad3b1ccfe5d836b9b52ff6b72575a0b915a759403b993733b16a47fdb15 
+8bf706a92fe54f15d633b9463926b874dd43e28aaeca3fe2353fb58ad7753c8a293c56b0e94176070e8a9ec7401073a1 +b81e86de4bb5c1046e40cca79585c5b98c8673626fd3a28e563c5a3296256c2f7086522ae95cbabfaa8f1a8f7eae6272 +ad10f895b05d35cb251f78cc042d3f0969a8b6b3f289ddb4b016e0b8e06bfffc3a3e1afa9b0cc548f8c092832bb766bc +ad993aceb68d5217cfb07f862956cde83d05dec5060fc7a8fbfd37c6bfd5429ba69bdaf478b6cd01c323a06793dcd9fa +83da9c9a8fcb2775df0777aceabe90642a2df1c6abc646566e954f42d6e43455b00b101ec5ef58850c8d4b3100222ca1 +b55484f339fe7c7d107e70432601f4a34e1cc02ae4de5d18b99e5aa995f7b3710fc745769b85c1af803d457491dd8ce3 +8009d80593e82f3e751cec9e7e495fd29ad6f45f8d3ae513bec998b43c49fed74c44229c6f27c421e80c65413b897644 +9868081bbcc71192f7ff8dcf99a91dcd40f96556fbd6f285bdbfdfc785f604d8bf75c368c59db5ac8cdcc663087db53a +a04b1e91af025b4387ee0a2d790a1afb842e46f4c3717e355578efd1f84fea78782c6f7944b4961268de7f1ac71fb92b +a7b6301ddb9738b89b28a36d29d5323264a78d93d369f57ddab4cea399c36018a1fcc2cc1bfadf956a775124ae2925bd +a6cdb469014b33c590a07a728ce48f15f17c027eb92055e1858a1f9805c8deb58491a471aaa765de86a6bda62a18aef4 +828a23280ec67384a8846376378896037bd0cb5a6927ff9422fca266ee10a6fde5b95d963a4acfa92efbb0309cdb17b4 +b498ec16bcdb50091647ae02d199d70c783d7c91348a1354661b1c42bc1266e5a5309b542ef5fdf5281d426819a671cb +806533fb603e78b63598ff390375eebe5b68380640f5e020e89a5430037db2e519ab8ae5d0d0ad3fa041921c098448e1 +9104ad119681c54cdee19f0db92ebfe1da2fa6bef4177f5a383df84512d1b0af5cbe7baf6a93ad4b89138cd51c7c5838 +ac695cde30d021d9f4f295109890c4013f7e213d2150c9d5c85a36d4abfdca4cdc88faee9891e927a82fc204b988dcd9 +a311c244df546d5dc76eccb91fe4c47055fc9d222d310b974d4c067923a29e7a7f6d5a88bfef72fd6d317471f80d5c82 +89e4518335240479ad041a0915fc4f1afaab660bd4033c5d09c6707f0cc963eb2e6872cabc4a02169893943be7f847d4 +a8ad395b784c83aacf133de50d6b23bd63b4f245bb9e180c11f568faca4c897f8dbda73335ef0f80a8cb548a0c3c48fc +93e02b6052719f607dacd3a088274f65596bd0d09920b61ab5da61bbdc7f5049334cf11213945d57e5ac7d055d042b7e024aa2b2f08f0a91260805272dc51051c6e47ad4fa403b02b4510b647ae3d1770bac0326a805bbefd48056c8c121bdb8 +99aca9fb2f7760cecb892bf7262c176b334824f5727f680bba701a33e322cb6667531410dfc7c8e4321a3f0ea8af48cb1436638a2093123f046f0f504cc2a864825542873edbbc5d7ed17af125a4f2cf6433c6f4f61b81173726981dd989761d +88e2e982982bf8231e747e9dfcd14c05bd02623d1332734d2af26246c6869fb56ee6c994843f593178a040495ba61f4a083b0e18110b1d9f5224783d8f9a895e8ee744e87929430e9ba96bd29251cbf61240b256d1525600f3d562894d93d659 +a2d33775e3d9e6af0d1b27d389e6c021a578e617a3d6627686db6288d4b3dffd7a847a00f7ef01828b7f42885b660e4204923402aca18fbae74ccd4e9c50dd8c2281b38dc09c022342ed1ac695d53f7081cb21f05fdfc0a3508c04759196fcd3 +af565445d2ad54c83a75c40e8895f5ad7219a8c728bce9d58d7a83716e095432993ebbd3f6911c66415a6f920d1a4d171478509b54a114308a020b33bf4487a7a8d0aa76ae4676a9b54e765a680f562d3a4fcb2e92c58b14b49b5b2917cc258f +8aa99cfaf514cef4801599cadd780d222194ca1ad69a34779c2bcfda93e5dbeb931e13914421b5809a6c81f12cf7038b04a35257cc9e94c33761e68565b1274aa6a6f9d66477229747a66b308b138f92aa4326a3bf23df65a1fe33b3b289bfe1 +99ba36d8b4f56bde026099278548b1afc0a987cbd7c9baa51fc8e6cbb8237a17636f1a44a385cec69b05a5802059956a11fe793cabb939c38800f9c239ca2518e898ade1ec2513c9ee492071a35aabd78182392a09123d28dbc233313c9120c4 +a7dc40c36afccb30a2eaff250860b28b227c195cf05674704c567d77d6655c446ae835f8fc8667e71147ab02afcb2dad0babe60cbfa37d7c2cddc68d2dec54f28a4142f8353590a3902d5ddaa22066ab563dd1435dda83f276387b9767d69120 
+939e6cc97a8b88572852a5b7f25e4838556307f60aeafb5d2b6961edbcafd4b48cb6ac980ffbacf4be963f324ba81e3d12de4f1459d8c746d0762c66ae1b166027f7fbe641d9c48f3c7d97b06d956b0be51dcc9aab65f3e99e1388e63bdd79f9 +b391e156541dfd4003d1697cdb7ec815b309807320574906b2e652ef0175828b356d215cd374b1b34d9f470b3fa0e643113e67b2273268f922f04f072cfb89008358185b25cd631f82911a3f20f90f75758ffb99bebb8076458ae1e9d1ae898c +b9ac9c84934cc2a85c876eff65577e1dfce1935cd6392c877dd881a7d2f5c3e9344f28c04f90c62a6db4237ca00f9e0d00cb5f63e3f060fc7303916e19273b6fe455f331cabbe2fe5a22d584484f0d4176120fec9819fbb0a01e6d38695acfcd +88209eb030c5d78734bf2c2a5c539653fd3c24b4c08e624f9ddc4a6550efbdc1054a56eb0c807595aad6de56fda326aa196d032a8b4b48d40140a2d77df3c7243eda6507936389a321a5811eb38e32ee433c788deeae1eb928b00940e2944bcc +a8632ddc9cf7cbc1e8b74a05b7d4a89618c64afe30367ca0c9550ae7d320bf4e51c5a69e1501a1d8bee4240d13d7835501aa39fdc401a74f4d5734e268a7ce29a1fcfdb0a8bc64e0dd4a9e8578d6985bc2bc6a3764ce7a3703f6fb2e52557a2b +a037ac67e8bb6f4193ac967e05d080a489f58ef8d3d30a89798246f3e4936121ee445b03e410a09e8ebc0db2e2477d110aad0ade99b0887f1eb016e750f42135866907f150bd6f4f99a8cb94281474166874808ebe03b118c5daab16dafdc38b +a50d9143116bffa3b237da8e1805327e81e9cd25e658289bd727d5f9e0020172cc8690dcfe31a240e5cbc48353b88c4908baa1dd7320165556e0aa633f62fcbe7870222d345a3bbcdb7ab6c07f0fd86be559964afabf56f0a8cbc0b4b91d477e +afa988ea6fa4f40c5ad07d2d580d29025ddf56d6ef1171a8b8de3464203f70b97d6f5ace72747345204b35150e06154d1477516a989ce8eea7871cc0d0de00a077c0fb23ad4837e409d0b885bf3f2dde11a30fa6273d662e68e09f461e52932f +97fa1a943ed8b81574304a3d03f4f15907f6e6e0cd36a66bd2ad2c75afafc70a61d3ff69b77ebe4dae9ca0fcedef80081062705e60bbb6ea0f1f398c84d2f8e4a3ac142ac66426c21ad5e9994ebbcc406af474c4aec5e32fadcb21875af7c9f1 +b30a564614493886f14a5dd71c89457504f8c59a7ac01b665ed167e9a8f9ee5832198fd319ecd234196ee57031bdf3840bd5a923e203a1938bc795c704b5285389750e1fd10d7050061ba19db00a60a2c0384a7d661d7d48ebe6962272230859 +84c8dea942cfae71cb02e705ec496d967425793ce8812e7ee53c2f23713abeaff566a658cd1c73dfd18187d16253a6ee0a623e82cd18e31cd1a1875d19c078835dc9292e141686150a88065226ada264740143e87c03a0f6c4da8c187438ebf4 +8c3abae8aed60338f8c4ff80aab22f8a2ae56756a93566c906f490a97151d34a1c3318054e1c494c60cc53327ad86a2d02c6c76a406726ce4f88635bc32eff0db0b61762dc518b95fa8da82e87e4bf3de54f1d72180ef53ed7bc5413e6a9a510 +a328230c92a6b1cef6a444bcb64edb992f71e3d7b93f0b6b8b408ba7c908db746d92ddb2c7588bab438ef3bc61be1c2f0dfc86ba2ff514b42b35c80f89b2e780f813ea1dfb977fbded2cd9b553b747fa952e227ebd8f071163d421fc337f04c9 +b482cab423cd5f1c5df036070aade7aa016283d69619d664025c3feab866a0a5691d344b2ee2bedc5dedd1f9a73eae16003a3827c9e5bbe22ded32d848fba840ffad1141ad158f5c40bc8ae0d03781b9705d851a7f1391b096c576c0f4f2a6b0 +919ee1df27fabcb21237a1b7b98f53d41d849e1b6a8f9e28c3fae2841c6b5a250e4041c737e6725476e5cd715e34d3880f58d80f61efaabc261bdc703e8750f48a923e9bf8980931b9fd9e40014c66c54b3e7c98241d76d1aa47af43313a65a1 +ac94830145dbe9a8f7e6e0fc1f5fb454502d22abcafdc2dd96c6933c604461fa83b2b37385f4bc454875a02a6d4157841250956783515d11c7456e7f11b745f12856d89f5feedaf6a61a483a6c33a21cd2ba0c18eb41a1a2e7fc33bb53e4c570 +b209c699f1233735c5bb4bce848e4365fd76651ae2184d2279a90df0c2f69ffa2a24d84a9b9f274021072953c0d65e1a0202d490d6c37186af240114e445d87bff754b4824937e4f2c90a574061b1c4910fed88d90f698025a2a264e656cb8a4 +93320dc0576b0d069de63c40e5582b4486d9adf5e69e77e3ebaf3da26976fe42147a65051501bc8383f99e7ba75479c70a6726c2cd08bf98c7481f1f819712292d833a879f21a1221a9610bc748fb5e911055122fdb4055cdc84e8bfe0f4df9b 
+a4380b240e998cdf668591f71a0c88ed143b0185a920787627ce65095f8223dc606fa5bce93377af100de92d663e675c0736d7f1973603a84a5c4162fb5e01c88c7493503ae1d7e9fbe8ece9b418397d68c21eeb88dae226e09875d372c646dd +aab48517d69135a16b36b685adfe9b2544a709135a21ba3e75981a2cba4ec81d1fe28ac0f72fde0c0001c15300ed6a810f58d3117bdd58d0149751d6508cf8a1a1ff7b63dd02d2730a9d6fe96c77c502fe8ed46d50a181ec4bb35e37dfbd6af4 +8277265fe75ab89ce4ec65b33fb4084bec0a56d81faf2f7a9070d2ca3065678e03a790350eba56323a54e0285bc32fe8007d5259740fde226e16cbde8354eacd562294eb9b7f727ed72ffbdad86f467cf057c737b34b80a41deb92634ed866f5 +aa40a24cb2ebe606d969392c03020070f044c95088d80f57f771b837c048342d2cd3474600d7660441090ffb8d2ffb7f0eddd67eb378e3e1477a6ba0bc38096d5d2d3355bc8b60f605f57f0c1899da591457440352381d2b38c0aa9acc7fe419 +80815d10685808cb630820629bcd2fa9041c9b74433630c0b9c1b7f7e8edf1440b520217f76ec9a50c125cf4438aa66006a1928a9ed2321da7ea325c3d56b65462b72118ca2c99a0ea733aa11da9abbeda6cc71ffeed301ae70213a29e697dcd +ac235d079f91b00b1fead7523da8f73a5409fa8970907af0c5d5e4c6a0996dccfcdb0d822d08c7fbc0c24799457d011d04312d20831825f23cf988141056a6814c8a1cac9efe37bdcbfa272aed24cd92810fea7c49b0d07683a5c53643872179 +b8aa59534d75fa5ac1c2c3f963bf73899aff5210059dbde8a8635561c6249e5143affee3bd2fd57575213b52d9a73d5702525867a7dcbb1d0a49b98c2925556fc5463ff0209742046a24ab29e74257d6419401093cc4371944d811cc300b6a67 +80bbfc5b816eea29a6d84e2217dee4d547306994d39e5592515e1b0807b67fe960d1d5addb0ff1a20c158bdb294c04bf093d28996121845a2c9268e2c9ac0f4067e889c6aaca62f8535d35b45036954bd069e3afa84f04721538c26003304c20 +a535c17d0e151d0e03d42dd58ba8c715bee3fabca2890e0e016071d34184b6b34e770d2be29c8ec76b69bcc471d50f4d043c2c240e9b93a81cff7ee2724e02018dfd9b534e40be641fdb4884abcd83b76f517557ffba508f1ba2f56313f4de94 +b237eb7465df0d325a3aa58269be2627e4978f9863f4f100ed4c303cb1f6549e606f2e3c9180824d8049191965c8dacd0a0c76cc56cb22cf1bcfdb39372c8aa29b4f7b34582b1719e6bd59c930d87d5ccd838743b585d6e229d5ed42337315c0 +805c335a2a9d2de30809cf30808ef836d88e9453c510716f01696f14c72dd60505eca8f128970edc8e63a9aa1f8792ac0dd50dcc84fbf4cc8b32349c682a6a27bc7551c7aa273a94c1606d07710188d93579afe3be1781bded15a34ed6047922 +b25dadf385ddd3c39bcb0a014d3d4f66127946b1aceae8809e3a03d66cc25e27142ca108316391f857fe82fdea4db2520cc73793b695eafbf3ade00ef7ec747b0457e49303f5e1a370f5263b436566fe24a0876e5fe088238c7be37a0718d65f +b0f753081cabe2c8fce73aba82ff67dbc9842598b3e7fa3ce2a1f534536f8ac63c532fe66552ac6b7adb28c73ed4c8a4184849be7c1756a4681ce29ebf5e1c3aa806b667ee6bd68f6397aba3215dc1caec6742f21d681e32cd1160d6a3b1d7ee +b798771eeb3d7a17c62ba5916cc034bba870da6b1ac14c2e1cae71af3ad4e0c0d1ff983f691e0e55289d5a33b131f2ec12430c9566dd71f4d8be9c79155357a5c30c5efcfd75bbe1bb6d5ada4d50604ea49ed838d3641f268ca6e25c9c4b6b72 +b52554c017388b099804abbe565346591a086d9979e10140ddaccc0a3680e506db775d7cbeafde67563adf0f09f5c2420caf19629f4e8f03e6fe02e9416ecd5269989e482b90004a083967d1141387eb74865bac6bd17e7a6d5f58225e52d4b7 +b520ff694520919023d44d53f98a7de2f78ff37b2d9193dcaa35556a6a0febf767781a4c961dce7c804bfdf81935f8f0082865253da52e79dfa1c5ff74d61495b2da76e167d46114709e877a7791a3a95e33a42f56b83f5f5afe271c67ae997c +b721401983440797a03d5b99f2088a0b249aa911969c34dd6c615b0060325da555d2ad99d931170c0868b0488a2234a4114cc0013d5163b833f5c45c5eb536421c016cf85788390176bb2dc4c196d6be26bbbfceae048b82f0d8039222e71c94 +acd9d833ba0a8cbd8d1ba939a11ea0fa5607e1bc6e693ec318bdb097aedd042d76e695dcebebd142e2e4ac30b1905dff03ec36d9cc70577e4dbe5e9ed7c20c7afb13a7f0155f203c6b83b9f1ad3d20a0d4aef0fbbbcf466ffc1bcd482bc2f5e0 
+8cc1795de015f2b0e72116f169f3b4624b7738ceebea354e0bd9051c27b86f647ea36cad57ea6884c1a8adf9b45cd83514fa687e68878bbd613d793aa10986d5a0411f081689229e0d72133b3667b9f3f1a02211d0e680564eb1ea43393e1f36 +aa9281c61113c343a108de1036570feefc72fb7a96ff11f73024de12b83f29631f5a8a5900e6f10b15227c6f7462881511271bf785ebdf95ce288100e5dab391f664f6ff76c72b65b34479a4f43e5e8eba292209d6654157286ad3242ac342db +aaf16866275082e59d415db317aa874267d048ee405a553e852e6d175711d31a1fee99912345915bce121f43bc3e00d81338e5fcd3c8a1012fb4f172a9fe15622dd368b4d9d5cb60d189f423b071791fe26cea7676aca8df07965cacf80b0cd0 +accc80b3d8a6ffa648487a3d3c0ce1aeeb5401edf3cf2e385ea4a6d5fc110054fcce38f01f1da7141bbed30eb7a0a6810c82212bbb9da75d6033082dbcf6bc6a5791f85aa0f045a10da5de015edbf369b4d23b32b0c058962d2ee88e6911f994 +83f1089395a16077738cc7c9a6d6a3dc9033aac4abc508af5a1f007ca92e1a80b2e6f2dbda7fdcf0d5646de790a6201d0a9cfbcb6620a1426600e3a6a425ec004384f49fb9dcd166691a47177d45dcbcb761a11d46220b0aa09fc946131f7aa5 +9246bb586d43cb817c2e15ed609156e9f1cd284ba2f4797bbfa51c0341e1ba382eaac059aa9f63fb88d228a1a932839a171e7c7d00199dc7c4d6c5ea038a02cbc3cc5297c70401520e70ebbcffacd6a703f62896f3c788f94dde3c33ab0ecbdb +a316cb7c74feb0563c56cc79015e2774fbeca458bf8e9fb07894f9d6bcd73f7fb9428e87c816e5629e4bf7f3ec567fbc091549471b75492dde08217cb334b716b4582b24384586e53388873a78a90ec01bd7c3bace9cfc52161467df16e27c33 +ade18c74bbe60d1d69f4a570f8e5fd8696c26cc9e02829040b6b14cb9c49a4b3263b5bd5e16ec0b29010b4be054c16ab09304e23442af7d7f5fcc60bc6c5634ab6e4aed7ef334b2785e4c7672d59a687278e42d310342db5e5975d716e6d1595 +b7728800bb2039acf228fa3d8028569c426cb85d28b2b5820bbef938d5ca8c4df981d3e01a309e26ca101e8295d0f6990c03b8c239798323575874a4ee5bfe46cfe99b9657189142aacd8f8d1f26cf4c0e73c6397c31ba8f18102b9ea315b638 +8fb14f2a9be193f54977ecd3021663108ea143627b9a9d9faff85d1a86b855f6c437eab435fad3304f245bd7732af07f1173494cdb802fb96e85d2db89e1643206e183f3b228ca8d3f586e71aa9308eaf0223100bf07942fc39e465016d1f775 +ac1e025e53d98fdb3380489dce82d9d4bd3a6c98f0a523b841cb09a6f26ddd4d22efd98776e78d10fd996995fd00e81e08d3c25dd14a54b25a9d483677a24bbb8d1cb41a443b2c71038e6893b1b30f70758424e0f2039a48060191389033ef55 +a4c017311b9e930868132527a9849072b91db04fd36c619ae39c98da9e2174e6201d3c2ff1246c06b1b6815bbf3ea4a1116564f55ee2fe4c4d655e2294c0ded842cba209c255ca3d7b7f82d162f97890dfdeed087aa2f87cbfc61d61815da39d +89516315a3956b455843c2555248bd94dcb19993060fe75fdd51f7aa9c9147ab13997d8a98036a8f04bee5c91d78d2990907e35a52537a8ab3ed15f1a71afdcd38044a5b6e93f662b9d36c16933a881927cacae668c4c06ee6f004c9e3989bad +a1e78a011e210400c68ca76045f7da74119bff3cbe382efd2bd2ac76567c52d68d75536a91999d084043e1ce2d07d02e0b69fb99924101d2543521747536fbc51b0454aa9a4cbbec101121f597863a5c0fee2ca5eab35dff9b9085bef8b2b0d0 +830fd8d083e39153ecab43cabb22e29d7b44a55fba467af4ddd3f069439d2972ef53c3518de788f96b3f4f64963987d0155ba27afc28643af3de8e476ff515a68285728167408f45d99e574680bda6bacdd4322e587e4aa99386e035c0e931ad +b89584da22237e3061d991b1a55a5e55dc637b8b671130d304587729348138ef87885180310efe9f9f6d3580b9d7fdcf0649e8a79d2dec8c25a9f53df0fac5d517db999029cbfdd7c2cbd3e9a5503e5d267d3d8ad752335915c92b850b14bafb +959b8030733799882c5e3735479924b013756e57b893f9792bab4043e2d362d77cf308166d782e3989caa771b8a0c0a01302cb7b5e8ca12e2d6cebd59d4cd173c9dc25f438bac597fab17b4ff44997a489c168e7204b7d7c21d0938f0a2e3b51 +a0a9e5503d9afe0027891dab890c687fd5f5fac5741418490c64d7c15f59533dd603a50163c79402afa61bd02de486761983c94501da17e6bbe78c497f2122210071602f578adc0ebe7a4679f87fe77e09c8c122de69105f13455fea25f08e6f 
+9811487283ad620cd7c9b303ae2f348d0e6f5ee17b504baaa817ae207adb912a00d3cc36dbf48745eb899e6b6e22f09f0f9ba29d949ecd7350fbbfe87a8c7cdd5d0e687fc807751d07634aaf7c38baf3b24a0670c38fa6ccd7431436fc95525f +8a13aa5071c526e560def7d8583393942f07d88c9d8d26c98738fd65f57af2e3326dbb1edff0f39fe98eda4a13ed4fd71844254b954690154c4804e1c4a53df9dc4643f4b7b09d0860070f6b2318d0d63d28fb56bf5b6ff456a18dfc72fdfbbe +b9c90ff6bff5dd97d90aee27ea1c61c1afe64b054c258b097709561fe00710e9e616773fc4bdedcbf91fbd1a6cf139bf14d20db07297418694c12c6c9b801638eeb537cb3741584a686d69532e3b6c12d8a376837f712032421987f1e770c258 diff --git a/crates/primitives/src/constants/eip4844.rs b/crates/primitives/src/constants/eip4844.rs index edeb3eee5d1b..db86b18432a9 100644 --- a/crates/primitives/src/constants/eip4844.rs +++ b/crates/primitives/src/constants/eip4844.rs @@ -1,5 +1,9 @@ //! [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#parameters) protocol constants for shard Blob Transactions. +use crate::kzg::KzgSettings; +use once_cell::sync::Lazy; +use std::{io::Write, sync::Arc}; + /// Size a single field element in bytes. pub const FIELD_ELEMENT_BYTES: u64 = 32; @@ -23,3 +27,13 @@ pub const TARGET_BLOBS_PER_BLOCK: u64 = TARGET_DATA_GAS_PER_BLOCK / DATA_GAS_PER /// Used to determine the price for next data blob pub const BLOB_GASPRICE_UPDATE_FRACTION: u64 = 3_338_477u64; // 3338477 + +/// KZG Trusted setup raw +const TRUSTED_SETUP_RAW: &str = include_str!("../../res/eip4844/trusted_setup.txt"); + +/// KZG trusted setup +pub static KZG_TRUSTED_SETUP: Lazy> = Lazy::new(|| { + let mut file = tempfile::NamedTempFile::new().unwrap(); + file.write_all(TRUSTED_SETUP_RAW.as_bytes()).unwrap(); + Arc::new(KzgSettings::load_trusted_setup_file(file.path().into()).unwrap()) +}); From 6752db8e76e50a7a500cebc689f9e95fdd1f8277 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 9 Aug 2023 15:36:03 +0200 Subject: [PATCH 384/722] feat: add max_fee_per_blob_gas fn (#4129) --- crates/primitives/src/transaction/eip1559.rs | 18 ++ crates/primitives/src/transaction/eip4844.rs | 2 +- crates/primitives/src/transaction/mod.rs | 298 +++++++++---------- 3 files changed, 164 insertions(+), 154 deletions(-) diff --git a/crates/primitives/src/transaction/eip1559.rs b/crates/primitives/src/transaction/eip1559.rs index 3108c6e51e69..ba3e18a6d2be 100644 --- a/crates/primitives/src/transaction/eip1559.rs +++ b/crates/primitives/src/transaction/eip1559.rs @@ -64,6 +64,24 @@ pub struct TxEip1559 { } impl TxEip1559 { + /// Returns the effective gas price for the given `base_fee`. + pub fn effective_gas_price(&self, base_fee: Option) -> u128 { + match base_fee { + None => self.max_fee_per_gas, + Some(base_fee) => { + // if the tip is greater than the max priority fee per gas, set it to the max + // priority fee per gas + base fee + let tip = self.max_fee_per_gas.saturating_sub(base_fee as u128); + if tip > self.max_priority_fee_per_gas { + self.max_priority_fee_per_gas + base_fee as u128 + } else { + // otherwise return the max fee per gas + self.max_fee_per_gas + } + } + } + } + /// Calculates a heuristic for the in-memory size of the [TxEip1559] transaction. 
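    // [Editor's note, illustrative only — not part of the patch.] A minimal
    // worked example of `effective_gas_price` above, assuming `TxEip1559`
    // derives `Default`; the numbers are hypothetical:
    //
    //     let tx = TxEip1559 {
    //         max_fee_per_gas: 100,
    //         max_priority_fee_per_gas: 10,
    //         ..Default::default()
    //     };
    //     assert_eq!(tx.effective_gas_price(Some(80)), 90);  // 100 - 80 = 20 > 10, so pay 10 + 80
    //     assert_eq!(tx.effective_gas_price(Some(95)), 100); // 100 - 95 = 5 <= 10, so pay the max fee
    //     assert_eq!(tx.effective_gas_price(None), 100);     // no base fee known: pay the max fee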
#[inline] pub fn size(&self) -> usize { diff --git a/crates/primitives/src/transaction/eip4844.rs b/crates/primitives/src/transaction/eip4844.rs index 34b43e19bb66..f9da9b0970ea 100644 --- a/crates/primitives/src/transaction/eip4844.rs +++ b/crates/primitives/src/transaction/eip4844.rs @@ -63,7 +63,7 @@ pub struct TxEip4844 { /// Max fee per data gas /// - /// aka BlobFeeCap + /// aka BlobFeeCap or blobGasFeeCap pub max_fee_per_blob_gas: u128, /// Input has two uses depending if transaction is Create or Call (if `to` field is None or diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index d3f03e082066..a2d64517fcb7 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -79,139 +79,6 @@ pub enum Transaction { Eip4844(TxEip4844), } -impl Transaction { - /// This encodes the transaction _without_ the signature, and is only suitable for creating a - /// hash intended for signing. - pub fn encode_without_signature(&self, out: &mut dyn bytes::BufMut) { - Encodable::encode(self, out); - } - - /// Inner encoding function that is used for both rlp [`Encodable`] trait and for calculating - /// hash that for eip2718 does not require rlp header - pub fn encode_with_signature( - &self, - signature: &Signature, - out: &mut dyn bytes::BufMut, - with_header: bool, - ) { - match self { - Transaction::Legacy(TxLegacy { chain_id, .. }) => { - // do nothing w/ with_header - let payload_length = - self.fields_len() + signature.payload_len_with_eip155_chain_id(*chain_id); - let header = Header { list: true, payload_length }; - header.encode(out); - self.encode_fields(out); - signature.encode_with_eip155_chain_id(out, *chain_id); - } - _ => { - let payload_length = self.fields_len() + signature.payload_len(); - if with_header { - Header { - list: false, - payload_length: 1 + length_of_length(payload_length) + payload_length, - } - .encode(out); - } - out.put_u8(self.tx_type() as u8); - let header = Header { list: true, payload_length }; - header.encode(out); - self.encode_fields(out); - signature.encode(out); - } - } - } - - /// This sets the transaction's nonce. - pub fn set_nonce(&mut self, nonce: u64) { - match self { - Transaction::Legacy(tx) => tx.nonce = nonce, - Transaction::Eip2930(tx) => tx.nonce = nonce, - Transaction::Eip1559(tx) => tx.nonce = nonce, - Transaction::Eip4844(tx) => tx.nonce = nonce, - } - } - - /// This sets the transaction's value. - pub fn set_value(&mut self, value: u128) { - match self { - Transaction::Legacy(tx) => tx.value = value, - Transaction::Eip2930(tx) => tx.value = value, - Transaction::Eip1559(tx) => tx.value = value, - Transaction::Eip4844(tx) => tx.value = value, - } - } - - /// This sets the transaction's input field. - pub fn set_input(&mut self, input: Bytes) { - match self { - Transaction::Legacy(tx) => tx.input = input, - Transaction::Eip2930(tx) => tx.input = input, - Transaction::Eip1559(tx) => tx.input = input, - Transaction::Eip4844(tx) => tx.input = input, - } - } - - /// Calculates a heuristic for the in-memory size of the [Transaction]. 
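    // [Editor's note, illustrative only — not part of the patch.] The block
    // deleted here is moved further down the file unchanged; see the matching
    // `+` hunk later in this patch. For orientation, `encode_with_signature`
    // produces two wire shapes: legacy transactions are a bare RLP list with the
    // EIP-155 signature folded into the fields, while typed (EIP-2718)
    // transactions prepend the type byte (e.g. 0x02 for EIP-1559) and optionally
    // wrap the result in an RLP string header when `with_header` is set:
    //
    //     // legacy:              rlp([nonce, gas_price, gas_limit, to, value, input, v, r, s])
    //     // typed, w/o header:   0x02 ++ rlp([fields..., signature])
    //     // typed, with header:  rlp_string_header(payload_len) ++ 0x02 ++ rlp([fields..., signature])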
- #[inline] - fn size(&self) -> usize { - match self { - Transaction::Legacy(tx) => tx.size(), - Transaction::Eip2930(tx) => tx.size(), - Transaction::Eip1559(tx) => tx.size(), - Transaction::Eip4844(tx) => tx.size(), - } - } -} - -impl Compact for Transaction { - fn to_compact(self, buf: &mut B) -> usize - where - B: bytes::BufMut + AsMut<[u8]>, - { - match self { - Transaction::Legacy(tx) => { - tx.to_compact(buf); - 0 - } - Transaction::Eip2930(tx) => { - tx.to_compact(buf); - 1 - } - Transaction::Eip1559(tx) => { - tx.to_compact(buf); - 2 - } - Transaction::Eip4844(tx) => { - tx.to_compact(buf); - 3 - } - } - } - - fn from_compact(buf: &[u8], identifier: usize) -> (Self, &[u8]) { - match identifier { - 0 => { - let (tx, buf) = TxLegacy::from_compact(buf, buf.len()); - (Transaction::Legacy(tx), buf) - } - 1 => { - let (tx, buf) = TxEip2930::from_compact(buf, buf.len()); - (Transaction::Eip2930(tx), buf) - } - 2 => { - let (tx, buf) = TxEip1559::from_compact(buf, buf.len()); - (Transaction::Eip1559(tx), buf) - } - 3 => { - let (tx, buf) = TxEip4844::from_compact(buf, buf.len()); - (Transaction::Eip4844(tx), buf) - } - _ => unreachable!("Junk data in database: unknown Transaction variant"), - } - } -} - // === impl Transaction === impl Transaction { @@ -326,6 +193,20 @@ impl Transaction { } } + /// Max fee per blob gas for eip4844 transaction [TxEip4844]. + /// + /// Returns `None` for non-eip4844 transactions. + /// + /// This is also commonly referred to as the "Blob Gas Fee Cap" (`BlobGasFeeCap`). + pub fn max_fee_per_blob_gas(&self) -> Option { + match self { + Transaction::Eip4844(TxEip4844 { max_fee_per_blob_gas, .. }) => { + Some(*max_fee_per_blob_gas) + } + _ => None, + } + } + /// Return the max priority fee per gas if the transaction is an EIP-1559 transaction, and /// otherwise return the gas price. /// @@ -621,6 +502,137 @@ impl Transaction { } } } + + /// This encodes the transaction _without_ the signature, and is only suitable for creating a + /// hash intended for signing. + pub fn encode_without_signature(&self, out: &mut dyn bytes::BufMut) { + Encodable::encode(self, out); + } + + /// Inner encoding function that is used for both rlp [`Encodable`] trait and for calculating + /// hash that for eip2718 does not require rlp header + pub fn encode_with_signature( + &self, + signature: &Signature, + out: &mut dyn bytes::BufMut, + with_header: bool, + ) { + match self { + Transaction::Legacy(TxLegacy { chain_id, .. }) => { + // do nothing w/ with_header + let payload_length = + self.fields_len() + signature.payload_len_with_eip155_chain_id(*chain_id); + let header = Header { list: true, payload_length }; + header.encode(out); + self.encode_fields(out); + signature.encode_with_eip155_chain_id(out, *chain_id); + } + _ => { + let payload_length = self.fields_len() + signature.payload_len(); + if with_header { + Header { + list: false, + payload_length: 1 + length_of_length(payload_length) + payload_length, + } + .encode(out); + } + out.put_u8(self.tx_type() as u8); + let header = Header { list: true, payload_length }; + header.encode(out); + self.encode_fields(out); + signature.encode(out); + } + } + } + + /// This sets the transaction's nonce. + pub fn set_nonce(&mut self, nonce: u64) { + match self { + Transaction::Legacy(tx) => tx.nonce = nonce, + Transaction::Eip2930(tx) => tx.nonce = nonce, + Transaction::Eip1559(tx) => tx.nonce = nonce, + Transaction::Eip4844(tx) => tx.nonce = nonce, + } + } + + /// This sets the transaction's value. 
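    // [Editor's note, illustrative only — not part of the patch.] The
    // `max_fee_per_blob_gas` accessor introduced earlier in this patch is `Some`
    // only for the EIP-4844 variant; a hedged sketch, assuming the `Tx*` types
    // derive `Default`:
    //
    //     let blob_tx = Transaction::Eip4844(TxEip4844 {
    //         max_fee_per_blob_gas: 1,
    //         ..Default::default()
    //     });
    //     assert_eq!(blob_tx.max_fee_per_blob_gas(), Some(1));
    //     assert_eq!(Transaction::Legacy(TxLegacy::default()).max_fee_per_blob_gas(), None);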
+ pub fn set_value(&mut self, value: u128) { + match self { + Transaction::Legacy(tx) => tx.value = value, + Transaction::Eip2930(tx) => tx.value = value, + Transaction::Eip1559(tx) => tx.value = value, + Transaction::Eip4844(tx) => tx.value = value, + } + } + + /// This sets the transaction's input field. + pub fn set_input(&mut self, input: Bytes) { + match self { + Transaction::Legacy(tx) => tx.input = input, + Transaction::Eip2930(tx) => tx.input = input, + Transaction::Eip1559(tx) => tx.input = input, + Transaction::Eip4844(tx) => tx.input = input, + } + } + + /// Calculates a heuristic for the in-memory size of the [Transaction]. + #[inline] + fn size(&self) -> usize { + match self { + Transaction::Legacy(tx) => tx.size(), + Transaction::Eip2930(tx) => tx.size(), + Transaction::Eip1559(tx) => tx.size(), + Transaction::Eip4844(tx) => tx.size(), + } + } +} + +impl Compact for Transaction { + fn to_compact(self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + match self { + Transaction::Legacy(tx) => { + tx.to_compact(buf); + 0 + } + Transaction::Eip2930(tx) => { + tx.to_compact(buf); + 1 + } + Transaction::Eip1559(tx) => { + tx.to_compact(buf); + 2 + } + Transaction::Eip4844(tx) => { + tx.to_compact(buf); + 3 + } + } + } + + fn from_compact(buf: &[u8], identifier: usize) -> (Self, &[u8]) { + match identifier { + 0 => { + let (tx, buf) = TxLegacy::from_compact(buf, buf.len()); + (Transaction::Legacy(tx), buf) + } + 1 => { + let (tx, buf) = TxEip2930::from_compact(buf, buf.len()); + (Transaction::Eip2930(tx), buf) + } + 2 => { + let (tx, buf) = TxEip1559::from_compact(buf, buf.len()); + (Transaction::Eip1559(tx), buf) + } + 3 => { + let (tx, buf) = TxEip4844::from_compact(buf, buf.len()); + (Transaction::Eip4844(tx), buf) + } + _ => unreachable!("Junk data in database: unknown Transaction variant"), + } + } } impl Default for Transaction { @@ -664,26 +676,6 @@ impl Encodable for Transaction { } } -impl TxEip1559 { - /// Returns the effective gas price for the given `base_fee`. - pub fn effective_gas_price(&self, base_fee: Option) -> u128 { - match base_fee { - None => self.max_fee_per_gas, - Some(base_fee) => { - // if the tip is greater than the max priority fee per gas, set it to the max - // priority fee per gas + base fee - let tip = self.max_fee_per_gas.saturating_sub(base_fee as u128); - if tip > self.max_priority_fee_per_gas { - self.max_priority_fee_per_gas + base_fee as u128 - } else { - // otherwise return the max fee per gas - self.max_fee_per_gas - } - } - } - } -} - /// Whether or not the transaction is a contract creation. 
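// [Editor's note, illustrative only — not part of the patch.] In the `Compact`
// impl relocated earlier in this patch, the `usize` returned by `to_compact` is
// the transaction type tag (0–3); it is not written into `buf` but persisted out
// of band, which is why `from_compact` receives it back as `identifier` and
// treats any other value as database corruption. A hedged round-trip sketch:
//
//     let tx = Transaction::Eip1559(TxEip1559::default());
//     let mut buf = Vec::new();
//     let id = tx.clone().to_compact(&mut buf); // id == 2 for EIP-1559
//     let (decoded, _rest) = Transaction::from_compact(&buf, id);
//     assert_eq!(decoded, tx);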
#[derive_arbitrary(compact, rlp)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default, Serialize, Deserialize)] From 123249fdfd5fea0b7a6f87d2b42b518ff5f4a8d6 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 9 Aug 2023 14:58:44 +0100 Subject: [PATCH 385/722] chore: pruner charts in Grafana dashboard (#4094) --- etc/grafana/dashboards/overview.json | 583 ++++++++++++++++++--------- 1 file changed, 393 insertions(+), 190 deletions(-) diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 8500bd19aaf8..58d3c9d8dc2d 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -159,7 +159,7 @@ "showThresholdLabels": false, "showThresholdMarkers": true }, - "pluginVersion": "10.0.2", + "pluginVersion": "10.0.1", "targets": [ { "datasource": { @@ -226,7 +226,7 @@ "showUnfilled": true, "valueMode": "color" }, - "pluginVersion": "10.0.2", + "pluginVersion": "10.0.1", "targets": [ { "datasource": { @@ -609,7 +609,7 @@ "unit": "percentunit" } }, - "pluginVersion": "10.0.2", + "pluginVersion": "10.0.1", "targets": [ { "datasource": { @@ -1006,7 +1006,7 @@ }, "showHeader": true }, - "pluginVersion": "10.0.2", + "pluginVersion": "10.0.1", "targets": [ { "datasource": { @@ -2736,143 +2736,6 @@ "title": "Downloader buffer", "type": "timeseries" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "The number of blocks in a request and size in bytes of those block responses", - "fieldConfig": { - "defaults": { - "custom": { - "drawStyle": "line", - "lineInterpolation": "linear", - "barAlignment": 0, - "lineWidth": 1, - "fillOpacity": 0, - "gradientMode": "none", - "spanNulls": false, - "showPoints": "auto", - "pointSize": 5, - "stacking": { - "mode": "none", - "group": "A" - }, - "axisPlacement": "auto", - "axisLabel": "", - "axisColorMode": "text", - "scaleDistribution": { - "type": "linear" - }, - "axisCenteredZero": false, - "hideFrom": { - "tooltip": false, - "viz": false, - "legend": false - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "color": { - "mode": "palette-classic" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "bytes" - }, - "overrides": [ - { - "matcher": { - "id": "byFrameRefID", - "options": "B" - }, - "properties": [ - { - "id": "custom.axisPlacement", - "value": "right" - }, - { - "id": "unit", - "value": "blocks" - } - ] - } - ] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 5, - "y": 110 - }, - "id": 102, - "options": { - "tooltip": { - "mode": "multi", - "sort": "none" - }, - "legend": { - "showLegend": true, - "displayMode": "list", - "placement": "bottom", - "calcs": [] - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "Prometheus" - }, - "editorMode": "builder", - "expr": "reth_downloaders_bodies_response_response_size_bytes{instance=~\"$instance\"}", - "hide": false, - "legendFormat": "Response size", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "Prometheus" - }, - "editorMode": "builder", - "expr": "reth_downloaders_bodies_response_response_length{instance=~\"$instance\"}", - "hide": false, - "legendFormat": "Individual response length", - "range": true, - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": 
"reth_downloaders_bodies_response_response_size_bytes / reth_downloaders_bodies_response_response_length", - "hide": false, - "instant": false, - "legendFormat": "Mean body size in response", - "range": true, - "refId": "C" - } - ], - "title": "Block body response sizes", - "type": "timeseries" - }, { "collapsed": false, "gridPos": { @@ -3128,14 +2991,14 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "Tracks the number of transactions inserted and removed from the transaction pool, as well as the number of invalid transactions", + "description": "The number of blocks in a request and size in bytes of those block responses", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { - "axisCenteredZero": true, + "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", @@ -3178,7 +3041,7 @@ } ] }, - "unit": "ops" + "unit": "bytes" }, "overrides": [ { @@ -3186,18 +3049,6 @@ "id": "byFrameRefID", "options": "B" }, - "properties": [ - { - "id": "custom.transform", - "value": "negative-Y" - } - ] - }, - { - "matcher": { - "id": "byFrameRefID", - "options": "C" - }, "properties": [ { "id": "custom.axisPlacement", @@ -3205,7 +3056,7 @@ }, { "id": "unit", - "value": "cps" + "value": "blocks" } ] } @@ -3217,7 +3068,7 @@ "x": 0, "y": 111 }, - "id": 93, + "id": 102, "options": { "legend": { "calcs": [], @@ -3237,8 +3088,9 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "increase(reth_transaction_pool_inserted_transactions{instance=~\"$instance\"}[$__rate_interval])", - "legendFormat": "Inserted transactions", + "expr": "reth_downloaders_bodies_response_response_size_bytes{instance=~\"$instance\"}", + "hide": false, + "legendFormat": "Response size", "range": true, "refId": "A" }, @@ -3248,9 +3100,9 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "increase(reth_transaction_pool_removed_transactions{instance=~\"$instance\"}[$__rate_interval])", + "expr": "reth_downloaders_bodies_response_response_length{instance=~\"$instance\"}", "hide": false, - "legendFormat": "Removed transactions", + "legendFormat": "Individual response length", "range": true, "refId": "B" }, @@ -3260,14 +3112,15 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "increase(reth_transaction_pool_invalid_transactions{instance=~\"$instance\"}[$__rate_interval])", + "expr": "reth_downloaders_bodies_response_response_size_bytes / reth_downloaders_bodies_response_response_length", "hide": false, - "legendFormat": "Invalid transactions", + "instant": false, + "legendFormat": "Mean body size in response", "range": true, "refId": "C" } ], - "title": "Inserted transactions", + "title": "Block body response sizes", "type": "timeseries" }, { @@ -3316,25 +3169,149 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 111 + }, + "id": 94, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_network_pending_pool_imports{instance=~\"$instance\"}", + "hide": false, + "legendFormat": "Transactions pending import", + "range": true, + "refId": "C" + } + ], + "title": 
"Pending pool imports", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Tracks the number of transactions inserted and removed from the transaction pool, as well as the number of invalid transactions", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": true, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "ops" + }, + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "B" + }, + "properties": [ + { + "id": "custom.transform", + "value": "negative-Y" + } + ] + }, + { + "matcher": { + "id": "byFrameRefID", + "options": "C" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" }, { - "color": "red", - "value": 80 + "id": "unit", + "value": "cps" } ] } - }, - "overrides": [] + ] }, "gridPos": { "h": 8, "w": 12, - "x": 12, - "y": 111 + "x": 0, + "y": 119 }, - "id": 94, + "id": 93, "options": { "legend": { "calcs": [], @@ -3343,7 +3320,7 @@ "showLegend": true }, "tooltip": { - "mode": "single", + "mode": "multi", "sort": "none" } }, @@ -3354,14 +3331,37 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_network_pending_pool_imports{instance=~\"$instance\"}", + "expr": "increase(reth_transaction_pool_inserted_transactions{instance=~\"$instance\"}[$__rate_interval])", + "legendFormat": "Inserted transactions", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "increase(reth_transaction_pool_removed_transactions{instance=~\"$instance\"}[$__rate_interval])", "hide": false, - "legendFormat": "Transactions pending import", + "legendFormat": "Removed transactions", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "increase(reth_transaction_pool_invalid_transactions{instance=~\"$instance\"}[$__rate_interval])", + "hide": false, + "legendFormat": "Invalid transactions", "range": true, "refId": "C" } ], - "title": "Pending pool imports", + "title": "Inserted transactions", "type": "timeseries" }, { @@ -3451,7 +3451,7 @@ "gridPos": { "h": 8, "w": 12, - "x": 0, + "x": 12, "y": 119 }, "id": 95, @@ -4187,7 +4187,7 @@ }, "gridPos": { "h": 8, - "w": 11, + "w": 12, "x": 0, "y": 162 }, @@ -4280,8 +4280,8 @@ }, "gridPos": { "h": 8, - "w": 13, - "x": 11, + "w": 12, + "x": 12, "y": 162 }, "id": 62, @@ -4372,8 +4372,8 @@ "overrides": [] }, "gridPos": { - "h": 7, - "w": 11, + "h": 8, + "w": 12, "x": 0, "y": 170 }, @@ -4412,7 +4412,7 @@ "h": 1, "w": 24, "x": 0, - "y": 177 + "y": 178 }, "id": 97, "panels": [], @@ -4481,7 +4481,7 @@ "h": 8, "w": 12, "x": 0, - "y": 178 + "y": 179 }, "id": 98, "options": { @@ -4641,7 +4641,7 @@ "h": 8, "w": 12, "x": 12, - "y": 178 + "y": 179 }, "id": 101, 
"options": { @@ -4736,7 +4736,7 @@ "h": 8, "w": 12, "x": 0, - "y": 186 + "y": 187 }, "id": 99, "options": { @@ -4831,7 +4831,7 @@ "h": 8, "w": 12, "x": 12, - "y": 186 + "y": 187 }, "id": 100, "options": { @@ -4862,6 +4862,209 @@ ], "title": "File Descriptors", "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 195 + }, + "id": 105, + "panels": [], + "title": "Pruning", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 196 + }, + "id": 106, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "rate(reth_pruner_duration_seconds_sum{instance=~\"$instance\"}[$__rate_interval]) / rate(reth_pruner_duration_seconds_count{instance=~\"$instance\"}[$__rate_interval])", + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Pruner duration, total", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 196 + }, + "id": 107, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "rate(reth_pruner_parts_duration_seconds_sum{instance=~\"$instance\"}[$__rate_interval]) / 
rate(reth_pruner_parts_duration_seconds_count{instance=~\"$instance\"}[$__rate_interval])",
+          "instant": false,
+          "legendFormat": "{{part}}",
+          "range": true,
+          "refId": "A"
+        }
+      ],
+      "title": "Pruner duration, per part",
+      "type": "timeseries"
+    }
   ],
   "refresh": "30s",
@@ -4903,6 +5106,6 @@
   "timezone": "",
   "title": "reth",
   "uid": "2k8BXz24x",
-  "version": 3,
+  "version": 4,
   "weekStart": ""
 }
\ No newline at end of file

From 7ea381f15e8d9fbf025b0f8176999f0002dd3f05 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Wed, 9 Aug 2023 16:10:49 +0200
Subject: [PATCH 386/722] fix: hit database first for lookups (#4127)

---
 crates/storage/provider/src/providers/mod.rs | 33 +++++++++++---------
 1 file changed, 18 insertions(+), 15 deletions(-)

diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs
index b1e0c3e009ca..4a930e9c17bf 100644
--- a/crates/storage/provider/src/providers/mod.rs
+++ b/crates/storage/provider/src/providers/mod.rs
@@ -209,12 +209,12 @@ where
     fn find_block_by_hash(&self, hash: H256, source: BlockSource) -> Result> {
         let block = match source {
             BlockSource::Any => {
-                // check pending source first
-                // Note: it's fine to return the unsealed block because the caller already has the
-                // hash
-                let mut block = self.tree.block_by_hash(hash).map(|block| block.unseal());
+                // check database first
+                let mut block = self.database.provider()?.block_by_hash(hash)?;
                 if block.is_none() {
-                    block = self.database.provider()?.block_by_hash(hash)?;
+                    // Note: it's fine to return the unsealed block because the caller already has
+                    // the hash
+                    block = self.tree.block_by_hash(hash).map(|block| block.unseal());
                 }
                 block
             }
@@ -541,14 +541,17 @@ where
     fn state_by_block_hash(&self, block: BlockHash) -> Result> {
         trace!(target: "providers::blockchain", ?block, "Getting state by block hash");
+        let mut state = self.history_by_block_hash(block);
 
-        // check tree first
-        if let Some(pending) = self.tree.find_pending_state_provider(block) {
-            trace!(target: "providers::blockchain", "Returning pending state provider");
-            return self.pending_with_provider(pending)
+        // we failed to get the state by hash from disk; the block may be the pending block
+        if state.is_err() {
+            if let Ok(Some(pending)) = self.pending_state_by_hash(block) {
+                // we found the pending block by hash
+                state = Ok(pending)
+            }
         }
-        // not found in tree, check database
-        self.history_by_block_hash(block)
+
+        state
     }
 
     /// Storage provider for pending state.
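// [Editor's note, illustrative only — not part of the patch.] The subtle point
// in the hunk above is error handling, not just the inverted lookup order:
// `state_by_block_hash` now tries the historical state on disk first, falls back
// to the tree's pending state, and — if neither matches — returns the original
// disk error rather than a new one. As a hedged pseudocode sketch:
//
//     let mut state = history_by_block_hash(block);      // disk first
//     if state.is_err() {
//         if let Ok(Some(pending)) = pending_state_by_hash(block) {
//             state = Ok(pending);                       // tree fallback
//         }
//     }
//     state                                              // Ok, or the disk error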
@@ -658,6 +661,10 @@ where self.tree.find_canonical_ancestor(hash) } + fn is_canonical(&self, hash: BlockHash) -> std::result::Result { + self.tree.is_canonical(hash) + } + fn lowest_buffered_ancestor(&self, hash: BlockHash) -> Option { self.tree.lowest_buffered_ancestor(hash) } @@ -666,10 +673,6 @@ where self.tree.canonical_tip() } - fn is_canonical(&self, hash: BlockHash) -> std::result::Result { - self.tree.is_canonical(hash) - } - fn pending_blocks(&self) -> (BlockNumber, Vec) { self.tree.pending_blocks() } From 6622f53c41c1e67415b4f262ef98df2068475b6b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 9 Aug 2023 16:33:55 +0200 Subject: [PATCH 387/722] fix(trie): account prefixset unwind (#4130) Co-authored-by: Roman Krasiuk --- crates/storage/provider/src/providers/database/provider.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 5c9bedc41083..2f122fbe2378 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1779,9 +1779,8 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> BlockExecutionWriter for DatabaseP // Unwind account hashes. Add changed accounts to account prefix set. let hashed_addresses = self.unwind_account_hashing(range.clone())?; for (hashed_address, account) in hashed_addresses { - if account.is_some() { - account_prefix_set.insert(Nibbles::unpack(hashed_address)); - } else { + account_prefix_set.insert(Nibbles::unpack(hashed_address)); + if account.is_none() { destroyed_accounts.insert(hashed_address); } } From 88aea631285b9c5a8ba32cf3fe924f40bd82cfca Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Wed, 9 Aug 2023 18:21:58 +0300 Subject: [PATCH 388/722] release: 0.1.0-alpha.6 (#4132) --- Cargo.lock | 94 +++++++++++++++++++++++++++--------------------------- Cargo.toml | 2 +- 2 files changed, 48 insertions(+), 48 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8d15a79d1ac3..f0640e9cebff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1131,7 +1131,7 @@ checksum = "67ba02a97a2bd10f4b59b25c7973101c79642302776489e030cd13cdab09ed15" [[package]] name = "codecs-derive" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "convert_case 0.6.0", "parity-scale-codec", @@ -1930,7 +1930,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "reth-db", "reth-interfaces", @@ -5218,7 +5218,7 @@ dependencies = [ [[package]] name = "reth" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "backon", "clap", @@ -5288,7 +5288,7 @@ dependencies = [ [[package]] name = "reth-auto-seal-consensus" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "futures-util", "reth-beacon-consensus", @@ -5305,7 +5305,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "futures-core", "futures-util", @@ -5324,7 +5324,7 @@ dependencies = [ [[package]] name = "reth-beacon-consensus" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "assert_matches", "futures", @@ -5352,7 +5352,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "aquamarine", "assert_matches", @@ -5371,7 +5371,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = 
"0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "arbitrary", "bytes", @@ -5386,7 +5386,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "confy", "reth-discv4", @@ -5403,7 +5403,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "assert_matches", "mockall", @@ -5414,7 +5414,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "arbitrary", "assert_matches", @@ -5455,7 +5455,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "discv5", "enr 0.8.1", @@ -5478,7 +5478,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "async-trait", "data-encoding", @@ -5502,7 +5502,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "assert_matches", "futures", @@ -5527,7 +5527,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "aes 0.8.3", "block-padding", @@ -5558,7 +5558,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "arbitrary", "async-trait", @@ -5591,7 +5591,7 @@ dependencies = [ [[package]] name = "reth-interfaces" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "arbitrary", "async-trait", @@ -5619,7 +5619,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "async-trait", "bytes", @@ -5638,7 +5638,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "bitflags 2.3.3", "byteorder", @@ -5658,7 +5658,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "bindgen 0.65.1", "cc", @@ -5667,7 +5667,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "futures", "metrics 0.20.1", @@ -5677,7 +5677,7 @@ dependencies = [ [[package]] name = "reth-metrics-derive" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "metrics 0.20.1", "once_cell", @@ -5691,7 +5691,7 @@ dependencies = [ [[package]] name = "reth-net-common" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "pin-project", "reth-primitives", @@ -5700,7 +5700,7 @@ dependencies = [ [[package]] name = "reth-net-nat" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "igd", "pin-project-lite", @@ -5714,7 +5714,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "aquamarine", "async-trait", @@ -5764,7 +5764,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "async-trait", "reth-eth-wire", @@ -5777,7 +5777,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "futures-util", "reth-interfaces", @@ -5796,7 +5796,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ 
"arbitrary", "assert_matches", @@ -5846,7 +5846,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "auto_impl", "derive_more", @@ -5867,7 +5867,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "assert_matches", "itertools 0.11.0", @@ -5884,7 +5884,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "once_cell", "reth-consensus-common", @@ -5900,7 +5900,7 @@ dependencies = [ [[package]] name = "reth-revm-inspectors" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "boa_engine", "boa_gc", @@ -5916,7 +5916,7 @@ dependencies = [ [[package]] name = "reth-revm-primitives" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "reth-primitives", "revm", @@ -5924,7 +5924,7 @@ dependencies = [ [[package]] name = "reth-rlp" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "arrayvec", "auto_impl", @@ -5943,7 +5943,7 @@ dependencies = [ [[package]] name = "reth-rlp-derive" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", @@ -5952,7 +5952,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "assert_matches", "async-trait", @@ -6000,7 +6000,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "jsonrpsee", "reth-primitives", @@ -6010,7 +6010,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "async-trait", "futures", @@ -6024,7 +6024,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "hyper", "jsonrpsee", @@ -6055,7 +6055,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "assert_matches", "async-trait", @@ -6077,7 +6077,7 @@ dependencies = [ [[package]] name = "reth-rpc-types" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "itertools 0.11.0", "jsonrpsee-types", @@ -6092,7 +6092,7 @@ dependencies = [ [[package]] name = "reth-rpc-types-compat" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "reth-primitives", "reth-rlp", @@ -6101,7 +6101,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "aquamarine", "assert_matches", @@ -6137,7 +6137,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "dyn-clone", "futures-util", @@ -6150,7 +6150,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "tracing", "tracing-appender", @@ -6160,7 +6160,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "aquamarine", "assert_matches", @@ -6189,7 +6189,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" dependencies = [ "criterion", "derive_more", diff --git a/Cargo.toml b/Cargo.toml index ad0431f2ac0a..244240b97686 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -56,7 +56,7 @@ default-members = 
["bin/reth"] resolver = "2" [workspace.package] -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" edition = "2021" rust-version = "1.70" # Remember to update .clippy.toml and README.md license = "MIT OR Apache-2.0" From a8a2cfa7a324e5beb7915ffb10d9310019e0fe60 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 9 Aug 2023 19:12:48 +0100 Subject: [PATCH 389/722] feat(storage): account for pruned account/storage history (#4092) --- crates/interfaces/src/provider.rs | 2 + .../provider/src/providers/database/mod.rs | 53 +++++-- .../src/providers/state/historical.rs | 150 +++++++++++++++++- 3 files changed, 183 insertions(+), 22 deletions(-) diff --git a/crates/interfaces/src/provider.rs b/crates/interfaces/src/provider.rs index f9ed2a8dc4a1..5359aa6d7218 100644 --- a/crates/interfaces/src/provider.rs +++ b/crates/interfaces/src/provider.rs @@ -94,4 +94,6 @@ pub enum ProviderError { /// Block hash block_hash: BlockHash, }, + #[error("State at block #{0} is pruned")] + StateAtBlockPruned(BlockNumber), } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 5fbb0fceeaed..506cc59ebab3 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -87,7 +87,7 @@ impl ProviderFactory { } /// Storage provider for state at that given block - pub fn history_by_block_number( + fn state_provider_by_block_number( &self, mut block_number: BlockNumber, ) -> Result> { @@ -102,30 +102,49 @@ impl ProviderFactory { // +1 as the changeset that we want is the one that was applied after this block. block_number += 1; + let account_history_prune_checkpoint = + provider.get_prune_checkpoint(PrunePart::AccountHistory)?; + let storage_history_prune_checkpoint = + provider.get_prune_checkpoint(PrunePart::StorageHistory)?; + + let mut state_provider = HistoricalStateProvider::new(provider.into_tx(), block_number); + + // If we pruned account or storage history, we can't return state on every historical block. + // Instead, we should cap it at the latest prune checkpoint for corresponding prune part. + if let Some(prune_checkpoint) = account_history_prune_checkpoint { + state_provider = state_provider.with_lowest_available_account_history_block_number( + prune_checkpoint.block_number + 1, + ); + } + if let Some(prune_checkpoint) = storage_history_prune_checkpoint { + state_provider = state_provider.with_lowest_available_storage_history_block_number( + prune_checkpoint.block_number + 1, + ); + } + + Ok(Box::new(state_provider)) + } + + /// Storage provider for state at that given block + pub fn history_by_block_number( + &self, + block_number: BlockNumber, + ) -> Result> { + let state_provider = self.state_provider_by_block_number(block_number)?; trace!(target: "providers::db", ?block_number, "Returning historical state provider for block number"); - Ok(Box::new(HistoricalStateProvider::new(provider.into_tx(), block_number))) + Ok(state_provider) } /// Storage provider for state at that given block hash pub fn history_by_block_hash(&self, block_hash: BlockHash) -> Result> { - let provider = self.provider()?; - - let mut block_number = provider + let block_number = self + .provider()? .block_number(block_hash)? 
.ok_or(ProviderError::BlockHashNotFound(block_hash))?; - if block_number == provider.best_block_number().unwrap_or_default() && - block_number == provider.last_block_number().unwrap_or_default() - { - return Ok(Box::new(LatestStateProvider::new(provider.into_tx()))) - } - - // +1 as the changeset that we want is the one that was applied after this block. - // as the changeset contains old values. - block_number += 1; - - trace!(target: "providers::db", ?block_hash, "Returning historical state provider for block hash"); - Ok(Box::new(HistoricalStateProvider::new(provider.into_tx(), block_number))) + let state_provider = self.state_provider_by_block_number(block_number)?; + trace!(target: "providers::db", ?block_number, "Returning historical state provider for block hash"); + Ok(state_provider) } } diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 458a4c6421ea..b87227da4384 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -29,10 +29,13 @@ pub struct HistoricalStateProviderRef<'a, 'b, TX: DbTx<'a>> { tx: &'b TX, /// Block number is main index for the history state of accounts and storages. block_number: BlockNumber, + /// Lowest blocks at which different parts of the state are available. + lowest_available_blocks: LowestAvailableBlocks, /// Phantom lifetime `'a` _phantom: PhantomData<&'a TX>, } +#[derive(Debug, Eq, PartialEq)] pub enum HistoryInfo { NotYetWritten, InChangeset(u64), @@ -40,13 +43,32 @@ pub enum HistoryInfo { } impl<'a, 'b, TX: DbTx<'a>> HistoricalStateProviderRef<'a, 'b, TX> { - /// Create new StateProvider from history transaction number + /// Create new StateProvider for historical block number pub fn new(tx: &'b TX, block_number: BlockNumber) -> Self { - Self { tx, block_number, _phantom: PhantomData {} } + Self { + tx, + block_number, + lowest_available_blocks: Default::default(), + _phantom: PhantomData {}, + } + } + + /// Create new StateProvider for historical block number and lowest block numbers at which + /// account & storage histories are available. + pub fn new_with_lowest_available_blocks( + tx: &'b TX, + block_number: BlockNumber, + lowest_available_blocks: LowestAvailableBlocks, + ) -> Self { + Self { tx, block_number, lowest_available_blocks, _phantom: PhantomData {} } } /// Lookup an account in the AccountHistory table pub fn account_history_lookup(&self, address: Address) -> Result { + if !self.lowest_available_blocks.is_account_history_available(self.block_number) { + return Err(ProviderError::StateAtBlockPruned(self.block_number).into()) + } + // history key to search IntegerList of block number changesets. let history_key = ShardedKey::new(address, self.block_number); self.history_info::(history_key, |key| key.key == address) @@ -58,6 +80,10 @@ impl<'a, 'b, TX: DbTx<'a>> HistoricalStateProviderRef<'a, 'b, TX> { address: Address, storage_key: StorageKey, ) -> Result { + if !self.lowest_available_blocks.is_storage_history_available(self.block_number) { + return Err(ProviderError::StateAtBlockPruned(self.block_number).into()) + } + // history key to search IntegerList of block number changesets. let history_key = StorageShardedKey::new(address, storage_key, self.block_number); self.history_info::(history_key, |key| { @@ -199,29 +225,85 @@ pub struct HistoricalStateProvider<'a, TX: DbTx<'a>> { tx: TX, /// State at the block number is the main indexer of the state. 
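    // How the new `lowest_available_blocks` field below gets populated in
    // practice, per the `ProviderFactory` change earlier in this patch: each
    // limit is set to `prune_checkpoint.block_number + 1` for its prune part,
    // i.e. the first block whose changesets are still present in the database.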
block_number: BlockNumber, + /// Lowest blocks at which different parts of the state are available. + lowest_available_blocks: LowestAvailableBlocks, /// Phantom lifetime `'a` _phantom: PhantomData<&'a TX>, } impl<'a, TX: DbTx<'a>> HistoricalStateProvider<'a, TX> { - /// Create new StateProvider from history transaction number + /// Create new StateProvider for historical block number pub fn new(tx: TX, block_number: BlockNumber) -> Self { - Self { tx, block_number, _phantom: PhantomData {} } + Self { + tx, + block_number, + lowest_available_blocks: Default::default(), + _phantom: PhantomData {}, + } + } + + /// Set the lowest block number at which the account history is available. + pub fn with_lowest_available_account_history_block_number( + mut self, + block_number: BlockNumber, + ) -> Self { + self.lowest_available_blocks.account_history_block_number = Some(block_number); + self + } + + /// Set the lowest block number at which the storage history is available. + pub fn with_lowest_available_storage_history_block_number( + mut self, + block_number: BlockNumber, + ) -> Self { + self.lowest_available_blocks.storage_history_block_number = Some(block_number); + self } /// Returns a new provider that takes the `TX` as reference #[inline(always)] fn as_ref<'b>(&'b self) -> HistoricalStateProviderRef<'a, 'b, TX> { - HistoricalStateProviderRef::new(&self.tx, self.block_number) + HistoricalStateProviderRef::new_with_lowest_available_blocks( + &self.tx, + self.block_number, + self.lowest_available_blocks, + ) } } // Delegates all provider impls to [HistoricalStateProviderRef] delegate_provider_impls!(HistoricalStateProvider<'a, TX> where [TX: DbTx<'a>]); +/// Lowest blocks at which different parts of the state are available. +/// They may be [Some] if pruning is enabled. +#[derive(Default, Copy, Clone)] +pub struct LowestAvailableBlocks { + /// Lowest block number at which the account history is available. It may not be available if + /// [reth_primitives::PrunePart::AccountHistory] was pruned. + pub account_history_block_number: Option, + /// Lowest block number at which the storage history is available. It may not be available if + /// [reth_primitives::PrunePart::StorageHistory] was pruned. + pub storage_history_block_number: Option, +} + +impl LowestAvailableBlocks { + /// Check if account history is available at the provided block number, i.e. lowest available + /// block number for account history is less than or equal to the provided block number. + pub fn is_account_history_available(&self, at: BlockNumber) -> bool { + self.account_history_block_number.map(|block_number| block_number <= at).unwrap_or(true) + } + + /// Check if storage history is available at the provided block number, i.e. lowest available + /// block number for storage history is less than or equal to the provided block number. 
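+    /// For example, with `storage_history_block_number == Some(3)` this returns
+    /// `true` at blocks 3, 4, 5, ... and `false` at blocks 0 through 2, whose
+    /// changesets were pruned; `None` always yields `true`.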
+ pub fn is_storage_history_available(&self, at: BlockNumber) -> bool { + self.storage_history_block_number.map(|block_number| block_number <= at).unwrap_or(true) + } +} + #[cfg(test)] mod tests { use crate::{ + providers::state::historical::{HistoryInfo, LowestAvailableBlocks}, AccountReader, HistoricalStateProvider, HistoricalStateProviderRef, StateProvider, }; use reth_db::{ @@ -232,6 +314,7 @@ mod tests { transaction::{DbTx, DbTxMut}, BlockNumberList, }; + use reth_interfaces::provider::ProviderError; use reth_primitives::{hex_literal::hex, Account, StorageEntry, H160, H256, U256}; const ADDRESS: H160 = H160(hex!("0000000000000000000000000000000000000001")); @@ -440,4 +523,61 @@ mod tests { Ok(Some(higher_entry_plain.value)) ); } + + #[test] + fn history_provider_unavailable() { + let db = create_test_rw_db(); + let tx = db.tx().unwrap(); + + // provider block_number < lowest available block number, + // i.e. state at provider block is pruned + let provider = HistoricalStateProviderRef::new_with_lowest_available_blocks( + &tx, + 2, + LowestAvailableBlocks { + account_history_block_number: Some(3), + storage_history_block_number: Some(3), + }, + ); + assert_eq!( + provider.account_history_lookup(ADDRESS), + Err(ProviderError::StateAtBlockPruned(provider.block_number).into()) + ); + assert_eq!( + provider.storage_history_lookup(ADDRESS, STORAGE), + Err(ProviderError::StateAtBlockPruned(provider.block_number).into()) + ); + + // provider block_number == lowest available block number, + // i.e. state at provider block is available + let provider = HistoricalStateProviderRef::new_with_lowest_available_blocks( + &tx, + 2, + LowestAvailableBlocks { + account_history_block_number: Some(2), + storage_history_block_number: Some(2), + }, + ); + assert_eq!(provider.account_history_lookup(ADDRESS), Ok(HistoryInfo::NotYetWritten)); + assert_eq!( + provider.storage_history_lookup(ADDRESS, STORAGE), + Ok(HistoryInfo::NotYetWritten) + ); + + // provider block_number == lowest available block number, + // i.e. state at provider block is available + let provider = HistoricalStateProviderRef::new_with_lowest_available_blocks( + &tx, + 2, + LowestAvailableBlocks { + account_history_block_number: Some(1), + storage_history_block_number: Some(1), + }, + ); + assert_eq!(provider.account_history_lookup(ADDRESS), Ok(HistoryInfo::NotYetWritten)); + assert_eq!( + provider.storage_history_lookup(ADDRESS, STORAGE), + Ok(HistoryInfo::NotYetWritten) + ); + } } From fd7e28e786394068540c35b960814b39c466ce9c Mon Sep 17 00:00:00 2001 From: prames <134806363+0xprames@users.noreply.github.com> Date: Wed, 9 Aug 2023 11:40:38 -0700 Subject: [PATCH 390/722] feat(txpool) modify txpool guard to be for pipeline syncs only (#4075) Co-authored-by: Matthias Seitz --- crates/interfaces/src/sync.rs | 6 + crates/net/network-api/src/lib.rs | 3 + crates/net/network-api/src/noop.rs | 4 + crates/net/network/src/network.rs | 22 ++- crates/net/network/src/transactions.rs | 190 ++++++++++++++++++++++--- 5 files changed, 202 insertions(+), 23 deletions(-) diff --git a/crates/interfaces/src/sync.rs b/crates/interfaces/src/sync.rs index 8becffcda09d..622df29a3ca4 100644 --- a/crates/interfaces/src/sync.rs +++ b/crates/interfaces/src/sync.rs @@ -7,6 +7,9 @@ use reth_primitives::Head; pub trait SyncStateProvider: Send + Sync { /// Returns `true` if the network is undergoing sync. fn is_syncing(&self) -> bool; + + /// Returns `true` if the network is undergoing an initial (pipeline) sync. 
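+    ///
+    /// A hypothetical call sequence (mirroring the `NetworkHandle` impl in this
+    /// patch) shows how this diverges from [`Self::is_syncing`] once the first
+    /// pipeline sync completes:
+    ///
+    /// ```text
+    /// update_sync_state(Syncing) -> is_syncing: true,  is_initially_syncing: true
+    /// update_sync_state(Idle)    -> is_syncing: false, is_initially_syncing: false
+    /// update_sync_state(Syncing) -> is_syncing: true,  is_initially_syncing: false
+    /// ```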
+ fn is_initially_syncing(&self) -> bool; } /// An updater for updating the [SyncState] and status of the network. @@ -54,6 +57,9 @@ impl SyncStateProvider for NoopSyncStateUpdater { fn is_syncing(&self) -> bool { false } + fn is_initially_syncing(&self) -> bool { + false + } } impl NetworkSyncUpdater for NoopSyncStateUpdater { diff --git a/crates/net/network-api/src/lib.rs b/crates/net/network-api/src/lib.rs index f880fe745e66..f15d3fec4aa6 100644 --- a/crates/net/network-api/src/lib.rs +++ b/crates/net/network-api/src/lib.rs @@ -49,6 +49,9 @@ pub trait NetworkInfo: Send + Sync { /// Returns `true` if the network is undergoing sync. fn is_syncing(&self) -> bool; + + /// Returns `true` when the node is undergoing the very first Pipeline sync. + fn is_initially_syncing(&self) -> bool; } /// Provides general purpose information about Peers in the network. diff --git a/crates/net/network-api/src/noop.rs b/crates/net/network-api/src/noop.rs index 9a5309993f77..dc1ef17a93ca 100644 --- a/crates/net/network-api/src/noop.rs +++ b/crates/net/network-api/src/noop.rs @@ -45,6 +45,10 @@ impl NetworkInfo for NoopNetwork { fn is_syncing(&self) -> bool { false } + + fn is_initially_syncing(&self) -> bool { + false + } } impl PeersInfo for NoopNetwork { diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 9a3c8926caf5..1175eea863e8 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -55,6 +55,7 @@ impl NetworkHandle { network_mode, bandwidth_meter, is_syncing: Arc::new(AtomicBool::new(false)), + initial_sync_done: Arc::new(AtomicBool::new(false)), chain_id, }; Self { inner: Arc::new(inner) } @@ -247,18 +248,33 @@ impl NetworkInfo for NetworkHandle { fn is_syncing(&self) -> bool { SyncStateProvider::is_syncing(self) } + + fn is_initially_syncing(&self) -> bool { + SyncStateProvider::is_initially_syncing(self) + } } impl SyncStateProvider for NetworkHandle { fn is_syncing(&self) -> bool { self.inner.is_syncing.load(Ordering::Relaxed) } + // used to guard the txpool + fn is_initially_syncing(&self) -> bool { + if self.inner.initial_sync_done.load(Ordering::Relaxed) { + return false + } + self.inner.is_syncing.load(Ordering::Relaxed) + } } impl NetworkSyncUpdater for NetworkHandle { fn update_sync_state(&self, state: SyncState) { - let is_syncing = state.is_syncing(); - self.inner.is_syncing.store(is_syncing, Ordering::Relaxed) + let future_state = state.is_syncing(); + let prev_state = self.inner.is_syncing.swap(future_state, Ordering::Relaxed); + let syncing_to_idle_state_transition = prev_state && !future_state; + if syncing_to_idle_state_transition { + self.inner.initial_sync_done.store(true, Ordering::Relaxed); + } } /// Update the status of the node. @@ -285,6 +301,8 @@ struct NetworkInner { bandwidth_meter: BandwidthMeter, /// Represents if the network is currently syncing. 
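    // Note the one-way latch design: `initial_sync_done` (added below) only ever
    // flips to `true`, on the first Syncing -> Idle transition observed in
    // `update_sync_state`, and is never reset, so `is_initially_syncing` reports
    // `false` for every subsequent live sync.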
is_syncing: Arc, + /// Used to differentiate between an initial pipeline sync or a live sync + initial_sync_done: Arc, /// The chain id chain_id: Arc, } diff --git a/crates/net/network/src/transactions.rs b/crates/net/network/src/transactions.rs index 9f9dfd0682ff..7345893d1e74 100644 --- a/crates/net/network/src/transactions.rs +++ b/crates/net/network/src/transactions.rs @@ -7,7 +7,7 @@ use crate::{ metrics::{TransactionsManagerMetrics, NETWORK_POOL_TRANSACTIONS_SCOPE}, NetworkHandle, }; -use futures::{stream::FuturesUnordered, FutureExt, StreamExt}; +use futures::{stream::FuturesUnordered, Future, FutureExt, StreamExt}; use reth_eth_wire::{ EthVersion, GetPooledTransactions, NewPooledTransactionHashes, NewPooledTransactionHashes66, NewPooledTransactionHashes68, PooledTransactions, Transactions, @@ -29,7 +29,6 @@ use reth_transaction_pool::{ }; use std::{ collections::{hash_map::Entry, HashMap}, - future::Future, num::NonZeroUsize, pin::Pin, sync::Arc, @@ -206,8 +205,8 @@ where /// transactions to a fraction of peers usually ensures that all nodes receive the transaction /// and won't need to request it. fn on_new_transactions(&mut self, hashes: impl IntoIterator) { - // Nothing to propagate while syncing - if self.network.is_syncing() { + // Nothing to propagate while initially syncing + if self.network.is_initially_syncing() { return } @@ -310,8 +309,8 @@ where peer_id: PeerId, msg: NewPooledTransactionHashes, ) { - // If the node is currently syncing, ignore transactions - if self.network.is_syncing() { + // If the node is initially syncing, ignore transactions + if self.network.is_initially_syncing() { return } @@ -405,7 +404,7 @@ where // Send a `NewPooledTransactionHashes` to the peer with up to // `NEW_POOLED_TRANSACTION_HASHES_SOFT_LIMIT` transactions in the // pool - if !self.network.is_syncing() { + if !self.network.is_initially_syncing() { let peer = self.peers.get_mut(&peer_id).expect("is present; qed"); let mut msg_builder = PooledTransactionsHashesBuilder::new(version); @@ -437,8 +436,8 @@ where transactions: Vec, source: TransactionSource, ) { - // If the node is currently syncing, ignore transactions - if self.network.is_syncing() { + // If the node is pipeline syncing, ignore transactions + if self.network.is_initially_syncing() { return } @@ -595,9 +594,11 @@ where this.on_good_import(hash); } Err(err) => { - // if we're syncing and the transaction is bad we ignore it, otherwise we - // penalize the peer that sent the bad transaction with the assumption that the - // peer should have known that this transaction is bad. (e.g. consensus rules) + // if we're _currently_ syncing and the transaction is bad we ignore it, + // otherwise we penalize the peer that sent the bad + // transaction with the assumption that the peer should have + // known that this transaction is bad. (e.g. 
consensus + // rules) if err.is_bad_transaction() && !this.network.is_syncing() { trace!(target: "net::tx", ?err, "Bad transaction import"); this.on_bad_import(*err.hash()); @@ -794,12 +795,23 @@ mod tests { use reth_rlp::Decodable; use reth_transaction_pool::test_utils::{testing_pool, MockTransaction}; use secp256k1::SecretKey; + use std::future::poll_fn; #[tokio::test(flavor = "multi_thread")] #[cfg_attr(not(feature = "geth-tests"), ignore)] - async fn test_ignored_tx_broadcasts_while_syncing() { + async fn test_ignored_tx_broadcasts_while_initially_syncing() { reth_tracing::init_test_tracing(); + let net = Testnet::create(3).await; + + let mut handles = net.handles(); + let handle0 = handles.next().unwrap(); + let handle1 = handles.next().unwrap(); + + drop(handles); + let handle = net.spawn(); + let listener0 = handle0.event_listener(); + handle0.add_peer(*handle1.peer_id(), handle1.local_addr()); let secret_key = SecretKey::new(&mut rand::thread_rng()); let client = NoopProvider::default(); @@ -808,7 +820,7 @@ mod tests { .disable_discovery() .listener_port(0) .build(client); - let (handle, network, mut transactions, _) = NetworkManager::new(config) + let (network_handle, network, mut transactions, _) = NetworkManager::new(config) .await .unwrap() .into_builder() @@ -817,17 +829,143 @@ mod tests { tokio::task::spawn(network); - handle.update_sync_state(SyncState::Syncing); - assert!(NetworkInfo::is_syncing(&handle)); - - let peer_id = PeerId::random(); + // go to syncing (pipeline sync) + network_handle.update_sync_state(SyncState::Syncing); + assert!(NetworkInfo::is_syncing(&network_handle)); + assert!(NetworkInfo::is_initially_syncing(&network_handle)); + // wait for all initiator connections + let mut established = listener0.take(2); + while let Some(ev) = established.next().await { + match ev { + NetworkEvent::SessionEstablished { + peer_id, + remote_addr, + client_version, + capabilities, + messages, + status, + version, + } => { + // to insert a new peer in transactions peerset + transactions.on_network_event(NetworkEvent::SessionEstablished { + peer_id, + remote_addr, + client_version, + capabilities, + messages, + status, + version, + }) + } + NetworkEvent::PeerAdded(_peer_id) => continue, + ev => { + panic!("unexpected event {ev:?}") + } + } + } + // random tx: + let input = hex::decode("02f871018302a90f808504890aef60826b6c94ddf4c5025d1a5742cf12f74eec246d4432c295e487e09c3bbcc12b2b80c080a0f21a4eacd0bf8fea9c5105c543be5a1d8c796516875710fafafdf16d16d8ee23a001280915021bb446d1973501a67f93d2b38894a514b976e7b46dc2fe54598d76").unwrap(); + let signed_tx = TransactionSigned::decode(&mut &input[..]).unwrap(); transactions.on_network_tx_event(NetworkTransactionEvent::IncomingTransactions { - peer_id, - msg: Transactions(vec![TransactionSigned::default()]), + peer_id: *handle1.peer_id(), + msg: Transactions(vec![signed_tx.clone()]), }); - + poll_fn(|cx| { + let _ = transactions.poll_unpin(cx); + Poll::Ready(()) + }) + .await; assert!(pool.is_empty()); + handle.terminate().await; + } + + #[tokio::test(flavor = "multi_thread")] + #[cfg_attr(not(feature = "geth-tests"), ignore)] + async fn test_tx_broadcasts_through_two_syncs() { + reth_tracing::init_test_tracing(); + let net = Testnet::create(3).await; + + let mut handles = net.handles(); + let handle0 = handles.next().unwrap(); + let handle1 = handles.next().unwrap(); + + drop(handles); + let handle = net.spawn(); + + let listener0 = handle0.event_listener(); + handle0.add_peer(*handle1.peer_id(), handle1.local_addr()); + let secret_key = 
SecretKey::new(&mut rand::thread_rng()); + + let client = NoopProvider::default(); + let pool = testing_pool(); + let config = NetworkConfigBuilder::new(secret_key) + .disable_discovery() + .listener_port(0) + .build(client); + let (network_handle, network, mut transactions, _) = NetworkManager::new(config) + .await + .unwrap() + .into_builder() + .transactions(pool.clone()) + .split_with_handle(); + + tokio::task::spawn(network); + + // go to syncing (pipeline sync) to idle and then to syncing (live) + network_handle.update_sync_state(SyncState::Syncing); + assert!(NetworkInfo::is_syncing(&network_handle)); + network_handle.update_sync_state(SyncState::Idle); + assert!(!NetworkInfo::is_syncing(&network_handle)); + network_handle.update_sync_state(SyncState::Syncing); + assert!(NetworkInfo::is_syncing(&network_handle)); + + // wait for all initiator connections + let mut established = listener0.take(2); + while let Some(ev) = established.next().await { + match ev { + NetworkEvent::SessionEstablished { + peer_id, + remote_addr, + client_version, + capabilities, + messages, + status, + version, + } => { + // to insert a new peer in transactions peerset + transactions.on_network_event(NetworkEvent::SessionEstablished { + peer_id, + remote_addr, + client_version, + capabilities, + messages, + status, + version, + }) + } + NetworkEvent::PeerAdded(_peer_id) => continue, + ev => { + panic!("unexpected event {ev:?}") + } + } + } + // random tx: + let input = hex::decode("02f871018302a90f808504890aef60826b6c94ddf4c5025d1a5742cf12f74eec246d4432c295e487e09c3bbcc12b2b80c080a0f21a4eacd0bf8fea9c5105c543be5a1d8c796516875710fafafdf16d16d8ee23a001280915021bb446d1973501a67f93d2b38894a514b976e7b46dc2fe54598d76").unwrap(); + let signed_tx = TransactionSigned::decode(&mut &input[..]).unwrap(); + transactions.on_network_tx_event(NetworkTransactionEvent::IncomingTransactions { + peer_id: *handle1.peer_id(), + msg: Transactions(vec![signed_tx.clone()]), + }); + poll_fn(|cx| { + let _ = transactions.poll_unpin(cx); + Poll::Ready(()) + }) + .await; + assert!(!NetworkInfo::is_initially_syncing(&network_handle)); + assert!(NetworkInfo::is_syncing(&network_handle)); + assert!(!pool.is_empty()); + handle.terminate().await; } #[tokio::test(flavor = "multi_thread")] @@ -906,6 +1044,16 @@ mod tests { *handle1.peer_id(), transactions.transactions_by_peers.get(&signed_tx.hash()).unwrap()[0] ); + + // advance the transaction manager future + poll_fn(|cx| { + let _ = transactions.poll_unpin(cx); + Poll::Ready(()) + }) + .await; + + assert!(!pool.is_empty()); + assert!(pool.get(&signed_tx.hash).is_some()); handle.terminate().await; } From e925cbcf3a238c4c67b46e800449a30a7dda8453 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 9 Aug 2023 20:50:27 +0100 Subject: [PATCH 391/722] feat(pruner, metrics): `skip` attribute for metrics derive macro (#4069) --- crates/metrics/metrics-derive/src/expand.rs | 196 ++++++++++++------ .../metrics/metrics-derive/tests/metrics.rs | 20 ++ crates/prune/src/metrics.rs | 14 +- crates/prune/src/pruner.rs | 2 +- 4 files changed, 156 insertions(+), 76 deletions(-) diff --git a/crates/metrics/metrics-derive/src/expand.rs b/crates/metrics/metrics-derive/src/expand.rs index bd2f9fefd991..cc14fb1b6d57 100644 --- a/crates/metrics/metrics-derive/src/expand.rs +++ b/crates/metrics/metrics-derive/src/expand.rs @@ -2,8 +2,8 @@ use once_cell::sync::Lazy; use quote::{quote, ToTokens}; use regex::Regex; use syn::{ - punctuated::Punctuated, Attribute, Data, DeriveInput, Error, Expr, Lit, LitBool, 
LitStr, - MetaNameValue, Result, Token, + punctuated::Punctuated, Attribute, Data, DeriveInput, Error, Expr, Field, Lit, LitBool, LitStr, + Meta, MetaNameValue, Result, Token, }; use crate::{metric::Metric, with_attrs::WithAttrs}; @@ -17,6 +17,19 @@ static METRIC_NAME_RE: Lazy = /// Supported metrics separators const SUPPORTED_SEPARATORS: &[&str] = &[".", "_", ":"]; +enum MetricField<'a> { + Included(Metric<'a>), + Skipped(&'a Field), +} + +impl<'a> MetricField<'a> { + fn field(&self) -> &'a Field { + match self { + MetricField::Included(Metric { field, .. }) | MetricField::Skipped(field) => field, + } + } +} + pub(crate) fn derive(node: &DeriveInput) -> Result { let ty = &node.ident; let vis = &node.vis; @@ -36,30 +49,49 @@ pub(crate) fn derive(node: &DeriveInput) -> Result { let (defaults, labeled_defaults, describes): (Vec<_>, Vec<_>, Vec<_>) = metric_fields .iter() .map(|metric| { - let field_name = &metric.field.ident; - let metric_name = - format!("{}{}{}", scope.value(), metrics_attr.separator(), metric.name()); - let registrar = metric.register_stmt()?; - let describe = metric.describe_stmt()?; - let description = &metric.description; - Ok(( - quote! { - #field_name: #registrar(#metric_name), - }, - quote! { - #field_name: #registrar(#metric_name, labels.clone()), - }, - quote! { - #describe(#metric_name, #description); - }, - )) + let field_name = &metric.field().ident; + match metric { + MetricField::Included(metric) => { + let metric_name = format!( + "{}{}{}", + scope.value(), + metrics_attr.separator(), + metric.name() + ); + let registrar = metric.register_stmt()?; + let describe = metric.describe_stmt()?; + let description = &metric.description; + Ok(( + quote! { + #field_name: #registrar(#metric_name), + }, + quote! { + #field_name: #registrar(#metric_name, labels.clone()), + }, + Some(quote! { + #describe(#metric_name, #description); + }), + )) + } + MetricField::Skipped(_) => Ok(( + quote! { + #field_name: Default::default(), + }, + quote! { + #field_name: Default::default(), + }, + None, + )), + } }) .collect::>>()? .into_iter() .fold((vec![], vec![], vec![]), |mut acc, x| { acc.0.push(x.0); acc.1.push(x.1); - acc.2.push(x.2); + if let Some(describe) = x.2 { + acc.2.push(describe); + } acc }); @@ -93,35 +125,50 @@ pub(crate) fn derive(node: &DeriveInput) -> Result { let (defaults, labeled_defaults, describes): (Vec<_>, Vec<_>, Vec<_>) = metric_fields .iter() .map(|metric| { - let name = metric.name(); - let separator = metrics_attr.separator(); - let metric_name = quote! { - format!("{}{}{}", scope, #separator, #name) - }; - let field_name = &metric.field.ident; - - let registrar = metric.register_stmt()?; - let describe = metric.describe_stmt()?; - let description = &metric.description; - - Ok(( - quote! { - #field_name: #registrar(#metric_name), - }, - quote! { - #field_name: #registrar(#metric_name, labels.clone()), - }, - quote! { - #describe(#metric_name, #description); - }, - )) + let field_name = &metric.field().ident; + match metric { + MetricField::Included(metric) => { + let name = metric.name(); + let separator = metrics_attr.separator(); + let metric_name = quote! { + format!("{}{}{}", scope, #separator, #name) + }; + + let registrar = metric.register_stmt()?; + let describe = metric.describe_stmt()?; + let description = &metric.description; + + Ok(( + quote! { + #field_name: #registrar(#metric_name), + }, + quote! { + #field_name: #registrar(#metric_name, labels.clone()), + }, + Some(quote! 
{ + #describe(#metric_name, #description); + }), + )) + } + MetricField::Skipped(_) => Ok(( + quote! { + #field_name: Default::default(), + }, + quote! { + #field_name: Default::default(), + }, + None, + )), + } }) .collect::>>()? .into_iter() .fold((vec![], vec![], vec![]), |mut acc, x| { acc.0.push(x.0); acc.1.push(x.1); - acc.2.push(x.2); + if let Some(describe) = x.2 { + acc.2.push(describe); + } acc }); @@ -246,40 +293,57 @@ fn parse_metrics_attr(node: &DeriveInput) -> Result { Ok(MetricsAttr { scope, separator }) } -fn parse_metric_fields(node: &DeriveInput) -> Result>> { +fn parse_metric_fields(node: &DeriveInput) -> Result>> { let Data::Struct(ref data) = node.data else { return Err(Error::new_spanned(node, "Only structs are supported.")) }; let mut metrics = Vec::with_capacity(data.fields.len()); for field in data.fields.iter() { - let (mut describe, mut rename) = (None, None); + let (mut describe, mut rename, mut skip) = (None, None, false); if let Some(metric_attr) = parse_single_attr(field, "metric")? { - let parsed = metric_attr - .parse_args_with(Punctuated::::parse_terminated)?; - for kv in parsed { - let lit = match kv.value { - Expr::Lit(ref expr) => &expr.lit, - _ => continue, - }; - if kv.path.is_ident("describe") { - if describe.is_some() { - return Err(Error::new_spanned(kv, "Duplicate `describe` value provided.")) - } - describe = Some(parse_str_lit(lit)?); - } else if kv.path.is_ident("rename") { - if rename.is_some() { - return Err(Error::new_spanned(kv, "Duplicate `rename` value provided.")) + let parsed = + metric_attr.parse_args_with(Punctuated::::parse_terminated)?; + for meta in parsed { + match meta { + Meta::Path(path) if path.is_ident("skip") => skip = true, + Meta::NameValue(kv) => { + let lit = match kv.value { + Expr::Lit(ref expr) => &expr.lit, + _ => continue, + }; + if kv.path.is_ident("describe") { + if describe.is_some() { + return Err(Error::new_spanned( + kv, + "Duplicate `describe` value provided.", + )) + } + describe = Some(parse_str_lit(lit)?); + } else if kv.path.is_ident("rename") { + if rename.is_some() { + return Err(Error::new_spanned( + kv, + "Duplicate `rename` value provided.", + )) + } + let rename_lit = parse_str_lit(lit)?; + validate_metric_name(&rename_lit)?; + rename = Some(rename_lit) + } else { + return Err(Error::new_spanned(kv, "Unsupported attribute entry.")) + } } - let rename_lit = parse_str_lit(lit)?; - validate_metric_name(&rename_lit)?; - rename = Some(rename_lit) - } else { - return Err(Error::new_spanned(kv, "Unsupported attribute entry.")) + _ => return Err(Error::new_spanned(meta, "Unsupported attribute entry.")), } } } + if skip { + metrics.push(MetricField::Skipped(field)); + continue + } + let description = match describe { Some(lit_str) => lit_str.value(), // Parse docs only if `describe` attribute was not provided @@ -294,7 +358,7 @@ fn parse_metric_fields(node: &DeriveInput) -> Result>> { }, }; - metrics.push(Metric::new(field, description, rename)); + metrics.push(MetricField::Included(Metric::new(field, description, rename))); } Ok(metrics) diff --git a/crates/metrics/metrics-derive/tests/metrics.rs b/crates/metrics/metrics-derive/tests/metrics.rs index cca5ef83ca56..620dc30200ea 100644 --- a/crates/metrics/metrics-derive/tests/metrics.rs +++ b/crates/metrics/metrics-derive/tests/metrics.rs @@ -10,32 +10,52 @@ use std::{collections::HashMap, sync::Mutex}; #[derive(Metrics)] #[metrics(scope = "metrics_custom")] struct CustomMetrics { + #[metric(skip)] + skipped_field_a: u8, /// A gauge with doc comment 
description. gauge: Gauge, #[metric(rename = "second_gauge", describe = "A gauge with metric attribute description.")] gauge2: Gauge, + #[metric(skip)] + skipped_field_b: u16, /// Some doc comment #[metric(describe = "Metric attribute description will be preferred over doc comment.")] counter: Counter, + #[metric(skip)] + skipped_field_c: u32, + #[metric(skip)] + skipped_field_d: u64, /// A renamed histogram. #[metric(rename = "histogram")] histo: Histogram, + #[metric(skip)] + skipped_field_e: u128, } #[allow(dead_code)] #[derive(Metrics)] #[metrics(dynamic = true)] struct DynamicScopeMetrics { + #[metric(skip)] + skipped_field_a: u8, /// A gauge with doc comment description. gauge: Gauge, #[metric(rename = "second_gauge", describe = "A gauge with metric attribute description.")] gauge2: Gauge, + #[metric(skip)] + skipped_field_b: u16, /// Some doc comment #[metric(describe = "Metric attribute description will be preferred over doc comment.")] counter: Counter, + #[metric(skip)] + skipped_field_c: u32, + #[metric(skip)] + skipped_field_d: u64, /// A renamed histogram. #[metric(rename = "histogram")] histo: Histogram, + #[metric(skip)] + skipped_field_e: u128, } static RECORDER: Lazy = Lazy::new(TestRecorder::new); diff --git a/crates/prune/src/metrics.rs b/crates/prune/src/metrics.rs index 8c3e768f1435..4328bd36d7d0 100644 --- a/crates/prune/src/metrics.rs +++ b/crates/prune/src/metrics.rs @@ -2,9 +2,12 @@ use reth_metrics::{metrics, metrics::Histogram, Metrics}; use reth_primitives::PrunePart; use std::collections::HashMap; -#[derive(Debug, Default)] +#[derive(Metrics)] +#[metrics(scope = "pruner")] pub(crate) struct Metrics { - pub(crate) pruner: PrunerMetrics, + /// Pruning duration + pub(crate) duration_seconds: Histogram, + #[metric(skip)] prune_parts: HashMap, } @@ -21,13 +24,6 @@ impl Metrics { } } -#[derive(Metrics)] -#[metrics(scope = "pruner")] -pub(crate) struct PrunerMetrics { - /// Pruning duration - pub(crate) duration_seconds: Histogram, -} - #[derive(Metrics)] #[metrics(scope = "pruner.parts")] pub(crate) struct PrunerPartMetrics { diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs index 913aede6780f..ef5ecd39d162 100644 --- a/crates/prune/src/pruner.rs +++ b/crates/prune/src/pruner.rs @@ -150,7 +150,7 @@ impl Pruner { self.last_pruned_block_number = Some(tip_block_number); let elapsed = start.elapsed(); - self.metrics.pruner.duration_seconds.record(elapsed); + self.metrics.duration_seconds.record(elapsed); trace!( target: "pruner", From 7426a01a93485e44f3d5583de94c6b08c65fc5d8 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 9 Aug 2023 22:04:59 +0100 Subject: [PATCH 392/722] fix(storage): fallback lookups for pruned history (#4121) --- crates/prune/src/pruner.rs | 13 ++- .../src/providers/state/historical.rs | 79 +++++++++++++------ 2 files changed, 66 insertions(+), 26 deletions(-) diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs index ef5ecd39d162..dda0304d332b 100644 --- a/crates/prune/src/pruner.rs +++ b/crates/prune/src/pruner.rs @@ -490,6 +490,7 @@ impl Pruner { { let mut processed = 0; let mut cursor = provider.tx_ref().cursor_write::()?; + // Prune history table: // 1. If the shard has `highest_block_number` less than or equal to the target block number // for pruning, delete the shard completely. @@ -525,20 +526,24 @@ impl Pruner { // If there are no more blocks in this shard, we need to remove it, as empty // shards are not allowed. 
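        // (Shard layout recap, visible in the key checks below: each address
        // keeps its changed-block list across shards keyed by the highest block
        // number they contain, with the final shard keyed by `u64::MAX` so a
        // seek always lands on it.)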
if key.as_ref().highest_block_number == u64::MAX { - // If current shard is the last shard for this sharded key, replace it - // with the previous shard. if let Some(prev_value) = cursor .prev()? .filter(|(prev_key, _)| key_matches(prev_key, &key)) .map(|(_, prev_value)| prev_value) { + // If current shard is the last shard for the sharded key that has + // previous shards, replace it with the previous shard. cursor.delete_current()?; // Upsert will replace the last shard for this sharded key with the - // previous value + // previous value. cursor.upsert(key.clone(), prev_value)?; } else { // If there's no previous shard for this sharded key, // just delete last shard completely. + + // Jump back to the original last shard. + cursor.next()?; + // Delete shard. cursor.delete_current()?; } } else { @@ -551,7 +556,7 @@ impl Pruner { } } - // Jump to the next address + // Jump to the next address. cursor.seek_exact(last_key(&key))?; } diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index b87227da4384..01c9ea8b61fb 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -16,7 +16,10 @@ use reth_primitives::{ }; use std::marker::PhantomData; -/// State provider for a given transition id which takes a tx reference. +/// State provider for a given block number which takes a tx reference. +/// +/// Historical state provider accesses the state at the start of the provided block number. +/// It means that all changes made in the provided block number are not included. /// /// Historical state provider reads the following tables: /// - [tables::AccountHistory] @@ -40,6 +43,7 @@ pub enum HistoryInfo { NotYetWritten, InChangeset(u64), InPlainState, + MaybeInPlainState, } impl<'a, 'b, TX: DbTx<'a>> HistoricalStateProviderRef<'a, 'b, TX> { @@ -71,7 +75,11 @@ impl<'a, 'b, TX: DbTx<'a>> HistoricalStateProviderRef<'a, 'b, TX> { // history key to search IntegerList of block number changesets. let history_key = ShardedKey::new(address, self.block_number); - self.history_info::(history_key, |key| key.key == address) + self.history_info::( + history_key, + |key| key.key == address, + self.lowest_available_blocks.account_history_block_number, + ) } /// Lookup a storage key in the StorageHistory table @@ -86,12 +94,19 @@ impl<'a, 'b, TX: DbTx<'a>> HistoricalStateProviderRef<'a, 'b, TX> { // history key to search IntegerList of block number changesets. let history_key = StorageShardedKey::new(address, storage_key, self.block_number); - self.history_info::(history_key, |key| { - key.address == address && key.sharded_key.key == storage_key - }) + self.history_info::( + history_key, + |key| key.address == address && key.sharded_key.key == storage_key, + self.lowest_available_blocks.storage_history_block_number, + ) } - fn history_info(&self, key: K, key_filter: impl Fn(&K) -> bool) -> Result + fn history_info( + &self, + key: K, + key_filter: impl Fn(&K) -> bool, + lowest_available_block_number: Option, + ) -> Result where T: Table, { @@ -106,17 +121,25 @@ impl<'a, 'b, TX: DbTx<'a>> HistoricalStateProviderRef<'a, 'b, TX> { // Get the rank of the first entry after our block. let rank = chunk.rank(self.block_number as usize); - // If our block is before the first entry in the index chunk, it might be before - // the first write ever. To check, we look at the previous entry and check if the - // key is the same. 
+ // If our block is before the first entry in the index chunk and this first entry + // doesn't equal to our block, it might be before the first write ever. To check, we + // look at the previous entry and check if the key is the same. // This check is worth it, the `cursor.prev()` check is rarely triggered (the if will // short-circuit) and when it passes we save a full seek into the changeset/plain state // table. - if rank == 0 && !cursor.prev()?.is_some_and(|(key, _)| key_filter(&key)) { - // The key is written to, but only after our block. - return Ok(HistoryInfo::NotYetWritten) - } - if rank < chunk.len() { + if rank == 0 && + chunk.select(rank) as u64 != self.block_number && + !cursor.prev()?.is_some_and(|(key, _)| key_filter(&key)) + { + if lowest_available_block_number.is_some() { + // The key may have been written, but due to pruning we may not have changesets + // and history, so we need to make a changeset lookup. + Ok(HistoryInfo::InChangeset(chunk.select(rank) as u64)) + } else { + // The key is written to, but only after our block. + Ok(HistoryInfo::NotYetWritten) + } + } else if rank < chunk.len() { // The chunk contains an entry for a write after our block, return it. Ok(HistoryInfo::InChangeset(chunk.select(rank) as u64)) } else { @@ -124,6 +147,10 @@ impl<'a, 'b, TX: DbTx<'a>> HistoricalStateProviderRef<'a, 'b, TX> { // happen if this is the last chunk and so we need to look in the plain state. Ok(HistoryInfo::InPlainState) } + } else if lowest_available_block_number.is_some() { + // The key may have been written, but due to pruning we may not have changesets and + // history, so we need to make a plain state lookup. + Ok(HistoryInfo::MaybeInPlainState) } else { // The key has not been written to at all. Ok(HistoryInfo::NotYetWritten) @@ -146,7 +173,9 @@ impl<'a, 'b, TX: DbTx<'a>> AccountReader for HistoricalStateProviderRef<'a, 'b, address, })? .info), - HistoryInfo::InPlainState => Ok(self.tx.get::(address)?), + HistoryInfo::InPlainState | HistoryInfo::MaybeInPlainState => { + Ok(self.tx.get::(address)?) + } } } } @@ -194,7 +223,7 @@ impl<'a, 'b, TX: DbTx<'a>> StateProvider for HistoricalStateProviderRef<'a, 'b, })? .value, )), - HistoryInfo::InPlainState => Ok(self + HistoryInfo::InPlainState | HistoryInfo::MaybeInPlainState => Ok(self .tx .cursor_dup_read::()? .seek_by_key_subkey(address, storage_key)? @@ -219,7 +248,8 @@ impl<'a, 'b, TX: DbTx<'a>> StateProvider for HistoricalStateProviderRef<'a, 'b, } } -/// State provider for a given transition +/// State provider for a given block number. +/// For more detailed description, see [HistoricalStateProviderRef]. pub struct HistoricalStateProvider<'a, TX: DbTx<'a>> { /// Database transaction tx: TX, @@ -280,9 +310,11 @@ delegate_provider_impls!(HistoricalStateProvider<'a, TX> where [TX: DbTx<'a>]); pub struct LowestAvailableBlocks { /// Lowest block number at which the account history is available. It may not be available if /// [reth_primitives::PrunePart::AccountHistory] was pruned. + /// [Option::None] means all history is available. pub account_history_block_number: Option, /// Lowest block number at which the storage history is available. It may not be available if /// [reth_primitives::PrunePart::StorageHistory] was pruned. + /// [Option::None] means all history is available. 
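    // e.g. per the `history_provider_unavailable` test below: a lookup at block 2
    // returns `Err(StateAtBlockPruned(2))` when the lowest available block is 3,
    // but `Ok(HistoryInfo::MaybeInPlainState)` when it is 2 and the pruned index
    // no longer holds an entry for the key.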
pub storage_history_block_number: Option, } @@ -489,7 +521,10 @@ mod tests { // run assert_eq!(HistoricalStateProviderRef::new(&tx, 0).storage(ADDRESS, STORAGE), Ok(None)); - assert_eq!(HistoricalStateProviderRef::new(&tx, 3).storage(ADDRESS, STORAGE), Ok(None)); + assert_eq!( + HistoricalStateProviderRef::new(&tx, 3).storage(ADDRESS, STORAGE), + Ok(Some(U256::ZERO)) + ); assert_eq!( HistoricalStateProviderRef::new(&tx, 4).storage(ADDRESS, STORAGE), Ok(Some(entry_at7.value)) @@ -558,10 +593,10 @@ mod tests { storage_history_block_number: Some(2), }, ); - assert_eq!(provider.account_history_lookup(ADDRESS), Ok(HistoryInfo::NotYetWritten)); + assert_eq!(provider.account_history_lookup(ADDRESS), Ok(HistoryInfo::MaybeInPlainState)); assert_eq!( provider.storage_history_lookup(ADDRESS, STORAGE), - Ok(HistoryInfo::NotYetWritten) + Ok(HistoryInfo::MaybeInPlainState) ); // provider block_number == lowest available block number, @@ -574,10 +609,10 @@ mod tests { storage_history_block_number: Some(1), }, ); - assert_eq!(provider.account_history_lookup(ADDRESS), Ok(HistoryInfo::NotYetWritten)); + assert_eq!(provider.account_history_lookup(ADDRESS), Ok(HistoryInfo::MaybeInPlainState)); assert_eq!( provider.storage_history_lookup(ADDRESS, STORAGE), - Ok(HistoryInfo::NotYetWritten) + Ok(HistoryInfo::MaybeInPlainState) ); } } From 24def9547511d4b0d6cbe846ecd5e51da924eec9 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 9 Aug 2023 17:16:13 -0400 Subject: [PATCH 393/722] chore: remove cargo deny rule for c-kzg (#4133) --- Cargo.lock | 2 +- deny.toml | 7 ------- 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f0640e9cebff..cf825eaba2ec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -933,7 +933,7 @@ dependencies = [ [[package]] name = "c-kzg" version = "0.1.0" -source = "git+https://github.com/ethereum/c-kzg-4844#13cec820c08f45318f82ed4e0da0300042758b92" +source = "git+https://github.com/ethereum/c-kzg-4844#6353f689e5d2802bbaf221253b3acafe4228331c" dependencies = [ "bindgen 0.64.0 (git+https://github.com/rust-lang/rust-bindgen?rev=0de11f0a521611ac8738b7b01d19dddaf3899e66)", "cc", diff --git a/deny.toml b/deny.toml index 3c59af057ea3..c76bbe889cc0 100644 --- a/deny.toml +++ b/deny.toml @@ -84,13 +84,6 @@ name = "rustls-webpki" expression = "LicenseRef-rustls-webpki" license-files = [{ path = "LICENSE", hash = 0x001c7e6c }] -[[licenses.clarify]] -name = "c-kzg" -expression = "Apache-2.0" -# The crate is in `bindings/rust` so we have to go up two directories for the -# license -license-files = [{ path = "../../LICENSE", hash = 0x13cec820 }] - # This section is considered when running `cargo deny check sources`. # More documentation about the 'sources' section can be found here: # https://embarkstudios.github.io/cargo-deny/checks/sources/cfg.html From 4de1ff069124240c6d4b0c7d3d5d5556b74a1eae Mon Sep 17 00:00:00 2001 From: "refcell.eth" Date: Wed, 9 Aug 2023 17:46:29 -0400 Subject: [PATCH 394/722] fix(Makefile): Graceful cargo-nextest install (#4009) Co-authored-by: refcell --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index 68b8fd3ba6fd..69d34910da2c 100644 --- a/Makefile +++ b/Makefile @@ -110,6 +110,7 @@ COV_FILE := lcov.info .PHONY: test-unit test-unit: ## Run unit tests. 
+ cargo install cargo-nextest --locked cargo nextest run $(UNIT_TEST_ARGS) .PHONY: cov-unit From 7f540abde9b8a7e56ea16be37e9aa46b5ae52242 Mon Sep 17 00:00:00 2001 From: Alessandro Mazza <121622391+alessandromazza98@users.noreply.github.com> Date: Thu, 10 Aug 2023 12:32:44 +0200 Subject: [PATCH 395/722] feat: add rpc server metrics into dashboard (#4078) --- etc/grafana/dashboards/overview.json | 237 +++++++++++++++++++++++++++ 1 file changed, 237 insertions(+) diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 58d3c9d8dc2d..39f6f07592bc 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -5065,6 +5065,243 @@ ], "title": "Pruner duration, per part", "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 204 + }, + "id": 97, + "panels": [], + "title": "RPC server", + "type": "row" + }, + { + "title": "Active Requests", + "description": "The number of active requests.", + "type": "graph", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "targets": [ + { + "expr": "reth_rpc_server_requests_started - reth_rpc_server_requests_finished", + "format": "time_series", + "legendFormat": "Active Requests", + "refId": "A" + } + ], + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 205 + } + }, + { + "title": "Active Websocket Connections", + "description": "The number of active websocket connections.", + "type": "graph", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "targets": [ + { + "expr": "reth_rpc_server_ws_session_opened - reth_rpc_server_ws_session_closed", + "format": "time_series", + "legendFormat": "Active Websocket Connections", + "refId": "A" + } + ], + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 205 + } + }, + { + "title": "Request Latency time", + "type": "heatmap", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 213 + }, + "id": 42, + "maxDataPoints": 25, + "options": { + "calculate": false, + "cellGap": 1, + "cellValues": { + "unit": "s" + }, + "color": { + "exponent": 0.2, + "fill": "dark-orange", + "min": 0, + "mode": "opacity", + "reverse": false, + "scale": "exponential", + "scheme": "Oranges", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto", + "value": "Latency time" + }, + "tooltip": { + "show": true, + "yHistogram": false + }, + "yAxis": { + "axisLabel": "Quantile", + "axisPlacement": "left", + "reverse": false, + "unit": "percentunit" + } + }, + "pluginVersion": "10.0.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum(increase(reth_rpc_server_request_latency{instance=~\"$instance\"}[$__interval])) by (quantile)", + "format": "time_series", + "instant": false, + "legendFormat": "{{quantile}}", + "range": true, + "refId": "A" + } + ] + }, + { + "title": "Call Latency time", + "type": "heatmap", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "custom": { + 
"hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 213 + }, + "id": 42, + "maxDataPoints": 25, + "options": { + "calculate": false, + "cellGap": 1, + "cellValues": { + "unit": "s" + }, + "color": { + "exponent": 0.2, + "fill": "dark-orange", + "min": 0, + "mode": "opacity", + "reverse": false, + "scale": "exponential", + "scheme": "Oranges", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto", + "value": "Latency time" + }, + "tooltip": { + "show": true, + "yHistogram": false + }, + "yAxis": { + "axisLabel": "Quantile", + "axisPlacement": "left", + "reverse": false, + "unit": "percentunit" + } + }, + "pluginVersion": "10.0.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum(increase(reth_rpc_server_call_latency{instance=~\"$instance\"}[$__interval])) by (quantile)", + "format": "time_series", + "instant": false, + "legendFormat": "{{quantile}}", + "range": true, + "refId": "A" + } + ] } ], "refresh": "30s", From 072c84083c58a95b5b9245136017379501a9ceb2 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Thu, 10 Aug 2023 13:06:30 +0200 Subject: [PATCH 396/722] chore: improve ef-tests readability (#4136) --- testing/ef-tests/src/assert.rs | 10 +- testing/ef-tests/src/cases/blockchain_test.rs | 126 ++++++++---------- testing/ef-tests/src/models.rs | 2 +- testing/ef-tests/src/result.rs | 11 +- 4 files changed, 70 insertions(+), 79 deletions(-) diff --git a/testing/ef-tests/src/assert.rs b/testing/ef-tests/src/assert.rs index 8db027f6cbe1..9f1f83e001ae 100644 --- a/testing/ef-tests/src/assert.rs +++ b/testing/ef-tests/src/assert.rs @@ -6,11 +6,11 @@ use std::fmt::Debug; /// A helper like `assert_eq!` that instead returns `Err(Error::Assertion)` on failure. pub fn assert_equal(left: T, right: T, msg: &str) -> Result<(), Error> where - T: Eq + Debug, + T: PartialEq + Debug, { - if left != right { - return Err(Error::Assertion(format!("{msg}. Left {:?}, right {:?}", left, right))) + if left == right { + Ok(()) + } else { + Err(Error::Assertion(format!("{msg}\n left `{left:?}`,\n right `{right:?}`"))) } - - Ok(()) } diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs index e2f7fc8ac65d..7cd8636465dd 100644 --- a/testing/ef-tests/src/cases/blockchain_test.rs +++ b/testing/ef-tests/src/cases/blockchain_test.rs @@ -9,7 +9,7 @@ use reth_primitives::{BlockBody, SealedBlock}; use reth_provider::{BlockWriter, ProviderFactory}; use reth_rlp::Decodable; use reth_stages::{stages::ExecutionStage, ExecInput, Stage}; -use std::{collections::BTreeMap, ffi::OsStr, fs, path::Path, sync::Arc}; +use std::{collections::BTreeMap, fs, path::Path, sync::Arc}; /// A handler for the blockchain test suite. 
#[derive(Debug)] @@ -42,14 +42,12 @@ pub struct BlockchainTestCase { impl Case for BlockchainTestCase { fn load(path: &Path) -> Result { Ok(BlockchainTestCase { - tests: fs::read_to_string(path) - .map_err(|e| Error::Io { path: path.into(), error: e.to_string() }) - .and_then(|s| { - serde_json::from_str(&s).map_err(|e| Error::CouldNotDeserialize { - path: path.into(), - error: e.to_string(), - }) - })?, + tests: { + let s = fs::read_to_string(path) + .map_err(|error| Error::Io { path: path.into(), error })?; + serde_json::from_str(&s) + .map_err(|error| Error::CouldNotDeserialize { path: path.into(), error })? + }, skip: should_skip(path), }) } @@ -130,66 +128,58 @@ impl Case for BlockchainTestCase { } } -/// Tests are test edge cases that are not possible to happen on mainnet, so we are skipping them. +/// Returns whether the test at the given path should be skipped. +/// +/// Some tests are edge cases that cannot happen on mainnet, while others are skipped for +/// convenience (e.g. they take a long time to run) or are temporarily disabled. +/// +/// The reason should be documented in a comment above the file name(s). pub fn should_skip(path: &Path) -> bool { - // funky test with `bigint 0x00` value in json :) not possible to happen on mainnet and require - // custom json parser. https://github.com/ethereum/tests/issues/971 - if path.file_name() == Some(OsStr::new("ValueOverflow.json")) { - return true - } - // txbyte is of type 02 and we dont parse tx bytes for this test to fail. - if path.file_name() == Some(OsStr::new("typeTwoBerlin.json")) { - return true - } - // Test checks if nonce overflows. We are handling this correctly but we are not parsing - // exception in testsuite There are more nonce overflow tests that are in internal - // call/create, and those tests are passing and are enabled. - if path.file_name() == Some(OsStr::new("CreateTransactionHighNonce.json")) { - return true - } - - // Test check if gas price overflows, we handle this correctly but does not match tests specific - // exception. - if path.file_name() == Some(OsStr::new("HighGasPrice.json")) { - return true - } - - // Skip test where basefee/accesslist/difficulty is present but it shouldn't be supported in - // London/Berlin/TheMerge. https://github.com/ethereum/tests/blob/5b7e1ab3ffaf026d99d20b17bb30f533a2c80c8b/GeneralStateTests/stExample/eip1559.json#L130 - // It is expected to not execute these tests. - if path.file_name() == Some(OsStr::new("accessListExample.json")) || - path.file_name() == Some(OsStr::new("basefeeExample.json")) || - path.file_name() == Some(OsStr::new("eip1559.json")) || - path.file_name() == Some(OsStr::new("mergeTest.json")) - { - return true - } - - // These tests are passing, but they take a lot of time to execute so we are going to skip them. - if path.file_name() == Some(OsStr::new("loopExp.json")) || - path.file_name() == Some(OsStr::new("Call50000_sha256.json")) || - path.file_name() == Some(OsStr::new("static_Call50000_sha256.json")) || - path.file_name() == Some(OsStr::new("loopMul.json")) || - path.file_name() == Some(OsStr::new("CALLBlake2f_MaxRounds.json")) || - path.file_name() == Some(OsStr::new("shiftCombinations.json")) - { - return true - } - + let path_str = path.to_str().expect("Path is not valid UTF-8"); + let name = path.file_name().unwrap().to_str().unwrap(); + matches!( + name, + // funky test with `bigint 0x00` value in json :) not possible to happen on mainnet and require + // custom json parser. 
https://github.com/ethereum/tests/issues/971 + | "ValueOverflow.json" + + // txbyte is of type 02 and we dont parse tx bytes for this test to fail. + | "typeTwoBerlin.json" + + // Test checks if nonce overflows. We are handling this correctly but we are not parsing + // exception in testsuite There are more nonce overflow tests that are in internal + // call/create, and those tests are passing and are enabled. + | "CreateTransactionHighNonce.json" + + // Test check if gas price overflows, we handle this correctly but does not match tests specific + // exception. + | "HighGasPrice.json" + + // Skip test where basefee/accesslist/difficulty is present but it shouldn't be supported in + // London/Berlin/TheMerge. https://github.com/ethereum/tests/blob/5b7e1ab3ffaf026d99d20b17bb30f533a2c80c8b/GeneralStateTests/stExample/eip1559.json#L130 + // It is expected to not execute these tests. + | "accessListExample.json" + | "basefeeExample.json" + | "eip1559.json" + | "mergeTest.json" + + // These tests are passing, but they take a lot of time to execute so we are going to skip them. + | "loopExp.json" + | "Call50000_sha256.json" + | "static_Call50000_sha256.json" + | "loopMul.json" + | "CALLBlake2f_MaxRounds.json" + | "shiftCombinations.json" + + // TODO: re-enable when blobtx are supported + | "blobtxExample.json" + ) // Ignore outdated EOF tests that haven't been updated for Cancun yet. - let eof_path = Path::new("EIPTests").join("stEOF"); - if path.to_string_lossy().contains(&*eof_path.to_string_lossy()) { - return true - } - - if path.file_name() == Some(OsStr::new("ValueOverflow.json")) { - return true - } - - // TODO: re-enable when blobtx are supported - if path.file_name() == Some(OsStr::new("blobtxExample.json")) { - return true - } + || path_contains(path_str, &["EIPTests", "stEOF"]) +} - false +/// `str::contains` but for a path. Takes into account the OS path separator (`/` or `\`). +fn path_contains(path_str: &str, rhs: &[&str]) -> bool { + let rhs = rhs.join(std::path::MAIN_SEPARATOR_STR); + path_str.contains(&rhs) } diff --git a/testing/ef-tests/src/models.rs b/testing/ef-tests/src/models.rs index 44e9c9b3a944..7276174b4099 100644 --- a/testing/ef-tests/src/models.rs +++ b/testing/ef-tests/src/models.rs @@ -216,7 +216,7 @@ impl Account { Tx: DbTx<'a>, { let account = tx.get::(address)?.ok_or_else(|| { - Error::Assertion(format!("Account is missing ({address}) expected: {:?}", self)) + Error::Assertion(format!("Expected account ({address:?}) is missing from DB: {self:?}")) })?; assert_equal(self.balance.into(), account.balance, "Balance does not match")?; diff --git a/testing/ef-tests/src/result.rs b/testing/ef-tests/src/result.rs index 5aaf58a9e8bc..0aabf560a839 100644 --- a/testing/ef-tests/src/result.rs +++ b/testing/ef-tests/src/result.rs @@ -10,7 +10,7 @@ use thiserror::Error; /// # Note /// /// `Error::Skipped` should not be treated as a test failure. -#[derive(Error, Debug, Clone)] +#[derive(Debug, Error)] #[non_exhaustive] pub enum Error { /// The test was skipped @@ -22,7 +22,8 @@ pub enum Error { /// The path to the file or directory path: PathBuf, /// The specific error - error: String, + #[source] + error: std::io::Error, }, /// A deserialization error occurred #[error("An error occurred deserializing the test at {path}: {error}")] @@ -30,7 +31,8 @@ pub enum Error { /// The path to the file we wanted to deserialize path: PathBuf, /// The specific error - error: String, + #[source] + error: serde_json::Error, }, /// A database error occurred. 
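    // (thiserror keeps the chain intact here: the `#[source]` fields above expose
    // the underlying io/serde errors via `Error::source()` instead of
    // stringifying them, and `#[error(transparent)]` below forwards both Display
    // and source to the wrapped error.)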
#[error(transparent)] @@ -116,8 +118,7 @@ pub(crate) fn print_results( } for case in failed { - let error = case.result.clone().unwrap_err(); - + let error = case.result.as_ref().unwrap_err(); println!("[!] Case {} failed (description: {}): {}", case.path.display(), case.desc, error); } } From 628495ae8fadeea640b8a1c5c78ccb065b9213c3 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 10 Aug 2023 14:15:35 +0200 Subject: [PATCH 397/722] chore: make unit type default (#4141) --- bin/reth/src/cli/ext.rs | 2 +- crates/rpc/rpc-builder/src/lib.rs | 16 +++++----------- .../additional-rpc-namespace-in-cli/src/main.rs | 2 +- 3 files changed, 7 insertions(+), 13 deletions(-) diff --git a/bin/reth/src/cli/ext.rs b/bin/reth/src/cli/ext.rs index 0eaa6a95dcbe..03350e624287 100644 --- a/bin/reth/src/cli/ext.rs +++ b/bin/reth/src/cli/ext.rs @@ -41,7 +41,7 @@ pub trait RethNodeCommandExt: fmt::Debug + clap::Args { &mut self, _config: &Conf, _registry: &mut RethModuleRegistry, - _modules: &mut TransportRpcModules<()>, + _modules: &mut TransportRpcModules, ) -> eyre::Result<()> where Conf: RethRpcConfig, diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 6b2f7461caae..32d1665739f5 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -344,7 +344,7 @@ where module_config: TransportRpcModuleConfig, engine: EngineApi, ) -> ( - TransportRpcModules<()>, + TransportRpcModules, AuthRpcModule, RethModuleRegistry, ) @@ -1232,10 +1232,7 @@ impl RpcServerConfig { } /// Convenience function to do [RpcServerConfig::build] and [RpcServer::start] in one step - pub async fn start( - self, - modules: TransportRpcModules<()>, - ) -> Result { + pub async fn start(self, modules: TransportRpcModules) -> Result { self.build().await?.start(modules).await } @@ -1456,7 +1453,7 @@ impl TransportRpcModuleConfig { /// Holds installed modules per transport type. #[derive(Debug, Default)] -pub struct TransportRpcModules { +pub struct TransportRpcModules { /// The original config config: TransportRpcModuleConfig, /// rpcs module for http @@ -1469,7 +1466,7 @@ pub struct TransportRpcModules { // === impl TransportRpcModules === -impl TransportRpcModules<()> { +impl TransportRpcModules { /// Returns the [TransportRpcModuleConfig] used to configure this instance. pub fn module_config(&self) -> &TransportRpcModuleConfig { &self.config @@ -1694,10 +1691,7 @@ impl RpcServer { /// This returns an [RpcServerHandle] that's connected to the server task(s) until the server is /// stopped or the [RpcServerHandle] is dropped. 
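
The `TransportRpcModules<()>` cleanup in this patch works because the struct now carries a defaulted type parameter (presumably declared along the lines of `pub struct TransportRpcModules<Context = ()>`, reconstructed here since the generics were lost in extraction): once the default exists, the bare name `TransportRpcModules` resolves to `TransportRpcModules<()>` at every use site. A minimal standalone sketch of the pattern, with a hypothetical `Modules` type rather than reth's:

    struct Modules<Context = ()> {
        context: Context,
    }

    // The bare name resolves to `Modules<()>` thanks to the default parameter.
    fn install(_modules: &mut Modules) {}

    fn main() {
        let mut modules = Modules { context: () };
        install(&mut modules);

        // The parameter can still be supplied explicitly when needed.
        let _custom: Modules<u64> = Modules { context: 42 };
    }
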
#[instrument(name = "start", skip_all, fields(http = ?self.http_local_addr(), ws = ?self.ws_local_addr(), ipc = ?self.ipc_endpoint().map(|ipc|ipc.path())), target = "rpc", level = "TRACE")]
-    pub async fn start(
-        self,
-        modules: TransportRpcModules<()>,
-    ) -> Result<RpcServerHandle, RpcError> {
+    pub async fn start(self, modules: TransportRpcModules) -> Result<RpcServerHandle, RpcError> {
         trace!(target: "rpc", "starting RPC server");
         let Self { ws_http, ipc: ipc_server } = self;
         let TransportRpcModules { config, http, ws, ipc } = modules;
diff --git a/examples/additional-rpc-namespace-in-cli/src/main.rs b/examples/additional-rpc-namespace-in-cli/src/main.rs
index 7eeadcb5c044..817e6ddae629 100644
--- a/examples/additional-rpc-namespace-in-cli/src/main.rs
+++ b/examples/additional-rpc-namespace-in-cli/src/main.rs
@@ -55,7 +55,7 @@ impl RethNodeCommandExt for RethCliTxpoolExt {
         &mut self,
         _config: &Conf,
         registry: &mut RethModuleRegistry,
-        modules: &mut TransportRpcModules<()>,
+        modules: &mut TransportRpcModules,
     ) -> eyre::Result<()>
     where
         Conf: RethRpcConfig,

From a808f7c22de79bbcf469e9115f1152c2cd84e26c Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Thu, 10 Aug 2023 14:57:39 +0200
Subject: [PATCH 398/722] fix: omit output if empty (#4138)

---
 crates/revm/revm-inspectors/src/tracing/types.rs | 2 +-
 crates/rpc/rpc-types/src/eth/trace/geth/call.rs  | 2 ++
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/crates/revm/revm-inspectors/src/tracing/types.rs b/crates/revm/revm-inspectors/src/tracing/types.rs
index 51fb1d21cb46..83a4586a4f3e 100644
--- a/crates/revm/revm-inspectors/src/tracing/types.rs
+++ b/crates/revm/revm-inspectors/src/tracing/types.rs
@@ -438,7 +438,7 @@ impl CallTraceNode {
             gas: U256::from(self.trace.gas_limit),
             gas_used: U256::from(self.trace.gas_used),
             input: self.trace.data.clone().into(),
-            output: Some(self.trace.output.clone().into()),
+            output: (!self.trace.output.is_empty()).then(|| self.trace.output.clone().into()),
             error: None,
             revert_reason: None,
             calls: Default::default(),
diff --git a/crates/rpc/rpc-types/src/eth/trace/geth/call.rs b/crates/rpc/rpc-types/src/eth/trace/geth/call.rs
index d131f0f6a328..c0391b0c9aac 100644
--- a/crates/rpc/rpc-types/src/eth/trace/geth/call.rs
+++ b/crates/rpc/rpc-types/src/eth/trace/geth/call.rs
@@ -1,6 +1,8 @@
 use reth_primitives::{serde_helper::num::from_int_or_hex, Address, Bytes, H256, U256};
 use serde::{Deserialize, Serialize};

+/// The response object for `debug_traceTransaction` with `"tracer": "callTracer"`
+///
 ///
 #[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
 pub struct CallFrame {

From 499ca3d15378d2aff13d299ff4967fcab7b009a3 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Thu, 10 Aug 2023 14:57:52 +0200
Subject: [PATCH 399/722] chore: enable blobtest again (#4139)

---
 testing/ef-tests/src/cases/blockchain_test.rs | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs
index 7cd8636465dd..d63124581b6b 100644
--- a/testing/ef-tests/src/cases/blockchain_test.rs
+++ b/testing/ef-tests/src/cases/blockchain_test.rs
@@ -170,9 +170,6 @@ pub fn should_skip(path: &Path) -> bool {
         | "loopMul.json"
         | "CALLBlake2f_MaxRounds.json"
         | "shiftCombinations.json"
-
-        // TODO: re-enable when blobtx are supported
-        | "blobtxExample.json"
     )
     // Ignore outdated EOF tests that haven't been updated for Cancun yet.
|| path_contains(path_str, &["EIPTests", "stEOF"]) From 9817a9e72420f0e7119e3a972fb2ee52da8baf3c Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 10 Aug 2023 16:04:48 +0300 Subject: [PATCH 400/722] chore(cli): expose registry provider (#4144) --- crates/rpc/rpc-builder/src/lib.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 32d1665739f5..581c4cbcc563 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -731,7 +731,7 @@ impl } } - /// Returns a reference to the provider + /// Returns a reference to the pool pub fn pool(&self) -> &Pool { &self.pool } @@ -746,6 +746,11 @@ impl &self.executor } + /// Returns a reference to the provider + pub fn provider(&self) -> &Provider { + &self.provider + } + /// Returns all installed methods pub fn methods(&self) -> Vec { self.modules.values().cloned().collect() From b8b7ad60ada5e808c11beba2810c108d46d849bb Mon Sep 17 00:00:00 2001 From: "Supernovahs.eth" <91280922+supernovahs@users.noreply.github.com> Date: Thu, 10 Aug 2023 20:36:49 +0530 Subject: [PATCH 401/722] Signature functions moved to rpc_types_compat (#4143) --- .../rpc-types-compat/src/transaction/mod.rs | 13 ++--- .../src/transaction/signature.rs | 51 ++++++++++++++++++ .../rpc/rpc-types/src/eth/transaction/mod.rs | 2 +- .../src/eth/transaction/signature.rs | 53 +------------------ 4 files changed, 58 insertions(+), 61 deletions(-) create mode 100644 crates/rpc/rpc-types-compat/src/transaction/signature.rs diff --git a/crates/rpc/rpc-types-compat/src/transaction/mod.rs b/crates/rpc/rpc-types-compat/src/transaction/mod.rs index 02026b04fbec..9cda539ef1b2 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/mod.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/mod.rs @@ -1,12 +1,12 @@ //! Compatibility functions for rpc `Transaction` type. - +mod signature; use reth_primitives::{ AccessListItem, BlockNumber, Transaction as PrimitiveTransaction, TransactionKind as PrimitiveTransactionKind, TransactionSignedEcRecovered, TxType, H256, U128, U256, U64, }; -use reth_rpc_types::{Signature, Transaction}; - +use reth_rpc_types::Transaction; +use signature::from_primitive_signature; /// Create a new rpc transaction result for a mined transaction, using the given block hash, /// number, and tx index fields to populate the corresponding fields in the rpc result. /// @@ -96,11 +96,8 @@ fn fill( ), }; - let signature = Signature::from_primitive_signature( - *signed_tx.signature(), - signed_tx.tx_type(), - signed_tx.chain_id(), - ); + let signature = + from_primitive_signature(*signed_tx.signature(), signed_tx.tx_type(), signed_tx.chain_id()); Transaction { hash: signed_tx.hash(), diff --git a/crates/rpc/rpc-types-compat/src/transaction/signature.rs b/crates/rpc/rpc-types-compat/src/transaction/signature.rs new file mode 100644 index 000000000000..0e7fe5f5876a --- /dev/null +++ b/crates/rpc/rpc-types-compat/src/transaction/signature.rs @@ -0,0 +1,51 @@ +use reth_primitives::{Signature as PrimitiveSignature, TxType, U256}; +use reth_rpc_types::{Parity, Signature}; + +/// Creates a new rpc signature from a legacy [primitive +/// signature](reth_primitives::Signature), using the give chain id to compute the signature's +/// recovery id. +/// +/// If the chain id is `Some`, the recovery id is computed according to [EIP-155](https://eips.ethereum.org/EIPS/eip-155). 
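
The EIP-155 computation referenced in the doc comment above is compact enough to sketch standalone. This is the spec rule, not reth's code, and the helper name is hypothetical:

    // v encodes the parity bit plus either the pre-EIP-155 offset of 27
    // or, for replay-protected transactions, chain_id * 2 + 35.
    fn legacy_v(odd_y_parity: bool, chain_id: Option<u64>) -> u64 {
        match chain_id {
            Some(id) => id * 2 + 35 + odd_y_parity as u64, // EIP-155
            None => 27 + odd_y_parity as u64,              // pre-EIP-155
        }
    }

    fn main() {
        assert_eq!(legacy_v(false, Some(1)), 37); // mainnet, even parity
        assert_eq!(legacy_v(true, None), 28); // unprotected legacy signature
    }
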
+pub(crate) fn from_legacy_primitive_signature( + signature: PrimitiveSignature, + chain_id: Option, +) -> Signature { + Signature { + r: signature.r, + s: signature.s, + v: U256::from(signature.v(chain_id)), + y_parity: None, + } +} + +/// Creates a new rpc signature from a non-legacy [primitive +/// signature](reth_primitives::Signature). This sets the `v` value to `0` or `1` depending on +/// the signature's `odd_y_parity`. +pub(crate) fn from_typed_primitive_signature(signature: PrimitiveSignature) -> Signature { + Signature { + r: signature.r, + s: signature.s, + v: U256::from(signature.odd_y_parity as u8), + y_parity: Some(Parity(signature.odd_y_parity)), + } +} + +/// Creates a new rpc signature from a legacy [primitive +/// signature](reth_primitives::Signature). +/// +/// The tx type is used to determine whether or not to use the `chain_id` to compute the +/// signature's recovery id. +/// +/// If the transaction is a legacy transaction, it will use the `chain_id` to compute the +/// signature's recovery id. If the transaction is a typed transaction, it will set the `v` +/// value to `0` or `1` depending on the signature's `odd_y_parity`. +pub(crate) fn from_primitive_signature( + signature: PrimitiveSignature, + tx_type: TxType, + chain_id: Option, +) -> Signature { + match tx_type { + TxType::Legacy => from_legacy_primitive_signature(signature, chain_id), + _ => from_typed_primitive_signature(signature), + } +} diff --git a/crates/rpc/rpc-types/src/eth/transaction/mod.rs b/crates/rpc/rpc-types/src/eth/transaction/mod.rs index 1e8b09aecb1e..6b6601af62a5 100644 --- a/crates/rpc/rpc-types/src/eth/transaction/mod.rs +++ b/crates/rpc/rpc-types/src/eth/transaction/mod.rs @@ -3,7 +3,7 @@ pub use receipt::TransactionReceipt; pub use request::TransactionRequest; use reth_primitives::{AccessListItem, Address, Bytes, H256, U128, U256, U64}; use serde::{Deserialize, Serialize}; -pub use signature::Signature; +pub use signature::{Parity, Signature}; pub use typed::*; mod common; diff --git a/crates/rpc/rpc-types/src/eth/transaction/signature.rs b/crates/rpc/rpc-types/src/eth/transaction/signature.rs index 3c31a126002e..78a8f98733a3 100644 --- a/crates/rpc/rpc-types/src/eth/transaction/signature.rs +++ b/crates/rpc/rpc-types/src/eth/transaction/signature.rs @@ -1,5 +1,5 @@ //! Signature related RPC values -use reth_primitives::{Signature as PrimitiveSignature, TxType, U256}; +use reth_primitives::U256; use serde::{Deserialize, Serialize}; /// Container type for all signature fields in RPC @@ -23,57 +23,6 @@ pub struct Signature { pub y_parity: Option, } -impl Signature { - /// Creates a new rpc signature from a legacy [primitive - /// signature](reth_primitives::Signature), using the give chain id to compute the signature's - /// recovery id. - /// - /// If the chain id is `Some`, the recovery id is computed according to [EIP-155](https://eips.ethereum.org/EIPS/eip-155). - pub(crate) fn from_legacy_primitive_signature( - signature: PrimitiveSignature, - chain_id: Option, - ) -> Self { - Self { - r: signature.r, - s: signature.s, - v: U256::from(signature.v(chain_id)), - y_parity: None, - } - } - - /// Creates a new rpc signature from a non-legacy [primitive - /// signature](reth_primitives::Signature). This sets the `v` value to `0` or `1` depending on - /// the signature's `odd_y_parity`. 
- pub(crate) fn from_typed_primitive_signature(signature: PrimitiveSignature) -> Self { - Self { - r: signature.r, - s: signature.s, - v: U256::from(signature.odd_y_parity as u8), - y_parity: Some(Parity(signature.odd_y_parity)), - } - } - - /// Creates a new rpc signature from a legacy [primitive - /// signature](reth_primitives::Signature). - /// - /// The tx type is used to determine whether or not to use the `chain_id` to compute the - /// signature's recovery id. - /// - /// If the transaction is a legacy transaction, it will use the `chain_id` to compute the - /// signature's recovery id. If the transaction is a typed transaction, it will set the `v` - /// value to `0` or `1` depending on the signature's `odd_y_parity`. - pub fn from_primitive_signature( - signature: PrimitiveSignature, - tx_type: TxType, - chain_id: Option, - ) -> Self { - match tx_type { - TxType::Legacy => Signature::from_legacy_primitive_signature(signature, chain_id), - _ => Signature::from_typed_primitive_signature(signature), - } - } -} - /// Type that represents the signature parity byte, meant for use in RPC. /// /// This will be serialized as "0x0" if false, and "0x1" if true. From e43187bf7ffdb669c45ff8383fa156e44b070eb2 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 10 Aug 2023 18:07:44 +0200 Subject: [PATCH 402/722] feat: add is eip4844 fn (#4147) --- crates/primitives/src/lib.rs | 2 +- crates/primitives/src/transaction/mod.rs | 4 +++- crates/primitives/src/transaction/tx_type.rs | 3 +-- crates/transaction-pool/src/traits.rs | 8 +++++++- 4 files changed, 12 insertions(+), 5 deletions(-) diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 02946310f13a..96941dfadf99 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -90,7 +90,7 @@ pub use transaction::{ IntoRecoveredTransaction, InvalidTransactionError, Signature, Transaction, TransactionKind, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxEip1559, TxEip2930, TxEip4844, TxLegacy, TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, - LEGACY_TX_TYPE_ID, + EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, }; pub use withdrawal::Withdrawal; diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index a2d64517fcb7..8c7e0394c6a2 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -14,7 +14,9 @@ use reth_rlp::{ use serde::{Deserialize, Serialize}; pub use signature::Signature; use std::mem; -pub use tx_type::{TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, LEGACY_TX_TYPE_ID}; +pub use tx_type::{ + TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, +}; pub use eip1559::TxEip1559; pub use eip2930::TxEip2930; diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index 822965a18f11..6685e96b64cf 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -13,8 +13,7 @@ pub const EIP2930_TX_TYPE_ID: u8 = 1; pub const EIP1559_TX_TYPE_ID: u8 = 2; /// Identifier for [TxEip4844](crate::TxEip4844) transaction. 
-#[allow(unused)] -pub(crate) const EIP4844_TX_TYPE_ID: u8 = 3; +pub const EIP4844_TX_TYPE_ID: u8 = 3; /// Transaction Type /// diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 7d07a72a8759..5cffaa30df43 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -7,7 +7,8 @@ use crate::{ use futures_util::{ready, Stream}; use reth_primitives::{ Address, FromRecoveredTransaction, IntoRecoveredTransaction, PeerId, Transaction, - TransactionKind, TransactionSignedEcRecovered, TxHash, EIP1559_TX_TYPE_ID, H256, U256, + TransactionKind, TransactionSignedEcRecovered, TxHash, EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, + H256, U256, }; use reth_rlp::Encodable; use std::{ @@ -554,6 +555,11 @@ pub trait PoolTransaction: self.tx_type() == EIP1559_TX_TYPE_ID } + /// Returns true if the transaction is an EIP-4844 transaction. + fn is_eip4844(&self) -> bool { + self.tx_type() == EIP4844_TX_TYPE_ID + } + /// Returns the length of the rlp encoded object fn encoded_length(&self) -> usize; From 2338720b68b00c72e083858a2fc804f656ea0fad Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 10 Aug 2023 19:45:18 +0200 Subject: [PATCH 403/722] fix: use original bytes (#4150) --- crates/revm/revm-inspectors/src/tracing/builder/parity.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs index 9ecddcfd45ee..073e1707f73f 100644 --- a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs +++ b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs @@ -440,7 +440,7 @@ where let code_hash = if db_acc.code_hash != KECCAK_EMPTY { db_acc.code_hash } else { continue }; - curr_ref.code = db.code_by_hash(code_hash)?.bytecode.into(); + curr_ref.code = db.code_by_hash(code_hash)?.original_bytes().into(); } Ok(()) From c412f3935dbedd047905aedb986bbd118165155e Mon Sep 17 00:00:00 2001 From: PatStiles <33334338+PatStiles@users.noreply.github.com> Date: Thu, 10 Aug 2023 12:56:03 -0500 Subject: [PATCH 404/722] feat: add kzg_to_versioned_hash (#4085) Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> --- Cargo.lock | 64 +++++++++---------- crates/consensus/common/src/validation.rs | 2 +- crates/primitives/Cargo.toml | 1 + crates/primitives/src/constants/eip4844.rs | 3 + .../primitives/src/{blobfee.rs => eip4844.rs} | 15 ++++- crates/primitives/src/header.rs | 2 +- crates/primitives/src/lib.rs | 2 +- 7 files changed, 53 insertions(+), 36 deletions(-) rename crates/primitives/src/{blobfee.rs => eip4844.rs} (50%) diff --git a/Cargo.lock b/Cargo.lock index cf825eaba2ec..52e30d02a76b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -119,9 +119,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" +checksum = "86b8f9420f797f2d9e935edf629310eb938a0d839f984e25327f3c7eed22300c" dependencies = [ "memchr", ] @@ -220,9 +220,9 @@ dependencies = [ [[package]] name = "anstyle-wincon" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188" +checksum = "c677ab05e09154296dd37acecd46420c17b9713e8366facafa8fc0885167cf4c" dependencies = [ "anstyle", "windows-sys 0.48.0", @@ -420,9 +420,9 @@ dependencies = [ 
[[package]] name = "async-lock" -version = "2.7.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7" +checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" dependencies = [ "event-listener", ] @@ -723,7 +723,7 @@ dependencies = [ [[package]] name = "boa_ast" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#9665f8be3be60f475d816ca10430631f43d6c962" +source = "git+https://github.com/boa-dev/boa#22b014d5d5ab9fba5dc467734227558e75c66d20" dependencies = [ "bitflags 2.3.3", "boa_interner", @@ -736,7 +736,7 @@ dependencies = [ [[package]] name = "boa_engine" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#9665f8be3be60f475d816ca10430631f43d6c962" +source = "git+https://github.com/boa-dev/boa#22b014d5d5ab9fba5dc467734227558e75c66d20" dependencies = [ "bitflags 2.3.3", "boa_ast", @@ -774,7 +774,7 @@ dependencies = [ [[package]] name = "boa_gc" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#9665f8be3be60f475d816ca10430631f43d6c962" +source = "git+https://github.com/boa-dev/boa#22b014d5d5ab9fba5dc467734227558e75c66d20" dependencies = [ "boa_macros", "boa_profiler", @@ -785,7 +785,7 @@ dependencies = [ [[package]] name = "boa_icu_provider" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#9665f8be3be60f475d816ca10430631f43d6c962" +source = "git+https://github.com/boa-dev/boa#22b014d5d5ab9fba5dc467734227558e75c66d20" dependencies = [ "icu_collections", "icu_normalizer", @@ -798,7 +798,7 @@ dependencies = [ [[package]] name = "boa_interner" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#9665f8be3be60f475d816ca10430631f43d6c962" +source = "git+https://github.com/boa-dev/boa#22b014d5d5ab9fba5dc467734227558e75c66d20" dependencies = [ "boa_gc", "boa_macros", @@ -813,7 +813,7 @@ dependencies = [ [[package]] name = "boa_macros" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#9665f8be3be60f475d816ca10430631f43d6c962" +source = "git+https://github.com/boa-dev/boa#22b014d5d5ab9fba5dc467734227558e75c66d20" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", @@ -824,7 +824,7 @@ dependencies = [ [[package]] name = "boa_parser" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#9665f8be3be60f475d816ca10430631f43d6c962" +source = "git+https://github.com/boa-dev/boa#22b014d5d5ab9fba5dc467734227558e75c66d20" dependencies = [ "bitflags 2.3.3", "boa_ast", @@ -844,7 +844,7 @@ dependencies = [ [[package]] name = "boa_profiler" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#9665f8be3be60f475d816ca10430631f43d6c962" +source = "git+https://github.com/boa-dev/boa#22b014d5d5ab9fba5dc467734227558e75c66d20" [[package]] name = "brotli" @@ -933,7 +933,7 @@ dependencies = [ [[package]] name = "c-kzg" version = "0.1.0" -source = "git+https://github.com/ethereum/c-kzg-4844#6353f689e5d2802bbaf221253b3acafe4228331c" +source = "git+https://github.com/ethereum/c-kzg-4844#9d85ed8b194eee2a70380e58b6c45909f75933ea" dependencies = [ "bindgen 0.64.0 (git+https://github.com/rust-lang/rust-bindgen?rev=0de11f0a521611ac8738b7b01d19dddaf3899e66)", "cc", @@ -1241,9 +1241,9 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "795bc6e66a8e340f075fcf6227e417a2dc976b92b91f3cdc778bb858778b6747" +checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" 
[[package]] name = "const-str" @@ -1363,9 +1363,9 @@ dependencies = [ [[package]] name = "critical-section" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6548a0ad5d2549e111e1f6a11a6c2e2d00ce6a3dafe22948d67c2b443f775e52" +checksum = "7059fff8937831a9ae6f0fe4d658ffabf58f2ca96aa9dec1c889f936f705f216" [[package]] name = "crossbeam-channel" @@ -2725,7 +2725,7 @@ version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "759c97c1e17c55525b57192c06a267cda0ac5210b222d6b82189a2338fa1c13d" dependencies = [ - "aho-corasick 1.0.2", + "aho-corasick 1.0.3", "bstr 1.6.0", "fnv", "log", @@ -4585,9 +4585,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c516611246607d0c04186886dbb3a754368ef82c79e9827a802c6d836dd111c" +checksum = "12cc1b0bf1727a77a54b6654e7b5f1af8604923edc8b81885f8ec92f9e3f0a05" [[package]] name = "pin-utils" @@ -5124,7 +5124,7 @@ version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" dependencies = [ - "aho-corasick 1.0.2", + "aho-corasick 1.0.3", "memchr", "regex-automata 0.3.6", "regex-syntax 0.7.4", @@ -5145,7 +5145,7 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" dependencies = [ - "aho-corasick 1.0.2", + "aho-corasick 1.0.3", "memchr", "regex-syntax 0.7.4", ] @@ -5829,6 +5829,7 @@ dependencies = [ "serde", "serde_json", "serde_with", + "sha2 0.10.7", "strum 0.25.0", "sucds", "tempfile", @@ -6470,9 +6471,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.101.2" +version = "0.101.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "513722fd73ad80a71f72b61009ea1b584bcfa1483ca93949c8f290298837fa59" +checksum = "261e9e0888cba427c3316e6322805653c9425240b6fd96cee7cb671ab70ab8d0" dependencies = [ "ring", "untrusted", @@ -7510,11 +7511,10 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.29.1" +version = "1.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "532826ff75199d5833b9d2c5fe410f29235e25704ee5f0ef599fb51c21f4a4da" +checksum = "2d3ce25f50619af8b0aec2eb23deebe84249e19e2ddd393a6e16e3300a6dadfd" dependencies = [ - "autocfg", "backtrace", "bytes", "libc", @@ -7523,7 +7523,7 @@ dependencies = [ "parking_lot 0.12.1", "pin-project-lite", "signal-hook-registry", - "socket2 0.4.9", + "socket2 0.5.3", "tokio-macros", "windows-sys 0.48.0", ] @@ -8463,9 +8463,9 @@ checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "winnow" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acaaa1190073b2b101e15083c38ee8ec891b5e05cbee516521e94ec008f61e64" +checksum = "48f9aab5bf4474679c9908b82c245a17ee48b55e07350d439ef522020cce22ff" dependencies = [ "memchr", ] diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index ee055d1ffadd..2c2bde80fa40 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -1,11 +1,11 @@ //! Collection of methods for block validation. 
use reth_interfaces::{consensus::ConsensusError, Result as RethResult}; use reth_primitives::{ - blobfee::calculate_excess_blob_gas, constants::{ self, eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, }, + eip4844::calculate_excess_blob_gas, BlockNumber, ChainSpec, Hardfork, Header, InvalidTransactionError, SealedBlock, SealedHeader, Transaction, TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxEip4844, TxLegacy, }; diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 69cf6d3e0f6e..8716a1e228b4 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -61,6 +61,7 @@ once_cell = "1.17.0" zstd = { version = "0.12", features = ["experimental"] } paste = "1.0" tempfile = "3.3" +sha2 = "0.10.7" # proof related triehash = "0.8" diff --git a/crates/primitives/src/constants/eip4844.rs b/crates/primitives/src/constants/eip4844.rs index db86b18432a9..602229d2cc85 100644 --- a/crates/primitives/src/constants/eip4844.rs +++ b/crates/primitives/src/constants/eip4844.rs @@ -37,3 +37,6 @@ pub static KZG_TRUSTED_SETUP: Lazy> = Lazy::new(|| { file.write_all(TRUSTED_SETUP_RAW.as_bytes()).unwrap(); Arc::new(KzgSettings::load_trusted_setup_file(file.path().into()).unwrap()) }); + +/// Commitment version of a KZG commitment +pub const VERSIONED_HASH_VERSION_KZG: u8 = 0x01; diff --git a/crates/primitives/src/blobfee.rs b/crates/primitives/src/eip4844.rs similarity index 50% rename from crates/primitives/src/blobfee.rs rename to crates/primitives/src/eip4844.rs index e82b5d2f8c65..01d954373066 100644 --- a/crates/primitives/src/blobfee.rs +++ b/crates/primitives/src/eip4844.rs @@ -1,6 +1,19 @@ //! Helpers for working with EIP-4844 blob fee +use crate::{ + constants::eip4844::{TARGET_DATA_GAS_PER_BLOCK, VERSIONED_HASH_VERSION_KZG}, + kzg::KzgCommitment, + H256, +}; +use sha2::{Digest, Sha256}; -use crate::constants::eip4844::TARGET_DATA_GAS_PER_BLOCK; +/// Calculates the versioned hash for a KzgCommitment +/// +/// Specified in [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#header-extension) +pub fn kzg_to_versioned_hash(commitment: KzgCommitment) -> H256 { + let mut res = Sha256::digest(commitment.as_slice()); + res[0] = VERSIONED_HASH_VERSION_KZG; + H256::from_slice(&res) +} /// Calculates the excess data gas for the next block, after applying the current set of blobs on /// top of the excess data gas. 
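
The `kzg_to_versioned_hash` addition above implements the EIP-4844 header-extension rule: SHA-256 the 48-byte commitment, then overwrite the first byte with the KZG version tag. A self-contained sketch of the same rule using the `sha2` crate directly, with plain byte arrays standing in for reth's `KzgCommitment`/`H256` types:

    use sha2::{Digest, Sha256};

    const VERSIONED_HASH_VERSION_KZG: u8 = 0x01;

    // SHA-256 of the commitment, with the first byte replaced by the version tag.
    fn kzg_to_versioned_hash(commitment: &[u8; 48]) -> [u8; 32] {
        let mut hash: [u8; 32] = Sha256::digest(commitment).into();
        hash[0] = VERSIONED_HASH_VERSION_KZG;
        hash
    }

    fn main() {
        let hash = kzg_to_versioned_hash(&[0u8; 48]);
        assert_eq!(hash[0], VERSIONED_HASH_VERSION_KZG);
    }
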
diff --git a/crates/primitives/src/header.rs b/crates/primitives/src/header.rs index 8aeab18b84f8..bc6d75194a2b 100644 --- a/crates/primitives/src/header.rs +++ b/crates/primitives/src/header.rs @@ -1,6 +1,6 @@ use crate::{ basefee::calculate_next_block_base_fee, - blobfee::calculate_excess_blob_gas, + eip4844::calculate_excess_blob_gas, keccak256, proofs::{EMPTY_LIST_HASH, EMPTY_ROOT}, BaseFeeParams, BlockBodyRoots, BlockHash, BlockNumHash, BlockNumber, Bloom, Bytes, H160, H256, diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 96941dfadf99..300a215f4af2 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -23,13 +23,13 @@ pub mod abi; mod account; pub mod basefee; mod bits; -pub mod blobfee; mod block; pub mod bloom; mod chain; mod compression; pub mod constants; pub mod contract; +pub mod eip4844; mod forkid; pub mod fs; mod genesis; From 5c0ec6feebc824cedfdad400d651ea2775593acd Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 10 Aug 2023 20:01:23 +0200 Subject: [PATCH 405/722] fix: exclude single stop vm trace instruction (#4149) --- .../src/tracing/builder/parity.rs | 46 ++++++++++++++----- .../revm/revm-inspectors/src/tracing/types.rs | 6 +++ 2 files changed, 40 insertions(+), 12 deletions(-) diff --git a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs index 073e1707f73f..38f81e17fd83 100644 --- a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs +++ b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs @@ -273,6 +273,18 @@ impl ParityTraceBuilder { self.into_transaction_traces_iter().collect() } + /// Returns the last recorded step + #[inline] + fn last_step(&self) -> Option<&CallTraceStep> { + self.nodes.last().and_then(|node| node.trace.steps.last()) + } + + /// Returns true if the last recorded step is a STOP + #[inline] + fn is_last_step_stop_op(&self) -> bool { + self.last_step().map(|step| step.is_stop()).unwrap_or(false) + } + /// Creates a VM trace by walking over `CallTraceNode`s /// /// does not have the code fields filled in @@ -283,18 +295,18 @@ impl ParityTraceBuilder { } } - /// returns a VM trace without the code filled in + /// Returns a VM trace without the code filled in /// - /// iteratively creaters a VM trace by traversing an arena + /// Iteratively creates a VM trace by traversing the recorded nodes in the arena fn make_vm_trace(&self, start: &CallTraceNode) -> VmTrace { - let mut child_idx_stack: Vec = Vec::with_capacity(self.nodes.len()); - let mut sub_stack: VecDeque> = VecDeque::with_capacity(self.nodes.len()); + let mut child_idx_stack = Vec::with_capacity(self.nodes.len()); + let mut sub_stack = VecDeque::with_capacity(self.nodes.len()); let mut current = start; let mut child_idx: usize = 0; // finds the deepest nested calls of each call frame and fills them up bottom to top - let instructions = loop { + let instructions = 'outer: loop { match current.children.get(child_idx) { Some(child) => { child_idx_stack.push(child_idx + 1); @@ -303,17 +315,23 @@ impl ParityTraceBuilder { current = self.nodes.get(*child).expect("there should be a child"); } None => { - let mut instructions: Vec = - Vec::with_capacity(current.trace.steps.len()); + let mut instructions = Vec::with_capacity(current.trace.steps.len()); for step in ¤t.trace.steps { - let maybe_sub = if step.is_calllike_op() { - sub_stack.pop_front().expect("there should be a sub trace") + let maybe_sub_call = if step.is_calllike_op() { + 
sub_stack.pop_front().flatten() } else { None }; - instructions.push(self.make_instruction(step, maybe_sub)); + if step.is_stop() && instructions.is_empty() && self.is_last_step_stop_op() + { + // This is a special case where there's a single STOP which is + // "optimised away", transfers for example + break 'outer instructions + } + + instructions.push(self.make_instruction(step, maybe_sub_call)); } match current.parent { @@ -338,7 +356,11 @@ impl ParityTraceBuilder { /// Creates a VM instruction from a [CallTraceStep] and a [VmTrace] for the subcall if there is /// one - fn make_instruction(&self, step: &CallTraceStep, maybe_sub: Option) -> VmInstruction { + fn make_instruction( + &self, + step: &CallTraceStep, + maybe_sub_call: Option, + ) -> VmInstruction { let maybe_storage = step.storage_change.map(|storage_change| StorageDelta { key: storage_change.key, val: storage_change.value, @@ -369,7 +391,7 @@ impl ParityTraceBuilder { pc: step.pc, cost: cost as u64, ex: maybe_execution, - sub: maybe_sub, + sub: maybe_sub_call, op: Some(step.op.to_string()), idx: None, } diff --git a/crates/revm/revm-inspectors/src/tracing/types.rs b/crates/revm/revm-inspectors/src/tracing/types.rs index 83a4586a4f3e..552e7d33a0c7 100644 --- a/crates/revm/revm-inspectors/src/tracing/types.rs +++ b/crates/revm/revm-inspectors/src/tracing/types.rs @@ -599,6 +599,12 @@ impl CallTraceStep { log } + /// Returns true if the step is a STOP opcode + #[inline] + pub(crate) fn is_stop(&self) -> bool { + matches!(self.op.u8(), opcode::STOP) + } + /// Returns true if the step is a call operation, any of /// CALL, CALLCODE, DELEGATECALL, STATICCALL, CREATE, CREATE2 #[inline] From 47bf60be610183b6c48f3df2100baf4ac947d4cb Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 10 Aug 2023 20:01:37 +0200 Subject: [PATCH 406/722] fix: sub field is always serialized (#4151) --- crates/rpc/rpc-types/src/eth/trace/parity.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/rpc/rpc-types/src/eth/trace/parity.rs b/crates/rpc/rpc-types/src/eth/trace/parity.rs index aa2732732fc3..6821d912e65a 100644 --- a/crates/rpc/rpc-types/src/eth/trace/parity.rs +++ b/crates/rpc/rpc-types/src/eth/trace/parity.rs @@ -298,7 +298,6 @@ pub struct VmInstruction { /// The program counter. pub pc: usize, /// Subordinate trace of the CALL/CREATE if applicable. - #[serde(skip_serializing_if = "Option::is_none")] pub sub: Option, /// Stringified opcode. #[serde(skip_serializing_if = "Option::is_none")] From 500b0fac135fe07635d871d64467326599e2b27e Mon Sep 17 00:00:00 2001 From: Andrew Ar <118139478+siguint@users.noreply.github.com> Date: Thu, 10 Aug 2023 21:45:32 +0300 Subject: [PATCH 407/722] (feat):add transaction::is_dynamic_fee (#4152) --- crates/primitives/src/transaction/mod.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 8c7e0394c6a2..f63c6b9e1e01 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -168,6 +168,14 @@ impl Transaction { } } + /// Returns true if the tx supports dynamic fees + pub fn is_dynamic_fee(&self) -> bool { + match self { + Transaction::Legacy(_) | Transaction::Eip2930(_) => false, + Transaction::Eip1559(_) | Transaction::Eip4844(_) => true, + } + } + /// Max fee per gas for eip1559 transaction, for legacy transactions this is gas_price. /// /// This is also commonly referred to as the "Gas Fee Cap" (`GasFeeCap`). 
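
The new `is_dynamic_fee` separates the two fee models: legacy and EIP-2930 transactions carry a fixed `gas_price`, while EIP-1559 and EIP-4844 transactions bid a fee cap plus a priority tip against the block's base fee. A minimal sketch of what the distinction buys a caller, using a toy `Tx` enum rather than reth's `Transaction`:

    enum Tx {
        Legacy { gas_price: u128 },
        Eip1559 { max_fee_per_gas: u128, max_priority_fee_per_gas: u128 },
    }

    impl Tx {
        fn is_dynamic_fee(&self) -> bool {
            matches!(self, Tx::Eip1559 { .. })
        }

        // Price per gas actually paid at a given base fee.
        fn effective_gas_price(&self, base_fee: u128) -> u128 {
            match self {
                Tx::Legacy { gas_price } => *gas_price,
                Tx::Eip1559 { max_fee_per_gas, max_priority_fee_per_gas } => {
                    (*max_fee_per_gas).min(base_fee + *max_priority_fee_per_gas)
                }
            }
        }
    }

    fn main() {
        let tx = Tx::Eip1559 { max_fee_per_gas: 100, max_priority_fee_per_gas: 2 };
        assert!(tx.is_dynamic_fee());
        assert_eq!(tx.effective_gas_price(90), 92); // base fee + tip, capped by the fee cap
    }
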
From 03fc87701ac8318fe408fde94c5e38b833f10938 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 11 Aug 2023 15:39:38 +0300 Subject: [PATCH 408/722] fix(txpool): maintenance tracing target (#4160) --- crates/transaction-pool/src/maintain.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 1aa446345efd..05bb7824a09d 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -105,7 +105,7 @@ pub async fn maintain_transaction_pool( // The update loop that waits for new blocks and reorgs and performs pool updated // Listen for new chain events and derive the update action for the pool loop { - trace!(target = "txpool", state=?maintained_state, "awaiting new block or reorg"); + trace!(target: "txpool", state=?maintained_state, "awaiting new block or reorg"); metrics.set_dirty_accounts_len(dirty_addresses.len()); let pool_info = pool.block_info(); @@ -181,7 +181,7 @@ pub async fn maintain_transaction_pool( Some(Ok(Err(res))) => { // Failed to load accounts from state let (accs, err) = *res; - debug!(target = "txpool", ?err, "failed to load accounts"); + debug!(target: "txpool", ?err, "failed to load accounts"); dirty_addresses.extend(accs); } Some(Err(_)) => { @@ -238,7 +238,7 @@ pub async fn maintain_transaction_pool( Err(err) => { let (addresses, err) = *err; debug!( - target = "txpool", + target: "txpool", ?err, "failed to load missing changed accounts at new tip: {:?}", new_tip.hash @@ -295,7 +295,7 @@ pub async fn maintain_transaction_pool( let first_block = blocks.first(); trace!( - target = "txpool", + target: "txpool", first = first_block.number, tip = tip.number, pool_block = pool_info.last_seen_block_number, @@ -307,7 +307,7 @@ pub async fn maintain_transaction_pool( let depth = tip.number.abs_diff(pool_info.last_seen_block_number); if depth > max_update_depth { maintained_state = MaintainedPoolState::Drifted; - debug!(target = "txpool", ?depth, "skipping deep canonical update"); + debug!(target: "txpool", ?depth, "skipping deep canonical update"); let info = BlockInfo { last_seen_block_hash: tip.hash, last_seen_block_number: tip.number, From 427a8395f9d89aaab8ff563d6041af34cddbf425 Mon Sep 17 00:00:00 2001 From: PatStiles <33334338+PatStiles@users.noreply.github.com> Date: Fri, 11 Aug 2023 08:34:56 -0500 Subject: [PATCH 409/722] feat(txpool): Make TransactionPool trait object safe (#4156) Co-authored-by: Matthias Seitz --- crates/consensus/auto-seal/src/task.rs | 4 +++- crates/net/network/src/transactions.rs | 6 ++---- crates/transaction-pool/src/lib.rs | 7 ++----- crates/transaction-pool/src/noop.rs | 7 ++----- crates/transaction-pool/src/pool/mod.rs | 4 ++-- crates/transaction-pool/src/pool/txpool.rs | 12 ++++++------ crates/transaction-pool/src/traits.rs | 7 ++----- 7 files changed, 19 insertions(+), 28 deletions(-) diff --git a/crates/consensus/auto-seal/src/task.rs b/crates/consensus/auto-seal/src/task.rs index ddb450acea62..4abdd19b12b1 100644 --- a/crates/consensus/auto-seal/src/task.rs +++ b/crates/consensus/auto-seal/src/task.rs @@ -135,7 +135,9 @@ where { Ok((new_header, post_state)) => { // clear all transactions from pool - pool.remove_transactions(transactions.iter().map(|tx| tx.hash())); + pool.remove_transactions( + transactions.iter().map(|tx| tx.hash()).collect(), + ); let state = ForkchoiceState { head_block_hash: new_header.hash, diff --git a/crates/net/network/src/transactions.rs 
b/crates/net/network/src/transactions.rs index 7345893d1e74..87850f1e4f51 100644 --- a/crates/net/network/src/transactions.rs +++ b/crates/net/network/src/transactions.rs @@ -204,7 +204,7 @@ where /// complete transaction object if it is unknown to them. The dissemination of complete /// transactions to a fraction of peers usually ensures that all nodes receive the transaction /// and won't need to request it. - fn on_new_transactions(&mut self, hashes: impl IntoIterator) { + fn on_new_transactions(&mut self, hashes: Vec) { // Nothing to propagate while initially syncing if self.network.is_initially_syncing() { return @@ -372,9 +372,7 @@ where /// Handles a command received from a detached [`TransactionsHandle`] fn on_command(&mut self, cmd: TransactionsCommand) { match cmd { - TransactionsCommand::PropagateHash(hash) => { - self.on_new_transactions(std::iter::once(hash)) - } + TransactionsCommand::PropagateHash(hash) => self.on_new_transactions(vec![hash]), } } diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index ccdf752e0dc9..8d039f66a9d5 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -401,7 +401,7 @@ where fn remove_transactions( &self, - hashes: impl IntoIterator, + hashes: Vec, ) -> Vec>> { self.pool.remove_transactions(hashes) } @@ -414,10 +414,7 @@ where self.inner().get(tx_hash) } - fn get_all( - &self, - txs: impl IntoIterator, - ) -> Vec>> { + fn get_all(&self, txs: Vec) -> Vec>> { self.inner().get_all(txs) } diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index 920a98b0d682..d9c26214a516 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -135,7 +135,7 @@ impl TransactionPool for NoopTransactionPool { fn remove_transactions( &self, - _hashes: impl IntoIterator, + _hashes: Vec, ) -> Vec>> { vec![] } @@ -146,10 +146,7 @@ impl TransactionPool for NoopTransactionPool { None } - fn get_all( - &self, - _txs: impl IntoIterator, - ) -> Vec>> { + fn get_all(&self, _txs: Vec) -> Vec>> { vec![] } diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 93003431c5e6..827963369010 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -513,7 +513,7 @@ where /// Removes and returns all matching transactions from the pool. pub(crate) fn remove_transactions( &self, - hashes: impl IntoIterator, + hashes: Vec, ) -> Vec>> { let removed = self.pool.write().remove_transactions(hashes); @@ -552,7 +552,7 @@ where /// If no transaction exists, it is skipped. pub(crate) fn get_all( &self, - txs: impl IntoIterator, + txs: Vec, ) -> Vec>> { self.pool.read().get_all(txs).collect() } diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 292206998766..92abe86c6550 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -247,10 +247,10 @@ impl TxPool { } /// Returns transactions for the multiple given hashes, if they exist. - pub(crate) fn get_all<'a>( - &'a self, - txs: impl IntoIterator + 'a, - ) -> impl Iterator>> + 'a { + pub(crate) fn get_all( + &self, + txs: Vec, + ) -> impl Iterator>> + '_ { txs.into_iter().filter_map(|tx| self.get(&tx)) } @@ -409,7 +409,7 @@ impl TxPool { /// Maintenance task to apply a series of updates. 
/// /// This will move/discard the given transaction according to the `PoolUpdate` - fn process_updates(&mut self, updates: impl IntoIterator) -> UpdateOutcome { + fn process_updates(&mut self, updates: Vec) -> UpdateOutcome { let mut outcome = UpdateOutcome::default(); for update in updates { let PoolUpdate { id, hash, current, destination } = update; @@ -445,7 +445,7 @@ impl TxPool { /// any additional updates. pub(crate) fn remove_transactions( &mut self, - hashes: impl IntoIterator, + hashes: Vec, ) -> Vec>> { hashes.into_iter().filter_map(|hash| self.remove_transaction_by_hash(&hash)).collect() } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 5cffaa30df43..e89c94385be3 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -227,7 +227,7 @@ pub trait TransactionPool: Send + Sync + Clone { /// Consumer: Block production fn remove_transactions( &self, - hashes: impl IntoIterator, + hashes: Vec, ) -> Vec>>; /// Retains only those hashes that are unknown to the pool. @@ -250,10 +250,7 @@ pub trait TransactionPool: Send + Sync + Clone { /// This adheres to the expected behavior of [`GetPooledTransactions`](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#getpooledtransactions-0x09): /// The transactions must be in same order as in the request, but it is OK to skip transactions /// which are not available. - fn get_all( - &self, - txs: impl IntoIterator, - ) -> Vec>>; + fn get_all(&self, txs: Vec) -> Vec>>; /// Notify the pool about transactions that are propagated to peers. /// From 1c23075edbdb2d49b3759935aa7bece792c2e3e4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 11 Aug 2023 16:28:51 +0200 Subject: [PATCH 410/722] fix: remove discarded transactions (#4164) --- crates/transaction-pool/src/pool/txpool.rs | 29 ++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 92abe86c6550..0aa7f84be017 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -415,7 +415,10 @@ impl TxPool { let PoolUpdate { id, hash, current, destination } = update; match destination { Destination::Discard => { + // remove the transaction from the pool and subpool + self.prune_transaction_by_hash(&hash); outcome.discarded.push(hash); + self.metrics.removed_transactions.increment(1); } Destination::Pool(move_to) => { debug_assert!(!move_to.eq(¤t), "destination must be different"); @@ -1683,4 +1686,30 @@ mod tests { assert_eq!(pool.all_transactions.txs.get(&id).unwrap().subpool, SubPool::BaseFee) } + + #[test] + fn discard_nonce_too_low() { + let mut f = MockTransactionFactory::default(); + let mut pool = TxPool::new(MockOrdering::default(), Default::default()); + + let tx = MockTransaction::eip1559().inc_price_by(10); + let validated = f.validated(tx.clone()); + let id = *validated.id(); + pool.add_transaction(validated, U256::from(1_000), 0).unwrap(); + + let next = tx.next(); + let validated = f.validated(next.clone()); + pool.add_transaction(validated, U256::from(1_000), 0).unwrap(); + + assert_eq!(pool.pending_pool.len(), 2); + + let mut changed_senders = HashMap::new(); + changed_senders.insert( + id.sender, + SenderInfo { state_nonce: next.get_nonce(), balance: U256::from(1_000) }, + ); + let outcome = pool.update_accounts(changed_senders); + assert_eq!(outcome.discarded.len(), 1); + assert_eq!(pool.pending_pool.len(), 1); + } } From 
81e8ad4de667b1729e0fa3a16b12f0c5d373f4b5 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Sat, 12 Aug 2023 13:17:33 +0300 Subject: [PATCH 411/722] fix(rpc): tracing target (#4161) Co-authored-by: Matthias Seitz --- crates/rpc/rpc/src/eth/api/mod.rs | 2 +- crates/rpc/rpc/src/layers/jwt_validator.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/rpc/rpc/src/eth/api/mod.rs b/crates/rpc/rpc/src/eth/api/mod.rs index d4f30a5ad665..e09c84969f62 100644 --- a/crates/rpc/rpc/src/eth/api/mod.rs +++ b/crates/rpc/rpc/src/eth/api/mod.rs @@ -299,7 +299,7 @@ where let pending_block = match pending.build_block(this.provider(), this.pool()) { Ok(block) => block, Err(err) => { - tracing::debug!(target = "rpc", "Failed to build pending block: {:?}", err); + tracing::debug!(target: "rpc", "Failed to build pending block: {:?}", err); return Ok(None) } }; diff --git a/crates/rpc/rpc/src/layers/jwt_validator.rs b/crates/rpc/rpc/src/layers/jwt_validator.rs index 54ee63d715e1..86901accc757 100644 --- a/crates/rpc/rpc/src/layers/jwt_validator.rs +++ b/crates/rpc/rpc/src/layers/jwt_validator.rs @@ -29,14 +29,14 @@ impl AuthValidator for JwtAuthValidator { Some(jwt) => match self.secret.validate(jwt) { Ok(_) => Ok(()), Err(e) => { - error!(target = "engine::jwt-validator", "Invalid JWT: {e}"); + error!(target: "engine::jwt-validator", "Invalid JWT: {e}"); let response = err_response(e); Err(response) } }, None => { let e = JwtError::MissingOrInvalidAuthorizationHeader; - error!(target = "engine::jwt-validator", "Invalid JWT: {e}"); + error!(target: "engine::jwt-validator", "Invalid JWT: {e}"); let response = err_response(e); Err(response) } From 2a62f2d156f5b0b5370e5693da09b8124677530e Mon Sep 17 00:00:00 2001 From: Jacob Kaufmann Date: Sat, 12 Aug 2023 13:24:26 -0600 Subject: [PATCH 412/722] docs(tx-pool): fix pending tx listener typo (#4171) --- crates/transaction-pool/src/traits.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index e89c94385be3..d3572c52b3ff 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -127,7 +127,7 @@ pub trait TransactionPool: Send + Sync + Clone { /// Returns a new stream that yields new valid transactions added to the pool. fn new_transactions_listener(&self) -> Receiver>; - /// Returns a new Stream that yields new transactions added to the basefee-pool. + /// Returns a new Stream that yields new transactions added to the pending sub-pool. /// /// This is a convenience wrapper around [Self::new_transactions_listener] that filters for /// [SubPool::Pending](crate::SubPool). From d152c9e3e7e251986a10601aa4fcba2784ed6891 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 13 Aug 2023 10:59:33 +0200 Subject: [PATCH 413/722] docs: fix smol typo (#4177) --- crates/transaction-pool/src/traits.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index d3572c52b3ff..511ddc9a8307 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -23,7 +23,7 @@ use tokio::sync::mpsc::Receiver; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -/// General purpose abstraction fo a transaction-pool. +/// General purpose abstraction of a transaction-pool. /// /// This is intended to be used by API-consumers such as RPC that need inject new incoming, /// unverified transactions. 
And by block production that needs to get transactions to execute in a From 84625e4a8c78637cda8e4d95f0f26d17e152522e Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 13 Aug 2023 09:12:29 +0000 Subject: [PATCH 414/722] chore(deps): weekly `cargo update` (#4173) Co-authored-by: github-merge-queue Co-authored-by: Matthias Seitz --- Cargo.lock | 66 +++++++++++++++++++++++++++--------------------------- 1 file changed, 33 insertions(+), 33 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 52e30d02a76b..1add55f8f9bb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -429,9 +429,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.72" +version = "0.1.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc6dde6e4ed435a4c1ee4e73592f5ba9da2151af10076cc04858746af9352d09" +checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", @@ -666,9 +666,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.3.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" +checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" [[package]] name = "bitvec" @@ -723,9 +723,9 @@ dependencies = [ [[package]] name = "boa_ast" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#22b014d5d5ab9fba5dc467734227558e75c66d20" +source = "git+https://github.com/boa-dev/boa#a3b46545a2a09f9ac81fd83ac6b180934c728f61" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.0", "boa_interner", "boa_macros", "indexmap 2.0.0", @@ -736,9 +736,9 @@ dependencies = [ [[package]] name = "boa_engine" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#22b014d5d5ab9fba5dc467734227558e75c66d20" +source = "git+https://github.com/boa-dev/boa#a3b46545a2a09f9ac81fd83ac6b180934c728f61" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.0", "boa_ast", "boa_gc", "boa_icu_provider", @@ -774,7 +774,7 @@ dependencies = [ [[package]] name = "boa_gc" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#22b014d5d5ab9fba5dc467734227558e75c66d20" +source = "git+https://github.com/boa-dev/boa#a3b46545a2a09f9ac81fd83ac6b180934c728f61" dependencies = [ "boa_macros", "boa_profiler", @@ -785,7 +785,7 @@ dependencies = [ [[package]] name = "boa_icu_provider" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#22b014d5d5ab9fba5dc467734227558e75c66d20" +source = "git+https://github.com/boa-dev/boa#a3b46545a2a09f9ac81fd83ac6b180934c728f61" dependencies = [ "icu_collections", "icu_normalizer", @@ -798,7 +798,7 @@ dependencies = [ [[package]] name = "boa_interner" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#22b014d5d5ab9fba5dc467734227558e75c66d20" +source = "git+https://github.com/boa-dev/boa#a3b46545a2a09f9ac81fd83ac6b180934c728f61" dependencies = [ "boa_gc", "boa_macros", @@ -813,7 +813,7 @@ dependencies = [ [[package]] name = "boa_macros" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#22b014d5d5ab9fba5dc467734227558e75c66d20" +source = "git+https://github.com/boa-dev/boa#a3b46545a2a09f9ac81fd83ac6b180934c728f61" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", @@ -824,9 +824,9 @@ dependencies = [ [[package]] name = "boa_parser" version = "0.17.0" -source = 
"git+https://github.com/boa-dev/boa#22b014d5d5ab9fba5dc467734227558e75c66d20" +source = "git+https://github.com/boa-dev/boa#a3b46545a2a09f9ac81fd83ac6b180934c728f61" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.0", "boa_ast", "boa_icu_provider", "boa_interner", @@ -844,7 +844,7 @@ dependencies = [ [[package]] name = "boa_profiler" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#22b014d5d5ab9fba5dc467734227558e75c66d20" +source = "git+https://github.com/boa-dev/boa#a3b46545a2a09f9ac81fd83ac6b180934c728f61" [[package]] name = "brotli" @@ -933,7 +933,7 @@ dependencies = [ [[package]] name = "c-kzg" version = "0.1.0" -source = "git+https://github.com/ethereum/c-kzg-4844#9d85ed8b194eee2a70380e58b6c45909f75933ea" +source = "git+https://github.com/ethereum/c-kzg-4844#3ce8f863415ac1b218bc7d63cc14778b570aa081" dependencies = [ "bindgen 0.64.0 (git+https://github.com/rust-lang/rust-bindgen?rev=0de11f0a521611ac8738b7b01d19dddaf3899e66)", "cc", @@ -1499,9 +1499,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.0.0-rc.3" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "436ace70fc06e06f7f689d2624dc4e2f0ea666efb5aa704215f7249ae6e047a7" +checksum = "f711ade317dd348950a9910f81c5947e3d8907ebd2b83f76203ff1807e6a2bc2" dependencies = [ "cfg-if", "cpufeatures", @@ -1904,9 +1904,9 @@ dependencies = [ [[package]] name = "ed25519-dalek" -version = "2.0.0-rc.3" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faa8e9049d5d72bfc12acbc05914731b5322f79b5e2f195e9f2d705fca22ab4c" +checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" dependencies = [ "curve25519-dalek", "ed25519", @@ -3424,7 +3424,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi", - "rustix 0.38.7", + "rustix 0.38.8", "windows-sys 0.48.0", ] @@ -3796,9 +3796,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.19" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "lru" @@ -5640,7 +5640,7 @@ dependencies = [ name = "reth-libmdbx" version = "0.1.0-alpha.6" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.0", "byteorder", "criterion", "derive_more", @@ -6425,11 +6425,11 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.7" +version = "0.38.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "172891ebdceb05aa0005f533a6cbfca599ddd7d966f6f5d4d9b2e70478e70399" +checksum = "19ed4fa021d81c8392ce04db050a3da9a60299050b7ae1cf482d862b54a7218f" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.0", "errno 0.3.2", "libc", "linux-raw-sys 0.4.5", @@ -7323,7 +7323,7 @@ dependencies = [ "cfg-if", "fastrand 2.0.0", "redox_syscall 0.3.5", - "rustix 0.38.7", + "rustix 0.38.8", "windows-sys 0.48.0", ] @@ -7511,9 +7511,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.30.0" +version = "1.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d3ce25f50619af8b0aec2eb23deebe84249e19e2ddd393a6e16e3300a6dadfd" +checksum = "40de3a2ba249dcb097e01be5e67a5ff53cf250397715a071a81543e8a832a920" dependencies = [ "backtrace", 
"bytes", @@ -7674,7 +7674,7 @@ checksum = "55ae70283aba8d2a8b411c695c437fe25b8b5e44e23e780662002fc72fb47a82" dependencies = [ "async-compression", "base64 0.21.2", - "bitflags 2.3.3", + "bitflags 2.4.0", "bytes", "futures-core", "futures-util", @@ -8463,9 +8463,9 @@ checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "winnow" -version = "0.5.5" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48f9aab5bf4474679c9908b82c245a17ee48b55e07350d439ef522020cce22ff" +checksum = "5504cc7644f4b593cbc05c4a55bf9bd4e94b867c3c0bd440934174d50482427d" dependencies = [ "memchr", ] From c9bb6216cdcb7991aa1c4b339075879b2b0c498f Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Mon, 14 Aug 2023 12:23:08 +0300 Subject: [PATCH 415/722] chore(cli): remove unused `debug.tip` argument from execution debug script (#4183) --- bin/reth/src/debug_cmd/execution.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/bin/reth/src/debug_cmd/execution.rs b/bin/reth/src/debug_cmd/execution.rs index 4cbe6848c2fb..3550413897ef 100644 --- a/bin/reth/src/debug_cmd/execution.rs +++ b/bin/reth/src/debug_cmd/execution.rs @@ -78,12 +78,6 @@ pub struct Command { #[clap(flatten)] db: DatabaseArgs, - /// Set the chain tip manually for testing purposes. - /// - /// NOTE: This is a temporary flag - #[arg(long = "debug.tip", help_heading = "Debug")] - pub tip: Option, - /// The maximum block height. #[arg(long)] pub to: u64, From d0b687b31284601e4ee82d716527d7acc5226bae Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 14 Aug 2023 11:23:48 +0200 Subject: [PATCH 416/722] fix: spawn js service task on blocking pool (#4180) --- crates/rpc/rpc/src/debug.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index cf7a98b74549..c3cba02e3ecf 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -538,7 +538,9 @@ where let (to_db_service, rx) = mpsc::channel(1); let (ready_tx, ready_rx) = std::sync::mpsc::channel(); let this = self.clone(); - self.inner.task_spawner.spawn(Box::pin(async move { + // this needs to be on a blocking task because it only does blocking work besides waiting + // for db requests + self.inner.task_spawner.spawn_blocking(Box::pin(async move { this.js_trace_db_service_task(at, rx, ready_tx, db).await })); // wait for initialization From 1d40f7197e7af368cdcdbf3d4c47115bacf9652a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 14 Aug 2023 11:24:41 +0200 Subject: [PATCH 417/722] style: use unwrap or default (#4169) --- crates/revm/revm-inspectors/src/tracing/builder/parity.rs | 5 +---- crates/rpc/rpc-types/src/eth/trace/parity.rs | 2 +- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs index 38f81e17fd83..edc9d2b6463e 100644 --- a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs +++ b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs @@ -289,10 +289,7 @@ impl ParityTraceBuilder { /// /// does not have the code fields filled in pub fn vm_trace(&self) -> VmTrace { - match self.nodes.get(0) { - Some(current) => self.make_vm_trace(current), - None => VmTrace { code: Default::default(), ops: Vec::new() }, - } + self.nodes.first().map(|node| self.make_vm_trace(node)).unwrap_or_default() } /// Returns a VM trace without the code filled in diff --git 
a/crates/rpc/rpc-types/src/eth/trace/parity.rs b/crates/rpc/rpc-types/src/eth/trace/parity.rs index 6821d912e65a..025c922d96c1 100644 --- a/crates/rpc/rpc-types/src/eth/trace/parity.rs +++ b/crates/rpc/rpc-types/src/eth/trace/parity.rs @@ -279,7 +279,7 @@ pub struct LocalizedTransactionTrace { } /// A record of a full VM trace for a CALL/CREATE. -#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct VmTrace { /// The code to be executed. From 4ff8bca977e6499f32700edfd58877261879a2ad Mon Sep 17 00:00:00 2001 From: Alessandro Mazza <121622391+alessandromazza98@users.noreply.github.com> Date: Mon, 14 Aug 2023 11:25:44 +0200 Subject: [PATCH 418/722] feat: add flag to CLI to disable colour coding of console output (#4033) Co-authored-by: Matthias Seitz --- bin/reth/src/cli/mod.rs | 44 ++++++++++++++++++++++++++++++++++++--- crates/tracing/src/lib.rs | 5 +++-- 2 files changed, 44 insertions(+), 5 deletions(-) diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index 979ffa5e601e..50864f5bfa0a 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -10,14 +10,14 @@ use crate::{ stage, test_vectors, version::{LONG_VERSION, SHORT_VERSION}, }; -use clap::{ArgAction, Args, Parser, Subcommand}; +use clap::{ArgAction, Args, Parser, Subcommand, ValueEnum}; use reth_primitives::ChainSpec; use reth_tracing::{ tracing::{metadata::LevelFilter, Level, Subscriber}, tracing_subscriber::{filter::Directive, registry::LookupSpan, EnvFilter}, BoxedLayer, FileWorkerGuard, }; -use std::sync::Arc; +use std::{fmt, fmt::Display, sync::Arc}; pub mod config; pub mod ext; @@ -86,7 +86,8 @@ impl Cli { /// If file logging is enabled, this function returns a guard that must be kept alive to ensure /// that all logs are flushed to disk. pub fn init_tracing(&self) -> eyre::Result> { - let mut layers = vec![reth_tracing::stdout(self.verbosity.directive())]; + let mut layers = + vec![reth_tracing::stdout(self.verbosity.directive(), &self.logs.color.to_string())]; let guard = self.logs.layer()?.map(|(layer, guard)| { layers.push(layer); guard @@ -163,6 +164,16 @@ pub struct Logs { /// The filter to use for logs written to the log file. #[arg(long = "log.filter", value_name = "FILTER", global = true, default_value = "error")] filter: String, + + /// Sets whether or not the formatter emits ANSI terminal escape codes for colors and other + /// text formatting. + #[arg( + long, + value_name = "COLOR", + global = true, + default_value_t = ColorMode::Always + )] + color: ColorMode, } impl Logs { @@ -224,11 +235,38 @@ impl Verbosity { } } +/// The color mode for the cli. +#[derive(Debug, Copy, Clone, ValueEnum, Eq, PartialEq)] +pub enum ColorMode { + /// Colors on + Always, + /// Colors auto-detected + Auto, + /// Colors off + Never, +} + +impl Display for ColorMode { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ColorMode::Always => write!(f, "always"), + ColorMode::Auto => write!(f, "auto"), + ColorMode::Never => write!(f, "never"), + } + } +} + #[cfg(test)] mod tests { use super::*; use clap::CommandFactory; + #[test] + fn parse_color_mode() { + let reth = Cli::<()>::try_parse_from(["reth", "node", "--color", "always"]).unwrap(); + assert_eq!(reth.logs.color, ColorMode::Always); + } + /// Tests that the help message is parsed correctly.
This ensures that clap args are configured /// correctly and no conflicts are introduced via attributes that would result in a panic at /// runtime diff --git a/crates/tracing/src/lib.rs b/crates/tracing/src/lib.rs index 074b6dc9f072..e6c3caacc468 100644 --- a/crates/tracing/src/lib.rs +++ b/crates/tracing/src/lib.rs @@ -43,13 +43,14 @@ pub fn init(layers: Vec>) { /// /// Colors can be disabled with `RUST_LOG_STYLE=never`, and event targets can be displayed with /// `RUST_LOG_TARGET=1`. -pub fn stdout(default_directive: impl Into) -> BoxedLayer +pub fn stdout(default_directive: impl Into, color: &str) -> BoxedLayer where S: Subscriber, for<'a> S: LookupSpan<'a>, { // TODO: Auto-detect - let with_ansi = std::env::var("RUST_LOG_STYLE").map(|val| val != "never").unwrap_or(true); + let with_ansi = + std::env::var("RUST_LOG_STYLE").map(|val| val != "never").unwrap_or(color != "never"); let with_target = std::env::var("RUST_LOG_TARGET").map(|val| val != "0").unwrap_or(true); let filter = From 72236490ce2e7e3079f075918bdf777e7151b7a5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 14 Aug 2023 11:45:28 +0200 Subject: [PATCH 419/722] fix: restrict max active queries (#4178) --- crates/net/common/src/ratelimit.rs | 5 +++++ crates/net/dns/src/query.rs | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/crates/net/common/src/ratelimit.rs b/crates/net/common/src/ratelimit.rs index e6d63e3331d3..26440ae3ca92 100644 --- a/crates/net/common/src/ratelimit.rs +++ b/crates/net/common/src/ratelimit.rs @@ -27,6 +27,11 @@ impl RateLimit { RateLimit { rate, state, sleep: Box::pin(tokio::time::sleep_until(until)) } } + /// Returns the configured limit of the [RateLimit] + pub fn limit(&self) -> u64 { + self.rate.limit() + } + /// Checks if the [RateLimit] is ready to handle a new call pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<()> { match self.state { diff --git a/crates/net/dns/src/query.rs b/crates/net/dns/src/query.rs index 10e1fbf21497..a3cba5caf110 100644 --- a/crates/net/dns/src/query.rs +++ b/crates/net/dns/src/query.rs @@ -78,8 +78,8 @@ impl QueryPool { return Poll::Ready(event) } - // queue in new queries - 'queries: loop { + // queue in new queries if we have capacity + 'queries: while self.active_queries.len() < self.rate_limit.limit() as usize { if self.rate_limit.poll_ready(cx).is_ready() { if let Some(query) = self.queued_queries.pop_front() { self.rate_limit.tick(); From dfdfea8d72ca345d60143042efa3e4a02325afb0 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 14 Aug 2023 07:00:48 -0400 Subject: [PATCH 420/722] feat: download blocks in merkle debug script (#4137) --- bin/reth/src/debug_cmd/merkle.rs | 122 +++++++++++++++++++++++++++++-- bin/reth/src/debug_cmd/mod.rs | 2 +- 2 files changed, 117 insertions(+), 7 deletions(-) diff --git a/bin/reth/src/debug_cmd/merkle.rs b/bin/reth/src/debug_cmd/merkle.rs index 1de670e3a831..47e37c36996e 100644 --- a/bin/reth/src/debug_cmd/merkle.rs +++ b/bin/reth/src/debug_cmd/merkle.rs @@ -1,16 +1,25 @@ //! Command for debugging merkle trie calculation. 
use crate::{ - args::{utils::genesis_value_parser, DatabaseArgs}, + args::{get_secret_key, utils::genesis_value_parser, DatabaseArgs, NetworkArgs}, dirs::{DataDirPath, MaybePlatformPath}, + runner::CliContext, + utils::get_single_header, }; +use backon::{ConstantBuilder, Retryable}; use clap::Parser; -use reth_db::{cursor::DbCursorRO, init_db, tables, transaction::DbTx}; +use reth_beacon_consensus::BeaconConsensus; +use reth_config::Config; +use reth_db::{cursor::DbCursorRO, init_db, tables, transaction::DbTx, DatabaseEnv}; +use reth_discv4::DEFAULT_DISCOVERY_PORT; +use reth_interfaces::{consensus::Consensus, p2p::full_block::FullBlockClient}; +use reth_network::NetworkHandle; +use reth_network_api::NetworkInfo; use reth_primitives::{ fs, stage::{StageCheckpoint, StageId}, - ChainSpec, PruneModes, + BlockHashOrNumber, ChainSpec, PruneModes, }; -use reth_provider::{ProviderFactory, StageCheckpointReader}; +use reth_provider::{BlockWriter, ProviderFactory, StageCheckpointReader}; use reth_stages::{ stages::{ AccountHashingStage, ExecutionStage, ExecutionStageThresholds, MerkleStage, @@ -18,7 +27,13 @@ use reth_stages::{ }, ExecInput, PipelineError, Stage, }; -use std::sync::Arc; +use reth_tasks::TaskExecutor; +use std::{ + net::{Ipv4Addr, SocketAddr, SocketAddrV4}, + path::PathBuf, + sync::Arc, +}; +use tracing::{debug, info, warn}; /// `reth merkle-debug` command #[derive(Debug, Parser)] @@ -53,6 +68,13 @@ pub struct Command { #[clap(flatten)] db: DatabaseArgs, + #[clap(flatten)] + network: NetworkArgs, + + /// The number of retries per request + #[arg(long, default_value = "5")] + retries: usize, + /// The height to finish at #[arg(long)] to: u64, @@ -63,21 +85,109 @@ pub struct Command { } impl Command { + async fn build_network( + &self, + config: &Config, + task_executor: TaskExecutor, + db: Arc, + network_secret_path: PathBuf, + default_peers_path: PathBuf, + ) -> eyre::Result { + let secret_key = get_secret_key(&network_secret_path)?; + let network = self + .network + .network_config(config, self.chain.clone(), secret_key, default_peers_path) + .with_task_executor(Box::new(task_executor)) + .listener_addr(SocketAddr::V4(SocketAddrV4::new( + Ipv4Addr::UNSPECIFIED, + self.network.port.unwrap_or(DEFAULT_DISCOVERY_PORT), + ))) + .discovery_addr(SocketAddr::V4(SocketAddrV4::new( + Ipv4Addr::UNSPECIFIED, + self.network.discovery.port.unwrap_or(DEFAULT_DISCOVERY_PORT), + ))) + .build(ProviderFactory::new(db, self.chain.clone())) + .start_network() + .await?; + info!(target: "reth::cli", peer_id = %network.peer_id(), local_addr = %network.local_addr(), "Connected to P2P network"); + debug!(target: "reth::cli", peer_id = ?network.peer_id(), "Full peer ID"); + Ok(network) + } + /// Execute `merkle-debug` command - pub async fn execute(self) -> eyre::Result<()> { + pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> { + let config = Config::default(); + // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); let db_path = data_dir.db_path(); fs::create_dir_all(&db_path)?; + // initialize the database let db = Arc::new(init_db(db_path, self.db.log_level)?); let factory = ProviderFactory::new(&db, self.chain.clone()); let provider_rw = factory.provider_rw().map_err(PipelineError::Interface)?; + // Configure and build network + let network_secret_path = + self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path()); + let network = self + .build_network( + &config, + ctx.task_executor.clone(), + db.clone(), + 
network_secret_path, + data_dir.known_peers_path(), + ) + .await?; + + // Initialize the fetch client + info!(target: "reth::cli", target_block_number=self.to, "Downloading tip of block range"); + let fetch_client = network.fetch_client().await?; + + // fetch the header at `self.to` + let retries = self.retries.max(1); + let backoff = ConstantBuilder::default().with_max_times(retries); + let client = fetch_client.clone(); + let to_header = (move || { + get_single_header(client.clone(), BlockHashOrNumber::Number(self.to)) + }) + .retry(&backoff) + .notify(|err, _| warn!(target: "reth::cli", "Error requesting header: {err}. Retrying...")) + .await?; + info!(target: "reth::cli", target_block_number=self.to, "Finished downloading tip of block range"); + + // build the full block client + let consensus: Arc = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain))); + let block_range_client = FullBlockClient::new(fetch_client, consensus); + + // get the execution checkpoint let execution_checkpoint_block = provider_rw.get_stage_checkpoint(StageId::Execution)?.unwrap_or_default().block_number; assert!(execution_checkpoint_block < self.to, "Nothing to run"); + // get the block range from the network + info!(target: "reth::cli", target_block_number=?self.to, "Downloading range of blocks"); + let block_range = block_range_client + .get_full_block_range(to_header.hash_slow(), self.to - execution_checkpoint_block) + .await; + + // recover senders + let blocks_with_senders = + block_range.into_iter().map(|block| block.try_seal_with_senders()); + + // insert the blocks + for senders_res in blocks_with_senders { + let sealed_block = match senders_res { + Ok(senders) => senders, + Err(err) => { + warn!(target: "reth::cli", "Error sealing block with senders: {err:?}. Skipping..."); + continue + } + }; + provider_rw.insert_block(sealed_block.block, Some(sealed_block.senders))?; + } + // Check if any of hashing or merkle stages aren't on the same block number as // Execution stage or have any intermediate progress. 
let should_reset_stages = diff --git a/bin/reth/src/debug_cmd/mod.rs b/bin/reth/src/debug_cmd/mod.rs index e624307f68c7..7dfa448db43d 100644 --- a/bin/reth/src/debug_cmd/mod.rs +++ b/bin/reth/src/debug_cmd/mod.rs @@ -30,7 +30,7 @@ impl Command { pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> { match self.command { Subcommands::Execution(command) => command.execute(ctx).await, - Subcommands::Merkle(command) => command.execute().await, + Subcommands::Merkle(command) => command.execute(ctx).await, Subcommands::InMemoryMerkle(command) => command.execute(ctx).await, } } From 957f9f45450b3faa05c89a36d4020fb0f12df707 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 14 Aug 2023 13:07:53 +0200 Subject: [PATCH 421/722] chore: bump jsonrpsee 0.20 (#4181) --- Cargo.lock | 82 ++++++++++++++++++++++-------------------------------- Cargo.toml | 6 ++-- 2 files changed, 36 insertions(+), 52 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1add55f8f9bb..b2c6efefd626 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -887,16 +887,6 @@ dependencies = [ "regex-automata 0.1.10", ] -[[package]] -name = "bstr" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6798148dccfbff0fae41c7574d2fa8f1ef3492fba0face179de5d8d447d67b05" -dependencies = [ - "memchr", - "serde", -] - [[package]] name = "bumpalo" version = "3.13.0" @@ -2719,19 +2709,6 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" -[[package]] -name = "globset" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759c97c1e17c55525b57192c06a267cda0ac5210b222d6b82189a2338fa1c13d" -dependencies = [ - "aho-corasick 1.0.3", - "bstr 1.6.0", - "fnv", - "log", - "regex", -] - [[package]] name = "gloo-net" version = "0.3.1" @@ -3503,9 +3480,9 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5f3783308bddc49d0218307f66a09330c106fbd792c58bac5c8dc294fdd0f98" +checksum = "8002beb64691edce321fc16cdba91916b10d798f9d480a05467b0ee98463c03b" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -3520,9 +3497,9 @@ dependencies = [ [[package]] name = "jsonrpsee-client-transport" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abc5630e4fa0096f00ec7b44d520701fda4504170cb85e22dca603ae5d7ad0d7" +checksum = "310f9566a32ec8db214805127c4f17e7e8e91015e4a1407fc1d0e84df0086a73" dependencies = [ "futures-channel", "futures-util", @@ -3537,14 +3514,15 @@ dependencies = [ "tokio-rustls", "tokio-util", "tracing", + "url", "webpki-roots", ] [[package]] name = "jsonrpsee-core" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aaa4c4d5fb801dcc316d81f76422db259809037a86b3194ae538dd026b05ed7" +checksum = "4278372ecb78ebb522c36a242209a29162f4af0997a41158c8b60450b081baf1" dependencies = [ "anyhow", "async-lock", @@ -3552,7 +3530,6 @@ dependencies = [ "beef", "futures-timer", "futures-util", - "globset", "hyper", "jsonrpsee-types", "parking_lot 0.12.1", @@ -3563,16 +3540,15 @@ dependencies = [ "soketto", "thiserror", "tokio", - "tokio-stream", "tracing", "wasm-bindgen-futures", ] [[package]] name = "jsonrpsee-http-client" -version = "0.19.0" +version = "0.20.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa7165efcbfbc951d180162ff28fe91b657ed81925e37a35e4a396ce12109f96" +checksum = "2393386c97ce214851a9677568c5a38223ae4eada833617cb16d8464d1128f1b" dependencies = [ "async-trait", "hyper", @@ -3585,13 +3561,14 @@ dependencies = [ "tokio", "tower", "tracing", + "url", ] [[package]] name = "jsonrpsee-proc-macros" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21dc12b1d4f16a86e8c522823c4fab219c88c03eb7c924ec0501a64bf12e058b" +checksum = "985d4a3753a08aaf120429924567795b2764c5c691489316a7fd076178e708b4" dependencies = [ "heck", "proc-macro-crate", @@ -3602,17 +3579,20 @@ dependencies = [ [[package]] name = "jsonrpsee-server" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e79d78cfd5abd8394da10753723093c3ff64391602941c9c4b1d80a3414fd53" +checksum = "5fc6357836b1d7b1367fe6d9a9b8d6e5488d1f1db985dfca4cb4ceaa9f37679e" dependencies = [ "futures-util", + "http", "hyper", "jsonrpsee-core", "jsonrpsee-types", + "route-recognizer", "serde", "serde_json", "soketto", + "thiserror", "tokio", "tokio-stream", "tokio-util", @@ -3622,9 +3602,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00aa7cc87bc42e04e26c8ac3e7186142f7fd2949c763d9b6a7e64a69672d8fb2" +checksum = "bbea61f2d95b9592491228db0c4d2b1e43ea1154ed9713bb666169cf3919ea7d" dependencies = [ "anyhow", "beef", @@ -3636,9 +3616,9 @@ dependencies = [ [[package]] name = "jsonrpsee-wasm-client" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fe953c2801356f214d3f4051f786b3d11134512a46763ee8c39a9e3fa2cc1c0" +checksum = "051742038473f3aaada8fc1eb19c76a5354e37e886999d60061f1f303cfc45e8" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -3647,14 +3627,15 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c71b2597ec1c958c6d5bc94bb61b44d74eb28e69dc421731ab0035706f13882" +checksum = "9590173f77867bc96b5127e4a862e2edcb7f603c83616e9302d68aab983bc023" dependencies = [ "http", "jsonrpsee-client-transport", "jsonrpsee-core", "jsonrpsee-types", + "url", ] [[package]] @@ -6343,6 +6324,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "route-recognizer" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afab94fb28594581f62d981211a9a4d53cc8130bbcbbb89a0440d9b8e81a7746" + [[package]] name = "ruint" version = "1.10.1" @@ -6975,7 +6962,7 @@ version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "420acb44afdae038210c99e69aae24109f32f15500aa708e81d46c9f29d55fcf" dependencies = [ - "bstr 0.2.17", + "bstr", "unicode-segmentation", ] @@ -8259,12 +8246,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.24.0" +version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b291546d5d9d1eab74f069c77749f2cb8504a12caa20f0f2de93ddbf6f411888" -dependencies = [ - "rustls-webpki", -] +checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" [[package]] name = "which" diff --git a/Cargo.toml b/Cargo.toml index 244240b97686..80438385205d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ 
-139,9 +139,9 @@ pin-project = "1.0.12" futures-util = "0.3.25" ## json -jsonrpsee = { version = "0.19" } -jsonrpsee-core = { version = "0.19" } -jsonrpsee-types = { version = "0.19" } +jsonrpsee = { version = "0.20" } +jsonrpsee-core = { version = "0.20" } +jsonrpsee-types = { version = "0.20" } ## crypto secp256k1 = { version = "0.27.0", default-features = false, features = ["global-context", "rand-std", "recovery"] } From 2aefbd3560995e785a49da49d7bfcd00b82853cc Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 14 Aug 2023 12:23:17 +0100 Subject: [PATCH 422/722] chore(tree): log error on unreachable (#4185) --- crates/blockchain-tree/src/blockchain_tree.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 8edee773badf..26508b4f8fd7 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -972,6 +972,12 @@ impl BlockchainTree let canon_fork: BlockNumHash = new_canon_chain.fork_block(); // sanity check if self.block_indices.canonical_hash(&canon_fork.number) != Some(canon_fork.hash) { + error!( + target: "blockchain_tree", + ?canon_fork, + ?self.block_indices, + "All chains should point to canonical chain" + ); unreachable!("all chains should point to canonical chain."); } From 4a3f42ebe704505e1280ea7329e13a4ab1ca5b48 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 14 Aug 2023 15:02:42 +0200 Subject: [PATCH 423/722] feat(payload): make the actual payload build function generic (#4153) --- crates/payload/basic/src/lib.rs | 399 ++++++++++++++++++-------------- 1 file changed, 225 insertions(+), 174 deletions(-) diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index dea187a30b62..a67b49a0bcff 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -312,12 +312,15 @@ where // acquire the permit for executing the task let _permit = guard.0.acquire().await; build_payload( - client, - pool, - cached_reads, - payload_config, - cancel, - best_payload, + default_payload_builder, + BuildArguments { + client, + pool, + cached_reads, + config: payload_config, + cancel, + best_payload, + }, tx, ) })); @@ -516,8 +519,9 @@ struct PayloadConfig { chain_spec: Arc, } +/// The possible outcomes of a payload building attempt. #[derive(Debug)] -enum BuildOutcome { +pub enum BuildOutcome { /// Successfully built a better block. Better { /// The new payload that was built. @@ -527,6 +531,7 @@ enum BuildOutcome { }, /// Aborted payload building because resulted in worse block wrt. fees. Aborted { + /// The total fees associated with the attempted payload. fees: U256, /// The cached reads that were used to build the payload. cached_reads: CachedReads, @@ -535,199 +540,245 @@ enum BuildOutcome { Cancelled, } -/// Builds the payload and sends the result to the given channel. -fn build_payload( +/// A collection of arguments used for building payloads. +/// +/// This struct encapsulates the essential components and configuration required for the payload +/// building process. It holds references to the Ethereum client, transaction pool, cached reads, +/// payload configuration, cancellation status, and the best payload achieved so far. 
+pub struct BuildArguments { client: Client, pool: Pool, cached_reads: CachedReads, config: PayloadConfig, cancel: Cancelled, best_payload: Option>, - to_job: oneshot::Sender>, -) where +} + +/// A trait for building payloads that encapsulate Ethereum transactions. +/// +/// This trait provides the `try_build` method to construct a transaction payload +/// using `BuildArguments`. It returns a `Result` indicating success or a +/// `PayloadBuilderError` if building fails. +/// +/// Generic parameters `Pool` and `Client` represent the transaction pool and +/// Ethereum client types. +pub trait PayloadBuilder { + /// Tries to build a transaction payload using provided arguments. + /// + /// Constructs a transaction payload based on the given arguments, + /// returning a `Result` indicating success or an error if building fails. + /// + /// # Arguments + /// + /// - `args`: Build arguments containing necessary components. + /// + /// # Returns + /// + /// A `Result` indicating the build outcome or an error. + fn try_build( + &self, + args: BuildArguments, + ) -> Result; +} + +impl PayloadBuilder for F +where + F: Fn(BuildArguments) -> Result, +{ + fn try_build( + &self, + args: BuildArguments, + ) -> Result { + self(args) + } +} + +/// Constructs an Ethereum transaction payload using the best transactions from the pool. +/// +/// Given build arguments including an Ethereum client, transaction pool, +/// and configuration, this function creates a transaction payload. Returns +/// a result indicating success with the payload or an error in case of failure. +fn default_payload_builder( + args: BuildArguments, +) -> Result +where Client: StateProviderFactory, Pool: TransactionPool, { - #[inline(always)] - fn try_build( - client: Client, - pool: Pool, - mut cached_reads: CachedReads, - config: PayloadConfig, - cancel: Cancelled, - best_payload: Option>, - ) -> Result - where - Client: StateProviderFactory, - Pool: TransactionPool, - { - let PayloadConfig { - initialized_block_env, - initialized_cfg, - parent_block, - extra_data, - attributes, - chain_spec, - } = config; + let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args; + + let PayloadConfig { + initialized_block_env, + initialized_cfg, + parent_block, + extra_data, + attributes, + chain_spec, + } = config; + + debug!(parent_hash=?parent_block.hash, parent_number=parent_block.number, "building new payload"); - debug!(parent_hash=?parent_block.hash, parent_number=parent_block.number, "building new payload"); + let state = State::new(client.state_by_block_hash(parent_block.hash)?); + let mut db = CacheDB::new(cached_reads.as_db(&state)); + let mut post_state = PostState::default(); - let state = State::new(client.state_by_block_hash(parent_block.hash)?); - let mut db = CacheDB::new(cached_reads.as_db(&state)); - let mut post_state = PostState::default(); + let mut cumulative_gas_used = 0; + let block_gas_limit: u64 = initialized_block_env.gas_limit.try_into().unwrap_or(u64::MAX); + let base_fee = initialized_block_env.basefee.to::(); - let mut cumulative_gas_used = 0; - let block_gas_limit: u64 = initialized_block_env.gas_limit.try_into().unwrap_or(u64::MAX); - let base_fee = initialized_block_env.basefee.to::(); + let mut executed_txs = Vec::new(); + let mut best_txs = pool.best_transactions_with_base_fee(base_fee); - let mut executed_txs = Vec::new(); - let mut best_txs = pool.best_transactions_with_base_fee(base_fee); + let mut total_fees = U256::ZERO; - let mut total_fees = U256::ZERO; + let block_number 
= initialized_block_env.number.to::(); - let block_number = initialized_block_env.number.to::(); + while let Some(pool_tx) = best_txs.next() { + // ensure we still have capacity for this transaction + if cumulative_gas_used + pool_tx.gas_limit() > block_gas_limit { + // we can't fit this transaction into the block, so we need to mark it as invalid + // which also removes all dependent transaction from the iterator before we can + // continue + best_txs.mark_invalid(&pool_tx); + continue + } - while let Some(pool_tx) = best_txs.next() { - // ensure we still have capacity for this transaction - if cumulative_gas_used + pool_tx.gas_limit() > block_gas_limit { - // we can't fit this transaction into the block, so we need to mark it as invalid - // which also removes all dependent transaction from the iterator before we can - // continue - best_txs.mark_invalid(&pool_tx); - continue - } + // check if the job was cancelled, if so we can exit early + if cancel.is_cancelled() { + return Ok(BuildOutcome::Cancelled) + } - // check if the job was cancelled, if so we can exit early - if cancel.is_cancelled() { - return Ok(BuildOutcome::Cancelled) - } + // convert tx to a signed transaction + let tx = pool_tx.to_recovered_transaction(); - // convert tx to a signed transaction - let tx = pool_tx.to_recovered_transaction(); - - // Configure the environment for the block. - let env = Env { - cfg: initialized_cfg.clone(), - block: initialized_block_env.clone(), - tx: tx_env_with_recovered(&tx), - }; - - let mut evm = revm::EVM::with_env(env); - evm.database(&mut db); - - let ResultAndState { result, state } = match evm.transact() { - Ok(res) => res, - Err(err) => { - match err { - EVMError::Transaction(err) => { - if matches!(err, InvalidTransaction::NonceTooLow { .. }) { - // if the nonce is too low, we can skip this transaction - trace!(?err, ?tx, "skipping nonce too low transaction"); - } else { - // if the transaction is invalid, we can skip it and all of its - // descendants - trace!( - ?err, - ?tx, - "skipping invalid transaction and its descendants" - ); - best_txs.mark_invalid(&pool_tx); - } - continue - } - err => { - // this is an error that we should treat as fatal for this attempt - return Err(PayloadBuilderError::EvmExecutionError(err)) + // Configure the environment for the block. + let env = Env { + cfg: initialized_cfg.clone(), + block: initialized_block_env.clone(), + tx: tx_env_with_recovered(&tx), + }; + + let mut evm = revm::EVM::with_env(env); + evm.database(&mut db); + + let ResultAndState { result, state } = match evm.transact() { + Ok(res) => res, + Err(err) => { + match err { + EVMError::Transaction(err) => { + if matches!(err, InvalidTransaction::NonceTooLow { .. }) { + // if the nonce is too low, we can skip this transaction + trace!(?err, ?tx, "skipping nonce too low transaction"); + } else { + // if the transaction is invalid, we can skip it and all of its + // descendants + trace!(?err, ?tx, "skipping invalid transaction and its descendants"); + best_txs.mark_invalid(&pool_tx); } + continue + } + err => { + // this is an error that we should treat as fatal for this attempt + return Err(PayloadBuilderError::EvmExecutionError(err)) } } - }; - - let gas_used = result.gas_used(); - - // commit changes - commit_state_changes(&mut db, &mut post_state, block_number, state, true); - - // add gas used by the transaction to cumulative gas used, before creating the receipt - cumulative_gas_used += gas_used; - - // Push transaction changeset and calculate header bloom filter for receipt. 
- post_state.add_receipt( - block_number, - Receipt { - tx_type: tx.tx_type(), - success: result.is_success(), - cumulative_gas_used, - logs: result.logs().into_iter().map(into_reth_log).collect(), - }, - ); - - // update add to total fees - let miner_fee = tx - .effective_tip_per_gas(base_fee) - .expect("fee is always valid; execution succeeded"); - total_fees += U256::from(miner_fee) * U256::from(gas_used); - - // append transaction to the list of executed transactions - executed_txs.push(tx.into_signed()); - } + } + }; - // check if we have a better block - if !is_better_payload(best_payload.as_deref(), total_fees) { - // can skip building the block - return Ok(BuildOutcome::Aborted { fees: total_fees, cached_reads }) - } + let gas_used = result.gas_used(); - let WithdrawalsOutcome { withdrawals_root, withdrawals } = commit_withdrawals( - &mut db, - &mut post_state, - &chain_spec, - block_number, - attributes.timestamp, - attributes.withdrawals, - )?; - - let receipts_root = post_state.receipts_root(block_number); - let logs_bloom = post_state.logs_bloom(block_number); - - // calculate the state root - let state_root = state.state().state_root(post_state)?; - - // create the block header - let transactions_root = proofs::calculate_transaction_root(&executed_txs); - - let header = Header { - parent_hash: parent_block.hash, - ommers_hash: EMPTY_OMMER_ROOT, - beneficiary: initialized_block_env.coinbase, - state_root, - transactions_root, - receipts_root, - withdrawals_root, - logs_bloom, - timestamp: attributes.timestamp, - mix_hash: attributes.prev_randao, - nonce: BEACON_NONCE, - base_fee_per_gas: Some(base_fee), - number: parent_block.number + 1, - gas_limit: block_gas_limit, - difficulty: U256::ZERO, - gas_used: cumulative_gas_used, - extra_data: extra_data.into(), - blob_gas_used: None, - excess_blob_gas: None, - }; + // commit changes + commit_state_changes(&mut db, &mut post_state, block_number, state, true); - // seal the block - let block = Block { header, body: executed_txs, ommers: vec![], withdrawals }; + // add gas used by the transaction to cumulative gas used, before creating the receipt + cumulative_gas_used += gas_used; - let sealed_block = block.seal_slow(); - Ok(BuildOutcome::Better { - payload: BuiltPayload::new(attributes.id, sealed_block, total_fees), - cached_reads, - }) + // Push transaction changeset and calculate header bloom filter for receipt. 
+ post_state.add_receipt( + block_number, + Receipt { + tx_type: tx.tx_type(), + success: result.is_success(), + cumulative_gas_used, + logs: result.logs().into_iter().map(into_reth_log).collect(), + }, + ); + + // add the miner fee to the total fees + let miner_fee = + tx.effective_tip_per_gas(base_fee).expect("fee is always valid; execution succeeded"); + total_fees += U256::from(miner_fee) * U256::from(gas_used); + + // append transaction to the list of executed transactions + executed_txs.push(tx.into_signed()); } + + // check if we have a better block + if !is_better_payload(best_payload.as_deref(), total_fees) { + // can skip building the block + return Ok(BuildOutcome::Aborted { fees: total_fees, cached_reads }) + } + + let WithdrawalsOutcome { withdrawals_root, withdrawals } = commit_withdrawals( + &mut db, + &mut post_state, + &chain_spec, + block_number, + attributes.timestamp, + attributes.withdrawals, + )?; + + let receipts_root = post_state.receipts_root(block_number); + let logs_bloom = post_state.logs_bloom(block_number); + + // calculate the state root + let state_root = state.state().state_root(post_state)?; + + // create the block header + let transactions_root = proofs::calculate_transaction_root(&executed_txs); + + let header = Header { + parent_hash: parent_block.hash, + ommers_hash: EMPTY_OMMER_ROOT, + beneficiary: initialized_block_env.coinbase, + state_root, + transactions_root, + receipts_root, + withdrawals_root, + logs_bloom, + timestamp: attributes.timestamp, + mix_hash: attributes.prev_randao, + nonce: BEACON_NONCE, + base_fee_per_gas: Some(base_fee), + number: parent_block.number + 1, + gas_limit: block_gas_limit, + difficulty: U256::ZERO, + gas_used: cumulative_gas_used, + extra_data: extra_data.into(), + blob_gas_used: None, + excess_blob_gas: None, + }; + + // seal the block + let block = Block { header, body: executed_txs, ommers: vec![], withdrawals }; + + let sealed_block = block.seal_slow(); + Ok(BuildOutcome::Better { + payload: BuiltPayload::new(attributes.id, sealed_block, total_fees), + cached_reads, + }) +} + +fn build_payload( + builder: impl PayloadBuilder, + args: BuildArguments, + to_job: oneshot::Sender>, +) where + Client: StateProviderFactory, + Pool: TransactionPool, +{ + let result = builder.try_build(args); + let _ = to_job.send(result); } /// Builds an empty payload without any transactions. From b2be35c0ff688feb90188c1f85ca456090cbf066 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 14 Aug 2023 14:09:01 +0100 Subject: [PATCH 424/722] feat(interfaces): database write error details (#4190) --- crates/interfaces/src/db.rs | 32 ++++++++++--- .../db/src/implementation/mdbx/cursor.rs | 47 ++++++++++++++----- .../storage/db/src/implementation/mdbx/mod.rs | 36 ++++++++++++-- .../storage/db/src/implementation/mdbx/tx.rs | 11 ++++- crates/storage/db/src/lib.rs | 2 +- 5 files changed, 104 insertions(+), 24 deletions(-) diff --git a/crates/interfaces/src/db.rs b/crates/interfaces/src/db.rs index 5249a0ed5cc3..e0f44d7276e9 100644 --- a/crates/interfaces/src/db.rs +++ b/crates/interfaces/src/db.rs @@ -1,4 +1,4 @@ -/// Database error type. They are using u32 to represent error code. +/// Database error type. It uses i32 to represent an error code. #[derive(Debug, thiserror::Error, PartialEq, Eq, Clone)] pub enum DatabaseError { /// Failed to open database. @@ -7,13 +7,22 @@ pub enum DatabaseError { /// Failed to create a table in database.
#[error("Table Creating error code: {0:?}")] TableCreation(i32), - /// Failed to insert a value into a table. - #[error("Database write error code: {0:?}")] - Write(i32), - /// Failed to get a value into a table. + /// Failed to write a value into a table. + #[error("Database write operation \"{operation:?}\" for key \"{key:?}\" in table \"{table_name}\" ended with error code: {code:?}")] + Write { + /// Database error code + code: i32, + /// Database write operation type + operation: DatabaseWriteOperation, + /// Table name + table_name: &'static str, + /// Write key + key: Box<[u8]>, + }, + /// Failed to read a value from a table. #[error("Database read error code: {0:?}")] Read(i32), - /// Failed to delete a `(key, value)` pair into a table. + /// Failed to delete a `(key, value)` pair from a table. #[error("Database delete error code: {0:?}")] Delete(i32), /// Failed to commit transaction changes into the database. @@ -36,6 +45,17 @@ pub enum DatabaseError { LogLevelUnavailable(LogLevel), } +/// Database write operation type +#[derive(Debug, PartialEq, Eq, Clone)] +#[allow(missing_docs)] +pub enum DatabaseWriteOperation { + CursorAppend, + CursorUpsert, + CursorInsert, + CursorAppendDup, + Put, +} + #[derive(Debug, PartialEq, Eq, Clone, Copy)] #[cfg_attr(feature = "clap", derive(clap::ValueEnum))] /// Database log level. diff --git a/crates/storage/db/src/implementation/mdbx/cursor.rs b/crates/storage/db/src/implementation/mdbx/cursor.rs index ea7cfe43094b..18d8cef1e95a 100644 --- a/crates/storage/db/src/implementation/mdbx/cursor.rs +++ b/crates/storage/db/src/implementation/mdbx/cursor.rs @@ -1,5 +1,6 @@ //! Cursor wrapper for libmdbx-sys. +use reth_interfaces::db::DatabaseWriteOperation; use std::{borrow::Cow, collections::Bound, marker::PhantomData, ops::RangeBounds}; use crate::{ @@ -230,24 +231,42 @@ impl<'tx, T: Table> DbCursorRW<'tx, T> for Cursor<'tx, RW, T> { /// to properly upsert, you'll need to `seek_exact` & `delete_current` if the key+subkey was /// found, before calling `upsert`. fn upsert(&mut self, key: T::Key, value: T::Value) -> Result<(), DatabaseError> { + let key = key.encode(); // Default `WriteFlags` is UPSERT - self.inner - .put(key.encode().as_ref(), compress_or_ref!(self, value), WriteFlags::UPSERT) - .map_err(|e| DatabaseError::Write(e.into())) + self.inner.put(key.as_ref(), compress_or_ref!(self, value), WriteFlags::UPSERT).map_err( + |e| DatabaseError::Write { + code: e.into(), + operation: DatabaseWriteOperation::CursorUpsert, + table_name: T::NAME, + key: Box::from(key.as_ref()), + }, + ) } fn insert(&mut self, key: T::Key, value: T::Value) -> Result<(), DatabaseError> { + let key = key.encode(); self.inner - .put(key.encode().as_ref(), compress_or_ref!(self, value), WriteFlags::NO_OVERWRITE) - .map_err(|e| DatabaseError::Write(e.into())) + .put(key.as_ref(), compress_or_ref!(self, value), WriteFlags::NO_OVERWRITE) + .map_err(|e| DatabaseError::Write { + code: e.into(), + operation: DatabaseWriteOperation::CursorInsert, + table_name: T::NAME, + key: Box::from(key.as_ref()), + }) } /// Appends the data to the end of the table. 
Consequently, the append operation /// will fail if the inserted key is less than the last table key fn append(&mut self, key: T::Key, value: T::Value) -> Result<(), DatabaseError> { - self.inner - .put(key.encode().as_ref(), compress_or_ref!(self, value), WriteFlags::APPEND) - .map_err(|e| DatabaseError::Write(e.into())) + let key = key.encode(); + self.inner.put(key.as_ref(), compress_or_ref!(self, value), WriteFlags::APPEND).map_err( + |e| DatabaseError::Write { + code: e.into(), + operation: DatabaseWriteOperation::CursorAppend, + table_name: T::NAME, + key: Box::from(key.as_ref()), + }, + ) } fn delete_current(&mut self) -> Result<(), DatabaseError> { @@ -261,8 +280,14 @@ impl<'tx, T: DupSort> DbDupCursorRW<'tx, T> for Cursor<'tx, RW, T> { } fn append_dup(&mut self, key: T::Key, value: T::Value) -> Result<(), DatabaseError> { - self.inner - .put(key.encode().as_ref(), compress_or_ref!(self, value), WriteFlags::APPEND_DUP) - .map_err(|e| DatabaseError::Write(e.into())) + let key = key.encode(); + self.inner.put(key.as_ref(), compress_or_ref!(self, value), WriteFlags::APPEND_DUP).map_err( + |e| DatabaseError::Write { + code: e.into(), + operation: DatabaseWriteOperation::CursorAppendDup, + table_name: T::NAME, + key: Box::from(key.as_ref()), + }, + ) } } diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 9bf544279437..b9cce246938c 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -158,6 +158,7 @@ impl Deref for Env { mod tests { use super::*; use crate::{ + abstraction::table::{Encode, Table}, cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW, ReverseWalker, Walker}, database::Database, models::{AccountBeforeTx, ShardedKey}, @@ -166,6 +167,7 @@ mod tests { transaction::{DbTx, DbTxMut}, AccountChangeSet, DatabaseError, }; + use reth_interfaces::db::DatabaseWriteOperation; use reth_libmdbx::{NoWriteMap, WriteMap}; use reth_primitives::{Account, Address, Header, IntegerList, StorageEntry, H160, H256, U256}; use std::{path::Path, str::FromStr, sync::Arc}; @@ -525,7 +527,15 @@ mod tests { assert_eq!(cursor.current(), Ok(Some((key_to_insert, H256::zero())))); // INSERT (failure) - assert_eq!(cursor.insert(key_to_insert, H256::zero()), Err(DatabaseError::Write(-30799))); + assert_eq!( + cursor.insert(key_to_insert, H256::zero()), + Err(DatabaseError::Write { + code: -30799, + operation: DatabaseWriteOperation::CursorInsert, + table_name: CanonicalHeaders::NAME, + key: Box::from(key_to_insert.encode().as_ref()) + }) + ); assert_eq!(cursor.current(), Ok(Some((key_to_insert, H256::zero())))); tx.commit().expect(ERROR_COMMIT); @@ -660,7 +670,15 @@ mod tests { let key_to_append = 2; let tx = db.tx_mut().expect(ERROR_INIT_TX); let mut cursor = tx.cursor_write::().unwrap(); - assert_eq!(cursor.append(key_to_append, H256::zero()), Err(DatabaseError::Write(-30418))); + assert_eq!( + cursor.append(key_to_append, H256::zero()), + Err(DatabaseError::Write { + code: -30418, + operation: DatabaseWriteOperation::CursorAppend, + table_name: CanonicalHeaders::NAME, + key: Box::from(key_to_append.encode().as_ref()) + }) + ); assert_eq!(cursor.current(), Ok(Some((5, H256::zero())))); // the end of table tx.commit().expect(ERROR_COMMIT); @@ -735,14 +753,24 @@ mod tests { transition_id, AccountBeforeTx { address: Address::from_low_u64_be(subkey_to_append), info: None } ), - Err(DatabaseError::Write(-30418)) + Err(DatabaseError::Write { + code: -30418, + operation: 
DatabaseWriteOperation::CursorAppendDup, + table_name: AccountChangeSet::NAME, + key: Box::from(transition_id.encode().as_ref()) + }) ); assert_eq!( cursor.append( transition_id - 1, AccountBeforeTx { address: Address::from_low_u64_be(subkey_to_append), info: None } ), - Err(DatabaseError::Write(-30418)) + Err(DatabaseError::Write { + code: -30418, + operation: DatabaseWriteOperation::CursorAppend, + table_name: AccountChangeSet::NAME, + key: Box::from((transition_id - 1).encode().as_ref()) + }) ); assert_eq!( cursor.append( diff --git a/crates/storage/db/src/implementation/mdbx/tx.rs b/crates/storage/db/src/implementation/mdbx/tx.rs index 3a49cc04a97a..6e8558726c80 100644 --- a/crates/storage/db/src/implementation/mdbx/tx.rs +++ b/crates/storage/db/src/implementation/mdbx/tx.rs @@ -8,6 +8,7 @@ use crate::{ DatabaseError, }; use parking_lot::RwLock; +use reth_interfaces::db::DatabaseWriteOperation; use reth_libmdbx::{ffi::DBI, EnvironmentKind, Transaction, TransactionKind, WriteFlags, RW}; use reth_metrics::metrics::{self, histogram}; use std::{marker::PhantomData, str::FromStr, sync::Arc, time::Instant}; @@ -124,9 +125,15 @@ impl<'tx, K: TransactionKind, E: EnvironmentKind> DbTx<'tx> for Tx<'tx, K, E> { impl DbTxMut<'_> for Tx<'_, RW, E> { fn put(&self, key: T::Key, value: T::Value) -> Result<(), DatabaseError> { + let key = key.encode(); self.inner - .put(self.get_dbi::()?, &key.encode(), &value.compress(), WriteFlags::UPSERT) - .map_err(|e| DatabaseError::Write(e.into())) + .put(self.get_dbi::()?, key.as_ref(), &value.compress(), WriteFlags::UPSERT) + .map_err(|e| DatabaseError::Write { + code: e.into(), + operation: DatabaseWriteOperation::Put, + table_name: T::NAME, + key: Box::from(key.as_ref()), + }) } fn delete( diff --git a/crates/storage/db/src/lib.rs b/crates/storage/db/src/lib.rs index 76650de0385c..725b2c9dbf6b 100644 --- a/crates/storage/db/src/lib.rs +++ b/crates/storage/db/src/lib.rs @@ -84,7 +84,7 @@ pub mod mdbx { } pub use abstraction::*; -pub use reth_interfaces::db::DatabaseError; +pub use reth_interfaces::db::{DatabaseError, DatabaseWriteOperation}; pub use tables::*; pub use utils::is_database_empty; From 97404cf26974849ccb6999fa8b916d1bdedae57f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 14 Aug 2023 16:48:59 +0200 Subject: [PATCH 425/722] feat: make build function configurable (#4193) --- crates/payload/basic/src/lib.rs | 88 +++++++++++++++++++-------------- 1 file changed, 52 insertions(+), 36 deletions(-) diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index a67b49a0bcff..16be06110baa 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -61,7 +61,7 @@ use tracing::{debug, trace}; mod metrics; /// The [PayloadJobGenerator] that creates [BasicPayloadJob]s. -pub struct BasicPayloadJobGenerator { +pub struct BasicPayloadJobGenerator { /// The client that can interact with the chain. client: Client, /// txpool @@ -74,6 +74,10 @@ pub struct BasicPayloadJobGenerator { payload_task_guard: PayloadTaskGuard, /// The chain spec. chain_spec: Arc, + /// The type responsible for building payloads. 
+ /// + /// See [PayloadBuilder] + builder: Builder, } // === impl BasicPayloadJobGenerator === @@ -86,6 +90,20 @@ impl BasicPayloadJobGenerator { executor: Tasks, config: BasicPayloadJobGeneratorConfig, chain_spec: Arc, + ) -> Self { + BasicPayloadJobGenerator::with_builder(client, pool, executor, config, chain_spec, ()) + } +} + +impl BasicPayloadJobGenerator { + /// Creates a new [BasicPayloadJobGenerator] with the given config and custom [PayloadBuilder] + pub fn with_builder( + client: Client, + pool: Pool, + executor: Tasks, + config: BasicPayloadJobGeneratorConfig, + chain_spec: Arc, + builder: Builder, ) -> Self { Self { client, @@ -94,21 +112,22 @@ impl BasicPayloadJobGenerator { payload_task_guard: PayloadTaskGuard::new(config.max_payload_tasks), config, chain_spec, + builder, } } } // === impl BasicPayloadJobGenerator === -impl BasicPayloadJobGenerator {} - -impl PayloadJobGenerator for BasicPayloadJobGenerator +impl PayloadJobGenerator + for BasicPayloadJobGenerator where Client: StateProviderFactory + BlockReaderIdExt + Clone + Unpin + 'static, Pool: TransactionPool + Unpin + 'static, Tasks: TaskSpawner + Clone + Unpin + 'static, + Builder: PayloadBuilder + Unpin + 'static, { - type Job = BasicPayloadJob; + type Job = BasicPayloadJob; fn new_payload_job( &self, @@ -158,6 +177,7 @@ where cached_reads: None, payload_task_guard: self.payload_task_guard.clone(), metrics: Default::default(), + builder: self.builder.clone(), }) } } @@ -248,7 +268,7 @@ impl Default for BasicPayloadJobGeneratorConfig { } /// A basic payload job that continuously builds a payload with the best transactions from the pool. -pub struct BasicPayloadJob { +pub struct BasicPayloadJob { /// The configuration for how the payload will be created. config: PayloadConfig, /// The client that can interact with the chain. @@ -274,13 +294,18 @@ pub struct BasicPayloadJob { cached_reads: Option, /// metrics for this type metrics: PayloadBuilderMetrics, + /// The type responsible for building payloads. 
+ /// + /// See [PayloadBuilder] + builder: Builder, } -impl Future for BasicPayloadJob +impl Future for BasicPayloadJob where Client: StateProviderFactory + Clone + Unpin + 'static, Pool: TransactionPool + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, + Builder: PayloadBuilder + Unpin + 'static, { type Output = Result<(), PayloadBuilderError>; @@ -308,21 +333,20 @@ where let best_payload = this.best_payload.clone(); this.metrics.inc_initiated_payload_builds(); let cached_reads = this.cached_reads.take().unwrap_or_default(); + let builder = this.builder.clone(); this.executor.spawn_blocking(Box::pin(async move { // acquire the permit for executing the task let _permit = guard.0.acquire().await; - build_payload( - default_payload_builder, - BuildArguments { - client, - pool, - cached_reads, - config: payload_config, - cancel, - best_payload, - }, - tx, - ) + let args = BuildArguments { + client, + pool, + cached_reads, + config: payload_config, + cancel, + best_payload, + }; + let result = builder.try_build(args); + let _ = tx.send(result); })); this.pending_block = Some(PendingPayload { _cancel, payload: rx }); } @@ -364,11 +388,12 @@ where } } -impl PayloadJob for BasicPayloadJob +impl PayloadJob for BasicPayloadJob where Client: StateProviderFactory + Clone + Unpin + 'static, Pool: TransactionPool + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, + Builder: PayloadBuilder + Unpin + 'static, { type ResolvePayloadFuture = ResolveBestPayload; @@ -562,7 +587,7 @@ pub struct BuildArguments { /// /// Generic parameters `Pool` and `Client` represent the transaction pool and /// Ethereum client types. -pub trait PayloadBuilder { +pub trait PayloadBuilder: Send + Sync + Clone { /// Tries to build a transaction payload using provided arguments. /// /// Constructs a transaction payload based on the given arguments, @@ -581,15 +606,17 @@ pub trait PayloadBuilder { ) -> Result; } -impl PayloadBuilder for F +// Default implementation of [PayloadBuilder] for unit type +impl PayloadBuilder for () where - F: Fn(BuildArguments) -> Result, + Client: StateProviderFactory, + Pool: TransactionPool, { fn try_build( &self, args: BuildArguments, ) -> Result { - self(args) + default_payload_builder(args) } } @@ -598,6 +625,7 @@ where /// Given build arguments including an Ethereum client, transaction pool, /// and configuration, this function creates a transaction payload. Returns /// a result indicating success with the payload or an error in case of failure. +#[inline] fn default_payload_builder( args: BuildArguments, ) -> Result @@ -769,18 +797,6 @@ where }) } -fn build_payload( - builder: impl PayloadBuilder, - args: BuildArguments, - to_job: oneshot::Sender>, -) where - Client: StateProviderFactory, - Pool: TransactionPool, -{ - let result = builder.try_build(args); - let _ = to_job.send(result); -} - /// Builds an empty payload without any transactions. 
fn build_empty_payload( client: &Client, From 6eb170f8d78767bbc1229b38dab3a94c716eabd8 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 14 Aug 2023 18:58:06 +0200 Subject: [PATCH 426/722] feat(doc): document ARM limitations (#4196) --- book/installation/build-for-arm-devices.md | 29 ++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/book/installation/build-for-arm-devices.md b/book/installation/build-for-arm-devices.md index ac95f16939f8..94da76eaea68 100644 --- a/book/installation/build-for-arm-devices.md +++ b/book/installation/build-for-arm-devices.md @@ -11,6 +11,35 @@ First, you must have a 64-bit CPU and Operating System, otherwise some of the pr Then, you must setup the virtual memory layout in such a way that the user space is sufficiently large. From [the Linux Kernel documentation](https://www.kernel.org/doc/html/v5.3/arm64/memory.html#:~:text=AArch64%20Linux%20uses%20either%203,for%20both%20user%20and%20kernel.), you can see that the memory layout with 4KB pages and a level-3 translation table limits the user space to 512GB, which is too low for Reth to sync on Ethereum mainnet. +## ARM Board Virtual Memory Limitation + +### Issue Description + +Some ARM boards are equipped with only 3-level paging, which imposes a virtual memory limitation of 512GB for user space on Linux. This limitation can be a challenge for running applications like "reth", as the MDBX (Memory-mapped Database eXtreme) library requires a larger virtual memory allocation by design. + +### Understanding the Limitation + +To determine if a specific ARM board is affected by this virtual memory limitation: + +1. **Check Specifications:** When considering an ARM board, review its specifications for information on paging levels. Boards with 3-level paging are limited to 512GB of user-space virtual memory. + +2. **Manufacturer Documentation:** Consult the official ARM board documentation for details on supported paging levels. + +3. **Community Discussions:** Search online ARM and Linux forums for insights into virtual memory limitations of specific boards. + +### Additional Context + +According to MDBX documentation, changing this upper bound, which dictates the maximum size the database can reach, is a costly operation. Therefore, a reasonably large value was chosen. Given that the upper bound is currently set to 4TB, the assumption was that growth to 3TB might occur relatively soon. If the upper bound size is set to only 342GB, then "reth" cannot store more than 342GB of data, which is insufficient for a full sync. + +It's worth noting that on x86_64 architecture, there is a 48-bit address space divided in half between user space and the kernel, providing each with 128TB of address space. In contrast, AArch64 with 4KB pages and 3-level translation tables provides a 39-bit virtual address space, i.e. 512GB each for user space and the kernel; only a 4-level configuration (48-bit virtual addresses) reaches 256TB. + +Some newer versions of the ARM architecture offer support for a Large Virtual Address space, but enabling it requires running with a 64KB page size. The specifics of how to enable this functionality might vary.
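+To make the address-space arithmetic above concrete, the following back-of-the-envelope sketch (not part of reth; it assumes 4KB pages and 8-byte page-table entries) computes the user-space size for a given number of translation levels:
+
+```rust
+fn main() {
+    // Each translation level resolves `page_shift - 3` bits, because a table of
+    // 2^page_shift bytes holds 2^(page_shift - 3) eight-byte entries; the page
+    // offset contributes the remaining `page_shift` bits.
+    let page_shift = 12u32; // 4KB pages
+    for levels in [3u32, 4] {
+        let va_bits = levels * (page_shift - 3) + page_shift;
+        let gib = 1u128 << (va_bits - 30);
+        println!("{levels}-level paging: 2^{va_bits} bytes = {gib} GiB of user address space");
+    }
+}
+```
+
+With 3 levels this yields 2^39 bytes (512GB), well below MDBX's 4TB upper bound, while 4 levels yield 2^48 bytes (256TB), which is more than enough.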
+ +### Additional Resources + +- [ARM developer documentation](https://developer.arm.com/documentation/ddi0406/cb/Appendixes/ARMv4-and-ARMv5-Differences/System-level-memory-model/Virtual-memory-support) +- [ARM Community Forums](https://community.arm.com) + ## Build Reth If both your CPU architecture and the memory layout are valid, the instructions for building Reth will not differ from [the standard process](https://paradigmxyz.github.io/reth/installation/source.html). From 3c41e9ffc7266707f44ea8247602f15e5dae9e13 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 14 Aug 2023 19:26:39 +0200 Subject: [PATCH 427/722] fix: return engine on fatal error (#4184) --- crates/consensus/beacon/src/engine/mod.rs | 39 +++++++++++++++++++---- crates/interfaces/src/executor.rs | 2 ++ 2 files changed, 35 insertions(+), 6 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index faab247c422d..942d2fe6d0c3 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -551,15 +551,23 @@ where state: ForkchoiceState, attrs: Option, tx: oneshot::Sender>, - ) -> bool { + ) -> OnForkchoiceUpdateOutcome { self.metrics.forkchoice_updated_messages.increment(1); self.blockchain.on_forkchoice_update_received(&state); let on_updated = match self.forkchoice_updated(state, attrs) { Ok(response) => response, Err(error) => { + if let Error::Execution(ref err) = error { + if err.is_fatal() { + // FCU resulted in a fatal error from which we can't recover + let err = err.clone(); + let _ = tx.send(Err(error)); + return OnForkchoiceUpdateOutcome::Fatal(err.clone()) + } + } let _ = tx.send(Err(error)); - return false + return OnForkchoiceUpdateOutcome::Processed } }; @@ -583,11 +591,11 @@ where // check if we reached the maximum configured block let tip_number = self.blockchain.canonical_tip().number; if self.sync.has_reached_max_block(tip_number) { - return true + return OnForkchoiceUpdateOutcome::ReachedMaxBlock } } - false + OnForkchoiceUpdateOutcome::Processed } /// Called to resolve chain forks and ensure that the Execution layer is working with the latest @@ -1700,8 +1708,16 @@ where match this.engine_message_rx.poll_next_unpin(cx) { Poll::Ready(Some(msg)) => match msg { BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => { - if this.on_forkchoice_updated(state, payload_attrs, tx) { - return Poll::Ready(Ok(())) + match this.on_forkchoice_updated(state, payload_attrs, tx) { + OnForkchoiceUpdateOutcome::Processed => {} + OnForkchoiceUpdateOutcome::ReachedMaxBlock => { + // reached the max block, we can terminate the future + return Poll::Ready(Ok(())) + } + OnForkchoiceUpdateOutcome::Fatal(err) => { + // fatal error, we can terminate the future + return Poll::Ready(Err(Error::Execution(err).into())) + } } } BeaconEngineMessage::NewPayload { payload, tx } => { @@ -1763,6 +1779,17 @@ where } } +/// Represents all outcomes of an applied fork choice update. +#[derive(Debug)] +enum OnForkchoiceUpdateOutcome { + /// FCU was processed successfully. + Processed, + /// FCU was processed successfully and reached max block. + ReachedMaxBlock, + /// FCU resulted in a __fatal__ block execution error from which we can't recover. 
+ Fatal(BlockExecutionError), +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/interfaces/src/executor.rs b/crates/interfaces/src/executor.rs index cc747e29fdbc..f57d26fb58d4 100644 --- a/crates/interfaces/src/executor.rs +++ b/crates/interfaces/src/executor.rs @@ -61,6 +61,8 @@ pub enum BlockExecutionError { impl BlockExecutionError { /// Returns `true` if the error is fatal. + /// + /// This represents an unrecoverable database related error. pub fn is_fatal(&self) -> bool { matches!(self, Self::CanonicalCommit { .. } | Self::CanonicalRevert { .. }) } From 9a80f32a66e3fae04a07847ab1041ea849bbf17b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 14 Aug 2023 19:58:29 +0200 Subject: [PATCH 428/722] chore(clippy): make clippy happy (#4163) --- crates/transaction-pool/benches/reorder.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/crates/transaction-pool/benches/reorder.rs b/crates/transaction-pool/benches/reorder.rs index 129f1c17ff56..b50598e2992c 100644 --- a/crates/transaction-pool/benches/reorder.rs +++ b/crates/transaction-pool/benches/reorder.rs @@ -1,5 +1,5 @@ use criterion::{ - black_box, criterion_group, criterion_main, measurement::WallTime, BenchmarkGroup, Criterion, + criterion_group, criterion_main, measurement::WallTime, BenchmarkGroup, Criterion, }; use proptest::{ prelude::*, @@ -78,7 +78,7 @@ fn txpool_reordering_bench( ); group.bench_function(group_id, |b| { b.iter_with_setup(setup, |(mut txpool, new_txs)| { - black_box({ + { // Reorder with new base fee let bigger_base_fee = base_fee.saturating_add(10); txpool.reorder(bigger_base_fee); @@ -88,8 +88,9 @@ fn txpool_reordering_bench( txpool.add_transaction(new_tx); } let smaller_base_fee = base_fee.saturating_sub(10); - txpool.reorder(smaller_base_fee); - }) + txpool.reorder(smaller_base_fee) + }; + std::hint::black_box(()); }); }); } @@ -177,7 +178,7 @@ mod implementations { impl PartialOrd for MockTransactionWithPriority { fn partial_cmp(&self, other: &Self) -> Option { - self.priority.partial_cmp(&other.priority) + Some(self.cmp(other)) } } From 8c56cc2f88504b548705c50f6bc4455d1c1a7280 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Tue, 15 Aug 2023 14:25:15 +0300 Subject: [PATCH 429/722] chore(txpool): bench requires arbitrary feature (#4204) --- crates/transaction-pool/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 5cf13b257542..ff7e1ee799f5 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -62,5 +62,5 @@ arbitrary = ["proptest", "reth-primitives/arbitrary"] [[bench]] name = "reorder" -required-features = ["test-utils"] +required-features = ["test-utils", "arbitrary"] harness = false From 92361ad77f2b1b49c845268ea182d6a4144dcd34 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 15 Aug 2023 14:14:21 +0200 Subject: [PATCH 430/722] perf: let sync status mirror FCU (#4203) --- crates/consensus/beacon/src/engine/message.rs | 5 -- crates/consensus/beacon/src/engine/mod.rs | 51 ++++++++++--------- 2 files changed, 28 insertions(+), 28 deletions(-) diff --git a/crates/consensus/beacon/src/engine/message.rs b/crates/consensus/beacon/src/engine/message.rs index 76808558595f..f894603ab4b4 100644 --- a/crates/consensus/beacon/src/engine/message.rs +++ b/crates/consensus/beacon/src/engine/message.rs @@ -34,11 +34,6 @@ pub struct OnForkChoiceUpdated { // === impl OnForkChoiceUpdated === impl OnForkChoiceUpdated { - /// 
Returns true if this update is valid - pub(crate) fn is_valid_update(&self) -> bool { - self.forkchoice_status.is_valid() - } - /// Returns the determined status of the received ForkchoiceState. pub fn forkchoice_status(&self) -> ForkchoiceStatus { self.forkchoice_status diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 942d2fe6d0c3..6f6dfb35dd50 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -571,30 +571,39 @@ where } }; - let status = on_updated.forkchoice_status(); + let fcu_status = on_updated.forkchoice_status(); // update the forkchoice state tracker - self.forkchoice_state_tracker.set_latest(state, status); + self.forkchoice_state_tracker.set_latest(state, fcu_status); - let is_valid_response = on_updated.is_valid_update(); + // send the response to the CL ASAP let _ = tx.send(Ok(on_updated)); - // notify listeners about new processed FCU - self.listeners.notify(BeaconConsensusEngineEvent::ForkchoiceUpdated(state, status)); - - // Terminate the sync early if it's reached the maximum user - // configured block. - if is_valid_response { - // node's fully synced, clear active download requests - self.sync.clear_block_download_requests(); - - // check if we reached the maximum configured block - let tip_number = self.blockchain.canonical_tip().number; - if self.sync.has_reached_max_block(tip_number) { - return OnForkchoiceUpdateOutcome::ReachedMaxBlock + match fcu_status { + ForkchoiceStatus::Invalid => {} + ForkchoiceStatus::Valid => { + // FCU head is valid, we're no longer syncing + self.sync_state_updater.update_sync_state(SyncState::Idle); + // node's fully synced, clear active download requests + self.sync.clear_block_download_requests(); + + // check if we reached the maximum configured block + let tip_number = self.blockchain.canonical_tip().number; + if self.sync.has_reached_max_block(tip_number) { + // Terminate the sync early if it's reached the maximum user + // configured block. + return OnForkchoiceUpdateOutcome::ReachedMaxBlock + } + } + ForkchoiceStatus::Syncing => { + // we're syncing + self.sync_state_updater.update_sync_state(SyncState::Syncing); } } + // notify listeners about new processed FCU + self.listeners.notify(BeaconConsensusEngineEvent::ForkchoiceUpdated(state, fcu_status)); + OnForkchoiceUpdateOutcome::Processed } @@ -1375,8 +1384,8 @@ where /// Attempt to form a new canonical chain based on the current sync target. /// - /// This is invoked when we successfully downloaded a new block from the network which resulted - /// in either [BlockStatus::Accepted] or [BlockStatus::Valid]. + /// This is invoked when we successfully __downloaded__ a new block from the network which + /// resulted in either [BlockStatus::Accepted] or [BlockStatus::Valid]. /// /// Note: This will not succeed if the sync target has changed since the block download request /// was issued and the new target is still disconnected and additional missing blocks are @@ -1569,10 +1578,7 @@ where sync_target_state.finalized_block_hash, ) { Ok(synced) => { - if synced { - // we're consider this synced and transition to live sync - self.sync_state_updater.update_sync_state(SyncState::Idle); - } else { + if !synced { // We don't have the finalized block in the database, so // we need to run another pipeline. 
self.sync.set_pipeline_sync_target( @@ -1618,7 +1624,6 @@ where } EnginePruneEvent::Finished { result } => { trace!(target: "consensus::engine", ?result, "Pruner finished"); - self.sync_state_updater.update_sync_state(SyncState::Idle); match result { Ok(_) => { // Update the state and hashes of the blockchain tree if possible. From 0846d855133214b63e4e8aa9d53446154c4594ca Mon Sep 17 00:00:00 2001 From: Aditya Pandey Date: Tue, 15 Aug 2023 17:59:28 +0530 Subject: [PATCH 431/722] Splitting Reth Node Command Ext (#4158) Co-authored-by: Matthias Seitz --- bin/reth/src/args/rpc_server_args.rs | 10 +- bin/reth/src/cli/ext.rs | 126 +++++++++++++++++- bin/reth/src/node/mod.rs | 2 +- .../src/main.rs | 4 +- 4 files changed, 129 insertions(+), 13 deletions(-) diff --git a/bin/reth/src/args/rpc_server_args.rs b/bin/reth/src/args/rpc_server_args.rs index 0d2cd9bd6620..3b2c463788c3 100644 --- a/bin/reth/src/args/rpc_server_args.rs +++ b/bin/reth/src/args/rpc_server_args.rs @@ -2,7 +2,7 @@ use crate::{ args::GasPriceOracleArgs, - cli::{config::RethRpcConfig, ext::RethNodeCommandExt}, + cli::{config::RethRpcConfig, ext::RethNodeCommandConfig}, }; use clap::{ builder::{PossibleValue, RangedU64ValueParser, TypedValueParser}, @@ -223,7 +223,7 @@ impl RpcServerArgs { /// for the auth server that handles the `engine_` API that's accessed by the consensus /// layer. #[allow(clippy::too_many_arguments)] - pub async fn start_servers( + pub async fn start_servers( &self, provider: Provider, pool: Pool, @@ -232,7 +232,7 @@ impl RpcServerArgs { events: Events, engine_api: Engine, jwt_secret: JwtSecret, - ext: &mut Ext, + conf: &mut Conf, ) -> eyre::Result<(RpcServerHandle, AuthServerHandle)> where Provider: BlockReaderIdExt @@ -249,7 +249,7 @@ impl RpcServerArgs { Tasks: TaskSpawner + Clone + 'static, Events: CanonStateSubscriptions + Clone + 'static, Engine: EngineApiServer, - Ext: RethNodeCommandExt, + Conf: RethNodeCommandConfig, { let auth_config = self.auth_server_config(jwt_secret)?; @@ -265,7 +265,7 @@ impl RpcServerArgs { .build_with_auth_server(module_config, engine_api); // apply configured customization - ext.extend_rpc_modules(self, &mut registry, &mut rpc_modules)?; + conf.extend_rpc_modules(self, &mut registry, &mut rpc_modules)?; let server_config = self.rpc_server_config(); let launch_rpc = rpc_modules.start_server(server_config).map_ok(|handle| { diff --git a/bin/reth/src/cli/ext.rs b/bin/reth/src/cli/ext.rs index 03350e624287..663328cda025 100644 --- a/bin/reth/src/cli/ext.rs +++ b/bin/reth/src/cli/ext.rs @@ -23,6 +23,8 @@ pub trait RethCliExt { /// Provides additional configuration for the node CLI command. /// /// This supports additional CLI arguments that can be used to modify the node configuration. + /// + /// If no additional CLI arguments are required, the [NoArgs] wrapper type can be used. type Node: RethNodeCommandExt; } @@ -31,8 +33,9 @@ impl RethCliExt for () { type Node = DefaultRethNodeCommandConfig; } -/// A trait that allows for extending parts of the CLI with additional functionality. -pub trait RethNodeCommandExt: fmt::Debug + clap::Args { +/// A trait that allows for extending and customizing parts of the node command +/// [NodeCommand](crate::node::NodeCommand). +pub trait RethNodeCommandConfig: fmt::Debug { /// Allows for registering additional RPC modules for the transports. 
/// /// This is expected to call the merge functions of [TransportRpcModules], for example @@ -98,12 +101,125 @@ pub trait RethNodeCommandExt: fmt::Debug + clap::Args { Ok(payload_builder) } - - // TODO move network related functions here } +/// A trait that allows for extending parts of the CLI with additional functionality. +pub trait RethNodeCommandExt: RethNodeCommandConfig + fmt::Debug + clap::Args {} + +// blanket impl for all types that implement the required traits. +impl RethNodeCommandExt for T where T: RethNodeCommandConfig + fmt::Debug + clap::Args {} + /// The default configuration for the reth node command [Command](crate::node::NodeCommand). +/// +/// This is a convenience type for [NoArgs<()>]. #[derive(Debug, Clone, Copy, Default, Args)] pub struct DefaultRethNodeCommandConfig; -impl RethNodeCommandExt for DefaultRethNodeCommandConfig {} +impl RethNodeCommandConfig for DefaultRethNodeCommandConfig {} + +impl RethNodeCommandConfig for () {} + +/// A helper struct that allows for wrapping a [RethNodeCommandConfig] value without providing +/// additional CLI arguments. +/// +/// Note: This type must be manually filled with a [RethNodeCommandConfig] manually before executing +/// the [NodeCommand](crate::node::NodeCommand). +#[derive(Debug, Clone, Copy, Default, Args)] +pub struct NoArgs { + #[clap(skip)] + inner: Option, +} + +impl NoArgs { + /// Creates a new instance of the wrapper type. + pub fn with(inner: T) -> Self { + Self { inner: Some(inner) } + } + + /// Sets the inner value. + pub fn set(&mut self, inner: T) { + self.inner = Some(inner) + } + + /// Transforms the configured value. + pub fn map(self, inner: U) -> NoArgs { + NoArgs::with(inner) + } + + /// Returns the inner value if it exists. + pub fn inner(&self) -> Option<&T> { + self.inner.as_ref() + } + + /// Returns a mutable reference to the inner value if it exists. + pub fn inner_mut(&mut self) -> Option<&mut T> { + self.inner.as_mut() + } + + /// Consumes the wrapper and returns the inner value if it exists. + pub fn into_inner(self) -> Option { + self.inner + } +} + +impl RethNodeCommandConfig for NoArgs { + fn extend_rpc_modules( + &mut self, + config: &Conf, + registry: &mut RethModuleRegistry, + modules: &mut TransportRpcModules<()>, + ) -> eyre::Result<()> + where + Conf: RethRpcConfig, + Provider: BlockReaderIdExt + + StateProviderFactory + + EvmEnvProvider + + ChainSpecProvider + + ChangeSetReader + + Clone + + Unpin + + 'static, + Pool: TransactionPool + Clone + 'static, + Network: NetworkInfo + Peers + Clone + 'static, + Tasks: TaskSpawner + Clone + 'static, + Events: CanonStateSubscriptions + Clone + 'static, + { + if let Some(conf) = self.inner_mut() { + conf.extend_rpc_modules(config, registry, modules) + } else { + Ok(()) + } + } + + fn spawn_payload_builder_service( + &mut self, + conf: &Conf, + provider: Provider, + pool: Pool, + executor: Tasks, + chain_spec: Arc, + ) -> eyre::Result + where + Conf: PayloadBuilderConfig, + Provider: StateProviderFactory + BlockReaderIdExt + Clone + Unpin + 'static, + Pool: TransactionPool + Unpin + 'static, + Tasks: TaskSpawner + Clone + Unpin + 'static, + { + self.inner_mut() + .ok_or_else(|| eyre::eyre!("config value must be set"))? 
+ .spawn_payload_builder_service(conf, provider, pool, executor, chain_spec) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn assert_ext() {} + + #[test] + fn ensure_ext() { + assert_ext::(); + assert_ext::>(); + } +} diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 83e6cd1c86dc..0fd37caac6b0 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -8,7 +8,7 @@ use crate::{ DatabaseArgs, DebugArgs, DevArgs, NetworkArgs, PayloadBuilderArgs, PruningArgs, RpcServerArgs, TxPoolArgs, }, - cli::ext::{RethCliExt, RethNodeCommandExt}, + cli::ext::{RethCliExt, RethNodeCommandConfig}, dirs::{DataDirPath, MaybePlatformPath}, init::init_genesis, node::cl_events::ConsensusLayerHealthEvents, diff --git a/examples/additional-rpc-namespace-in-cli/src/main.rs b/examples/additional-rpc-namespace-in-cli/src/main.rs index 817e6ddae629..e929fc5a7ec9 100644 --- a/examples/additional-rpc-namespace-in-cli/src/main.rs +++ b/examples/additional-rpc-namespace-in-cli/src/main.rs @@ -16,7 +16,7 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use reth::{ cli::{ config::RethRpcConfig, - ext::{RethCliExt, RethNodeCommandExt}, + ext::{RethCliExt, RethNodeCommandConfig}, Cli, }, network::{NetworkInfo, Peers}, @@ -49,7 +49,7 @@ struct RethCliTxpoolExt { pub enable_ext: bool, } -impl RethNodeCommandExt for RethCliTxpoolExt { +impl RethNodeCommandConfig for RethCliTxpoolExt { // This is the entrypoint for the CLI to extend the RPC server with custom rpc namespaces. fn extend_rpc_modules( &mut self, From 0840acd87b4c784542d9200a050d79194c4e5031 Mon Sep 17 00:00:00 2001 From: Bjerg Date: Tue, 15 Aug 2023 14:38:59 +0200 Subject: [PATCH 432/722] docs: update table layout (#4194) --- docs/design/database.md | 89 +++++++++++++++++++++++++++++++---------- 1 file changed, 67 insertions(+), 22 deletions(-) diff --git a/docs/design/database.md b/docs/design/database.md index b6878c84954c..eae89bf9d011 100644 --- a/docs/design/database.md +++ b/docs/design/database.md @@ -21,9 +21,7 @@ * Passthrough (called `no_codec` in the codebase) * We made implementation of these traits easy via a derive macro called [`main_codec`](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/codecs/derive/src/lib.rs#L15) that delegates to one of Compact (default), Scale, Postcard or Passthrough encoding. This is [derived on every struct we need](https://github.com/search?q=repo%3Aparadigmxyz%2Freth%20%22%23%5Bmain_codec%5D%22&type=code), and lets us experiment with different encoding formats without having to modify the entire codebase each time. - - -# Table design +### Table layout Historical state changes are indexed by `BlockNumber`. This means that `reth` stores the state for every account after every block that touched it, and it provides indexes for accessing that data quickly. While this may make the database size bigger (needs benchmark once `reth` is closer to prod). 
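To make the `BlockNumber` indexing concrete, the sketch below models the lookup these tables enable: the changeset recorded at block `B` stores an account's value *before* block `B` changed it, so the state at the end of block `N` is the pre-state of the first change after `N`, falling back to the plain (latest) state when no later change exists. This is a toy model, with a `BTreeMap` standing in for the sharded `AccountHistory`/`AccountChangeSet` tables rather than reth's actual API:

```rust
use std::collections::BTreeMap;

type BlockNumber = u64;

#[derive(Clone, Copy, Debug, PartialEq)]
struct Account {
    nonce: u64,
    balance: u128,
}

struct AccountHistoryModel {
    /// Models `AccountChangeSet` for one address: for every block that changed
    /// the account, the value it held *before* that block.
    changesets: BTreeMap<BlockNumber, Account>,
    /// Models `PlainAccountState`: the current (latest) value.
    plain: Option<Account>,
}

impl AccountHistoryModel {
    /// Account state as of the end of `block`.
    fn account_at(&self, block: BlockNumber) -> Option<Account> {
        match self.changesets.range(block + 1..).next() {
            // the first change *after* `block` recorded the value the account
            // still had at the end of `block`
            Some((_, pre_state)) => Some(*pre_state),
            // no later change: the latest state already applies
            None => self.plain,
        }
    }
}

fn main() {
    let mut changesets = BTreeMap::new();
    // block 10 changed the account; before that change it was (nonce 1, balance 100)
    changesets.insert(10, Account { nonce: 1, balance: 100 });
    let history = AccountHistoryModel {
        changesets,
        plain: Some(Account { nonce: 2, balance: 50 }),
    };

    assert_eq!(history.account_at(7), Some(Account { nonce: 1, balance: 100 }));
    assert_eq!(history.account_at(12), Some(Account { nonce: 2, balance: 50 }));
}
```

In reth the same walk runs over the `AccountHistory` shards (per-address lists of block numbers) to locate the relevant changeset without scanning the whole table.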
@@ -48,22 +46,43 @@ BlockBodyIndices { u64 first_tx_num u64 tx_count } -Receipts { - u64 TxNumber "PK" - Receipt Data +BlockOmmers { + u64 BlockNumber "PK" + Header[] Ommers +} +BlockWithdrawals { + u64 BlockNumber "PK" + Withdrawal[] Withdrawals } Transactions { u64 TxNumber "PK" TransactionSignedNoHash Data } -TransactionHash { +TxHashNumber { H256 TxHash "PK" u64 TxNumber } TransactionBlock { - u64 TxNumber "PK" + u64 MaxTxNumber "PK" u64 BlockNumber } +Receipts { + u64 TxNumber "PK" + Receipt Data +} +Bytecodes { + H256 CodeHash "PK" + Bytes Code +} +PlainAccountState { + Address Account "PK" + Account Data +} +PlainStorageState { + Address Account "PK" + H256 StorageKey "PK" + U256 StorageValue +} AccountHistory { H256 Account "PK" BlockNumberList BlockNumberList "List of transitions where account was changed" @@ -84,18 +103,44 @@ StorageChangeSet { H256 StorageKey "PK" ChangeSet StorageChangeSet "Storage entry before transition" } -EVM ||--o{ AccountHistory: "Load Account by first greater BlockNumber" -EVM ||--o{ StorageHistory: "Load Storage Entry by first greater BlockNumber" -TransactionHash ||--o{ Transactions : index -Transactions ||--o{ TransactionBlock : index -BlockBodyIndices ||--o{ TransactionBlock : "index" -TransactionBlock ||--o{ BlockBodyIndices : "index" -Headers ||--o{ AccountChangeSet : "unique index" -AccountHistory ||--o{ AccountChangeSet : index -StorageHistory ||--o{ StorageChangeSet : index -Headers ||--o{ StorageChangeSet : "unique index" -BlockBodyIndices ||--o{ Headers : "index" -Headers ||--o{ HeaderNumbers : "Calculate hash from header" -CanonicalHeaders ||--o{ Headers : "index" -Transactions ||--o{ Receipts : index +HashedAccount { + H256 HashedAddress "PK" + Account Data +} +HashedStorage { + H256 HashedAddress "PK" + H256 HashedStorageKey "PK" + U256 StorageValue +} +AccountsTrie { + StoredNibbles Nibbles "PK" + BranchNodeCompact Node +} +StoragesTrie { + H256 HashedAddress "PK" + StoredNibblesSubKey NibblesSubKey "PK" + StorageTrieEntry Node +} +TxSenders { + u64 TxNumber "PK" + Address Sender +} +TxHashNumber ||--|| Transactions : "hash -> tx id" +TransactionBlock ||--|{ Transactions : "tx id -> block number" +BlockBodyIndices ||--o{ Transactions : "block number -> tx ids" +Headers ||--o{ AccountChangeSet : "each block has zero or more changesets" +Headers ||--o{ StorageChangeSet : "each block has zero or more changesets" +AccountHistory }|--|{ AccountChangeSet : index +StorageHistory }|--|{ StorageChangeSet : index +Headers ||--o| BlockOmmers : "each block has 0 or more ommers" +BlockBodyIndices ||--|| Headers : "index" +HeaderNumbers |o--|| Headers : "block hash -> block number" +CanonicalHeaders |o--|| Headers : "canonical chain block number -> block hash" +Transactions ||--|| Receipts : "each tx has a receipt" +PlainAccountState }o--o| Bytecodes : "an account can have a bytecode" +PlainAccountState ||--o{ PlainStorageState : "an account has 0 or more storage slots" +Transactions ||--|| TxSenders : "a tx has exactly 1 sender" + +PlainAccountState ||--|| HashedAccount : "hashed representation" +PlainStorageState ||--|| HashedStorage : "hashed representation" ``` From bdcc56bbf667d562da2715f232a2e70da36250c6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 15 Aug 2023 16:20:36 +0200 Subject: [PATCH 433/722] chore: rm some unused (#4208) --- crates/storage/provider/src/providers/chain_info.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/crates/storage/provider/src/providers/chain_info.rs 
b/crates/storage/provider/src/providers/chain_info.rs
index b15db2af4d21..2532ad866deb 100644
--- a/crates/storage/provider/src/providers/chain_info.rs
+++ b/crates/storage/provider/src/providers/chain_info.rs
@@ -56,19 +56,16 @@ impl ChainInfoTracker {
     }
 
     /// Returns the canonical head of the chain.
-    #[allow(unused)]
     pub(crate) fn get_canonical_head(&self) -> SealedHeader {
         self.inner.canonical_head.read().clone()
     }
 
     /// Returns the safe header of the chain.
-    #[allow(unused)]
     pub(crate) fn get_safe_header(&self) -> Option<SealedHeader> {
         self.inner.safe_block.read().clone()
     }
 
     /// Returns the finalized header of the chain.
-    #[allow(unused)]
     pub(crate) fn get_finalized_header(&self) -> Option<SealedHeader> {
         self.inner.finalized_block.read().clone()
     }

From 7f83d0a05a05c4d536582c18127b5837e745e484 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Tue, 15 Aug 2023 18:29:53 +0200
Subject: [PATCH 434/722] chore: handle full peer request channel (#4211)

---
 crates/net/network/src/metrics.rs      | 2 ++
 crates/net/network/src/transactions.rs | 4 ++++
 2 files changed, 6 insertions(+)

diff --git a/crates/net/network/src/metrics.rs b/crates/net/network/src/metrics.rs
index 43c3b03f481d..9c527ea01f0a 100644
--- a/crates/net/network/src/metrics.rs
+++ b/crates/net/network/src/metrics.rs
@@ -67,6 +67,8 @@ pub struct TransactionsManagerMetrics {
     pub(crate) messages_with_already_seen_transactions: Counter,
     /// Number of transactions about to be imported into the pool.
    pub(crate) pending_pool_imports: Gauge,
+    /// How often we failed to send a request to the peer because the channel was full.
+    pub(crate) egress_peer_channel_full: Counter,
 }
 
 /// Metrics for Disconnection types
diff --git a/crates/net/network/src/transactions.rs b/crates/net/network/src/transactions.rs
index 87850f1e4f51..6199b46ecc85 100644
--- a/crates/net/network/src/transactions.rs
+++ b/crates/net/network/src/transactions.rs
@@ -341,6 +341,10 @@ where
 
             if peer.request_tx.try_send(req).is_ok() {
                 self.inflight_requests.push(GetPooledTxRequest { peer_id, response: rx })
+            } else {
+                // peer channel is saturated, drop the request
+                self.metrics.egress_peer_channel_full.increment(1);
+                return
             }
 
             if num_already_seen > 0 {
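The saturation handling added in the patch above amounts to a non-blocking send with a metrics counter as the overflow path, preferring to drop a request over stalling the transactions manager on a slow peer. A stand-alone model of that pattern, assuming a bounded `tokio::sync::mpsc` channel in place of the real per-peer `request_tx`:

```rust
use tokio::sync::mpsc;

fn main() {
    // A bounded per-peer channel; capacity 1 so the second send overflows.
    let (request_tx, _request_rx) = mpsc::channel::<&'static str>(1);
    let mut egress_peer_channel_full = 0u64; // models the new `Counter` metric

    for req in ["GetPooledTransactions #1", "GetPooledTransactions #2"] {
        if request_tx.try_send(req).is_err() {
            // peer channel is saturated, drop the request instead of waiting
            egress_peer_channel_full += 1;
        }
    }

    assert_eq!(egress_peer_channel_full, 1);
}
```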
From 981873dca9b81e7c0c255d3953cb9fb90de56a99 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Tue, 15 Aug 2023 18:36:39 +0200
Subject: [PATCH 435/722] chore: enforce recommended soft limit for get pooled requests (#4210)

---
 crates/net/network/src/transactions.rs | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/crates/net/network/src/transactions.rs b/crates/net/network/src/transactions.rs
index 6199b46ecc85..b67675a6f022 100644
--- a/crates/net/network/src/transactions.rs
+++ b/crates/net/network/src/transactions.rs
@@ -47,6 +47,11 @@ const NEW_POOLED_TRANSACTION_HASHES_SOFT_LIMIT: usize = 4096;
 /// The target size for the message of full transactions.
 const MAX_FULL_TRANSACTIONS_PACKET_SIZE: usize = 100 * 1024;
 
+/// Recommended soft limit for the number of hashes in a GetPooledTransactions message (8kb)
+///
+/// <https://github.com/ethereum/devp2p/blob/master/caps/eth.md#getpooledtransactions-0x09>
+const GET_POOLED_TRANSACTION_SOFT_LIMIT_NUM_HASHES: usize = 256;
+
 /// The future for inserting a transaction into the pool
 pub type PoolImportFuture = Pin<Box<dyn Future<Output = PoolResult<TxHash>> + Send + 'static>>;
 
@@ -332,6 +337,10 @@ where
             return
         }
 
+        // enforce recommended soft limit, however the peer may enforce an arbitrary limit on
+        // the response (2MB)
+        hashes.truncate(GET_POOLED_TRANSACTION_SOFT_LIMIT_NUM_HASHES);
+
         // request the missing transactions
         let (response, rx) = oneshot::channel();
         let req = PeerRequest::GetPooledTransactions {

From 90d2a00a2da0b9baf9c5bdb95420ab933148cb66 Mon Sep 17 00:00:00 2001
From: Alexey Shekhirin
Date: Tue, 15 Aug 2023 18:19:52 +0100
Subject: [PATCH 436/722] fix(pruner): percentage progress (#4197)

---
 crates/prune/src/pruner.rs                                  | 8 ++------
 .../storage/provider/src/providers/database/provider.rs     | 6 +++---
 2 files changed, 5 insertions(+), 9 deletions(-)

diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs
index dda0304d332b..e39cd5b4c29d 100644
--- a/crates/prune/src/pruner.rs
+++ b/crates/prune/src/pruner.rs
@@ -240,16 +240,14 @@ impl Pruner {
         };
         let total = range.clone().count();
 
-        let mut processed = 0;
         provider.prune_table_with_iterator_in_batches::<tables::Receipts>(
             range,
             self.batch_sizes.receipts,
             |rows| {
-                processed += rows;
                 trace!(
                     target: "pruner",
                     %rows,
-                    progress = format!("{:.1}%", 100.0 * processed as f64 / total as f64),
+                    progress = format!("{:.1}%", 100.0 * rows as f64 / total as f64),
                     "Pruned receipts"
                 );
             },
@@ -348,16 +346,14 @@ impl Pruner {
         };
         let total = range.clone().count();
 
-        let mut processed = 0;
         provider.prune_table_with_range_in_batches::<tables::TxSenders>(
             range,
             self.batch_sizes.transaction_senders,
             |rows, _| {
-                processed += rows;
                 trace!(
                     target: "pruner",
                     %rows,
-                    progress = format!("{:.1}%", 100.0 * processed as f64 / total as f64),
+                    progress = format!("{:.1}%", 100.0 * rows as f64 / total as f64),
                     "Pruned transaction senders"
                 );
             },
diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs
index 2f122fbe2378..ba1474afd6db 100644
--- a/crates/storage/provider/src/providers/database/provider.rs
+++ b/crates/storage/provider/src/providers/database/provider.rs
@@ -630,7 +630,7 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> DatabaseProvider<'this, TX> {
     }
 
     /// Prune the table for the specified pre-sorted key iterator, calling `chunk_callback` after
-    /// every `batch_size` pruned rows.
+    /// every `batch_size` pruned rows with the total number of rows pruned.
     ///
     /// Returns number of rows pruned.
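Because the batch callback now receives the cumulative number of deleted rows, the progress percentage can be computed directly from its argument. Before the helper itself (which continues below), here is a compact, self-contained model of its callback cadence: one call per full batch, plus one final call for a trailing partial batch.

```rust
/// Models the batch-callback cadence of the pruning helpers: the callback is
/// invoked with the *cumulative* number of deleted rows.
fn prune_in_batches(total: usize, batch_size: usize, mut batch_callback: impl FnMut(usize)) {
    let mut deleted = 0;
    for _ in 0..total {
        deleted += 1;
        if deleted % batch_size == 0 {
            batch_callback(deleted);
        }
    }
    // trailing partial batch
    if deleted % batch_size != 0 {
        batch_callback(deleted);
    }
}

fn main() {
    let total = 10;
    // prints 40.0%, 80.0% and 100.0% -- matching the fixed progress computation
    prune_in_batches(total, 4, |rows| {
        println!("pruned {rows}/{total} ({:.1}%)", 100.0 * rows as f64 / total as f64);
    });
}
```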
pub fn prune_table_with_iterator_in_batches( @@ -649,12 +649,12 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> DatabaseProvider<'this, TX> { deleted += 1; if deleted % batch_size == 0 { - batch_callback(batch_size); + batch_callback(deleted); } } if deleted % batch_size != 0 { - batch_callback(deleted % batch_size); + batch_callback(deleted); } Ok(deleted) From c743acde77cf86e2147499fe0fb69de3a1d664ab Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 15 Aug 2023 18:28:30 +0100 Subject: [PATCH 437/722] feat(db): add search to `reth db list` command (#4165) --- Cargo.lock | 16 +++ bin/reth/Cargo.toml | 1 + bin/reth/src/db/list.rs | 59 +++++++++-- bin/reth/src/db/tui.rs | 18 ++-- bin/reth/src/init.rs | 4 +- bin/reth/src/test_vectors/tables.rs | 6 +- bin/reth/src/utils.rs | 104 +++++++++++++++++--- crates/stages/src/test_utils/test_db.rs | 6 +- crates/storage/db/benches/hash_keys.rs | 6 +- crates/storage/db/benches/utils.rs | 2 +- crates/storage/db/src/abstraction/cursor.rs | 10 +- crates/storage/db/src/abstraction/table.rs | 3 + crates/storage/db/src/tables/mod.rs | 2 +- crates/storage/db/src/tables/raw.rs | 17 +++- crates/storage/db/src/tables/utils.rs | 4 +- 15 files changed, 206 insertions(+), 52 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b2c6efefd626..ceac36ad042b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -846,6 +846,15 @@ name = "boa_profiler" version = "0.17.0" source = "git+https://github.com/boa-dev/boa#a3b46545a2a09f9ac81fd83ac6b180934c728f61" +[[package]] +name = "boyer-moore-magiclen" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c77eb6b3a37f71fcd40e49b56c028ea8795c0e550afd8021e3e6a2369653035" +dependencies = [ + "debug-helper", +] + [[package]] name = "brotli" version = "3.3.4" @@ -1604,6 +1613,12 @@ version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" +[[package]] +name = "debug-helper" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f578e8e2c440e7297e008bb5486a3a8a194775224bbc23729b0dbdfaeebf162e" + [[package]] name = "debugid" version = "0.8.0" @@ -5202,6 +5217,7 @@ name = "reth" version = "0.1.0-alpha.6" dependencies = [ "backon", + "boyer-moore-magiclen", "clap", "comfy-table", "confy", diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 6f636cbb5143..24e35172428a 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -89,6 +89,7 @@ thiserror.workspace = true pretty_assertions = "1.3.0" humantime = "2.1.0" const-str = "0.5.6" +boyer-moore-magiclen = "0.2.16" [target.'cfg(not(windows))'.dependencies] jemallocator = { version = "0.5.0", optional = true } diff --git a/bin/reth/src/db/list.rs b/bin/reth/src/db/list.rs index b7fe572ccc42..4fc1fb7cad65 100644 --- a/bin/reth/src/db/list.rs +++ b/bin/reth/src/db/list.rs @@ -1,9 +1,9 @@ -use crate::utils::DbTool; -use clap::Parser; - use super::tui::DbListTUI; +use crate::utils::{DbTool, ListFilter}; +use clap::Parser; use eyre::WrapErr; use reth_db::{database::Database, table::Table, DatabaseEnvRO, TableType, TableViewer, Tables}; +use std::cell::RefCell; use tracing::error; const DEFAULT_NUM_ITEMS: &str = "5"; @@ -22,6 +22,16 @@ pub struct Command { /// How many items to take from the walker #[arg(long, short, default_value = DEFAULT_NUM_ITEMS)] len: usize, + /// Search parameter for both keys and values. 
Prefix it with `0x` to search for binary data, + /// and text otherwise. + /// + /// ATTENTION! For compressed tables (`Transactions` and `Receipts`), there might be + /// missing results since the search uses the raw uncompressed value from the database. + #[arg(long)] + search: Option, + /// Returns the number of rows found. + #[arg(long, short)] + count: bool, /// Dump as JSON instead of using TUI. #[arg(long, short)] json: bool, @@ -38,6 +48,28 @@ impl Command { Ok(()) } + + /// Generate [`ListFilter`] from command. + pub fn list_filter(&self) -> ListFilter { + let search = self + .search + .as_ref() + .map(|search| { + if let Some(search) = search.strip_prefix("0x") { + return hex::decode(search).unwrap() + } + search.as_bytes().to_vec() + }) + .unwrap_or_default(); + + ListFilter { + skip: self.skip, + len: self.len, + search, + reverse: self.reverse, + only_count: self.count, + } + } } struct ListTableViewer<'a> { @@ -64,13 +96,24 @@ impl TableViewer<()> for ListTableViewer<'_> { return Ok(()); } - if self.args.json { - let list_result = self.tool.list::(self.args.skip, self.args.len, self.args.reverse)?.into_iter().collect::>(); - println!("{}", serde_json::to_string_pretty(&list_result)?); + + let list_filter = self.args.list_filter(); + + if self.args.json || self.args.count { + let (list, count) = self.tool.list::(&list_filter)?; + + if self.args.count { + println!("{count} entries found.") + }else { + println!("{}", serde_json::to_string_pretty(&list)?); + } Ok(()) + } else { - DbListTUI::<_, T>::new(|skip, count| { - self.tool.list::(skip, count, self.args.reverse).unwrap() + let list_filter = RefCell::new(list_filter); + DbListTUI::<_, T>::new(|skip, len| { + list_filter.borrow_mut().update_page(skip, len); + self.tool.list::(&list_filter.borrow()).unwrap().0 }, self.args.skip, self.args.len, total_entries).run() } })??; diff --git a/bin/reth/src/db/tui.rs b/bin/reth/src/db/tui.rs index 36072a039e6f..0f23f26110f0 100644 --- a/bin/reth/src/db/tui.rs +++ b/bin/reth/src/db/tui.rs @@ -3,7 +3,7 @@ use crossterm::{ execute, terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen}, }; -use reth_db::table::Table; +use reth_db::table::{Table, TableRow}; use std::{ io, time::{Duration, Instant}, @@ -45,7 +45,7 @@ pub(crate) enum ViewMode { #[derive(Default)] pub(crate) struct DbListTUI where - F: FnMut(usize, usize) -> Vec<(T::Key, T::Value)>, + F: FnMut(usize, usize) -> Vec>, { /// Fetcher for the next page of items. /// @@ -65,12 +65,12 @@ where /// The state of the key list. list_state: ListState, /// Entries to show in the TUI. 
- entries: Vec<(T::Key, T::Value)>, + entries: Vec>, } impl DbListTUI where - F: FnMut(usize, usize) -> Vec<(T::Key, T::Value)>, + F: FnMut(usize, usize) -> Vec>, { /// Create a new database list TUI pub(crate) fn new(fetch: F, skip: usize, count: usize, total_entries: usize) -> Self { @@ -188,7 +188,7 @@ fn event_loop( tick_rate: Duration, ) -> io::Result<()> where - F: FnMut(usize, usize) -> Vec<(T::Key, T::Value)>, + F: FnMut(usize, usize) -> Vec>, { let mut last_tick = Instant::now(); let mut running = true; @@ -216,7 +216,7 @@ where /// Handle incoming events fn handle_event(app: &mut DbListTUI, event: Event) -> io::Result where - F: FnMut(usize, usize) -> Vec<(T::Key, T::Value)>, + F: FnMut(usize, usize) -> Vec>, { if app.mode == ViewMode::GoToPage { if let Event::Key(key) = event { @@ -282,7 +282,7 @@ where /// Render the UI fn ui(f: &mut Frame<'_, B>, app: &mut DbListTUI) where - F: FnMut(usize, usize) -> Vec<(T::Key, T::Value)>, + F: FnMut(usize, usize) -> Vec>, { let outer_chunks = Layout::default() .direction(Direction::Vertical) @@ -296,7 +296,7 @@ where .constraints([Constraint::Percentage(50), Constraint::Percentage(50)]) .split(outer_chunks[0]); - let key_length = format!("{}", app.skip + app.count - 1).len(); + let key_length = format!("{}", (app.skip + app.count).saturating_sub(1)).len(); let entries: Vec<_> = app.entries.iter().map(|(k, _)| k).collect(); @@ -312,7 +312,7 @@ where .block(Block::default().borders(Borders::ALL).title(format!( "Keys (Showing entries {}-{} out of {} entries)", app.skip, - app.skip + app.entries.len() - 1, + (app.skip + app.entries.len()).saturating_sub(1), app.total_entries ))) .style(Style::default().fg(Color::White)) diff --git a/bin/reth/src/init.rs b/bin/reth/src/init.rs index 5296c249762c..0f46139a675a 100644 --- a/bin/reth/src/init.rs +++ b/bin/reth/src/init.rs @@ -180,7 +180,7 @@ mod tests { use reth_db::{ models::{storage_sharded_key::StorageShardedKey, ShardedKey}, - table::Table, + table::{Table, TableRow}, test_utils::create_test_rw_db, DatabaseEnv, }; @@ -193,7 +193,7 @@ mod tests { #[allow(clippy::type_complexity)] fn collect_table_entries( tx: &>::TX, - ) -> Result, InitDatabaseError> + ) -> Result>, InitDatabaseError> where DB: Database, T: Table, diff --git a/bin/reth/src/test_vectors/tables.rs b/bin/reth/src/test_vectors/tables.rs index 0b4cde9501f3..2fa4f760f890 100644 --- a/bin/reth/src/test_vectors/tables.rs +++ b/bin/reth/src/test_vectors/tables.rs @@ -8,7 +8,7 @@ use proptest::{ test_runner::TestRunner, }; use reth_db::{ - table::{DupSort, Table}, + table::{DupSort, Table, TableRow}, tables, }; use reth_primitives::fs; @@ -81,7 +81,7 @@ where let mut rows = vec![]; let mut seen_keys = HashSet::new(); let strat = proptest::collection::vec( - any_with::<(T::Key, T::Value)>(( + any_with::>(( ::Parameters::default(), ::Parameters::default(), )), @@ -154,7 +154,7 @@ where } /// Save rows to file. -fn save_to_file(rows: Vec<(T::Key, T::Value)>) -> eyre::Result<()> +fn save_to_file(rows: Vec>) -> eyre::Result<()> where T::Key: serde::Serialize, T::Value: serde::Serialize, diff --git a/bin/reth/src/utils.rs b/bin/reth/src/utils.rs index 32cd073ce076..a40e92625ead 100644 --- a/bin/reth/src/utils.rs +++ b/bin/reth/src/utils.rs @@ -1,12 +1,14 @@ //! Common CLI utility functions. 
+use boyer_moore_magiclen::BMByte; use eyre::Result; use reth_consensus_common::validation::validate_block_standalone; use reth_db::{ cursor::DbCursorRO, database::Database, - table::Table, + table::{Table, TableRow}, transaction::{DbTx, DbTxMut}, + DatabaseError, RawTable, TableRawRow, }; use reth_interfaces::p2p::{ bodies::client::BodiesClient, @@ -19,6 +21,7 @@ use reth_primitives::{ use std::{ env::VarError, path::{Path, PathBuf}, + rc::Rc, sync::Arc, }; use tracing::info; @@ -103,23 +106,65 @@ impl<'a, DB: Database> DbTool<'a, DB> { /// Grabs the contents of the table within a certain index range and places the /// entries into a [`HashMap`][std::collections::HashMap]. - pub fn list( - &self, - skip: usize, - len: usize, - reverse: bool, - ) -> Result> { - let data = self.db.view(|tx| { - let mut cursor = tx.cursor_read::().expect("Was not able to obtain a cursor."); + /// + /// [`ListFilter`] can be used to further + /// filter down the desired results. (eg. List only rows which include `0xd3adbeef`) + pub fn list(&self, filter: &ListFilter) -> Result<(Vec>, usize)> { + let bmb = Rc::new(BMByte::from(&filter.search)); + if bmb.is_none() && filter.has_search() { + eyre::bail!("Invalid search.") + } + + let mut hits = 0; - if reverse { - cursor.walk_back(None)?.skip(skip).take(len).collect::>() + let data = self.db.view(|tx| { + let mut cursor = + tx.cursor_read::>().expect("Was not able to obtain a cursor."); + + let map_filter = |row: Result, _>| { + if let Ok((k, v)) = row { + let result = || { + if filter.only_count { + return None + } + Some((k.key().unwrap(), v.value().unwrap())) + }; + match &*bmb { + Some(searcher) => { + if searcher.find_first_in(v.raw_value()).is_some() || + searcher.find_first_in(k.raw_key()).is_some() + { + hits += 1; + return result() + } + } + None => { + hits += 1; + return result() + } + } + } + None + }; + + if filter.reverse { + Ok(cursor + .walk_back(None)? + .skip(filter.skip) + .filter_map(map_filter) + .take(filter.len) + .collect::>()) } else { - cursor.walk(None)?.skip(skip).take(len).collect::>() + Ok(cursor + .walk(None)? + .skip(filter.skip) + .filter_map(map_filter) + .take(filter.len) + .collect::>()) } })?; - data.map_err(|e| eyre::eyre!(e)) + Ok((data.map_err(|e: DatabaseError| eyre::eyre!(e))?, hits)) } /// Grabs the content of the table for the given key @@ -147,3 +192,36 @@ impl<'a, DB: Database> DbTool<'a, DB> { pub fn parse_path(value: &str) -> Result> { shellexpand::full(value).map(|path| PathBuf::from(path.into_owned())) } + +/// Filters the results coming from the database. +#[derive(Debug)] +pub struct ListFilter { + /// Skip first N entries. + pub skip: usize, + /// Take N entries. + pub len: usize, + /// Sequence of bytes that will be searched on values and keys from the database. + pub search: Vec, + /// Reverse order of entries. + pub reverse: bool, + /// Only counts the number of filtered entries without decoding and returning them. + pub only_count: bool, +} + +impl ListFilter { + /// Creates a new [`ListFilter`]. + pub fn new(skip: usize, len: usize, search: Vec, reverse: bool, only_count: bool) -> Self { + ListFilter { skip, len, search, reverse, only_count } + } + + /// If `search` has a list of bytes, then filter for rows that have this sequence. + pub fn has_search(&self) -> bool { + !self.search.is_empty() + } + + /// Updates the page with new `skip` and `len` values. 
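End to end, the search path decodes the CLI string into raw bytes (hex when `0x`-prefixed, UTF-8 otherwise) and matches them against each row's raw key and value bytes with the Boyer-Moore searcher. A small sketch reusing the same `BMByte::from`/`find_first_in` calls as `DbTool::list` above, with hypothetical sample bytes:

```rust
use boyer_moore_magiclen::BMByte;

fn search_to_bytes(search: &str) -> Vec<u8> {
    // mirrors `Command::list_filter`: `0x` means binary, anything else is text
    match search.strip_prefix("0x") {
        Some(hex_str) => hex::decode(hex_str).expect("valid hex after 0x"),
        None => search.as_bytes().to_vec(),
    }
}

fn main() {
    let pattern = search_to_bytes("0xdead");
    // `BMByte::from` returns `None` when it cannot build a searcher
    // (e.g. for an empty pattern) -- the "Invalid search." case above
    let searcher = BMByte::from(&pattern).expect("non-empty search pattern");

    // hypothetical stand-ins for a row's raw key and raw (compressed) value bytes
    let raw_key: Vec<u8> = b"some-key".to_vec();
    let raw_value: Vec<u8> = vec![0x00, 0xde, 0xad, 0xbe, 0xef];

    // a row is a hit when the pattern occurs in either the raw key or the raw value
    let hit = searcher.find_first_in(&raw_value).is_some()
        || searcher.find_first_in(&raw_key).is_some();
    assert!(hit);
}
```

Note the caveat from the patch: because matching runs on the raw, still-compressed value bytes, hits can be missed for compressed tables such as `Transactions` and `Receipts`.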
+ pub fn update_page(&mut self, skip: usize, len: usize) { + self.skip = skip; + self.len = len; + } +} diff --git a/crates/stages/src/test_utils/test_db.rs b/crates/stages/src/test_utils/test_db.rs index 630ba97f7c06..8537c47bcb55 100644 --- a/crates/stages/src/test_utils/test_db.rs +++ b/crates/stages/src/test_utils/test_db.rs @@ -3,7 +3,7 @@ use reth_db::{ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, database::DatabaseGAT, models::{AccountBeforeTx, StoredBlockBodyIndices}, - table::Table, + table::{Table, TableRow}, tables, test_utils::{create_test_rw_db, create_test_rw_db_with_path}, transaction::{DbTx, DbTxGAT, DbTxMut, DbTxMutGAT}, @@ -122,7 +122,7 @@ impl TestTransaction { where T: Table, S: Clone, - F: FnMut(&S) -> (T::Key, T::Value), + F: FnMut(&S) -> TableRow, { self.commit(|tx| { values.iter().try_for_each(|src| { @@ -147,7 +147,7 @@ impl TestTransaction { T: Table, ::Value: Clone, S: Clone, - F: FnMut(&Option<::Value>, &S) -> (T::Key, T::Value), + F: FnMut(&Option<::Value>, &S) -> TableRow, { self.commit(|tx| { let mut cursor = tx.cursor_write::()?; diff --git a/crates/storage/db/benches/hash_keys.rs b/crates/storage/db/benches/hash_keys.rs index 49440da7cc40..d00384a6e3c3 100644 --- a/crates/storage/db/benches/hash_keys.rs +++ b/crates/storage/db/benches/hash_keys.rs @@ -106,7 +106,7 @@ where // Iteration to be benchmarked let execution = |(input, db)| { - let mut input: Vec<(T::Key, T::Value)> = input; + let mut input: Vec> = input; if scenario_str.contains("_sorted") || scenario_str.contains("append") { input.sort_by(|a, b| a.0.cmp(&b.0)); } @@ -134,14 +134,14 @@ where /// Generates two batches. The first is to be inserted into the database before running the /// benchmark. The second is to be benchmarked with. #[allow(clippy::type_complexity)] -fn generate_batches(size: usize) -> (Vec<(T::Key, T::Value)>, Vec<(T::Key, T::Value)>) +fn generate_batches(size: usize) -> (Vec>, Vec>) where T: Table + Default, T::Key: std::hash::Hash + Arbitrary, T::Value: Arbitrary, { let strat = proptest::collection::vec( - any_with::<(T::Key, T::Value)>(( + any_with::>(( ::Parameters::default(), ::Parameters::default(), )), diff --git a/crates/storage/db/benches/utils.rs b/crates/storage/db/benches/utils.rs index ea330295bdca..5951b7381520 100644 --- a/crates/storage/db/benches/utils.rs +++ b/crates/storage/db/benches/utils.rs @@ -25,7 +25,7 @@ where T::Key: Default + Clone + for<'de> serde::Deserialize<'de>, T::Value: Default + Clone + for<'de> serde::Deserialize<'de>, { - let list: Vec<(T::Key, T::Value)> = serde_json::from_reader(std::io::BufReader::new( + let list: Vec> = serde_json::from_reader(std::io::BufReader::new( std::fs::File::open(format!( "{}/../../../testdata/micro/db/{}.json", env!("CARGO_MANIFEST_DIR"), diff --git a/crates/storage/db/src/abstraction/cursor.rs b/crates/storage/db/src/abstraction/cursor.rs index af54eb0879d4..7414190b19c8 100644 --- a/crates/storage/db/src/abstraction/cursor.rs +++ b/crates/storage/db/src/abstraction/cursor.rs @@ -5,7 +5,7 @@ use std::{ use crate::{ common::{IterPairResult, PairResult, ValueOnlyResult}, - table::{DupSort, Table}, + table::{DupSort, Table, TableRow}, DatabaseError, }; @@ -151,7 +151,7 @@ pub struct Walker<'cursor, 'tx, T: Table, CURSOR: DbCursorRO<'tx, T>> { impl<'cursor, 'tx, T: Table, CURSOR: DbCursorRO<'tx, T>> std::iter::Iterator for Walker<'cursor, 'tx, T, CURSOR> { - type Item = Result<(T::Key, T::Value), DatabaseError>; + type Item = Result, DatabaseError>; fn next(&mut self) -> Option { let start = self.start.take(); 
if start.is_some() { @@ -220,7 +220,7 @@ impl<'cursor, 'tx, T: Table, CURSOR: DbCursorRW<'tx, T> + DbCursorRO<'tx, T>> impl<'cursor, 'tx, T: Table, CURSOR: DbCursorRO<'tx, T>> std::iter::Iterator for ReverseWalker<'cursor, 'tx, T, CURSOR> { - type Item = Result<(T::Key, T::Value), DatabaseError>; + type Item = Result, DatabaseError>; fn next(&mut self) -> Option { let start = self.start.take(); @@ -250,7 +250,7 @@ pub struct RangeWalker<'cursor, 'tx, T: Table, CURSOR: DbCursorRO<'tx, T>> { impl<'cursor, 'tx, T: Table, CURSOR: DbCursorRO<'tx, T>> std::iter::Iterator for RangeWalker<'cursor, 'tx, T, CURSOR> { - type Item = Result<(T::Key, T::Value), DatabaseError>; + type Item = Result, DatabaseError>; fn next(&mut self) -> Option { if self.is_done { return None @@ -334,7 +334,7 @@ impl<'cursor, 'tx, T: DupSort, CURSOR: DbCursorRW<'tx, T> + DbDupCursorRO<'tx, T impl<'cursor, 'tx, T: DupSort, CURSOR: DbDupCursorRO<'tx, T>> std::iter::Iterator for DupWalker<'cursor, 'tx, T, CURSOR> { - type Item = Result<(T::Key, T::Value), DatabaseError>; + type Item = Result, DatabaseError>; fn next(&mut self) -> Option { let start = self.start.take(); if start.is_some() { diff --git a/crates/storage/db/src/abstraction/table.rs b/crates/storage/db/src/abstraction/table.rs index 18e66fe0e179..668bdf6998bb 100644 --- a/crates/storage/db/src/abstraction/table.rs +++ b/crates/storage/db/src/abstraction/table.rs @@ -81,6 +81,9 @@ pub trait Table: Send + Sync + Debug + 'static { type Value: Value; } +/// Tuple with `T::Key` and `T::Value`. +pub type TableRow = (::Key, ::Value); + /// DupSort allows for keys to be repeated in the database. /// /// Upstream docs: diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index 4c8c0c4909ed..1c3c148c23c3 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -18,7 +18,7 @@ mod raw; pub(crate) mod utils; use crate::abstraction::table::Table; -pub use raw::{RawDupSort, RawKey, RawTable, RawValue}; +pub use raw::{RawDupSort, RawKey, RawTable, RawValue, TableRawRow}; use std::{fmt::Display, str::FromStr}; /// Declaration of all Database tables. diff --git a/crates/storage/db/src/tables/raw.rs b/crates/storage/db/src/tables/raw.rs index a1cf04ff3d4d..00f5db2a142f 100644 --- a/crates/storage/db/src/tables/raw.rs +++ b/crates/storage/db/src/tables/raw.rs @@ -4,6 +4,9 @@ use crate::{ }; use serde::{Deserialize, Serialize}; +/// Tuple with `RawKey` and `RawValue`. +pub type TableRawRow = (RawKey<::Key>, RawValue<::Value>); + /// Raw table that can be used to access any table and its data in raw mode. /// This is useful for delayed decoding/encoding of data. #[derive(Default, Copy, Clone, Debug)] @@ -41,6 +44,7 @@ impl DupSort for RawDupSort { /// Raw table key. #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] pub struct RawKey { + /// Inner encoded key key: Vec, _phantom: std::marker::PhantomData, } @@ -50,10 +54,14 @@ impl RawKey { pub fn new(key: K) -> Self { Self { key: K::encode(key).as_ref().to_vec(), _phantom: std::marker::PhantomData } } - /// Returns the raw key. + /// Returns the decoded value. pub fn key(&self) -> Result { K::decode(&self.key) } + /// Returns the raw key as seen on the database. + pub fn raw_key(&self) -> &Vec { + &self.key + } } impl From for RawKey { @@ -87,6 +95,7 @@ impl Decode for RawKey { /// Raw table value. 
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Serialize, Ord, Hash)] pub struct RawValue { + /// Inner compressed value value: Vec, _phantom: std::marker::PhantomData, } @@ -96,10 +105,14 @@ impl RawValue { pub fn new(value: V) -> Self { Self { value: V::compress(value).as_ref().to_vec(), _phantom: std::marker::PhantomData } } - /// Returns the raw value. + /// Returns the decompressed value. pub fn value(&self) -> Result { V::decompress(&self.value) } + /// Returns the raw value as seen on the database. + pub fn raw_value(&self) -> &Vec { + &self.value + } } impl AsRef<[u8]> for RawValue> { diff --git a/crates/storage/db/src/tables/utils.rs b/crates/storage/db/src/tables/utils.rs index f05ed6a28740..13bd1ce278e6 100644 --- a/crates/storage/db/src/tables/utils.rs +++ b/crates/storage/db/src/tables/utils.rs @@ -1,6 +1,6 @@ //! Small database table utilities and helper functions. use crate::{ - table::{Decode, Decompress, Table}, + table::{Decode, Decompress, Table, TableRow}, DatabaseError, }; use std::borrow::Cow; @@ -42,7 +42,7 @@ macro_rules! impl_fixed_arbitrary { /// Helper function to decode a `(key, value)` pair. pub(crate) fn decoder<'a, T>( kv: (Cow<'a, [u8]>, Cow<'a, [u8]>), -) -> Result<(T::Key, T::Value), DatabaseError> +) -> Result, DatabaseError> where T: Table, T::Key: Decode, From 44874bc557567174d18ca02f3e7ae141a3de492a Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 15 Aug 2023 18:40:09 +0100 Subject: [PATCH 438/722] feat: add `TransactionSigned::recover_signers` with the same order (#4120) --- Cargo.lock | 1 + crates/consensus/auto-seal/src/lib.rs | 6 +- crates/primitives/Cargo.toml | 1 + crates/primitives/src/block.rs | 2 +- crates/primitives/src/transaction/mod.rs | 61 ++++++++++++++++++- crates/revm/src/executor.rs | 7 +-- .../src/providers/database/provider.rs | 18 +++--- 7 files changed, 76 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ceac36ad042b..4a8d706e779a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5817,6 +5817,7 @@ dependencies = [ "proptest", "proptest-derive", "rand 0.8.5", + "rayon", "reth-codecs", "reth-rlp", "reth-rlp-derive", diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index 7b14f544a483..cadfb5593b7c 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -349,10 +349,8 @@ impl StorageInner { let block = Block { header, body: transactions, ommers: vec![], withdrawals: None }; - let senders = - block.body.iter().map(|tx| tx.recover_signer()).collect::>>().ok_or( - BlockExecutionError::Validation(BlockValidationError::SenderRecoveryError), - )?; + let senders = TransactionSigned::recover_signers(&block.body, block.body.len()) + .ok_or(BlockExecutionError::Validation(BlockValidationError::SenderRecoveryError))?; trace!(target: "consensus::auto", transactions=?&block.body, "executing transactions"); diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 8716a1e228b4..da697b8b25fc 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -60,6 +60,7 @@ impl-serde = "0.4.0" once_cell = "1.17.0" zstd = { version = "0.12", features = ["experimental"] } paste = "1.0" +rayon = "1.7" tempfile = "3.3" sha2 = "0.10.7" diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 0c9866ba28ef..5248787c15d6 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -163,7 +163,7 @@ impl SealedBlock { /// 
Expensive operation that recovers transaction signer. See [SealedBlockWithSenders]. pub fn senders(&self) -> Option> { - self.body.iter().map(|tx| tx.recover_signer()).collect::>>() + TransactionSigned::recover_signers(&self.body, self.body.len()) } /// Seal sealed block with recovered transaction senders. diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index f63c6b9e1e01..ed347ce3f46a 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -7,6 +7,8 @@ use bytes::{Buf, BytesMut}; use derive_more::{AsRef, Deref}; pub use error::InvalidTransactionError; pub use meta::TransactionMeta; +use once_cell::sync::Lazy; +use rayon::prelude::{IntoParallelIterator, ParallelIterator}; use reth_codecs::{add_arbitrary_tests, derive_arbitrary, Compact}; use reth_rlp::{ length_of_length, Decodable, DecodeError, Encodable, Header, EMPTY_LIST_CODE, EMPTY_STRING_CODE, @@ -34,6 +36,15 @@ mod signature; mod tx_type; pub(crate) mod util; +// Expected number of transactions where we can expect a speed-up by recovering the senders in +// parallel. +pub(crate) static PARALLEL_SENDER_RECOVERY_THRESHOLD: Lazy = + Lazy::new(|| match rayon::current_num_threads() { + 0..=1 => usize::MAX, + 2..=8 => 10, + _ => 5, + }); + /// A raw transaction. /// /// Transaction types were introduced in [EIP-2718](https://eips.ethereum.org/EIPS/eip-2718). @@ -939,6 +950,21 @@ impl TransactionSigned { self.signature.recover_signer(signature_hash) } + /// Recovers a list of signers from a transaction list iterator + /// + /// Returns `None`, if some transaction's signature is invalid, see also + /// [Self::recover_signer]. + pub fn recover_signers<'a, T>(txes: T, num_txes: usize) -> Option> + where + T: IntoParallelIterator + IntoIterator + Send, + { + if num_txes < *PARALLEL_SENDER_RECOVERY_THRESHOLD { + txes.into_iter().map(|tx| tx.recover_signer()).collect() + } else { + txes.into_par_iter().map(|tx| tx.recover_signer()).collect() + } + } + /// Consumes the type, recover signer and return [`TransactionSignedEcRecovered`] /// /// Returns `None` if the transaction's signature is invalid, see also [Self::recover_signer]. @@ -1323,12 +1349,17 @@ impl IntoRecoveredTransaction for TransactionSignedEcRecovered { #[cfg(test)] mod tests { use crate::{ - transaction::{signature::Signature, TransactionKind, TxEip1559, TxLegacy}, + sign_message, + transaction::{ + signature::Signature, TransactionKind, TxEip1559, TxLegacy, + PARALLEL_SENDER_RECOVERY_THRESHOLD, + }, Address, Bytes, Transaction, TransactionSigned, TransactionSignedEcRecovered, H256, U256, }; use bytes::BytesMut; use ethers_core::utils::hex; use reth_rlp::{Decodable, DecodeError, Encodable}; + use secp256k1::{KeyPair, Secp256k1}; use std::str::FromStr; #[test] @@ -1564,4 +1595,32 @@ mod tests { tx.encode(&mut b); assert_eq!(s, hex::encode(&b)); } + + proptest::proptest! 
{ + #![proptest_config(proptest::prelude::ProptestConfig::with_cases(1))] + + #[test] + fn test_parallel_recovery_order(txes in proptest::collection::vec(proptest::prelude::any::(), *PARALLEL_SENDER_RECOVERY_THRESHOLD * 5)) { + let mut rng =rand::thread_rng(); + let secp = Secp256k1::new(); + let txes: Vec = txes.into_iter().map(|mut tx| { + if let Some(chain_id) = tx.chain_id() { + // Otherwise we might overflow when calculating `v` on `recalculate_hash` + tx.set_chain_id(chain_id % (u64::MAX / 2 - 36)); + } + + let key_pair = KeyPair::new(&secp, &mut rng); + + let signature = + sign_message(H256::from_slice(&key_pair.secret_bytes()[..]), tx.signature_hash()).unwrap(); + + TransactionSigned::from_transaction_and_signature(tx, signature) + }).collect(); + + let parallel_senders = TransactionSigned::recover_signers(&txes, txes.len()).unwrap(); + let seq_senders = txes.iter().map(|tx| tx.recover_signer()).collect::>>().unwrap(); + + assert_eq!(parallel_senders, seq_senders); + } + } } diff --git a/crates/revm/src/executor.rs b/crates/revm/src/executor.rs index 5ce714521fba..990e674bb7c5 100644 --- a/crates/revm/src/executor.rs +++ b/crates/revm/src/executor.rs @@ -84,11 +84,8 @@ where Err(BlockValidationError::SenderRecoveryError.into()) } } else { - body.iter() - .map(|tx| { - tx.recover_signer().ok_or(BlockValidationError::SenderRecoveryError.into()) - }) - .collect() + TransactionSigned::recover_signers(body, body.len()) + .ok_or(BlockValidationError::SenderRecoveryError.into()) } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index ba1474afd6db..1d80bf4662f8 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -22,7 +22,10 @@ use reth_db::{ transaction::{DbTx, DbTxMut}, BlockNumberList, DatabaseError, }; -use reth_interfaces::Result; +use reth_interfaces::{ + executor::{BlockExecutionError, BlockValidationError}, + Result, +}; use reth_primitives::{ keccak256, stage::{StageCheckpoint, StageId}, @@ -1908,14 +1911,11 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> BlockWriter for DatabaseProvider<' let tx_iter = if Some(block.body.len()) == senders_len { block.body.into_iter().zip(senders.unwrap()).collect::>() } else { - block - .body - .into_iter() - .map(|tx| { - let signer = tx.recover_signer(); - (tx, signer.unwrap_or_default()) - }) - .collect::>() + let senders = TransactionSigned::recover_signers(&block.body, block.body.len()).ok_or( + BlockExecutionError::Validation(BlockValidationError::SenderRecoveryError), + )?; + debug_assert_eq!(senders.len(), block.body.len(), "missing one or more senders"); + block.body.into_iter().zip(senders).collect() }; for (transaction, sender) in tx_iter { From 7cd7859a2be3504ffcd61bdc385abee78ac26114 Mon Sep 17 00:00:00 2001 From: libevm <95674753+libevm@users.noreply.github.com> Date: Tue, 15 Aug 2023 20:15:47 -0700 Subject: [PATCH 439/722] (fix)db commit on debug_traceCallMany (#4219) Co-authored-by: Ubuntu --- crates/rpc/rpc/src/debug.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index c3cba02e3ecf..147c2a9f359c 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -420,7 +420,9 @@ where &mut db, )?; - if bundles.peek().is_none() && transactions.peek().is_none() { + // If there is more transactions, commit the database + // If there is no transactions, but more 
bundles, commit to the database too + if transactions.peek().is_some() || bundles.peek().is_some() { db.commit(state); } results.push(trace); From 43601e349666a17a2563bf2ba8c6069628c07284 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 16 Aug 2023 14:17:47 +0200 Subject: [PATCH 440/722] chore: move bitflags to workspace (#4220) --- Cargo.lock | 2 +- Cargo.toml | 1 + crates/storage/libmdbx-rs/Cargo.toml | 2 +- crates/transaction-pool/Cargo.toml | 2 +- crates/transaction-pool/src/pool/state.rs | 8 ++++---- 5 files changed, 8 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4a8d706e779a..f3540c434fb7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6165,7 +6165,7 @@ dependencies = [ "assert_matches", "async-trait", "auto_impl", - "bitflags 1.3.2", + "bitflags 2.4.0", "criterion", "fnv", "futures-util", diff --git a/Cargo.toml b/Cargo.toml index 80438385205d..ff8f498c24a6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -113,6 +113,7 @@ ethers-middleware = { version = "2.0.8", default-features = false } ## misc bytes = "1.4" +bitflags = "2.3" tracing = "0.1.0" tracing-appender = "0.2" thiserror = "1.0.37" diff --git a/crates/storage/libmdbx-rs/Cargo.toml b/crates/storage/libmdbx-rs/Cargo.toml index 2895c56f2ed0..7b90987e71cf 100644 --- a/crates/storage/libmdbx-rs/Cargo.toml +++ b/crates/storage/libmdbx-rs/Cargo.toml @@ -12,7 +12,7 @@ repository.workspace = true name = "reth_libmdbx" [dependencies] -bitflags = "2" +bitflags.workspace = true byteorder = "1" derive_more = "0.99" indexmap = "1" diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index ff7e1ee799f5..b427eee0261b 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -38,7 +38,7 @@ thiserror.workspace = true tracing.workspace = true serde = { workspace = true, features = ["derive", "rc"], optional = true } fnv = "1.0.7" -bitflags = "1.3" +bitflags.workspace = true auto_impl = "1.0" # testing diff --git a/crates/transaction-pool/src/pool/state.rs b/crates/transaction-pool/src/pool/state.rs index 581611df99d3..05c8fd4c6a57 100644 --- a/crates/transaction-pool/src/pool/state.rs +++ b/crates/transaction-pool/src/pool/state.rs @@ -2,7 +2,7 @@ bitflags::bitflags! { /// Marker to represents the current state of a transaction in the pool and from which the corresponding sub-pool is derived, depending on what bits are set. /// /// This mirrors [erigon's ephemeral state field](https://github.com/ledgerwatch/erigon/wiki/Transaction-Pool-Design#ordering-function). - #[derive(Default)] + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default, PartialOrd, Ord)] pub(crate) struct TxState: u8 { /// Set to `1` if all ancestor transactions are pending. const NO_PARKED_ANCESTORS = 0b100000; @@ -20,11 +20,11 @@ bitflags::bitflags! { /// Set to 1 if `feeCap` of the transaction meets the requirement of the pending block. 
const ENOUGH_FEE_CAP_BLOCK = 0b000010; - const PENDING_POOL_BITS = Self::NO_PARKED_ANCESTORS.bits | Self::NO_NONCE_GAPS.bits | Self::ENOUGH_BALANCE.bits | Self::NOT_TOO_MUCH_GAS.bits | Self::ENOUGH_FEE_CAP_BLOCK.bits; + const PENDING_POOL_BITS = Self::NO_PARKED_ANCESTORS.bits()| Self::NO_NONCE_GAPS.bits() | Self::ENOUGH_BALANCE.bits() | Self::NOT_TOO_MUCH_GAS.bits() | Self::ENOUGH_FEE_CAP_BLOCK.bits(); - const BASE_FEE_POOL_BITS = Self::NO_PARKED_ANCESTORS.bits | Self::NO_NONCE_GAPS.bits | Self::ENOUGH_BALANCE.bits | Self::NOT_TOO_MUCH_GAS.bits; + const BASE_FEE_POOL_BITS = Self::NO_PARKED_ANCESTORS.bits() | Self::NO_NONCE_GAPS.bits() | Self::ENOUGH_BALANCE.bits() | Self::NOT_TOO_MUCH_GAS.bits(); - const QUEUED_POOL_BITS = Self::NO_PARKED_ANCESTORS.bits; + const QUEUED_POOL_BITS = Self::NO_PARKED_ANCESTORS.bits(); } } From d643d0303071467139609a53bbfd82f18178bd31 Mon Sep 17 00:00:00 2001 From: Eric Date: Wed, 16 Aug 2023 21:32:42 +0800 Subject: [PATCH 441/722] Move parking_lot dependency to workspace dep (#4228) --- Cargo.toml | 1 + crates/blockchain-tree/Cargo.toml | 4 ++-- crates/interfaces/Cargo.toml | 2 +- crates/net/dns/Cargo.toml | 2 +- crates/net/network/Cargo.toml | 2 +- crates/storage/db/Cargo.toml | 2 +- crates/storage/libmdbx-rs/Cargo.toml | 2 +- crates/storage/provider/Cargo.toml | 4 ++-- crates/transaction-pool/Cargo.toml | 2 +- 9 files changed, 11 insertions(+), 10 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index ff8f498c24a6..83f08b0f835e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -123,6 +123,7 @@ rand = "0.8.5" strum = "0.25" rayon = "1.7" itertools = "0.11" +parking_lot = "0.12" ### proc-macros proc-macro2 = "1.0" diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index ba025605557a..aacd3b9b380e 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -23,7 +23,7 @@ reth-provider.workspace = true reth-stages = { path = "../stages" } # common -parking_lot = { version = "0.12" } +parking_lot.workspace = true lru = "0.10" tracing.workspace = true @@ -36,7 +36,7 @@ reth-db = { path = "../storage/db", features = ["test-utils"] } reth-interfaces = { workspace = true, features = ["test-utils"] } reth-primitives = { workspace = true , features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } -parking_lot = "0.12" +parking_lot.workspace = true assert_matches = "1.5" tokio = { workspace = true, features = ["macros", "sync"] } diff --git a/crates/interfaces/Cargo.toml b/crates/interfaces/Cargo.toml index 9705f00bbff1..26bec4881da6 100644 --- a/crates/interfaces/Cargo.toml +++ b/crates/interfaces/Cargo.toml @@ -37,7 +37,7 @@ secp256k1 = { workspace = true, default-features = false, features = [ "rand", ], optional = true } modular-bitfield = "0.11.2" -parking_lot = "0.12.1" +parking_lot.workspace = true clap = { version = "4", features = ["derive"], optional = true } [dev-dependencies] diff --git a/crates/net/dns/Cargo.toml b/crates/net/dns/Cargo.toml index 14054205de95..eeb6d81fd7a7 100644 --- a/crates/net/dns/Cargo.toml +++ b/crates/net/dns/Cargo.toml @@ -32,7 +32,7 @@ linked_hash_set = "0.1" schnellru = "0.2" thiserror.workspace = true tracing.workspace = true -parking_lot = "0.12" +parking_lot.workspace = true serde = { workspace = true, optional = true } serde_with = { version = "2.1.0", optional = true } diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index a4008f5caabd..438b96cf9602 100644 --- a/crates/net/network/Cargo.toml +++ 
b/crates/net/network/Cargo.toml @@ -52,7 +52,7 @@ aquamarine = "0.3.0" tracing.workspace = true fnv = "1.0" thiserror.workspace = true -parking_lot = "0.12" +parking_lot.workspace = true async-trait.workspace = true linked_hash_set = "0.1" linked-hash-map = "0.5.6" diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index e980a8e71e0f..19b7a8a509e5 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -36,7 +36,7 @@ bytes.workspace = true page_size = "0.4.2" thiserror.workspace = true tempfile = { version = "3.3.0", optional = true } -parking_lot = "0.12" +parking_lot.workspace = true derive_more = "0.99" eyre = "0.6.8" diff --git a/crates/storage/libmdbx-rs/Cargo.toml b/crates/storage/libmdbx-rs/Cargo.toml index 7b90987e71cf..94c8c015781b 100644 --- a/crates/storage/libmdbx-rs/Cargo.toml +++ b/crates/storage/libmdbx-rs/Cargo.toml @@ -17,7 +17,7 @@ byteorder = "1" derive_more = "0.99" indexmap = "1" libc = "0.2" -parking_lot = "0.12" +parking_lot.workspace = true thiserror.workspace = true ffi = { package = "reth-mdbx-sys", path = "./mdbx-sys" } diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 62ceb6fa182a..3bd4bc57d3a8 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -28,7 +28,7 @@ auto_impl = "1.0" itertools.workspace = true pin-project.workspace = true derive_more = "0.99" -parking_lot = "0.12" +parking_lot.workspace = true # test-utils reth-rlp = { workspace = true, optional = true } @@ -38,7 +38,7 @@ reth-db = { path = "../db", features = ["test-utils"] } reth-primitives = { workspace = true, features = ["arbitrary", "test-utils"] } reth-rlp.workspace = true reth-trie = { path = "../../trie", features = ["test-utils"] } -parking_lot = "0.12" +parking_lot.workspace = true tempfile = "3.3" [features] diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index b427eee0261b..6a2704d8af38 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -28,7 +28,7 @@ reth-tasks.workspace = true # async/futures async-trait.workspace = true futures-util.workspace = true -parking_lot = "0.12" +parking_lot.workspace = true tokio = { workspace = true, default-features = false, features = ["sync"] } tokio-stream.workspace = true From b232e05b2645e11c21be61e114fba14bea1045fe Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 16 Aug 2023 18:17:20 +0200 Subject: [PATCH 442/722] fix: track established bonds (#4229) --- crates/net/discv4/src/config.rs | 15 ++++-- crates/net/discv4/src/lib.rs | 91 +++++++++++++++++++++------------ crates/net/discv4/src/table.rs | 35 +++++++++++++ 3 files changed, 106 insertions(+), 35 deletions(-) create mode 100644 crates/net/discv4/src/table.rs diff --git a/crates/net/discv4/src/config.rs b/crates/net/discv4/src/config.rs index 924a7b62371e..7a3addc39f15 100644 --- a/crates/net/discv4/src/config.rs +++ b/crates/net/discv4/src/config.rs @@ -31,7 +31,7 @@ pub struct Discv4Config { /// The number of allowed failures for `FindNode` requests. Default: 5. pub max_find_node_failures: u8, /// The interval to use when checking for expired nodes that need to be re-pinged. Default: - /// 300sec, 5min. + /// 10min. pub ping_interval: Duration, /// The duration of we consider a ping timed out. pub ping_expiration: Duration, @@ -69,6 +69,8 @@ pub struct Discv4Config { /// If configured and a `external_ip_resolver` is configured, try to resolve the external ip /// using this interval. 
pub resolve_external_ip_interval: Option, + /// The duration after which we consider a bond expired. + pub bond_expiration: Duration, } impl Discv4Config { @@ -121,16 +123,17 @@ impl Default for Discv4Config { /// Every outgoing request will eventually lead to an incoming response udp_ingress_message_buffer: 1024, max_find_node_failures: 5, - ping_interval: Duration::from_secs(300), + ping_interval: Duration::from_secs(60 * 10), /// unified expiration and timeout durations, mirrors geth's `expiration` duration ping_expiration: Duration::from_secs(20), + bond_expiration: Duration::from_secs(60 * 60), enr_expiration: Duration::from_secs(20), neighbours_expiration: Duration::from_secs(20), request_timeout: Duration::from_secs(20), lookup_interval: Duration::from_secs(20), ban_list: Default::default(), - ban_duration: Some(Duration::from_secs(3600)), // 1 hour + ban_duration: Some(Duration::from_secs(60 * 60)), // 1 hour bootstrap_nodes: Default::default(), enable_dht_random_walk: true, enable_lookup: true, @@ -200,6 +203,12 @@ impl Discv4ConfigBuilder { self } + /// Sets the expiration duration for a bond with a peer + pub fn bond_expiration(&mut self, duration: Duration) -> &mut Self { + self.config.bond_expiration = duration; + self + } + /// Whether to discover random nodes in the DHT. pub fn enable_dht_random_walk(&mut self, enable_dht_random_walk: bool) -> &mut Self { self.config.enable_dht_random_walk = enable_dht_random_walk; diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index f5bff545f754..1ddedb1b3a68 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -77,12 +77,15 @@ pub use config::{Discv4Config, Discv4ConfigBuilder}; mod node; use node::{kad_key, NodeKey}; +mod table; + // reexport NodeRecord primitive pub use reth_primitives::NodeRecord; #[cfg(any(test, feature = "test-utils"))] pub mod test_utils; +use crate::table::PongTable; use reth_net_nat::ResolveNatInterval; /// reexport to get public ip. pub use reth_net_nat::{external_ip, NatResolver}; @@ -117,6 +120,9 @@ const SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS: usize = (MAX_PACKET_SIZE - 109) / 91; /// Mirrors geth's `bondExpiration` of 24h const ENDPOINT_PROOF_EXPIRATION: Duration = Duration::from_secs(24 * 60 * 60); +/// Duration used to expire nodes from the routing table 1hr +const EXPIRE_DURATION: Duration = Duration::from_secs(60 * 60); + type EgressSender = mpsc::Sender<(Bytes, SocketAddr)>; type EgressReceiver = mpsc::Receiver<(Bytes, SocketAddr)>; @@ -405,6 +411,10 @@ pub struct Discv4Service { config: Discv4Config, /// Buffered events populated during poll. queued_events: VecDeque, + /// Keeps track of nodes from which we have received a `Pong` message. 
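+    /// A fresh `Pong` is the endpoint proof that `has_bond` checks for; entries are dropped
+    /// again once they are older than the configured bond expiration.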
+ received_pongs: PongTable, + /// Interval used to expire additionally tracked nodes + expire_interval: Interval, } impl Discv4Service { @@ -498,6 +508,8 @@ impl Discv4Service { resolve_external_ip_interval: config.resolve_external_ip_interval(), config, queued_events: Default::default(), + received_pongs: Default::default(), + expire_interval: tokio::time::interval(EXPIRE_DURATION), } } @@ -582,7 +594,7 @@ impl Discv4Service { ) { InsertResult::Failed(_) => {} _ => { - self.try_ping(record, PingReason::Initial); + self.try_ping(record, PingReason::InitialInsert); } } } @@ -735,6 +747,16 @@ impl Discv4Service { self.kbuckets.buckets_iter().fold(0, |count, bucket| count + bucket.num_connected()) } + /// Check if the peer has a bond + fn has_bond(&self, remote_id: PeerId, remote_ip: IpAddr) -> bool { + if let Some(timestamp) = self.received_pongs.last_pong(remote_id, remote_ip) { + if timestamp.elapsed() < self.config.bond_expiration { + return true + } + } + false + } + /// Update the entry on RE-ping /// /// On re-ping we check for a changed enr_seq if eip868 is enabled and when it changed we sent a @@ -860,7 +882,7 @@ impl Discv4Service { } _ => return false, } - self.try_ping(record, PingReason::Initial); + self.try_ping(record, PingReason::InitialInsert); true } @@ -903,6 +925,7 @@ impl Discv4Service { // Note: we only mark if the node is absent because the `last 12h` condition is handled by // the ping interval let mut is_new_insert = false; + let mut needs_bond = false; let old_enr = match self.kbuckets.entry(&key) { kbucket::Entry::Present(mut entry, _) => entry.value_mut().update_with_enr(ping.enr_sq), @@ -928,13 +951,14 @@ impl Discv4Service { // full, we can't add any additional peers to that bucket, but we still want // to emit an event that we discovered the node debug!(target : "discv4", ?record, "discovered new record but bucket is full"); - self.notify(DiscoveryUpdate::DiscoveredAtCapacity(record)) + self.notify(DiscoveryUpdate::DiscoveredAtCapacity(record)); + needs_bond = true; } - BucketInsertResult::FailedFilter | - BucketInsertResult::TooManyIncoming | - BucketInsertResult::NodeExists => { + BucketInsertResult::TooManyIncoming | BucketInsertResult::NodeExists => { + needs_bond = true; // insert unsuccessful but we still want to send the pong } + BucketInsertResult::FailedFilter => return, } None @@ -955,7 +979,9 @@ impl Discv4Service { // if node was absent also send a ping to establish the endpoint proof from our end if is_new_insert { - self.try_ping(record, PingReason::Initial); + self.try_ping(record, PingReason::InitialInsert); + } else if needs_bond { + self.try_ping(record, PingReason::EstablishBond); } else { // Request ENR if included in the ping match (ping.enr_sq, old_enr) { @@ -1053,10 +1079,16 @@ impl Discv4Service { Entry::Vacant(_) => return, }; + // keep track of the pong + self.received_pongs.on_pong(remote_id, remote_addr.ip()); + match reason { - PingReason::Initial => { + PingReason::InitialInsert => { self.update_on_pong(node, pong.enr_sq); } + PingReason::EstablishBond => { + // nothing to do here + } PingReason::RePing => { self.update_on_reping(node, pong.enr_sq); } @@ -1079,25 +1111,13 @@ impl Discv4Service { // ping's expiration timestamp is in the past return } + if node_id == *self.local_peer_id() { + // ignore find node requests to ourselves + return + } - let key = kad_key(node_id); - - match self.kbuckets.entry(&key) { - kbucket::Entry::Present(_, status) => { - if status.is_connected() { - self.respond_closest(msg.id, remote_addr) - } - } 
- kbucket::Entry::Pending(_, status) => { - if status.is_connected() { - self.respond_closest(msg.id, remote_addr) - } - } - kbucket::Entry::Absent(_) => { - // no existing endpoint proof - // > To guard against traffic amplification attacks, Neighbors replies should only be sent if the sender of FindNode has been verified by the endpoint proof procedure. - } - kbucket::Entry::SelfEntry => {} + if self.has_bond(node_id, remote_addr.ip()) { + self.respond_closest(msg.id, remote_addr) } } @@ -1144,8 +1164,7 @@ impl Discv4Service { return } - let key = kad_key(id); - if let BucketEntry::Present(_, _) = self.kbuckets.entry(&key) { + if self.has_bond(id, remote_addr.ip()) { self.send_packet( Message::EnrResponse(EnrResponse { request_hash, @@ -1543,10 +1562,15 @@ impl Discv4Service { self.ping_buffered(); // evict expired nodes - if self.evict_expired_requests_interval.poll_tick(cx).is_ready() { + while self.evict_expired_requests_interval.poll_tick(cx).is_ready() { self.evict_expired_requests(Instant::now()) } + // evict expired nodes + while self.expire_interval.poll_tick(cx).is_ready() { + self.received_pongs.evict_expired(Instant::now(), EXPIRE_DURATION) + } + if self.queued_events.is_empty() { return Poll::Pending } @@ -1956,8 +1980,11 @@ impl NodeEntry { /// Represents why a ping is issued enum PingReason { - /// Initial ping to a previously unknown peer. - Initial, + /// Initial ping to a previously unknown peer that was inserted into the table. + InitialInsert, + /// Initial ping to a previously unknown peer that didn't fit into the table. But we still want + /// to establish a bond. + EstablishBond, /// Re-ping a peer.. RePing, /// Part of a lookup to ensure endpoint is proven. @@ -2315,7 +2342,7 @@ mod tests { sent_at: Instant::now(), node: service_2.local_node_record, echo_hash, - reason: PingReason::Initial, + reason: PingReason::InitialInsert, }; service_1.pending_pings.insert(*service_2.local_peer_id(), ping_request); diff --git a/crates/net/discv4/src/table.rs b/crates/net/discv4/src/table.rs new file mode 100644 index 000000000000..c7d75778ca75 --- /dev/null +++ b/crates/net/discv4/src/table.rs @@ -0,0 +1,35 @@ +//! Additional support for tracking nodes. + +use reth_primitives::PeerId; +use std::{collections::HashMap, net::IpAddr, time::Instant}; + +/// Keeps track of nodes from which we have received a `Pong` message. +#[derive(Debug, Clone, Default)] +pub(crate) struct PongTable { + /// The nodes we have received a `Pong` from. + nodes: HashMap, +} + +impl PongTable { + /// Updates the timestamp we received a `Pong` from the given node. + pub(crate) fn on_pong(&mut self, remote_id: PeerId, remote_ip: IpAddr) { + let key = NodeKey { remote_id, remote_ip }; + self.nodes.insert(key, Instant::now()); + } + + /// Returns the timestamp we received a `Pong` from the given node. + pub(crate) fn last_pong(&self, remote_id: PeerId, remote_ip: IpAddr) -> Option { + self.nodes.get(&NodeKey { remote_id, remote_ip }).copied() + } + + /// Removes all nodes from the table that have not sent a `Pong` for at least `timeout`. 
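+    ///
+    /// A minimal usage sketch (illustrative, not part of the patch):
+    ///
+    /// ```ignore
+    /// let mut table = PongTable::default();
+    /// let (peer, ip) = (PeerId::random(), IpAddr::from([127, 0, 0, 1]));
+    /// table.on_pong(peer, ip);
+    /// // a fresh entry is younger than the timeout, so it survives eviction
+    /// table.evict_expired(Instant::now(), Duration::from_secs(60 * 60));
+    /// assert!(table.last_pong(peer, ip).is_some());
+    /// ```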
+ pub(crate) fn evict_expired(&mut self, now: Instant, timeout: std::time::Duration) { + self.nodes.retain(|_, last_pong| now - *last_pong < timeout); + } +} + +#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] +pub(crate) struct NodeKey { + pub(crate) remote_id: PeerId, + pub(crate) remote_ip: IpAddr, +} From 491b453edbd7bccb1a50539e9c2d2e8a119b15aa Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 16 Aug 2023 18:26:06 +0200 Subject: [PATCH 443/722] feat: track node record (#4224) --- Cargo.lock | 1 + crates/net/discv4/Cargo.toml | 1 + crates/net/discv4/src/lib.rs | 185 ++++++++++++++++++++--------------- 3 files changed, 106 insertions(+), 81 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f3540c434fb7..968bff1ad9c3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5458,6 +5458,7 @@ dependencies = [ "enr 0.8.1", "generic-array", "hex", + "parking_lot 0.12.1", "rand 0.8.5", "reth-net-common", "reth-net-nat", diff --git a/crates/net/discv4/Cargo.toml b/crates/net/discv4/Cargo.toml index 2c5bd92cbe98..bd9a1f95dc54 100644 --- a/crates/net/discv4/Cargo.toml +++ b/crates/net/discv4/Cargo.toml @@ -30,6 +30,7 @@ tokio-stream.workspace = true # misc tracing.workspace = true thiserror.workspace = true +parking_lot.workspace = true hex = "0.4" rand = { workspace = true, optional = true } generic-array = "0.14" diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 1ddedb1b3a68..478363da40bb 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -41,6 +41,7 @@ use discv5::{ ConnectionDirection, ConnectionState, }; use enr::{Enr, EnrBuilder}; +use parking_lot::Mutex; use proto::{EnrRequest, EnrResponse, EnrWrapper}; use reth_primitives::{ bytes::{Bytes, BytesMut}, @@ -137,7 +138,11 @@ pub struct Discv4 { /// The address of the udp socket local_addr: SocketAddr, /// channel to send commands over to the service - to_service: mpsc::Sender, + to_service: mpsc::UnboundedSender, + /// Tracks the local node record. + /// + /// This includes the currently tracked external IP address of the node. + node_record: Arc>, } // === impl Discv4 === @@ -163,10 +168,17 @@ impl Discv4 { /// NOTE: this is only intended for test setups. #[cfg(feature = "test-utils")] pub fn noop() -> Self { - let (to_service, _rx) = mpsc::channel(1); + let (to_service, _rx) = mpsc::unbounded_channel(); let local_addr = (IpAddr::from(std::net::Ipv4Addr::UNSPECIFIED), DEFAULT_DISCOVERY_PORT).into(); - Self { local_addr, to_service } + Self { + local_addr, + to_service, + node_record: Arc::new(Mutex::new(NodeRecord::new( + "127.0.0.1:3030".parse().unwrap(), + PeerId::random(), + ))), + } } /// Binds a new UdpSocket and creates the service @@ -218,10 +230,8 @@ impl Discv4 { local_node_record.udp_port = local_addr.port(); trace!( target : "discv4", ?local_addr,"opened UDP socket"); - let (to_service, rx) = mpsc::channel(100); - let service = - Discv4Service::new(socket, local_addr, local_node_record, secret_key, config, Some(rx)); - let discv4 = Discv4 { local_addr, to_service }; + let service = Discv4Service::new(socket, local_addr, local_node_record, secret_key, config); + let discv4 = service.handle(); Ok((discv4, service)) } @@ -230,9 +240,21 @@ impl Discv4 { self.local_addr } + /// Returns the [NodeRecord] of the local node. + /// + /// This includes the currently tracked external IP address of the node. + pub fn node_record(&self) -> NodeRecord { + *self.node_record.lock() + } + + /// Returns the currently tracked external IP of the node. 
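+    ///
+    /// Together with [`Discv4::node_record`] this lets callers observe NAT-resolved address
+    /// changes without querying the service (illustrative):
+    ///
+    /// ```ignore
+    /// let record = discv4.node_record();
+    /// assert_eq!(discv4.external_ip(), record.address);
+    /// ```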
+ pub fn external_ip(&self) -> IpAddr { + self.node_record.lock().address + } + /// Sets the [Interval] used for periodically looking up targets over the network pub fn set_lookup_interval(&self, duration: Duration) { - self.safe_send_to_service(Discv4Command::SetLookupInterval(duration)) + self.send_to_service(Discv4Command::SetLookupInterval(duration)) } /// Starts a `FindNode` recursive lookup that locates the closest nodes to the given node id. See also: @@ -261,7 +283,7 @@ impl Discv4 { async fn lookup_node(&self, node_id: Option) -> Result, Discv4Error> { let (tx, rx) = oneshot::channel(); let cmd = Discv4Command::Lookup { node_id, tx: Some(tx) }; - self.to_service.send(cmd).await?; + self.to_service.send(cmd)?; Ok(rx.await?) } @@ -274,13 +296,13 @@ impl Discv4 { /// Removes the peer from the table, if it exists. pub fn remove_peer(&self, node_id: PeerId) { let cmd = Discv4Command::Remove(node_id); - self.safe_send_to_service(cmd); + self.send_to_service(cmd); } /// Adds the node to the table, if it is not already present. pub fn add_node(&self, node_record: NodeRecord) { let cmd = Discv4Command::Add(node_record); - self.safe_send_to_service(cmd); + self.send_to_service(cmd); } /// Adds the peer and id to the ban list. @@ -288,14 +310,14 @@ impl Discv4 { /// This will prevent any future inclusion in the table pub fn ban(&self, node_id: PeerId, ip: IpAddr) { let cmd = Discv4Command::Ban(node_id, ip); - self.safe_send_to_service(cmd); + self.send_to_service(cmd); } /// Adds the ip to the ban list. /// /// This will prevent any future inclusion in the table pub fn ban_ip(&self, ip: IpAddr) { let cmd = Discv4Command::BanIp(ip); - self.safe_send_to_service(cmd); + self.send_to_service(cmd); } /// Adds the peer to the ban list. @@ -303,7 +325,7 @@ impl Discv4 { /// This will prevent any future inclusion in the table pub fn ban_node(&self, node_id: PeerId) { let cmd = Discv4Command::BanPeer(node_id); - self.safe_send_to_service(cmd); + self.send_to_service(cmd); } /// Sets the tcp port @@ -311,7 +333,7 @@ impl Discv4 { /// This will update our [`NodeRecord`]'s tcp port. pub fn set_tcp_port(&self, port: u16) { let cmd = Discv4Command::SetTcpPort(port); - self.safe_send_to_service(cmd); + self.send_to_service(cmd); } /// Sets the pair in the EIP-868 [`Enr`] of the node. @@ -321,7 +343,7 @@ impl Discv4 { /// CAUTION: The value **must** be rlp encoded pub fn set_eip868_rlp_pair(&self, key: Vec, rlp: Bytes) { let cmd = Discv4Command::SetEIP868RLPPair { key, rlp }; - self.safe_send_to_service(cmd); + self.send_to_service(cmd); } /// Sets the pair in the EIP-868 [`Enr`] of the node. @@ -333,13 +355,9 @@ impl Discv4 { self.set_eip868_rlp_pair(key, buf.freeze()) } - fn safe_send_to_service(&self, cmd: Discv4Command) { - // we want this message to always arrive, so we clone the sender - let _ = self.to_service.clone().try_send(cmd); - } - + #[inline] fn send_to_service(&self, cmd: Discv4Command) { - let _ = self.to_service.try_send(cmd).map_err(|err| { + let _ = self.to_service.send(cmd).map_err(|err| { debug!( target : "discv4", %err, @@ -352,7 +370,7 @@ impl Discv4 { pub async fn update_stream(&self) -> Result, Discv4Error> { let (tx, rx) = oneshot::channel(); let cmd = Discv4Command::Updates(tx); - self.to_service.send(cmd).await?; + self.to_service.send(cmd)?; Ok(rx.await?) } } @@ -366,6 +384,8 @@ pub struct Discv4Service { local_eip_868_enr: Enr, /// Local ENR of the server. local_node_record: NodeRecord, + /// Keeps track of the node record of the local node. 
+ shared_node_record: Arc>, /// The secret key used to sign payloads secret_key: SecretKey, /// The UDP socket for sending and receiving messages. @@ -393,8 +413,10 @@ pub struct Discv4Service { pending_find_nodes: HashMap, /// Currently active ENR requests pending_enr_requests: HashMap, - /// Commands listener - commands_rx: Option>, + /// Copy of he sender half of the commands channel for [Discv4] + to_service: mpsc::UnboundedSender, + /// Receiver half of the commands channel for [Discv4] + commands_rx: mpsc::UnboundedReceiver, /// All subscribers for table updates update_listeners: Vec>, /// The interval when to trigger lookups @@ -425,7 +447,6 @@ impl Discv4Service { local_node_record: NodeRecord, secret_key: SecretKey, config: Discv4Config, - commands_rx: Option>, ) -> Self { let socket = Arc::new(socket); let (ingress_tx, ingress_rx) = mpsc::channel(config.udp_ingress_message_buffer); @@ -485,10 +506,15 @@ impl Discv4Service { builder.build(&secret_key).expect("v4 is set; qed") }; + let (to_service, commands_rx) = mpsc::unbounded_channel(); + + let shared_node_record = Arc::new(Mutex::new(local_node_record)); + Discv4Service { local_address, local_eip_868_enr, local_node_record, + shared_node_record, _socket: socket, kbuckets, secret_key, @@ -500,6 +526,7 @@ impl Discv4Service { pending_find_nodes: Default::default(), pending_enr_requests: Default::default(), commands_rx, + to_service, update_listeners: Vec::with_capacity(1), lookup_interval: self_lookup_interval, ping_interval, @@ -513,6 +540,15 @@ impl Discv4Service { } } + /// Returns the frontend handle that can communicate with the service via commands. + pub fn handle(&self) -> Discv4 { + Discv4 { + local_addr: self.local_address, + to_service: self.to_service.clone(), + node_record: self.shared_node_record.clone(), + } + } + /// Returns the current enr sequence fn enr_seq(&self) -> Option { (self.config.enable_eip868).then(|| self.local_eip_868_enr.seq()) @@ -530,6 +566,8 @@ impl Discv4Service { debug!(target : "discv4", ?external_ip, "Updating external ip"); self.local_node_record.address = external_ip; let _ = self.local_eip_868_enr.set_ip(external_ip, &self.secret_key); + let mut lock = self.shared_node_record.lock(); + *lock = self.local_node_record; debug!(target : "discv4", enr=?self.local_eip_868_enr, "Updated local ENR"); } } @@ -1457,64 +1495,49 @@ impl Discv4Service { self.set_external_ip_addr(ip); } - // process all incoming commands - if let Some(mut rx) = self.commands_rx.take() { - let mut is_done = false; - while let Poll::Ready(cmd) = rx.poll_recv(cx) { - if let Some(cmd) = cmd { - match cmd { - Discv4Command::Add(enr) => { - self.add_node(enr); - } - Discv4Command::Lookup { node_id, tx } => { - let node_id = node_id.unwrap_or(self.local_node_record.id); - self.lookup_with(node_id, tx); - } - Discv4Command::SetLookupInterval(duration) => { - self.set_lookup_interval(duration); - } - Discv4Command::Updates(tx) => { - let rx = self.update_stream(); - let _ = tx.send(rx); - } - Discv4Command::BanPeer(node_id) => self.ban_node(node_id), - Discv4Command::Remove(node_id) => { - self.remove_node(node_id); - } - Discv4Command::Ban(node_id, ip) => { - self.ban_node(node_id); - self.ban_ip(ip); - } - Discv4Command::BanIp(ip) => { - self.ban_ip(ip); - } - Discv4Command::SetEIP868RLPPair { key, rlp } => { - debug!(target: "discv4", key=%String::from_utf8_lossy(&key), "Update EIP-868 extension pair"); - - let _ = self.local_eip_868_enr.insert_raw_rlp( - key, - rlp, - &self.secret_key, - ); - } - 
Discv4Command::SetTcpPort(port) => { - debug!(target: "discv4", %port, "Update tcp port"); - self.local_node_record.tcp_port = port; - if self.local_node_record.address.is_ipv4() { - let _ = self.local_eip_868_enr.set_tcp4(port, &self.secret_key); - } else { - let _ = self.local_eip_868_enr.set_tcp6(port, &self.secret_key); - } - } + // process all incoming commands, this channel can never close + while let Poll::Ready(Some(cmd)) = self.commands_rx.poll_recv(cx) { + match cmd { + Discv4Command::Add(enr) => { + self.add_node(enr); + } + Discv4Command::Lookup { node_id, tx } => { + let node_id = node_id.unwrap_or(self.local_node_record.id); + self.lookup_with(node_id, tx); + } + Discv4Command::SetLookupInterval(duration) => { + self.set_lookup_interval(duration); + } + Discv4Command::Updates(tx) => { + let rx = self.update_stream(); + let _ = tx.send(rx); + } + Discv4Command::BanPeer(node_id) => self.ban_node(node_id), + Discv4Command::Remove(node_id) => { + self.remove_node(node_id); + } + Discv4Command::Ban(node_id, ip) => { + self.ban_node(node_id); + self.ban_ip(ip); + } + Discv4Command::BanIp(ip) => { + self.ban_ip(ip); + } + Discv4Command::SetEIP868RLPPair { key, rlp } => { + debug!(target: "discv4", key=%String::from_utf8_lossy(&key), "Update EIP-868 extension pair"); + + let _ = self.local_eip_868_enr.insert_raw_rlp(key, rlp, &self.secret_key); + } + Discv4Command::SetTcpPort(port) => { + debug!(target: "discv4", %port, "Update tcp port"); + self.local_node_record.tcp_port = port; + if self.local_node_record.address.is_ipv4() { + let _ = self.local_eip_868_enr.set_tcp4(port, &self.secret_key); + } else { + let _ = self.local_eip_868_enr.set_tcp6(port, &self.secret_key); } - } else { - is_done = true; - break } } - if !is_done { - self.commands_rx = Some(rx); - } } // process all incoming datagrams From 7a922a6ed0e50ee268d478a9fdfabae1534e2e05 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 16 Aug 2023 18:26:16 +0200 Subject: [PATCH 444/722] perf: create buffer once (#4226) --- crates/net/discv4/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 478363da40bb..20b729330e9d 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -1656,8 +1656,8 @@ pub(crate) async fn receive_loop(udp: Arc, tx: IngressSender, local_i }); }; + let mut buf = [0; MAX_PACKET_SIZE]; loop { - let mut buf = [0; MAX_PACKET_SIZE]; let res = udp.recv_from(&mut buf).await; match res { Err(err) => { From 6edbc0eeaf30aa9cc2e12a03ab93167be8e809e8 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 16 Aug 2023 18:28:13 +0200 Subject: [PATCH 445/722] chore(deps): bump boa + thiserror (#4234) --- Cargo.lock | 51 ++++++++++++++++++-------- Cargo.toml | 2 +- crates/revm/revm-inspectors/Cargo.toml | 4 +- 3 files changed, 39 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 968bff1ad9c3..c89d13f23238 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -723,7 +723,7 @@ dependencies = [ [[package]] name = "boa_ast" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#a3b46545a2a09f9ac81fd83ac6b180934c728f61" +source = "git+https://github.com/boa-dev/boa#06bb71aa50e7163f6b38767190ba30177086685f" dependencies = [ "bitflags 2.4.0", "boa_interner", @@ -736,7 +736,7 @@ dependencies = [ [[package]] name = "boa_engine" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#a3b46545a2a09f9ac81fd83ac6b180934c728f61" +source = 
"git+https://github.com/boa-dev/boa#06bb71aa50e7163f6b38767190ba30177086685f" dependencies = [ "bitflags 2.4.0", "boa_ast", @@ -755,7 +755,7 @@ dependencies = [ "num-bigint", "num-integer", "num-traits", - "num_enum", + "num_enum 0.7.0", "once_cell", "pollster", "rand 0.8.5", @@ -774,7 +774,7 @@ dependencies = [ [[package]] name = "boa_gc" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#a3b46545a2a09f9ac81fd83ac6b180934c728f61" +source = "git+https://github.com/boa-dev/boa#06bb71aa50e7163f6b38767190ba30177086685f" dependencies = [ "boa_macros", "boa_profiler", @@ -785,7 +785,7 @@ dependencies = [ [[package]] name = "boa_icu_provider" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#a3b46545a2a09f9ac81fd83ac6b180934c728f61" +source = "git+https://github.com/boa-dev/boa#06bb71aa50e7163f6b38767190ba30177086685f" dependencies = [ "icu_collections", "icu_normalizer", @@ -798,7 +798,7 @@ dependencies = [ [[package]] name = "boa_interner" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#a3b46545a2a09f9ac81fd83ac6b180934c728f61" +source = "git+https://github.com/boa-dev/boa#06bb71aa50e7163f6b38767190ba30177086685f" dependencies = [ "boa_gc", "boa_macros", @@ -813,7 +813,7 @@ dependencies = [ [[package]] name = "boa_macros" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#a3b46545a2a09f9ac81fd83ac6b180934c728f61" +source = "git+https://github.com/boa-dev/boa#06bb71aa50e7163f6b38767190ba30177086685f" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", @@ -824,7 +824,7 @@ dependencies = [ [[package]] name = "boa_parser" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#a3b46545a2a09f9ac81fd83ac6b180934c728f61" +source = "git+https://github.com/boa-dev/boa#06bb71aa50e7163f6b38767190ba30177086685f" dependencies = [ "bitflags 2.4.0", "boa_ast", @@ -844,7 +844,7 @@ dependencies = [ [[package]] name = "boa_profiler" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#a3b46545a2a09f9ac81fd83ac6b180934c728f61" +source = "git+https://github.com/boa-dev/boa#06bb71aa50e7163f6b38767190ba30177086685f" [[package]] name = "boyer-moore-magiclen" @@ -2265,7 +2265,7 @@ dependencies = [ "generic-array", "hex", "k256", - "num_enum", + "num_enum 0.6.1", "once_cell", "open-fastrlp", "rand 0.8.5", @@ -4245,7 +4245,16 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a015b430d3c108a207fd776d2e2196aaf8b1cf8cf93253e3a097ff3085076a1" dependencies = [ - "num_enum_derive", + "num_enum_derive 0.6.1", +] + +[[package]] +name = "num_enum" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70bf6736f74634d299d00086f02986875b3c2d924781a6a2cb6c201e73da0ceb" +dependencies = [ + "num_enum_derive 0.7.0", ] [[package]] @@ -4260,6 +4269,18 @@ dependencies = [ "syn 2.0.28", ] +[[package]] +name = "num_enum_derive" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56ea360eafe1022f7cc56cd7b869ed57330fb2453d0c7831d99b74c65d2f5597" +dependencies = [ + "proc-macro-crate", + "proc-macro2 1.0.66", + "quote 1.0.32", + "syn 2.0.28", +] + [[package]] name = "num_threads" version = "0.1.6" @@ -7412,18 +7433,18 @@ checksum = "aac81b6fd6beb5884b0cf3321b8117e6e5d47ecb6fc89f414cfdcca8b2fe2dd8" [[package]] name = "thiserror" -version = "1.0.44" +version = "1.0.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "611040a08a0439f8248d1990b111c95baa9c704c805fa1f62104b39655fd7f90" +checksum = 
"d9207952ae1a003f42d3d5e892dac3c6ba42aa6ac0c79a6a91a2b5cb4253e75c" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.44" +version = "1.0.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090198534930841fab3a5d1bb637cde49e339654e606195f8d9c76eeb081dc96" +checksum = "f1728216d3244de4f14f14f8c15c79be1a7c67867d28d69b719690e2a19fb445" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.32", diff --git a/Cargo.toml b/Cargo.toml index 83f08b0f835e..2e60c4b85770 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -116,7 +116,7 @@ bytes = "1.4" bitflags = "2.3" tracing = "0.1.0" tracing-appender = "0.2" -thiserror = "1.0.37" +thiserror = "1.0" serde_json = "1.0.94" serde = { version = "1.0", default-features = false } rand = "0.8.5" diff --git a/crates/revm/revm-inspectors/Cargo.toml b/crates/revm/revm-inspectors/Cargo.toml index 0c411cccdb07..3627a826545b 100644 --- a/crates/revm/revm-inspectors/Cargo.toml +++ b/crates/revm/revm-inspectors/Cargo.toml @@ -18,8 +18,8 @@ revm.workspace = true hashbrown = "0.13" serde = { workspace = true, features = ["derive"] } -thiserror = {version = "1.0", optional = true } -serde_json = { version = "1.0", optional = true } +thiserror = { workspace = true, optional = true } +serde_json = { workspace = true, optional = true } # js-tracing-inspector boa_engine = { git = "https://github.com/boa-dev/boa", optional = true } From 8a2c3abd2a5c8b3d3b299d9f40916fec50fec207 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Wed, 16 Aug 2023 17:38:36 +0100 Subject: [PATCH 446/722] feat(pipeline): prune receipts based on log emitters (#4044) --- bin/reth/src/args/pruning_args.rs | 18 ++- bin/reth/src/debug_cmd/execution.rs | 2 +- bin/reth/src/node/mod.rs | 7 +- crates/config/src/config.rs | 2 +- crates/primitives/src/lib.rs | 5 +- crates/primitives/src/prune/mod.rs | 40 ++++++- crates/primitives/src/prune/mode.rs | 109 +++++++++++++++++- crates/primitives/src/prune/part.rs | 6 +- crates/primitives/src/prune/target.rs | 45 +++----- crates/stages/src/stages/execution.rs | 2 +- crates/stages/src/stages/mod.rs | 12 +- crates/storage/provider/src/post_state/mod.rs | 53 ++++++++- 12 files changed, 247 insertions(+), 54 deletions(-) diff --git a/bin/reth/src/args/pruning_args.rs b/bin/reth/src/args/pruning_args.rs index 1c33ce644726..b9a783798901 100644 --- a/bin/reth/src/args/pruning_args.rs +++ b/bin/reth/src/args/pruning_args.rs @@ -2,7 +2,9 @@ use clap::Args; use reth_config::config::PruneConfig; -use reth_primitives::{ChainSpec, PruneMode, PruneModes}; +use reth_primitives::{ + ChainSpec, ContractLogsPruneConfig, PruneMode, PruneModes, MINIMUM_PRUNING_DISTANCE, +}; use std::sync::Arc; /// Parameters for pruning and full node @@ -25,14 +27,22 @@ impl PruningArgs { Some(PruneConfig { block_interval: 5, parts: PruneModes { - sender_recovery: Some(PruneMode::Distance(128)), + sender_recovery: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), transaction_lookup: None, receipts: _chain_spec .deposit_contract .as_ref() .map(|contract| PruneMode::Before(contract.block)), - account_history: Some(PruneMode::Distance(128)), - storage_history: Some(PruneMode::Distance(128)), + account_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), + storage_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), + contract_logs_filter: ContractLogsPruneConfig( + _chain_spec + .deposit_contract + .as_ref() + .map(|contract| (contract.address, PruneMode::Before(contract.block))) + 
.into_iter() + .collect(), + ), }, }) } else { diff --git a/bin/reth/src/debug_cmd/execution.rs b/bin/reth/src/debug_cmd/execution.rs index 3550413897ef..db6f2146aa31 100644 --- a/bin/reth/src/debug_cmd/execution.rs +++ b/bin/reth/src/debug_cmd/execution.rs @@ -141,7 +141,7 @@ impl Command { .clean_threshold .max(stage_conf.account_hashing.clean_threshold) .max(stage_conf.storage_hashing.clean_threshold), - config.prune.map(|prune| prune.parts).unwrap_or_default(), + config.prune.as_ref().map(|prune| prune.parts.clone()).unwrap_or_default(), )), ) .build(db, self.chain.clone()); diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 0fd37caac6b0..0837cfe291ba 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -337,7 +337,8 @@ impl NodeCommand { None }; - let prune_config = self.pruning.prune_config(Arc::clone(&self.chain))?.or(config.prune); + let prune_config = + self.pruning.prune_config(Arc::clone(&self.chain))?.or(config.prune.clone()); // Configure the pipeline let (mut pipeline, client) = if self.dev.dev { @@ -373,7 +374,7 @@ impl NodeCommand { db.clone(), &ctx.task_executor, metrics_tx, - prune_config, + prune_config.clone(), max_block, ) .await?; @@ -393,7 +394,7 @@ impl NodeCommand { db.clone(), &ctx.task_executor, metrics_tx, - prune_config, + prune_config.clone(), max_block, ) .await?; diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index 3fb414e8ee4e..cc54312553ef 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -279,7 +279,7 @@ impl Default for IndexHistoryConfig { } /// Pruning configuration. -#[derive(Debug, Clone, Copy, Deserialize, PartialEq, Serialize)] +#[derive(Debug, Clone, Deserialize, PartialEq, Serialize)] #[serde(default)] pub struct PruneConfig { /// Minimum pruning interval measured in blocks. diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 300a215f4af2..065dd2500cfc 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -79,7 +79,10 @@ pub use net::{ SEPOLIA_BOOTNODES, }; pub use peer::{PeerId, WithPeerId}; -pub use prune::{PruneCheckpoint, PruneMode, PruneModes, PrunePart, PrunePartError}; +pub use prune::{ + ContractLogsPruneConfig, PruneCheckpoint, PruneMode, PruneModes, PrunePart, PrunePartError, + MINIMUM_PRUNING_DISTANCE, +}; pub use receipt::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef}; pub use revm_primitives::JumpMap; pub use serde_helper::JsonU256; diff --git a/crates/primitives/src/prune/mod.rs b/crates/primitives/src/prune/mod.rs index a3bcb959627e..bec2bfa26254 100644 --- a/crates/primitives/src/prune/mod.rs +++ b/crates/primitives/src/prune/mod.rs @@ -3,7 +3,45 @@ mod mode; mod part; mod target; +use crate::{Address, BlockNumber}; pub use checkpoint::PruneCheckpoint; pub use mode::PruneMode; pub use part::{PrunePart, PrunePartError}; -pub use target::PruneModes; +use serde::{Deserialize, Serialize}; +use std::collections::BTreeMap; +pub use target::{PruneModes, MINIMUM_PRUNING_DISTANCE}; + +/// Configuration for pruning receipts not associated with logs emitted by the specified contracts. +#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] +pub struct ContractLogsPruneConfig(pub BTreeMap); + +impl ContractLogsPruneConfig { + /// Checks if the configuration is empty + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Given the `tip` block number, consolidates the structure so it can easily be queried for + /// filtering across a range of blocks. 
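+    ///
+    /// For example, with `tip = 1000` (and `MINIMUM_PRUNING_DISTANCE = 128`), an address
+    /// configured with `PruneMode::Before(800)` is grouped under key `799`, one with
+    /// `PruneMode::Distance(128)` under key `872`, and a mode with nothing to prune yet
+    /// under key `0`.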
+    ///
+    /// The [`BlockNumber`] key of the map should be viewed as `PruneMode::Before(block)`.
+    pub fn group_by_block(
+        &self,
+        tip: BlockNumber,
+    ) -> Result<BTreeMap<BlockNumber, Vec<&Address>>, PrunePartError> {
+        let mut map = BTreeMap::new();
+        for (address, mode) in self.0.iter() {
+            // Getting `None` means that there is nothing to prune yet, so we need to include it
+            // in the BTreeMap (block = 0); otherwise it would be excluded.
+            // Reminder that this BTreeMap works as an inclusion list that excludes (prunes) all
+            // other receipts.
+            let block = mode
+                .prune_target_block(tip, MINIMUM_PRUNING_DISTANCE, PrunePart::ContractLogs)?
+                .map(|(block, _)| block)
+                .unwrap_or_default();
+
+            map.entry(block).or_insert_with(Vec::new).push(address)
+        }
+        Ok(map)
+    }
+}
diff --git a/crates/primitives/src/prune/mode.rs b/crates/primitives/src/prune/mode.rs
index b62a39041b8c..3e39f876d7ce 100644
--- a/crates/primitives/src/prune/mode.rs
+++ b/crates/primitives/src/prune/mode.rs
@@ -1,4 +1,4 @@
-use crate::BlockNumber;
+use crate::{BlockNumber, PrunePart, PrunePartError};
 use reth_codecs::{main_codec, Compact};

 /// Prune mode.
@@ -14,6 +14,43 @@ pub enum PruneMode {
     Before(BlockNumber),
 }

+impl PruneMode {
+    /// Returns the block up to which pruning for this variant needs to be done, inclusive,
+    /// according to the provided tip.
+    pub fn prune_target_block(
+        &self,
+        tip: BlockNumber,
+        min_blocks: u64,
+        prune_part: PrunePart,
+    ) -> Result<Option<(BlockNumber, PruneMode)>, PrunePartError> {
+        let result = match self {
+            PruneMode::Full if min_blocks == 0 => Some((tip, *self)),
+            PruneMode::Distance(distance) if *distance > tip => None, // Nothing to prune yet
+            PruneMode::Distance(distance) if *distance >= min_blocks => {
+                Some((tip - distance, *self))
+            }
+            PruneMode::Before(n) if *n > tip => None, // Nothing to prune yet
+            PruneMode::Before(n) if tip - n >= min_blocks => Some((n - 1, *self)),
+            _ => return Err(PrunePartError::Configuration(prune_part)),
+        };
+        Ok(result)
+    }
+
+    /// Check if the target block should be pruned according to the provided prune mode and tip.
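+    ///
+    /// For example, with `tip = 1000`, `Distance(128)` prunes blocks `< 872`, while
+    /// `Before(500)` prunes blocks `< 500`.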
+ pub fn should_prune(&self, block: BlockNumber, tip: BlockNumber) -> bool { + match self { + PruneMode::Full => true, + PruneMode::Distance(distance) => { + if *distance > tip { + return false + } + block < tip - *distance + } + PruneMode::Before(n) => *n > block, + } + } +} + #[cfg(test)] impl Default for PruneMode { fn default() -> Self { @@ -23,10 +60,78 @@ impl Default for PruneMode { #[cfg(test)] mod tests { - use crate::prune::PruneMode; + use crate::{prune::PruneMode, PrunePart, PrunePartError, MINIMUM_PRUNING_DISTANCE}; use assert_matches::assert_matches; use serde::Deserialize; + #[test] + fn test_prune_target_block() { + let tip = 1000; + let min_blocks = MINIMUM_PRUNING_DISTANCE; + let prune_part = PrunePart::Receipts; + + let tests = vec![ + // MINIMUM_PRUNING_DISTANCE makes this impossible + (PruneMode::Full, Err(PrunePartError::Configuration(prune_part))), + // Nothing to prune + (PruneMode::Distance(tip + 1), Ok(None)), + (PruneMode::Distance(min_blocks + 1), Ok(Some(tip - (min_blocks + 1)))), + // Nothing to prune + (PruneMode::Before(tip + 1), Ok(None)), + ( + PruneMode::Before(tip - MINIMUM_PRUNING_DISTANCE), + Ok(Some(tip - MINIMUM_PRUNING_DISTANCE - 1)), + ), + ( + PruneMode::Before(tip - MINIMUM_PRUNING_DISTANCE - 1), + Ok(Some(tip - MINIMUM_PRUNING_DISTANCE - 2)), + ), + // MINIMUM_PRUNING_DISTANCE is 128 + (PruneMode::Before(tip - 1), Err(PrunePartError::Configuration(prune_part))), + ]; + + for (index, (mode, expected_result)) in tests.into_iter().enumerate() { + assert_eq!( + mode.prune_target_block(tip, min_blocks, prune_part), + expected_result.map(|r| r.map(|b| (b, mode))), + "Test {} failed", + index + 1, + ); + } + + // Test for a scenario where there are no minimum blocks and Full can be used + assert_eq!( + PruneMode::Full.prune_target_block(tip, 0, prune_part), + Ok(Some((tip, PruneMode::Full))), + ); + } + + #[test] + fn test_should_prune() { + let tip = 1000; + let should_prune = true; + + let tests = vec![ + (PruneMode::Distance(tip + 1), 1, !should_prune), + ( + PruneMode::Distance(MINIMUM_PRUNING_DISTANCE + 1), + tip - MINIMUM_PRUNING_DISTANCE - 1, + !should_prune, + ), + ( + PruneMode::Distance(MINIMUM_PRUNING_DISTANCE + 1), + tip - MINIMUM_PRUNING_DISTANCE - 2, + should_prune, + ), + (PruneMode::Before(tip + 1), 1, should_prune), + (PruneMode::Before(tip + 1), tip + 1, !should_prune), + ]; + + for (index, (mode, block, expected_result)) in tests.into_iter().enumerate() { + assert_eq!(mode.should_prune(block, tip), expected_result, "Test {} failed", index + 1,); + } + } + #[test] fn prune_mode_deserialize() { #[derive(Debug, Deserialize)] diff --git a/crates/primitives/src/prune/part.rs b/crates/primitives/src/prune/part.rs index db49870735a7..03b64b916309 100644 --- a/crates/primitives/src/prune/part.rs +++ b/crates/primitives/src/prune/part.rs @@ -10,8 +10,10 @@ pub enum PrunePart { SenderRecovery, /// Prune part responsible for the `TxHashNumber` table. TransactionLookup, - /// Prune part responsible for the `Receipts` table. + /// Prune part responsible for all `Receipts`. Receipts, + /// Prune part responsible for some `Receipts` filtered by logs. + ContractLogs, /// Prune part responsible for the `AccountChangeSet` and `AccountHistory` tables. AccountHistory, /// Prune part responsible for the `StorageChangeSet` and `StorageHistory` tables. @@ -19,7 +21,7 @@ pub enum PrunePart { } /// PrunePart error type. -#[derive(Debug, Error)] +#[derive(Debug, Error, PartialEq, Eq)] pub enum PrunePartError { /// Invalid configuration of a prune part. 
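    /// Returned, for example, when `PruneMode::Full` is requested for data that must retain
    /// recent blocks (see `MINIMUM_PRUNING_DISTANCE`).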
#[error("The configuration provided for {0} is invalid.")] diff --git a/crates/primitives/src/prune/target.rs b/crates/primitives/src/prune/target.rs index 939de0f3a227..9620569760e2 100644 --- a/crates/primitives/src/prune/target.rs +++ b/crates/primitives/src/prune/target.rs @@ -1,12 +1,15 @@ use crate::{ prune::PrunePartError, serde_helper::deserialize_opt_prune_mode_with_min_blocks, BlockNumber, - PruneMode, PrunePart, + ContractLogsPruneConfig, PruneMode, PrunePart, }; use paste::paste; use serde::{Deserialize, Serialize}; +/// Minimum distance necessary from the tip so blockchain tree can work correctly. +pub const MINIMUM_PRUNING_DISTANCE: u64 = 128; + /// Pruning configuration for every part of the data that can be pruned. -#[derive(Debug, Clone, Default, Copy, Deserialize, Eq, PartialEq, Serialize)] +#[derive(Debug, Clone, Default, Deserialize, Eq, PartialEq, Serialize)] #[serde(default)] pub struct PruneModes { /// Sender Recovery pruning configuration. @@ -20,7 +23,8 @@ pub struct PruneModes { /// Transaction Lookup pruning configuration. #[serde(skip_serializing_if = "Option::is_none")] pub transaction_lookup: Option, - /// Receipts pruning configuration. + /// Configuration for pruning of receipts. This setting overrides + /// `PruneModes::contract_logs_filter` and offers improved performance. #[serde( skip_serializing_if = "Option::is_none", deserialize_with = "deserialize_opt_prune_mode_with_min_blocks::<64, _>" @@ -38,6 +42,12 @@ pub struct PruneModes { deserialize_with = "deserialize_opt_prune_mode_with_min_blocks::<64, _>" )] pub storage_history: Option, + /// Retains only those receipts that contain logs emitted by the specified addresses, + /// discarding all others. Note that this setting is overridden by `PruneModes::receipts`. + /// + /// The [`BlockNumber`] represents the starting block from which point onwards the receipts are + /// preserved. + pub contract_logs_filter: ContractLogsPruneConfig, } macro_rules! impl_prune_parts { @@ -51,7 +61,7 @@ macro_rules! impl_prune_parts { )] pub fn [](&self, block: BlockNumber, tip: BlockNumber) -> bool { if let Some(mode) = &self.$part { - return self.should_prune(mode, block, tip) + return mode.should_prune(block, tip) } false } @@ -66,16 +76,8 @@ macro_rules! impl_prune_parts { " pruning needs to be done, inclusive, according to the provided tip." )] pub fn [](&self, tip: BlockNumber) -> Result, PrunePartError> { - let min_blocks: u64 = $min_blocks.unwrap_or_default(); - match self.$part { - Some(mode) => Ok(match mode { - PruneMode::Full if min_blocks == 0 => Some((tip, mode)), - PruneMode::Distance(distance) if distance > tip => None, // Nothing to prune yet - PruneMode::Distance(distance) if distance >= min_blocks => Some((tip - distance, mode)), - PruneMode::Before(n) if n > tip => None, // Nothing to prune yet - PruneMode::Before(n) if tip - n >= min_blocks => Some((n - 1, mode)), - _ => return Err(PrunePartError::Configuration(PrunePart::$variant)), - }), + match self.$part { + Some(mode) => mode.prune_target_block(tip, $min_blocks.unwrap_or_default(), PrunePart::$variant), None => Ok(None) } } @@ -88,6 +90,7 @@ macro_rules! impl_prune_parts { $( $part: Some(PruneMode::Full), )+ + contract_logs_filter: Default::default() } } @@ -100,20 +103,6 @@ impl PruneModes { PruneModes::default() } - /// Check if target block should be pruned according to the provided prune mode and tip. 
- pub fn should_prune(&self, mode: &PruneMode, block: BlockNumber, tip: BlockNumber) -> bool { - match mode { - PruneMode::Full => true, - PruneMode::Distance(distance) => { - if *distance > tip { - return false - } - block < tip - *distance - } - PruneMode::Before(n) => *n > block, - } - } - impl_prune_parts!( (sender_recovery, SenderRecovery, Some(64)), (transaction_lookup, TransactionLookup, None), diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 2d0332a2ade0..6fe07013b90d 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -199,7 +199,7 @@ impl ExecutionStage { start_block: u64, max_block: u64, ) -> Result { - let mut prune_modes = self.prune_modes; + let mut prune_modes = self.prune_modes.clone(); // If we're not executing MerkleStage from scratch (by threshold or first-sync), then erase // changeset related pruning configurations diff --git a/crates/stages/src/stages/mod.rs b/crates/stages/src/stages/mod.rs index 777041fbcac1..1adc72bdf969 100644 --- a/crates/stages/src/stages/mod.rs +++ b/crates/stages/src/stages/mod.rs @@ -173,34 +173,34 @@ mod tests { // In an unpruned configuration there is 1 receipt, 3 changed accounts and 1 changed // storage. let mut prune = PruneModes::none(); - check_pruning(factory.clone(), prune, 1, 3, 1).await; + check_pruning(factory.clone(), prune.clone(), 1, 3, 1).await; prune.receipts = Some(PruneMode::Full); prune.account_history = Some(PruneMode::Full); prune.storage_history = Some(PruneMode::Full); // This will result in error for account_history and storage_history, which is caught. - check_pruning(factory.clone(), prune, 0, 0, 0).await; + check_pruning(factory.clone(), prune.clone(), 0, 0, 0).await; prune.receipts = Some(PruneMode::Before(1)); prune.account_history = Some(PruneMode::Before(1)); prune.storage_history = Some(PruneMode::Before(1)); - check_pruning(factory.clone(), prune, 1, 3, 1).await; + check_pruning(factory.clone(), prune.clone(), 1, 3, 1).await; prune.receipts = Some(PruneMode::Before(2)); prune.account_history = Some(PruneMode::Before(2)); prune.storage_history = Some(PruneMode::Before(2)); // The one account is the miner - check_pruning(factory.clone(), prune, 0, 1, 0).await; + check_pruning(factory.clone(), prune.clone(), 0, 1, 0).await; prune.receipts = Some(PruneMode::Distance(66)); prune.account_history = Some(PruneMode::Distance(66)); prune.storage_history = Some(PruneMode::Distance(66)); - check_pruning(factory.clone(), prune, 1, 3, 1).await; + check_pruning(factory.clone(), prune.clone(), 1, 3, 1).await; prune.receipts = Some(PruneMode::Distance(64)); prune.account_history = Some(PruneMode::Distance(64)); prune.storage_history = Some(PruneMode::Distance(64)); // The one account is the miner - check_pruning(factory.clone(), prune, 0, 1, 0).await; + check_pruning(factory.clone(), prune.clone(), 0, 1, 0).await; } } diff --git a/crates/storage/provider/src/post_state/mod.rs b/crates/storage/provider/src/post_state/mod.rs index 1aed0e666934..4032531fff38 100644 --- a/crates/storage/provider/src/post_state/mod.rs +++ b/crates/storage/provider/src/post_state/mod.rs @@ -6,9 +6,11 @@ use reth_db::{ transaction::{DbTx, DbTxMut}, DatabaseError as DbError, }; +use reth_interfaces::Error; use reth_primitives::{ bloom::logs_bloom, keccak256, proofs::calculate_receipt_root_ref, Account, Address, - BlockNumber, Bloom, Bytecode, Log, PruneMode, PruneModes, Receipt, StorageEntry, H256, U256, + BlockNumber, Bloom, Bytecode, Log, PruneMode, 
PruneModes, Receipt, StorageEntry, H256, + MINIMUM_PRUNING_DISTANCE, U256, }; use reth_trie::{ hashed_cursor::{HashedPostState, HashedPostStateCursorFactory, HashedStorage}, @@ -600,7 +602,7 @@ impl PostState { mut self, tx: &TX, tip: BlockNumber, - ) -> Result<(), DbError> { + ) -> Result<(), Error> { self.write_history_to_db(tx, tip)?; // Write new storage state @@ -657,21 +659,64 @@ impl PostState { let mut bodies_cursor = tx.cursor_read::()?; let mut receipts_cursor = tx.cursor_write::()?; + let contract_log_pruner = self + .prune_modes + .contract_logs_filter + .group_by_block(tip) + .map_err(|e| Error::Custom(e.to_string()))?; + + // Empty implies that there is going to be + // addresses to include in the filter in a future block. None means there isn't any kind + // of configuration. + let mut address_filter: Option<(u64, Vec<&Address>)> = None; + for (block, receipts) in self.receipts { - if self.prune_modes.should_prune_receipts(block, tip) { + // [`PrunePart::Receipts`] takes priority over [`PrunePart::ContractLogs`] + if receipts.is_empty() || self.prune_modes.should_prune_receipts(block, tip) { continue } + // All receipts from the last 128 blocks are required for blockchain tree, even with + // [`PrunePart::ContractLogs`]. + let prunable_receipts = + PruneMode::Distance(MINIMUM_PRUNING_DISTANCE).should_prune(block, tip); + + if prunable_receipts && !contract_log_pruner.is_empty() { + if address_filter.is_none() { + address_filter = Some((0, vec![])); + } + + // Get all addresses higher than the previous checked block up to the current + // one + if let Some((prev_block, filter)) = &mut address_filter { + for (_, addresses) in contract_log_pruner.range(*prev_block..=block) { + filter.extend_from_slice(addresses.as_slice()) + } + + *prev_block = block; + } + } + let (_, body_indices) = bodies_cursor.seek_exact(block)?.expect("body indices exist"); let tx_range = body_indices.tx_num_range(); assert_eq!(receipts.len(), tx_range.clone().count(), "Receipt length mismatch"); + for (tx_num, receipt) in tx_range.zip(receipts) { + if prunable_receipts { + // If there is an address_filter, and it does not contain any of the + // contract addresses, then skip writing this + // receipt. + if let Some((_, filter)) = &address_filter { + if !receipt.logs.iter().any(|log| filter.contains(&&log.address)) { + continue + } + } + } receipts_cursor.append(tx_num, receipt)?; } } } - Ok(()) } } From 1fda268a4e2b7785ac681a8d3e3dc0e99e03d717 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 16 Aug 2023 20:02:46 +0200 Subject: [PATCH 447/722] perf: use futures unordered for active requests (#4231) --- crates/net/network/src/transactions.rs | 70 +++++++++++++++++++------- 1 file changed, 51 insertions(+), 19 deletions(-) diff --git a/crates/net/network/src/transactions.rs b/crates/net/network/src/transactions.rs index b67675a6f022..fe2e5e579d8c 100644 --- a/crates/net/network/src/transactions.rs +++ b/crates/net/network/src/transactions.rs @@ -34,7 +34,7 @@ use std::{ sync::Arc, task::{Context, Poll}, }; -use tokio::sync::{mpsc, oneshot}; +use tokio::sync::{mpsc, oneshot, oneshot::error::RecvError}; use tokio_stream::wrappers::{ReceiverStream, UnboundedReceiverStream}; use tracing::{debug, trace}; @@ -101,7 +101,7 @@ pub struct TransactionsManager { /// From which we get all new incoming transaction related messages. network_events: UnboundedReceiverStream, /// All currently active requests for pooled transactions. 
-    inflight_requests: Vec<GetPooledTxRequest>,
+    inflight_requests: FuturesUnordered<GetPooledTxRequestFut>,
     /// All currently pending transactions grouped by peers.
     ///
     /// This way we can track incoming transactions and prevent multiple pool imports for the same
@@ -349,7 +349,7 @@ where
         };

         if peer.request_tx.try_send(req).is_ok() {
-            self.inflight_requests.push(GetPooledTxRequest { peer_id, response: rx })
+            self.inflight_requests.push(GetPooledTxRequestFut::new(peer_id, rx))
         } else {
             // peer channel is saturated, drop the request
             self.metrics.egress_peer_channel_full.increment(1);
@@ -574,28 +574,23 @@ where
         }

         // Advance all requests.
-        // We remove each request one by one and add them back.
-        for idx in (0..this.inflight_requests.len()).rev() {
-            let mut req = this.inflight_requests.swap_remove(idx);
-            match req.response.poll_unpin(cx) {
-                Poll::Pending => {
-                    this.inflight_requests.push(req);
+        while let Poll::Ready(Some(GetPooledTxResponse { peer_id, result })) =
+            this.inflight_requests.poll_next_unpin(cx)
+        {
+            match result {
+                Ok(Ok(txs)) => {
+                    this.import_transactions(peer_id, txs.0, TransactionSource::Response);
                 }
-                Poll::Ready(Ok(Ok(txs))) => {
-                    this.import_transactions(req.peer_id, txs.0, TransactionSource::Response);
+                Ok(Err(req_err)) => {
+                    this.on_request_error(peer_id, req_err);
                 }
-                Poll::Ready(Ok(Err(req_err))) => {
-                    this.on_request_error(req.peer_id, req_err);
-                }
-                Poll::Ready(Err(_)) => {
+                Err(_) => {
                     // request channel closed/dropped
-                    this.on_request_error(req.peer_id, RequestError::ChannelClosed)
+                    this.on_request_error(peer_id, RequestError::ChannelClosed)
                 }
             }
         }

-        this.inflight_requests.shrink_to_fit();
-
         this.update_import_metrics();

         // Advance all imports
@@ -756,12 +751,49 @@ impl TransactionSource {
 }

 /// An inflight request for `PooledTransactions` from a peer
-#[allow(missing_docs)]
 struct GetPooledTxRequest {
     peer_id: PeerId,
     response: oneshot::Receiver<RequestResult<PooledTransactions>>,
 }

+struct GetPooledTxResponse {
+    peer_id: PeerId,
+    result: Result<RequestResult<PooledTransactions>, RecvError>,
+}
+
+#[must_use = "futures do nothing unless polled"]
+#[pin_project::pin_project]
+struct GetPooledTxRequestFut {
+    #[pin]
+    inner: Option<GetPooledTxRequest>,
+}
+
+impl GetPooledTxRequestFut {
+    fn new(
+        peer_id: PeerId,
+        response: oneshot::Receiver<RequestResult<PooledTransactions>>,
+    ) -> Self {
+        Self { inner: Some(GetPooledTxRequest { peer_id, response }) }
+    }
+}
+
+impl Future for GetPooledTxRequestFut {
+    type Output = GetPooledTxResponse;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        let mut req = self.as_mut().project().inner.take().expect("polled after completion");
+        match req.response.poll_unpin(cx) {
+            Poll::Ready(result) => {
+                Poll::Ready(GetPooledTxResponse { peer_id: req.peer_id, result })
+            }
+            Poll::Pending => {
+                self.project().inner.set(Some(req));
+                Poll::Pending
+            }
+        }
+    }
+}
+
 /// Tracks a single peer
 struct Peer {
     /// Keeps track of transactions that we know the peer has seen.
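The change above is the standard futures-rs pattern for tracking many in-flight requests: wrap each oneshot receiver in a small future that remembers which peer it belongs to, park all of them in a FuturesUnordered, and drain that as a stream so only requests whose wakers fired get re-polled, instead of sweeping the whole Vec with swap_remove on every poll. A minimal, self-contained sketch of the same idea follows; the fetch loop, the Response struct, and the payload strings are illustrative stand-ins, not reth's types or API.

    use futures::{stream::FuturesUnordered, StreamExt};
    use tokio::sync::oneshot;

    // Hypothetical carrier for a finished request, playing the role of
    // GetPooledTxResponse in the patch above.
    struct Response {
        id: u64,
        payload: Result<String, oneshot::error::RecvError>,
    }

    #[tokio::main]
    async fn main() {
        // One future per in-flight request; completed ones are yielded as a stream.
        let mut inflight = FuturesUnordered::new();

        for id in 0..3u64 {
            let (tx, rx) = oneshot::channel::<String>();
            // Stand-in for a peer eventually answering the request.
            tokio::spawn(async move {
                let _ = tx.send(format!("pooled txs for request {id}"));
            });
            // Wrap the receiver so the resolved future carries its request id,
            // the same role GetPooledTxRequestFut plays for the peer id.
            inflight.push(async move { Response { id, payload: rx.await } });
        }

        // next() only revisits futures whose wakers fired; completion order is
        // whatever order the peers answer in.
        while let Some(Response { id, payload }) = inflight.next().await {
            match payload {
                Ok(txs) => println!("request {id}: {txs}"),
                Err(_) => println!("request {id}: channel closed"),
            }
        }
    }

Dropping the swap_remove sweep also removes the need for shrink_to_fit: FuturesUnordered frees completed futures as they resolve, and per-poll work scales with the number of woken requests rather than the total number in flight.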
From 45db5a6368018bb377832401d941383a09eed025 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Wed, 16 Aug 2023 21:02:51 +0200
Subject: [PATCH 448/722] feat: support custom PoolTransaction errors (#4237)

---
 crates/rpc/rpc/src/eth/error.rs      |  6 +++++-
 crates/transaction-pool/src/error.rs | 18 +++++++++++++++++-
 2 files changed, 22 insertions(+), 2 deletions(-)

diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs
index 3b3d179b48ae..d4c8813f5dd3 100644
--- a/crates/rpc/rpc/src/eth/error.rs
+++ b/crates/rpc/rpc/src/eth/error.rs
@@ -8,7 +8,7 @@ use jsonrpsee::{
 use reth_primitives::{abi::decode_revert_reason, Address, Bytes, U256};
 use reth_revm::tracing::js::JsInspectorError;
 use reth_rpc_types::{error::EthRpcErrorCode, BlockError, CallInputError};
-use reth_transaction_pool::error::{InvalidPoolTransactionError, PoolError};
+use reth_transaction_pool::error::{InvalidPoolTransactionError, PoolError, PoolTransactionError};
 use revm::primitives::{EVMError, ExecutionResult, Halt, OutOfGasError};
 use std::time::Duration;

@@ -468,6 +468,9 @@ pub enum RpcPoolError {
     ExceedsMaxInitCodeSize,
     #[error(transparent)]
     Invalid(#[from] RpcInvalidTransactionError),
+    /// Custom pool error
+    #[error("{0:?}")]
+    PoolTransactionError(Box<dyn PoolTransactionError>),
     #[error(transparent)]
     Other(Box<dyn std::error::Error + Send + Sync>),
 }

@@ -505,6 +508,7 @@ impl From<InvalidPoolTransactionError> for RpcPoolError {
             }
             InvalidPoolTransactionError::OversizedData(_, _) => RpcPoolError::OversizedData,
             InvalidPoolTransactionError::Underpriced => RpcPoolError::Underpriced,
+            InvalidPoolTransactionError::Other(err) => RpcPoolError::PoolTransactionError(err),
         }
     }
 }
diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs
index 12e0fec66240..75b2f66316f2 100644
--- a/crates/transaction-pool/src/error.rs
+++ b/crates/transaction-pool/src/error.rs
@@ -5,6 +5,18 @@ use reth_primitives::{Address, InvalidTransactionError, TxHash};
 /// Transaction pool result type.
 pub type PoolResult<T> = Result<T, PoolError>;

+/// A trait for additional errors that can be thrown by the transaction pool.
+///
+/// For example, during validation:
+/// [TransactionValidator::validate_transaction](crate::validate::TransactionValidator::validate_transaction)
+pub trait PoolTransactionError: std::error::Error + Send + Sync {
+    /// Returns `true` if the error was caused by a transaction that is considered bad in the
+    /// context of the transaction pool and warrants peer penalization.
+    ///
+    /// See [PoolError::is_bad_transaction].
+    fn is_bad_transaction(&self) -> bool;
+}
+
 /// All errors the Transaction pool can throw.
 #[derive(Debug, thiserror::Error)]
 pub enum PoolError {
@@ -105,7 +117,7 @@ impl PoolError {
 /// Represents errors that can happen when validating transactions for the pool
 ///
 /// See [TransactionValidator](crate::TransactionValidator).
-#[derive(Debug, Clone, thiserror::Error)]
+#[derive(Debug, thiserror::Error)]
 pub enum InvalidPoolTransactionError {
     /// Hard consensus errors
     #[error(transparent)]
@@ -126,6 +138,9 @@ pub enum InvalidPoolTransactionError {
     /// Thrown if the transaction's fee is below the minimum fee
     #[error("transaction underpriced")]
     Underpriced,
+    /// Any other transaction-specific error that occurred while inserting or validating
+    #[error("{0:?}")]
+    Other(Box<dyn PoolTransactionError>),
 }

 // === impl InvalidPoolTransactionError ===

@@ -178,6 +193,7 @@ impl InvalidPoolTransactionError {
                 // local setting
                 false
             }
+            InvalidPoolTransactionError::Other(err) => err.is_bad_transaction(),
         }
     }
 }

From 40f9576c3afe68ef58a50369b0455ef19eb2f946 Mon Sep 17 00:00:00 2001
From: Dan Cline <6798349+Rjected@users.noreply.github.com>
Date: Wed, 16 Aug 2023 19:10:33 -0400
Subject: [PATCH 449/722] feat: implement network encoding for blob
 transactions (#4172)

Co-authored-by: Matthias Seitz
---
 Cargo.lock                                    | 121 +++---
 Cargo.toml                                    |   5 +-
 crates/net/eth-wire/src/types/transactions.rs | 149 +------
 crates/net/eth-wire/testdata/blob_transaction |   1 +
 .../testdata/pooled_transactions_with_blob    |   1 +
 .../net/eth-wire/tests/pooled_transactions.rs |  23 ++
 crates/net/network/src/message.rs             |   4 +-
 crates/net/network/src/transactions.rs        |   9 +-
 crates/primitives/src/lib.rs                  |  12 +-
 crates/primitives/src/transaction/eip1559.rs  |  29 ++
 crates/primitives/src/transaction/eip2930.rs  |  27 ++
 crates/primitives/src/transaction/eip4844.rs  | 374 +++++++++++++++++-
 crates/primitives/src/transaction/mod.rs      |  54 +--
 crates/primitives/src/transaction/pooled.rs   | 178 +++++++++
 .../primitives/src/transaction/signature.rs   |   6 +-
 15 files changed, 745 insertions(+), 248 deletions(-)
 create mode 100644 crates/net/eth-wire/testdata/blob_transaction
 create mode 100644 crates/net/eth-wire/testdata/pooled_transactions_with_blob
 create mode 100644 crates/net/eth-wire/tests/pooled_transactions.rs
 create mode 100644 crates/primitives/src/transaction/pooled.rs

diff --git a/Cargo.lock b/Cargo.lock
index c89d13f23238..c22e923952df 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -119,9 +119,9 @@ dependencies = [

 [[package]]
 name = "aho-corasick"
-version = "1.0.3"
+version = "1.0.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "86b8f9420f797f2d9e935edf629310eb938a0d839f984e25327f3c7eed22300c"
+checksum = "6748e8def348ed4d14996fa801f4122cd763fff530258cdc03f64b25f89d3a5a"
 dependencies = [
  "memchr",
 ]

@@ -230,9 +230,9 @@ dependencies = [

 [[package]]
 name = "anyhow"
-version = "1.0.72"
+version = "1.0.74"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854"
+checksum = "8c6f84b74db2535ebae81eede2f39b947dcbf01d093ae5f791e5dd414a1bf289"

 [[package]]
 name = "aquamarine"
@@ -932,7 +932,7 @@ dependencies = [

 [[package]]
 name = "c-kzg"
 version = "0.1.0"
-source = "git+https://github.com/ethereum/c-kzg-4844#3ce8f863415ac1b218bc7d63cc14778b570aa081"
+source = "git+https://github.com/rjected/c-kzg-4844?branch=dan/add-serde-feature#4c95d6b8850f4f22a25fed0cf207560711cefe2b"
 dependencies = [
  "bindgen 0.64.0 (git+https://github.com/rust-lang/rust-bindgen?rev=0de11f0a521611ac8738b7b01d19dddaf3899e66)",
  "cc",
@@ -1899,9 +1899,9 @@ dependencies = [

 [[package]]
 name = "ed25519"
-version = "2.2.1"
+version = "2.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5fb04eee5d9d907f29e80ee6b0e78f7e2c82342c63e3580d8c4f69d9d5aad963"
+checksum =
"60f6d271ca33075c88028be6f04d502853d63a5ece419d269c15315d4fc1cf1d" dependencies = [ "pkcs8", "signature", @@ -2505,9 +2505,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.26" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" +checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010" dependencies = [ "crc32fast", "miniz_oxide", @@ -3008,9 +3008,9 @@ checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "human_bytes" @@ -3105,7 +3105,7 @@ dependencies = [ "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows", + "windows 0.48.0", ] [[package]] @@ -3957,9 +3957,9 @@ dependencies = [ [[package]] name = "metrics-process" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "006271a8019ad7a9a28cfac2cc40e3ee104d54be763c4a0901e228a63f49d706" +checksum = "1c93f6ad342d3f7bc14724147e2dbc6eb6fdbe5a832ace16ea23b73618e8cc17" dependencies = [ "libproc", "mach2", @@ -3967,7 +3967,7 @@ dependencies = [ "once_cell", "procfs", "rlimit", - "windows", + "windows 0.51.0", ] [[package]] @@ -4169,9 +4169,9 @@ dependencies = [ [[package]] name = "num-complex" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e0d21255c828d6f128a1e41534206671e8c3ea0c62f32291e808dc82cff17d" +checksum = "1ba157ca0885411de85d6ca030ba7e2a83a28636056c7c699b07c8b6f7383214" dependencies = [ "num-traits", ] @@ -4469,7 +4469,7 @@ dependencies = [ "libc", "redox_syscall 0.3.5", "smallvec 1.11.0", - "windows-targets 0.48.1", + "windows-targets 0.48.2", ] [[package]] @@ -5141,7 +5141,7 @@ version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" dependencies = [ - "aho-corasick 1.0.3", + "aho-corasick 1.0.4", "memchr", "regex-automata 0.3.6", "regex-syntax 0.7.4", @@ -5162,7 +5162,7 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" dependencies = [ - "aho-corasick 1.0.3", + "aho-corasick 1.0.4", "memchr", "regex-syntax 0.7.4", ] @@ -6334,9 +6334,9 @@ dependencies = [ [[package]] name = "rlimit" -version = "0.9.1" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8a29d87a652dc4d43c586328706bb5cdff211f3f39a530f240b53f7221dab8e" +checksum = "3560f70f30a0f16d11d01ed078a07740fe6b489667abc7c7b029155d9f21c3d8" dependencies = [ "libc", ] @@ -6754,9 +6754,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.104" +version = "1.0.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "076066c5f1078eac5b722a31827a8832fe108bed65dfa75e233c89f8206e976c" +checksum = "693151e1ac27563d6dbcec9dee9fbd5da8539b20fa14ad3752b2e6d363ace360" dependencies = [ "itoa", "ryu", @@ -8349,7 +8349,26 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows-targets 0.48.1", + "windows-targets 0.48.2", +] + +[[package]] +name = "windows" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9763fb813068e9f4ab70a92a0c6ad61ff6b342f693b1ed0e5387c854386e670" +dependencies = [ + "windows-core", + "windows-targets 0.48.2", +] + +[[package]] +name = "windows-core" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b81650771e76355778637954dc9d7eb8d991cd89ad64ba26f21eeb3c22d8d836" +dependencies = [ + "windows-targets 0.48.2", ] [[package]] @@ -8367,7 +8386,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.48.1", + "windows-targets 0.48.2", ] [[package]] @@ -8387,17 +8406,17 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.48.1" +version = "0.48.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" +checksum = "d1eeca1c172a285ee6c2c84c341ccea837e7c01b12fbb2d0fe3c9e550ce49ec8" dependencies = [ - "windows_aarch64_gnullvm 0.48.0", - "windows_aarch64_msvc 0.48.0", - "windows_i686_gnu 0.48.0", - "windows_i686_msvc 0.48.0", - "windows_x86_64_gnu 0.48.0", - "windows_x86_64_gnullvm 0.48.0", - "windows_x86_64_msvc 0.48.0", + "windows_aarch64_gnullvm 0.48.2", + "windows_aarch64_msvc 0.48.2", + "windows_i686_gnu 0.48.2", + "windows_i686_msvc 0.48.2", + "windows_x86_64_gnu 0.48.2", + "windows_x86_64_gnullvm 0.48.2", + "windows_x86_64_msvc 0.48.2", ] [[package]] @@ -8408,9 +8427,9 @@ checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.48.0" +version = "0.48.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" +checksum = "b10d0c968ba7f6166195e13d593af609ec2e3d24f916f081690695cf5eaffb2f" [[package]] name = "windows_aarch64_msvc" @@ -8420,9 +8439,9 @@ checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" -version = "0.48.0" +version = "0.48.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" +checksum = "571d8d4e62f26d4932099a9efe89660e8bd5087775a2ab5cdd8b747b811f1058" [[package]] name = "windows_i686_gnu" @@ -8432,9 +8451,9 @@ checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_gnu" -version = "0.48.0" +version = "0.48.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" +checksum = "2229ad223e178db5fbbc8bd8d3835e51e566b8474bfca58d2e6150c48bb723cd" [[package]] name = "windows_i686_msvc" @@ -8444,9 +8463,9 @@ checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" -version = "0.48.0" +version = "0.48.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" +checksum = "600956e2d840c194eedfc5d18f8242bc2e17c7775b6684488af3a9fff6fe3287" [[package]] name = "windows_x86_64_gnu" @@ -8456,9 
+8475,9 @@ checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnu" -version = "0.48.0" +version = "0.48.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" +checksum = "ea99ff3f8b49fb7a8e0d305e5aec485bd068c2ba691b6e277d29eaeac945868a" [[package]] name = "windows_x86_64_gnullvm" @@ -8468,9 +8487,9 @@ checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_gnullvm" -version = "0.48.0" +version = "0.48.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" +checksum = "8f1a05a1ece9a7a0d5a7ccf30ba2c33e3a61a30e042ffd247567d1de1d94120d" [[package]] name = "windows_x86_64_msvc" @@ -8480,15 +8499,15 @@ checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "windows_x86_64_msvc" -version = "0.48.0" +version = "0.48.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +checksum = "d419259aba16b663966e29e6d7c6ecfa0bb8425818bb96f6f1f3c3eb71a6e7b9" [[package]] name = "winnow" -version = "0.5.10" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5504cc7644f4b593cbc05c4a55bf9bd4e94b867c3c0bd440934174d50482427d" +checksum = "1e461589e194280efaa97236b73623445efa195aa633fd7004f39805707a9d53" dependencies = [ "memchr", ] diff --git a/Cargo.toml b/Cargo.toml index 2e60c4b85770..a18a52a0d32b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -152,4 +152,7 @@ c-kzg = { git = "https://github.com/ethereum/c-kzg-4844" } ### misc-testing proptest = "1.0" -arbitrary = "1.1" \ No newline at end of file +arbitrary = "1.1" + +[patch."https://github.com/ethereum/c-kzg-4844"] +c-kzg = { git = "https://github.com/rjected/c-kzg-4844", branch = "dan/add-serde-feature" } \ No newline at end of file diff --git a/crates/net/eth-wire/src/types/transactions.rs b/crates/net/eth-wire/src/types/transactions.rs index 35a8a7649127..4cb1abab2807 100644 --- a/crates/net/eth-wire/src/types/transactions.rs +++ b/crates/net/eth-wire/src/types/transactions.rs @@ -1,27 +1,11 @@ //! Implements the `GetPooledTransactions` and `PooledTransactions` message types. -use reth_codecs::{add_arbitrary_tests, derive_arbitrary}; -use reth_primitives::{ - kzg::{self, Blob, Bytes48, KzgProof, KzgSettings}, - TransactionSigned, H256, -}; -use reth_rlp::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper}; +use reth_codecs::derive_arbitrary; +use reth_primitives::{PooledTransactionsElement, TransactionSigned, H256}; +use reth_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -#[cfg(any(test, feature = "arbitrary"))] -use proptest::{ - arbitrary::{any as proptest_any, ParamsFor}, - collection::vec as proptest_vec, - strategy::{BoxedStrategy, Strategy}, -}; - -#[cfg(any(test, feature = "arbitrary"))] -use reth_primitives::{ - constants::eip4844::{FIELD_ELEMENTS_PER_BLOB, KZG_TRUSTED_SETUP}, - kzg::{KzgCommitment, BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT}, -}; - /// A list of transaction hashes that the peer would like transaction bodies for. 
 #[derive_arbitrary(rlp)]
 #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)]
@@ -47,141 +31,20 @@ where
 /// as the request's hashes. Hashes may be skipped, and the client should ensure that each body
 /// corresponds to a requested hash. Hashes may need to be re-requested if the bodies are not
 /// included in the response.
-#[derive_arbitrary(rlp, 10)]
+// #[derive_arbitrary(rlp, 10)]
 #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 pub struct PooledTransactions(
     /// The transaction bodies, each of which should correspond to a requested hash.
-    pub Vec<TransactionSigned>,
+    pub Vec<PooledTransactionsElement>,
 );

 impl From<Vec<TransactionSigned>> for PooledTransactions {
     fn from(txs: Vec<TransactionSigned>) -> Self {
-        PooledTransactions(txs)
-    }
-}
-
-impl From<PooledTransactions> for Vec<TransactionSigned> {
-    fn from(txs: PooledTransactions) -> Self {
-        txs.0
+        PooledTransactions(txs.into_iter().map(Into::into).collect())
     }
 }

-/// A response to [`GetPooledTransactions`] that includes blob data, their commitments, and their
-/// corresponding proofs.
-///
-/// This is defined in [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#networking) as an element
-/// of a [PooledTransactions] response.
-#[add_arbitrary_tests(rlp, 20)]
-#[derive(Clone, Debug, PartialEq, Eq, RlpEncodable, RlpDecodable, Default)]
-pub struct BlobTransaction {
-    /// The transaction payload.
-    pub transaction: TransactionSigned,
-    /// The transaction's blob data.
-    pub blobs: Vec<Blob>,
-    /// The transaction's blob commitments.
-    pub commitments: Vec<Bytes48>,
-    /// The transaction's blob proofs.
-    pub proofs: Vec<Bytes48>,
-}
-
-impl BlobTransaction {
-    /// Verifies that the transaction's blob data, commitments, and proofs are all valid.
-    ///
-    /// Takes as input the [KzgSettings], which should contain the parameters derived from the
-    /// KZG trusted setup.
-    ///
-    /// This ensures that the blob transaction payload has the same number of blob data elements,
-    /// commitments, and proofs. Each blob data element is verified against its commitment and
-    /// proof.
-    ///
-    /// Returns `false` if any blob KZG proof in the response fails to verify.
-    pub fn validate(&self, proof_settings: &KzgSettings) -> Result<bool, kzg::Error> {
-        // Verify as a batch
-        KzgProof::verify_blob_kzg_proof_batch(
-            self.blobs.as_slice(),
-            self.commitments.as_slice(),
-            self.proofs.as_slice(),
-            proof_settings,
-        )
-    }
-}
-
-#[cfg(any(test, feature = "arbitrary"))]
-impl<'a> arbitrary::Arbitrary<'a> for BlobTransaction {
-    fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
-        let mut arr = [0u8; BYTES_PER_BLOB];
-        let blobs: Vec<Blob> = (0..u.int_in_range(1..=16)?)
-            .map(|_| {
-                arr = arbitrary::Arbitrary::arbitrary(u).unwrap();
-
-                // Ensure that the blob is canonical by ensuring that
-                // each field element contained in the blob is < BLS_MODULUS
-                for i in 0..(FIELD_ELEMENTS_PER_BLOB as usize) {
-                    arr[i * BYTES_PER_FIELD_ELEMENT] = 0;
-                }
-                Blob::from(arr)
-            })
-            .collect();
-
-        Ok(generate_blob_transaction(blobs, TransactionSigned::arbitrary(u)?))
-    }
-}
-
-#[cfg(any(test, feature = "arbitrary"))]
-impl proptest::arbitrary::Arbitrary for BlobTransaction {
-    type Parameters = ParamsFor<()>;
-    fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy {
-        (
-            proptest_vec(proptest_vec(proptest_any::<u8>(), BYTES_PER_BLOB), 1..=5),
-            proptest_any::<TransactionSigned>(),
-        )
-            .prop_map(move |(blobs, tx)| {
-                let blobs = blobs
-                    .into_iter()
-                    .map(|mut blob| {
-                        let mut arr = [0u8; BYTES_PER_BLOB];
-
-                        // Ensure that the blob is canonical by ensuring that
-                        // each field element contained in the blob is < BLS_MODULUS
-                        for i in 0..(FIELD_ELEMENTS_PER_BLOB as usize) {
-                            blob[i * BYTES_PER_FIELD_ELEMENT] = 0;
-                        }
-
-                        arr.copy_from_slice(blob.as_slice());
-                        arr.into()
-                    })
-                    .collect();
-
-                generate_blob_transaction(blobs, tx)
-            })
-            .boxed()
-    }
-
-    type Strategy = BoxedStrategy<Self>;
-}
-
-#[cfg(any(test, feature = "arbitrary"))]
-fn generate_blob_transaction(blobs: Vec<Blob>, transaction: TransactionSigned) -> BlobTransaction {
-    let kzg_settings = KZG_TRUSTED_SETUP.clone();
-
-    let commitments: Vec<Bytes48> = blobs
-        .iter()
-        .map(|blob| KzgCommitment::blob_to_kzg_commitment(blob.clone(), &kzg_settings).unwrap())
-        .map(|commitment| commitment.to_bytes())
-        .collect();
-
-    let proofs: Vec<Bytes48> = blobs
-        .iter()
-        .zip(commitments.iter())
-        .map(|(blob, commitment)| {
-            KzgProof::compute_blob_kzg_proof(blob.clone(), *commitment, &kzg_settings).unwrap()
-        })
-        .map(|proof| proof.to_bytes())
-        .collect();
-
-    BlobTransaction { transaction, blobs, commitments, proofs }
-}

 #[cfg(test)]
 mod test {
     use crate::{message::RequestPair, GetPooledTransactions, PooledTransactions};
diff --git a/crates/net/eth-wire/testdata/blob_transaction b/crates/net/eth-wire/testdata/blob_transaction
new file mode 100644
index 000000000000..130bd30e34f7
--- /dev/null
+++ b/crates/net/eth-wire/testdata/blob_transaction
@@ -0,0 +1 @@
+ba02012a03fa020125f8b7010516058261a894030405000000000000000000000000000000000063b20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c00fe1a0010657f37554c781402a22917dee2f75def7ab966d7b770905398eba3c44401401a09ee8609ebdd5226fe88eb5aa4c3a00557301f337a480fc63c55a7b98fb5b5460a067df54086c2ff2242abfdeb3d008e8bb88c44460aa31e8013444495079f845d2fa020004ba020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
[zero-filled data blob: a long run of repeated `0` characters, omitted]
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f1b0c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f1b0c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
diff --git a/crates/net/eth-wire/testdata/pooled_transactions_with_blob b/crates/net/eth-wire/testdata/pooled_transactions_with_blob
new file mode 100644
index 000000000000..41bdfe0946c3
--- /dev/null
+++ b/crates/net/eth-wire/testdata/pooled_transactions_with_blob
@@ -0,0 +1 @@
+fa0201f9b86601f8630103018261a894b94f5374fce5edbc8e2a8697c15331677e6ebf0b0a825544c001a0c9519f4f2b30335884581971573fadf60c6204f59a911df35ee8a540456b2660a032f1e8e2c5dd761f9e4f88f41c8310aeaba26a8bfcdacfedfa12ec3862d37521ba02012a03fa020125f8b7010516058261a894030405000000000000000000000000000000000063b20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c00fe1a0010657f37554c781402a22917dee2f75def7ab966d7b770905398eba3c44401401a0419109cdfbb893b9002cb829606bbbc3392af074988a0df63b4dee1ddc1e279ea0448aee46ad28a79b9f2afe94fa4d0504ec1bc820e5ced5731dafbda6cd8dffd9fa020004ba020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f1b0c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f1b0c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f86103018207d094b94f5374fce5edbc8e2a8697c15331677e6ebf0b0a8255441ca098ff921201554726367d2be8c804a7ff89ccf
285ebc57dff8ae4c44b9c19ac4aa08887321be575c8095f789dd4c743dfe42c1820f9231f98a962b210e3ac2452a3
diff --git a/crates/net/eth-wire/tests/pooled_transactions.rs b/crates/net/eth-wire/tests/pooled_transactions.rs
new file mode 100644
index 000000000000..66dbdd001985
--- /dev/null
+++ b/crates/net/eth-wire/tests/pooled_transactions.rs
@@ -0,0 +1,23 @@
+//! Decoding tests for [`PooledTransactions`]
+use reth_eth_wire::PooledTransactions;
+use reth_primitives::{hex, PooledTransactionsElement};
+use reth_rlp::Decodable;
+use std::{fs, path::PathBuf};
+
+#[test]
+fn decode_pooled_transactions_data() {
+    let network_data_path =
+        PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("testdata/pooled_transactions_with_blob");
+    let data = fs::read_to_string(network_data_path).expect("Unable to read file");
+    let hex_data = hex::decode(data.trim()).unwrap();
+    let _txs = PooledTransactions::decode(&mut &hex_data[..]).unwrap();
+}
+
+#[test]
+fn decode_blob_transaction_data() {
+    let network_data_path =
+        PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("testdata/blob_transaction");
+    let data = fs::read_to_string(network_data_path).expect("Unable to read file");
+    let hex_data = hex::decode(data.trim()).unwrap();
+    let _txs = PooledTransactionsElement::decode(&mut &hex_data[..]).unwrap();
+}
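A possible companion to these decode-only tests: re-encode the decoded message and compare against the fixture bytes. This is a minimal sketch, assuming the checked-in fixture is canonically encoded so that a byte-for-byte round trip holds:

use reth_eth_wire::PooledTransactions;
use reth_primitives::hex;
use reth_rlp::{Decodable, Encodable};
use std::{fs, path::PathBuf};

#[test]
fn pooled_transactions_roundtrip() {
    let path = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
        .join("testdata/pooled_transactions_with_blob");
    let data = fs::read_to_string(path).expect("Unable to read file");
    let bytes = hex::decode(data.trim()).unwrap();

    // decode the fixture, then re-encode it and compare against the input
    let txs = PooledTransactions::decode(&mut &bytes[..]).unwrap();
    let mut encoded = Vec::new();
    txs.encode(&mut encoded);
    assert_eq!(encoded, bytes);
}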
diff --git a/crates/net/network/src/message.rs b/crates/net/network/src/message.rs
index 1a50a649267d..12b4c4980a6b 100644
--- a/crates/net/network/src/message.rs
+++ b/crates/net/network/src/message.rs
@@ -12,7 +12,7 @@ use reth_eth_wire::{
 };
 use reth_interfaces::p2p::error::{RequestError, RequestResult};
 use reth_primitives::{
-    BlockBody, Bytes, Header, PeerId, ReceiptWithBloom, TransactionSigned, H256,
+    BlockBody, Bytes, Header, PeerId, PooledTransactionsElement, ReceiptWithBloom, H256,
 };
 use std::{
     fmt,
@@ -199,7 +199,7 @@ impl PeerResponse {
 pub enum PeerResponseResult {
     BlockHeaders(RequestResult<Vec<Header>>),
     BlockBodies(RequestResult<Vec<BlockBody>>),
-    PooledTransactions(RequestResult<Vec<TransactionSigned>>),
+    PooledTransactions(RequestResult<Vec<PooledTransactionsElement>>),
     NodeData(RequestResult<Vec<Bytes>>),
     Receipts(RequestResult<Vec<Vec<ReceiptWithBloom>>>),
 }
diff --git a/crates/net/network/src/transactions.rs b/crates/net/network/src/transactions.rs
index fe2e5e579d8c..f5cc1487dd8c 100644
--- a/crates/net/network/src/transactions.rs
+++ b/crates/net/network/src/transactions.rs
@@ -193,7 +193,8 @@ where
         // we sent a response at which point we assume that the peer is aware of the transaction
         peer.transactions.extend(transactions.iter().map(|tx| tx.hash()));
 
-        let resp = PooledTransactions(transactions);
+        // TODO: remove this! this will be different when we introduce the blobpool
+        let resp = PooledTransactions(transactions.into_iter().map(Into::into).collect());
         let _ = response.send(Ok(resp));
     }
 }
@@ -579,7 +580,11 @@ where
     {
         match result {
             Ok(Ok(txs)) => {
-                this.import_transactions(peer_id, txs.0, TransactionSource::Response);
+                // convert all transactions to the inner transaction type, ignoring any
+                // sidecars
+                // TODO: remove this! this will be different when we introduce the blobpool
+                let transactions = txs.0.into_iter().map(|tx| tx.into_transaction()).collect();
+                this.import_transactions(peer_id, transactions, TransactionSource::Response)
             }
             Ok(Err(req_err)) => {
                 this.on_request_error(peer_id, req_err);
diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs
index 065dd2500cfc..8d0ffb023615 100644
--- a/crates/primitives/src/lib.rs
+++ b/crates/primitives/src/lib.rs
@@ -67,6 +67,7 @@ pub use compression::*;
 pub use constants::{
     DEV_GENESIS, EMPTY_OMMER_ROOT, GOERLI_GENESIS, KECCAK_EMPTY, MAINNET_GENESIS, SEPOLIA_GENESIS,
 };
+pub use eip4844::{calculate_excess_blob_gas, kzg_to_versioned_hash};
 pub use forkid::{ForkFilter, ForkHash, ForkId, ForkTransition, ValidationError};
 pub use genesis::{Genesis, GenesisAccount};
 pub use hardfork::Hardfork;
@@ -89,11 +90,12 @@ pub use serde_helper::JsonU256;
 pub use storage::StorageEntry;
 pub use transaction::{
     util::secp256k1::{public_key_to_address, recover_signer, sign_message},
-    AccessList, AccessListItem, AccessListWithGasUsed, FromRecoveredTransaction,
-    IntoRecoveredTransaction, InvalidTransactionError, Signature, Transaction, TransactionKind,
-    TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash,
-    TxEip1559, TxEip2930, TxEip4844, TxLegacy, TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID,
-    EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID,
+    AccessList, AccessListItem, AccessListWithGasUsed, BlobTransaction, BlobTransactionSidecar,
+    FromRecoveredTransaction, IntoRecoveredTransaction, InvalidTransactionError,
+    PooledTransactionsElement, Signature, Transaction, TransactionKind, TransactionMeta,
+    TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxEip1559, TxEip2930,
+    TxEip4844, TxLegacy, TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID,
+    LEGACY_TX_TYPE_ID,
 };
 pub use withdrawal::Withdrawal;
diff --git a/crates/primitives/src/transaction/eip1559.rs b/crates/primitives/src/transaction/eip1559.rs
index ba3e18a6d2be..3a8dcec7a8a2 100644
--- a/crates/primitives/src/transaction/eip1559.rs
+++ b/crates/primitives/src/transaction/eip1559.rs
@@ -1,6 +1,7 @@
 use super::access_list::AccessList;
 use crate::{Bytes, ChainId, TransactionKind};
 use reth_codecs::{main_codec, Compact};
+use reth_rlp::{Decodable, DecodeError};
 use std::mem;
 
 /// A transaction with a priority fee ([EIP-1559](https://eips.ethereum.org/EIPS/eip-1559)).
@@ -82,6 +83,34 @@ impl TxEip1559 {
         }
     }
 
+    /// Decodes the inner [TxEip1559] fields from RLP bytes.
+    ///
+    /// NOTE: This assumes a RLP header has already been decoded, and _just_ decodes the following
+    /// RLP fields in the following order:
+    ///
+    /// - `chain_id`
+    /// - `nonce`
+    /// - `max_priority_fee_per_gas`
+    /// - `max_fee_per_gas`
+    /// - `gas_limit`
+    /// - `to`
+    /// - `value`
+    /// - `data` (`input`)
+    /// - `access_list`
+    pub(crate) fn decode_inner(buf: &mut &[u8]) -> Result<Self, DecodeError> {
+        Ok(Self {
+            chain_id: Decodable::decode(buf)?,
+            nonce: Decodable::decode(buf)?,
+            max_priority_fee_per_gas: Decodable::decode(buf)?,
+            max_fee_per_gas: Decodable::decode(buf)?,
+            gas_limit: Decodable::decode(buf)?,
+            to: Decodable::decode(buf)?,
+            value: Decodable::decode(buf)?,
+            input: Bytes(Decodable::decode(buf)?),
+            access_list: Decodable::decode(buf)?,
+        })
+    }
+
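The `decode_inner` helpers added in this patch all follow the same contract: the caller consumes the list header, then the helper reads the fields in order. A minimal crate-internal sketch of how one of them composes with `Header` and `Signature` decoding (the function name is illustrative; this mirrors what `decode_enveloped_typed_transaction` does later in this patch):

use crate::{Signature, Transaction, TxEip1559};
use reth_rlp::{Decodable, DecodeError, Header};

// Illustrative only: decode the payload that follows a 0x02 type byte.
fn decode_eip1559_payload(data: &mut &[u8]) -> Result<(Transaction, Signature), DecodeError> {
    // the typed payload is an RLP list: [chain_id, nonce, ..., y_parity, r, s]
    let header = Header::decode(data)?;
    if !header.list {
        return Err(DecodeError::Custom("typed tx fields must be encoded as a list"))
    }
    let transaction = Transaction::Eip1559(TxEip1559::decode_inner(data)?);
    let signature = Signature::decode(data)?;
    Ok((transaction, signature))
}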
     /// Calculates a heuristic for the in-memory size of the [TxEip1559] transaction.
     #[inline]
     pub fn size(&self) -> usize {
diff --git a/crates/primitives/src/transaction/eip2930.rs b/crates/primitives/src/transaction/eip2930.rs
index ecd6ecb7ec20..16029ac97dd3 100644
--- a/crates/primitives/src/transaction/eip2930.rs
+++ b/crates/primitives/src/transaction/eip2930.rs
@@ -1,6 +1,7 @@
 use super::access_list::AccessList;
 use crate::{Bytes, ChainId, TransactionKind};
 use reth_codecs::{main_codec, Compact};
+use reth_rlp::{Decodable, DecodeError};
 use std::mem;
 
 /// Transaction with an [`AccessList`] ([EIP-2930](https://eips.ethereum.org/EIPS/eip-2930)).
@@ -64,6 +65,32 @@ impl TxEip2930 {
         self.access_list.size() + // access_list
         self.input.len() // input
     }
+
+    /// Decodes the inner [TxEip2930] fields from RLP bytes.
+    ///
+    /// NOTE: This assumes a RLP header has already been decoded, and _just_ decodes the following
+    /// RLP fields in the following order:
+    ///
+    /// - `chain_id`
+    /// - `nonce`
+    /// - `gas_price`
+    /// - `gas_limit`
+    /// - `to`
+    /// - `value`
+    /// - `data` (`input`)
+    /// - `access_list`
+    pub(crate) fn decode_inner(buf: &mut &[u8]) -> Result<Self, DecodeError> {
+        Ok(Self {
+            chain_id: Decodable::decode(buf)?,
+            nonce: Decodable::decode(buf)?,
+            gas_price: Decodable::decode(buf)?,
+            gas_limit: Decodable::decode(buf)?,
+            to: Decodable::decode(buf)?,
+            value: Decodable::decode(buf)?,
+            input: Bytes(Decodable::decode(buf)?),
+            access_list: Decodable::decode(buf)?,
+        })
+    }
 }
 
 #[cfg(test)]
diff --git a/crates/primitives/src/transaction/eip4844.rs b/crates/primitives/src/transaction/eip4844.rs
index f9da9b0970ea..b09e019a76b8 100644
--- a/crates/primitives/src/transaction/eip4844.rs
+++ b/crates/primitives/src/transaction/eip4844.rs
@@ -1,7 +1,17 @@
 use super::access_list::AccessList;
-use crate::{constants::eip4844::DATA_GAS_PER_BLOB, Bytes, ChainId, TransactionKind, H256};
+use crate::{
+    constants::eip4844::DATA_GAS_PER_BLOB,
+    kzg::{
+        self, Blob, Bytes48, KzgCommitment, KzgProof, KzgSettings, BYTES_PER_BLOB,
+        BYTES_PER_COMMITMENT, BYTES_PER_PROOF,
+    },
+    kzg_to_versioned_hash, Bytes, ChainId, Signature, Transaction, TransactionKind,
+    TransactionSigned, TransactionSignedNoHash, TxType, EIP4844_TX_TYPE_ID, H256,
+};
 use reth_codecs::{main_codec, Compact};
-use std::mem;
+use reth_rlp::{Decodable, DecodeError, Encodable, Header};
+use serde::{Deserialize, Serialize};
+use std::{mem, ops::Deref};
 
 /// [EIP-4844 Blob Transaction](https://eips.ethereum.org/EIPS/eip-4844#blob-transaction)
 ///
@@ -100,6 +110,38 @@ impl TxEip4844 {
         self.blob_versioned_hashes.len() as u64 * DATA_GAS_PER_BLOB
     }
 
+    /// Decodes the inner [TxEip4844] fields from RLP bytes.
+    ///
+    /// NOTE: This assumes a RLP header has already been decoded, and _just_ decodes the following
+    /// RLP fields in the following order:
+    ///
+    /// - `chain_id`
+    /// - `nonce`
+    /// - `max_priority_fee_per_gas`
+    /// - `max_fee_per_gas`
+    /// - `gas_limit`
+    /// - `to`
+    /// - `value`
+    /// - `data` (`input`)
+    /// - `access_list`
+    /// - `max_fee_per_blob_gas`
+    /// - `blob_versioned_hashes`
+    pub fn decode_inner(buf: &mut &[u8]) -> Result<Self, DecodeError> {
+        Ok(Self {
+            chain_id: Decodable::decode(buf)?,
+            nonce: Decodable::decode(buf)?,
+            max_priority_fee_per_gas: Decodable::decode(buf)?,
+            max_fee_per_gas: Decodable::decode(buf)?,
+            gas_limit: Decodable::decode(buf)?,
+            to: Decodable::decode(buf)?,
+            value: Decodable::decode(buf)?,
+            input: Bytes(Decodable::decode(buf)?),
+            access_list: Decodable::decode(buf)?,
+            max_fee_per_blob_gas: Decodable::decode(buf)?,
+            blob_versioned_hashes: Decodable::decode(buf)?,
+        })
+    }
+
     /// Calculates a heuristic for the in-memory size of the [TxEip4844] transaction.
     #[inline]
     pub fn size(&self) -> usize {
@@ -116,3 +158,331 @@ impl TxEip4844 {
         mem::size_of::<u128>() // max_fee_per_data_gas
     }
 }
+
+/// An error that can occur when validating a [BlobTransaction].
+#[derive(Debug)]
+pub enum BlobTransactionValidationError {
+    /// An error returned by the [kzg] library
+    KZGError(kzg::Error),
+    /// The inner transaction is not a blob transaction
+    NotBlobTransaction(TxType),
+}
+
+impl From<kzg::Error> for BlobTransactionValidationError {
+    fn from(value: kzg::Error) -> Self {
+        Self::KZGError(value)
+    }
+}
+
+/// A response to `GetPooledTransactions` that includes blob data, their commitments, and their
+/// corresponding proofs.
+///
+/// This is defined in [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#networking) as an element
+/// of a `PooledTransactions` response.
+///
+/// NOTE: This contains a [TransactionSigned], which could be a non-4844 transaction type, even
+/// though that would not make sense. This type is meant to be constructed using decoding methods,
+/// which should always construct the [TransactionSigned] with an EIP-4844 transaction.
+#[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)]
+pub struct BlobTransaction {
+    /// The transaction payload.
+    pub transaction: TransactionSigned,
+    /// The transaction's blob sidecar.
+    pub sidecar: BlobTransactionSidecar,
+}
+
+impl BlobTransaction {
+    /// Verifies that the transaction's blob data, commitments, and proofs are all valid.
+    ///
+    /// Takes as input the [KzgSettings], which should contain the parameters derived from the
+    /// KZG trusted setup.
+    ///
+    /// This ensures that the blob transaction payload has the same number of blob data elements,
+    /// commitments, and proofs. Each blob data element is verified against its commitment and
+    /// proof.
+    ///
+    /// Returns `false` if any blob KZG proof in the response fails to verify, or if the versioned
+    /// hashes in the transaction do not match the actual commitment versioned hashes.
+    pub fn validate(
+        &self,
+        proof_settings: &KzgSettings,
+    ) -> Result<bool, BlobTransactionValidationError> {
+        let inner_tx = match &self.transaction.transaction {
+            Transaction::Eip4844(blob_tx) => blob_tx,
+            non_blob_tx => {
+                return Err(BlobTransactionValidationError::NotBlobTransaction(
+                    non_blob_tx.tx_type(),
+                ))
+            }
+        };
+
+        // Ensure the versioned hashes and commitments have the same length
+        if inner_tx.blob_versioned_hashes.len() != self.sidecar.commitments.len() {
+            return Err(kzg::Error::MismatchLength(format!(
+                "There are {} versioned commitment hashes and {} commitments",
+                inner_tx.blob_versioned_hashes.len(),
+                self.sidecar.commitments.len()
+            ))
+            .into())
+        }
+
+        // zip and iterate, calculating versioned hashes
+        for (versioned_hash, commitment) in
+            inner_tx.blob_versioned_hashes.iter().zip(self.sidecar.commitments.iter())
+        {
+            // convert to KzgCommitment
+            let commitment = KzgCommitment::from(*commitment.deref());
+
+            // Calculate the versioned hash
+            //
+            // TODO: should this method distinguish the type of validation failure? For example
+            // whether a certain versioned hash does not match, or whether the blob proof
+            // validation failed?
+            let calculated_versioned_hash = kzg_to_versioned_hash(commitment);
+            if *versioned_hash != calculated_versioned_hash {
+                return Ok(false)
+            }
+        }
+
+        // Verify as a batch
+        KzgProof::verify_blob_kzg_proof_batch(
+            self.sidecar.blobs.as_slice(),
+            self.sidecar.commitments.as_slice(),
+            self.sidecar.proofs.as_slice(),
+            proof_settings,
+        )
+        .map_err(Into::into)
+    }
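A usage sketch for `validate`. Loading the `KzgSettings` from a trusted setup is environment-specific, so the settings are simply taken as a parameter here; the helper name is illustrative:

use reth_primitives::{kzg::KzgSettings, BlobTransaction};

// Returns true only if every versioned hash matches its commitment and the
// batch KZG proof verifies; `Ok(false)` and all errors (including a non-4844
// inner transaction) are collapsed to false.
fn blob_tx_is_valid(blob_tx: &BlobTransaction, settings: &KzgSettings) -> bool {
    blob_tx.validate(settings).unwrap_or(false)
}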
+    /// Splits the [BlobTransaction] into its [TransactionSigned] and [BlobTransactionSidecar]
+    /// components.
+    pub fn into_parts(self) -> (TransactionSigned, BlobTransactionSidecar) {
+        (self.transaction, self.sidecar)
+    }
+
+    /// Encodes the [BlobTransaction] fields as RLP, with a tx type. If `with_header` is `false`,
+    /// the following will be encoded:
+    /// `tx_type (0x03) || rlp([transaction_payload_body, blobs, commitments, proofs])`
+    ///
+    /// If `with_header` is `true`, the following will be encoded:
+    /// `rlp(tx_type (0x03) || rlp([transaction_payload_body, blobs, commitments, proofs]))`
+    ///
+    /// NOTE: The header will be a byte string header, not a list header.
+    pub(crate) fn encode_with_type_inner(&self, out: &mut dyn bytes::BufMut, with_header: bool) {
+        // Calculate the length of:
+        // `tx_type || rlp([transaction_payload_body, blobs, commitments, proofs])`
+        //
+        // to construct and encode the string header
+        if with_header {
+            Header {
+                list: false,
+                // add one for the tx type
+                payload_length: 1 + self.payload_len(),
+            }
+            .encode(out);
+        }
+
+        out.put_u8(EIP4844_TX_TYPE_ID);
+
+        // Now we encode the inner blob transaction:
+        self.encode_inner(out);
+    }
+
+    /// Encodes the [BlobTransaction] fields as RLP, with the following format:
+    /// `rlp([transaction_payload_body, blobs, commitments, proofs])`
+    ///
+    /// where `transaction_payload_body` is a list:
+    /// `[chain_id, nonce, max_priority_fee_per_gas, ..., y_parity, r, s]`
+    ///
+    /// Note: this should be used only when implementing other RLP encoding methods, and does not
+    /// represent the full RLP encoding of the blob transaction.
+    pub(crate) fn encode_inner(&self, out: &mut dyn bytes::BufMut) {
+        // First we construct both required list headers.
+        //
+        // The `transaction_payload_body` length is the length of the fields, plus the length of
+        // its list header.
+        let tx_header = Header {
+            list: true,
+            payload_length: self.transaction.fields_len() +
+                self.transaction.signature.payload_len(),
+        };
+
+        let tx_length = tx_header.length() + tx_header.payload_length;
+
+        // The payload length is the length of the `transaction_payload_body` list, plus the
+        // length of the blobs, commitments, and proofs.
+        let payload_length = tx_length + self.sidecar.fields_len();
+
+        // First we use the payload len to construct the first list header
+        let blob_tx_header = Header { list: true, payload_length };
+
+        // Encode the blob tx header first
+        blob_tx_header.encode(out);
+
+        // Encode the inner tx list header, then its fields
+        tx_header.encode(out);
+        self.transaction.encode_fields(out);
+
+        // Encode the blobs, commitments, and proofs
+        self.sidecar.encode_inner(out);
+    }
+
+    /// Outputs the length of the RLP encoding of the blob transaction, including the tx type byte,
+    /// optionally including the length of a wrapping string header. If `with_header` is `false`,
+    /// the length of the following will be calculated:
+    /// `tx_type (0x03) || rlp([transaction_payload_body, blobs, commitments, proofs])`
+    ///
+    /// If `with_header` is `true`, the length of the following will be calculated:
+    /// `rlp(tx_type (0x03) || rlp([transaction_payload_body, blobs, commitments, proofs]))`
+    pub(crate) fn payload_len_with_type(&self, with_header: bool) -> usize {
+        if with_header {
+            // Construct a header and use that to calculate the total length
+            let wrapped_header = Header {
+                list: false,
+                // add one for the tx type byte
+                payload_length: 1 + self.payload_len(),
+            };
+
+            // The total length is now the length of the header plus the length of the payload
+            // (which includes the tx type byte)
+            wrapped_header.length() + wrapped_header.payload_length
+        } else {
+            // Just add the length of the tx type to the payload length
+            1 + self.payload_len()
+        }
+    }
+
+    /// Outputs the length of the RLP encoding of the blob transaction with the following format:
+    /// `rlp([transaction_payload_body, blobs, commitments, proofs])`
+    ///
+    /// where `transaction_payload_body` is a list:
+    /// `[chain_id, nonce, max_priority_fee_per_gas, ..., y_parity, r, s]`
+    ///
+    /// Note: this should be used only when implementing other RLP encoding length methods, and
+    /// does not represent the full RLP encoding of the blob transaction.
+    pub(crate) fn payload_len(&self) -> usize {
+        // The `transaction_payload_body` length is the length of the fields, plus the length of
+        // its list header.
+        let tx_header = Header {
+            list: true,
+            payload_length: self.transaction.fields_len() +
+                self.transaction.signature.payload_len(),
+        };
+
+        let tx_length = tx_header.length() + tx_header.payload_length;
+
+        // The payload length is the length of the `transaction_payload_body` list, plus the
+        // length of the blobs, commitments, and proofs.
+        tx_length + self.sidecar.fields_len()
+    }
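To make the nested length computation concrete, a small sketch with assumed sizes (100 bytes of signed tx fields and 131,544 bytes of sidecar fields; both numbers are illustrative), using the same `Header` calls the code above relies on:

use reth_rlp::Header;

fn nested_length_example() {
    let tx_fields_len = 100;
    let sidecar_fields_len = 131_544;

    // Inner list header: payloads of 56 bytes or more use the long form, so 2 header bytes here.
    let tx_header = Header { list: true, payload_length: tx_fields_len };
    assert_eq!(tx_header.length(), 2);
    let tx_length = tx_header.length() + tx_header.payload_length; // 102

    // Outer list header: a 131_646-byte payload needs 3 length bytes, so 4 header bytes total.
    let outer = Header { list: true, payload_length: tx_length + sidecar_fields_len };
    assert_eq!(outer.length(), 4);
    assert_eq!(outer.length() + outer.payload_length, 131_650);
}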
+    /// Decodes a [BlobTransaction] from RLP. This expects the encoding to be:
+    /// `rlp([transaction_payload_body, blobs, commitments, proofs])`
+    ///
+    /// where `transaction_payload_body` is a list:
+    /// `[chain_id, nonce, max_priority_fee_per_gas, ..., y_parity, r, s]`
+    ///
+    /// Note: this should be used only when implementing other RLP decoding methods, and does not
+    /// represent the full RLP decoding of the `PooledTransactionsElement` type.
+    pub(crate) fn decode_inner(data: &mut &[u8]) -> Result<Self, DecodeError> {
+        // decode the _first_ list header for the rest of the transaction
+        let header = Header::decode(data)?;
+        if !header.list {
+            return Err(DecodeError::Custom("PooledTransactions blob tx must be encoded as a list"))
+        }
+
+        // Now we need to decode the inner 4844 transaction and its signature:
+        //
+        // `[chain_id, nonce, max_priority_fee_per_gas, ..., y_parity, r, s]`
+        let header = Header::decode(data)?;
+        if !header.list {
+            return Err(DecodeError::Custom(
+                "PooledTransactions inner blob tx must be encoded as a list",
+            ))
+        }
+
+        // inner transaction
+        let transaction = Transaction::Eip4844(TxEip4844::decode_inner(data)?);
+
+        // signature
+        let signature = Signature::decode(data)?;
+
+        // construct the tx now that we've decoded the fields in order
+        let tx_no_hash = TransactionSignedNoHash { transaction, signature };
+
+        // All that's left are the blobs, commitments, and proofs
+        let sidecar = BlobTransactionSidecar::decode_inner(data)?;
+
+        // # Calculating the hash
+        //
+        // The full encoding of the `PooledTransaction` response is:
+        // `tx_type (0x03) || rlp([tx_payload_body, blobs, commitments, proofs])`
+        //
+        // The transaction hash however, is:
+        // `keccak256(tx_type (0x03) || rlp(tx_payload_body))`
+        //
+        // Note that this is `tx_payload_body`, not `[tx_payload_body]`, which would be
+        // `[[chain_id, nonce, max_priority_fee_per_gas, ...]]`, i.e. a list within a list.
+        //
+        // Because the pooled transaction encoding is different than the hash encoding for
+        // EIP-4844 transactions, we do not use the original buffer to calculate the hash.
+        //
+        // Instead, we use `TransactionSignedNoHash` which will encode the transaction internally.
+        let signed_tx = tx_no_hash.with_hash();
+
+        Ok(Self { transaction: signed_tx, sidecar })
+    }
+}
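Since `encode_inner` and `decode_inner` describe the same format, a cheap crate-internal invariant worth testing is the round trip. A minimal sketch, assuming a `BlobTransaction` value is available (for example from the fixture decoded in the eth-wire tests above):

#[cfg(test)]
mod blob_tx_roundtrip {
    use super::*;

    // decode_inner(encode_inner(tx)) should reproduce the value, including the
    // recomputed hash, because the hash commits to `tx_type || rlp(tx_payload_body)`
    // rather than to the pooled (sidecar-wrapped) encoding.
    fn assert_roundtrip(tx: &BlobTransaction) {
        let mut buf = Vec::new();
        tx.encode_inner(&mut buf);
        let decoded = BlobTransaction::decode_inner(&mut buf.as_slice()).unwrap();
        assert_eq!(tx, &decoded);
    }
}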
+
+/// This represents a set of blobs, and its corresponding commitments and proofs.
+#[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)]
+pub struct BlobTransactionSidecar {
+    /// The blob data.
+    pub blobs: Vec<Blob>,
+    /// The blob commitments.
+    pub commitments: Vec<Bytes48>,
+    /// The blob proofs.
+    pub proofs: Vec<Bytes48>,
+}
+
+impl BlobTransactionSidecar {
+    /// Encodes the inner [BlobTransactionSidecar] fields as RLP bytes, without a RLP header.
+    ///
+    /// This encodes the fields in the following order:
+    /// - `blobs`
+    /// - `commitments`
+    /// - `proofs`
+    pub(crate) fn encode_inner(&self, out: &mut dyn bytes::BufMut) {
+        // Encode the blobs, commitments, and proofs
+        self.blobs.encode(out);
+        self.commitments.encode(out);
+        self.proofs.encode(out);
+    }
+
+    /// Outputs the RLP length of the [BlobTransactionSidecar] fields, without a RLP header.
+    pub(crate) fn fields_len(&self) -> usize {
+        self.blobs.len() + self.commitments.len() + self.proofs.len()
+    }
+
+    /// Decodes the inner [BlobTransactionSidecar] fields from RLP bytes, without a RLP header.
+    ///
+    /// This decodes the fields in the following order:
+    /// - `blobs`
+    /// - `commitments`
+    /// - `proofs`
+    pub(crate) fn decode_inner(buf: &mut &[u8]) -> Result<Self, DecodeError> {
+        Ok(Self {
+            blobs: Decodable::decode(buf)?,
+            commitments: Decodable::decode(buf)?,
+            proofs: Decodable::decode(buf)?,
+        })
+    }
+
+    /// Calculates a size heuristic for the in-memory size of the [BlobTransactionSidecar].
+    #[inline]
+    pub fn size(&self) -> usize {
+        self.blobs.len() * BYTES_PER_BLOB + // blobs
+        self.commitments.len() * BYTES_PER_COMMITMENT + // commitments
+        self.proofs.len() * BYTES_PER_PROOF // proofs
+    }
+}
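For intuition on `size`: each blob accounts for `BYTES_PER_BLOB` (128 KiB in the c-kzg constants imported at the top of this file) plus 48 bytes each for its commitment and proof. A small sketch, assuming those constants are reachable via the same import path:

use crate::kzg::{BYTES_PER_BLOB, BYTES_PER_COMMITMENT, BYTES_PER_PROOF};

// In-memory weight of a sidecar carrying `n` blobs under the heuristic above:
// n * (131_072 + 48 + 48) bytes, so a two-blob sidecar is roughly 256 KiB.
fn sidecar_size_estimate(n: usize) -> usize {
    n * (BYTES_PER_BLOB + BYTES_PER_COMMITMENT + BYTES_PER_PROOF)
}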
diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs
index ed347ce3f46a..7f67f0e1b024 100644
--- a/crates/primitives/src/transaction/mod.rs
+++ b/crates/primitives/src/transaction/mod.rs
@@ -22,8 +22,9 @@ pub use tx_type::{
 
 pub use eip1559::TxEip1559;
 pub use eip2930::TxEip2930;
-pub use eip4844::TxEip4844;
+pub use eip4844::{BlobTransaction, BlobTransactionSidecar, TxEip4844};
 pub use legacy::TxLegacy;
+pub use pooled::PooledTransactionsElement;
 
 mod access_list;
 mod eip1559;
@@ -32,6 +33,7 @@ mod eip4844;
 mod error;
 mod legacy;
 mod meta;
+mod pooled;
 mod signature;
 mod tx_type;
 pub(crate) mod util;
@@ -343,7 +345,7 @@ impl Transaction {
 
     /// Outputs the length of the transaction's fields, without a RLP header or length of the
     /// eip155 fields.
-    pub(crate) fn fields_len(&self) -> usize {
+    pub fn fields_len(&self) -> usize {
         match self {
             Transaction::Legacy(TxLegacy {
                 chain_id: _,
@@ -438,7 +440,7 @@
     }
 
     /// Encodes only the transaction's fields into the desired buffer, without a RLP header.
-    pub(crate) fn encode_fields(&self, out: &mut dyn bytes::BufMut) {
+    pub fn encode_fields(&self, out: &mut dyn bytes::BufMut) {
         match self {
             Transaction::Legacy(TxLegacy {
                 chain_id: _,
@@ -1059,7 +1061,11 @@ impl TransactionSigned {
     /// Decodes legacy transaction from the data buffer.
     ///
     /// This expects `rlp(legacy_tx)`
-    fn decode_rlp_legacy_transaction(data: &mut &[u8]) -> Result<TransactionSigned, DecodeError> {
+    // TODO: make buf advancement semantics consistent with `decode_enveloped_typed_transaction`,
+    // so decoding methods do not need to manually advance the buffer
+    pub fn decode_rlp_legacy_transaction(
+        data: &mut &[u8],
+    ) -> Result<TransactionSigned, DecodeError> {
         // keep this around, so we can use it to calculate the hash
         let original_encoding = *data;
 
@@ -1088,7 +1094,7 @@
     /// Decodes an enveloped EIP-2718 typed transaction.
     ///
     /// CAUTION: this expects that `data` is `[id, rlp(tx)]`
-    fn decode_enveloped_typed_transaction(
+    pub fn decode_enveloped_typed_transaction(
         data: &mut &[u8],
     ) -> Result<TransactionSigned, DecodeError> {
         // keep this around so we can use it to calculate the hash
@@ -1096,6 +1102,7 @@
         let tx_type = *data.first().ok_or(DecodeError::InputTooShort)?;
         data.advance(1);
 
+        // decode the list header for the rest of the transaction
         let header = Header::decode(data)?;
         if !header.list {
@@ -1107,40 +1114,9 @@
         // decode common fields
         let transaction = match tx_type {
-            1 => Transaction::Eip2930(TxEip2930 {
-                chain_id: Decodable::decode(data)?,
-                nonce: Decodable::decode(data)?,
-                gas_price: Decodable::decode(data)?,
-                gas_limit: Decodable::decode(data)?,
-                to: Decodable::decode(data)?,
-                value: Decodable::decode(data)?,
-                input: Bytes(Decodable::decode(data)?),
-                access_list: Decodable::decode(data)?,
-            }),
-            2 => Transaction::Eip1559(TxEip1559 {
-                chain_id: Decodable::decode(data)?,
-                nonce: Decodable::decode(data)?,
-                max_priority_fee_per_gas: Decodable::decode(data)?,
-                max_fee_per_gas: Decodable::decode(data)?,
-                gas_limit: Decodable::decode(data)?,
-                to: Decodable::decode(data)?,
-                value: Decodable::decode(data)?,
-                input: Bytes(Decodable::decode(data)?),
-                access_list: Decodable::decode(data)?,
-            }),
-            3 => Transaction::Eip4844(TxEip4844 {
-                chain_id: Decodable::decode(data)?,
-                nonce: Decodable::decode(data)?,
-                max_priority_fee_per_gas: Decodable::decode(data)?,
-                max_fee_per_gas: Decodable::decode(data)?,
-                gas_limit: Decodable::decode(data)?,
-                to: Decodable::decode(data)?,
-                value: Decodable::decode(data)?,
-                input: Bytes(Decodable::decode(data)?),
-                access_list: Decodable::decode(data)?,
-                max_fee_per_blob_gas: Decodable::decode(data)?,
-                blob_versioned_hashes: Decodable::decode(data)?,
-            }),
+            1 => Transaction::Eip2930(TxEip2930::decode_inner(data)?),
+            2 => Transaction::Eip1559(TxEip1559::decode_inner(data)?),
+            3 => Transaction::Eip4844(TxEip4844::decode_inner(data)?),
             _ => return Err(DecodeError::Custom("unsupported typed transaction type")),
         };
 
diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs
new file mode 100644
index 000000000000..44299f7a560b
--- /dev/null
+++ b/crates/primitives/src/transaction/pooled.rs
@@ -0,0 +1,178 @@
+//! Includes the [`PooledTransactionsElement`] type, the element of a `PooledTransactions` response
+use crate::{BlobTransaction, Bytes, TransactionSigned, EIP4844_TX_TYPE_ID};
+use bytes::Buf;
+use reth_rlp::{Decodable, DecodeError, Encodable, Header, EMPTY_LIST_CODE};
+use serde::{Deserialize, Serialize};
+
+/// A response to `GetPooledTransactions`. This can include either a blob transaction, or a
+/// non-4844 signed transaction.
+// TODO: redo arbitrary for this encoding - the previous encoding was incorrect
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+pub enum PooledTransactionsElement {
+    /// A blob transaction, which includes the transaction, blob data, commitments, and proofs.
+    BlobTransaction(BlobTransaction),
+    /// A non-4844 signed transaction.
+    Transaction(TransactionSigned),
+}
+
+impl PooledTransactionsElement {
+ ///
+ /// The raw transaction is either a legacy transaction or an EIP-2718 typed transaction.
+ /// For legacy transactions, the format is encoded as: `rlp(tx)`
+ /// For EIP-2718 typed transactions, the format is encoded as the type of the transaction
+ /// followed by the rlp of the transaction: `type` + `rlp(tx)`
+ ///
+ /// For encoded EIP-4844 transactions, the blob sidecar _must_ be included.
+ pub fn decode_enveloped(tx: Bytes) -> Result<Self, DecodeError> {
+ let mut data = tx.as_ref();
+
+ if data.is_empty() {
+ return Err(DecodeError::InputTooShort)
+ }
+
+ // Check if the tx is a list - tx types are less than EMPTY_LIST_CODE (0xc0)
+ if data[0] >= EMPTY_LIST_CODE {
+ // decode as legacy transaction
+ Ok(Self::Transaction(TransactionSigned::decode_rlp_legacy_transaction(&mut data)?))
+ } else {
+ // decode the type byte, only decode BlobTransaction if it is a 4844 transaction
+ let tx_type = *data.first().ok_or(DecodeError::InputTooShort)?;
+
+ if tx_type == EIP4844_TX_TYPE_ID {
+ // Recall that the blob transaction response `TransactionPayload` is encoded like
+ // this: `rlp([tx_payload_body, blobs, commitments, proofs])`
+ //
+ // Note that `tx_payload_body` is a list:
+ // `[chain_id, nonce, max_priority_fee_per_gas, ..., y_parity, r, s]`
+ //
+ // This makes the full encoding:
+ // `tx_type (0x03) || rlp([[chain_id, nonce, ...], blobs, commitments, proofs])`
+ //
+ // First, we advance the buffer past the type byte
+ data.advance(1);
+
+ // Now, we decode the inner blob transaction:
+ // `rlp([[chain_id, nonce, ...], blobs, commitments, proofs])`
+ let blob_tx = BlobTransaction::decode_inner(&mut data)?;
+ Ok(PooledTransactionsElement::BlobTransaction(blob_tx))
+ } else {
+ // DO NOT advance the buffer for the type, since we want the enveloped decoding to
+ // decode it again and advance the buffer on its own.
+ let typed_tx = TransactionSigned::decode_enveloped_typed_transaction(&mut data)?;
+ Ok(PooledTransactionsElement::Transaction(typed_tx))
+ }
+ }
+ }
+
+ /// Returns the inner [TransactionSigned].
+ pub fn into_transaction(self) -> TransactionSigned {
+ match self {
+ Self::Transaction(tx) => tx,
+ Self::BlobTransaction(blob_tx) => blob_tx.transaction,
+ }
+ }
+}
+
+impl Encodable for PooledTransactionsElement {
+ /// Encodes an enveloped post EIP-4844 [PooledTransactionsElement].
+ fn encode(&self, out: &mut dyn bytes::BufMut) {
+ match self {
+ Self::Transaction(tx) => tx.encode(out),
+ Self::BlobTransaction(blob_tx) => {
+ // The inner encoding is used with `with_header` set to true, making the final
+ // encoding:
+ // `rlp(tx_type || rlp([transaction_payload_body, blobs, commitments, proofs]))`
+ blob_tx.encode_with_type_inner(out, true);
+ }
+ }
+ }
+
+ fn length(&self) -> usize {
+ match self {
+ Self::Transaction(tx) => tx.length(),
+ Self::BlobTransaction(blob_tx) => {
+ // the encoding uses a header, so we set `with_header` to true
+ blob_tx.payload_len_with_type(true)
+ }
+ }
+ }
+}
+
+impl Decodable for PooledTransactionsElement {
+ /// Decodes an enveloped post EIP-4844 [PooledTransactionsElement].
+ ///
+ /// CAUTION: this expects that `buf` is `[id, rlp(tx)]`
+ fn decode(buf: &mut &[u8]) -> Result<Self, DecodeError> {
+ // From the EIP-4844 spec:
+ // Blob transactions have two network representations.
+ // During transaction gossip responses (`PooledTransactions`), the EIP-2718
+ // `TransactionPayload` of the blob transaction is wrapped to become:
+ //
+ // `rlp([tx_payload_body, blobs, commitments, proofs])`
+ //
+ // This means the full wire encoding is:
+ // `rlp(tx_type || rlp([transaction_payload_body, blobs, commitments, proofs]))`
+ //
+ // First, we check whether the transaction is a legacy transaction.
+ if buf.is_empty() {
+ return Err(DecodeError::InputTooShort)
+ }
+
+ // keep this around for buffer advancement post-legacy decoding
+ let mut original_encoding = *buf;
+
+ // If the header is a list header, it is a legacy transaction. Otherwise, it is a typed
+ // transaction
+ let header = Header::decode(buf)?;
+
+ // Check if the tx is a list
+ if header.list {
+ // decode as legacy transaction
+ let legacy_tx =
+ TransactionSigned::decode_rlp_legacy_transaction(&mut original_encoding)?;
+
+ // advance the buffer based on how far `decode_rlp_legacy_transaction` advanced the
+ // buffer
+ *buf = original_encoding;
+
+ Ok(PooledTransactionsElement::Transaction(legacy_tx))
+ } else {
+ // decode the type byte, only decode BlobTransaction if it is a 4844 transaction
+ let tx_type = *buf.first().ok_or(DecodeError::InputTooShort)?;
+
+ if tx_type == EIP4844_TX_TYPE_ID {
+ // Recall that the blob transaction response `TransactionPayload` is encoded like
+ // this: `rlp([tx_payload_body, blobs, commitments, proofs])`
+ //
+ // Note that `tx_payload_body` is a list:
+ // `[chain_id, nonce, max_priority_fee_per_gas, ..., y_parity, r, s]`
+ //
+ // This makes the full encoding:
+ // `tx_type (0x03) || rlp([[chain_id, nonce, ...], blobs, commitments, proofs])`
+ //
+ // First, we advance the buffer past the type byte
+ buf.advance(1);
+
+ // Now, we decode the inner blob transaction:
+ // `rlp([[chain_id, nonce, ...], blobs, commitments, proofs])`
+ let blob_tx = BlobTransaction::decode_inner(buf)?;
+ Ok(PooledTransactionsElement::BlobTransaction(blob_tx))
+ } else {
+ // DO NOT advance the buffer for the type, since we want the enveloped decoding to
+ // decode it again and advance the buffer on its own.
+ let typed_tx = TransactionSigned::decode_enveloped_typed_transaction(buf)?;
+ Ok(PooledTransactionsElement::Transaction(typed_tx))
+ }
+ }
+ }
+}
+
+impl From<TransactionSigned> for PooledTransactionsElement {
+ /// Converts from a [TransactionSigned] to a [PooledTransactionsElement].
+ ///
+ /// NOTE: This will always return a [PooledTransactionsElement::Transaction] variant.
+ fn from(tx: TransactionSigned) -> Self {
+ Self::Transaction(tx)
+ }
+}
diff --git a/crates/primitives/src/transaction/signature.rs b/crates/primitives/src/transaction/signature.rs
index 6977d8c26182..cfe639a04e9f 100644
--- a/crates/primitives/src/transaction/signature.rs
+++ b/crates/primitives/src/transaction/signature.rs
@@ -93,19 +93,19 @@ impl Signature {
 }

 /// Output the length of the signature without the length of the RLP header
- pub(crate) fn payload_len(&self) -> usize {
+ pub fn payload_len(&self) -> usize {
 self.odd_y_parity.length() + self.r.length() + self.s.length()
 }

 /// Encode the `odd_y_parity`, `r`, `s` values without a RLP header.
- pub(crate) fn encode(&self, out: &mut dyn reth_rlp::BufMut) {
+ pub fn encode(&self, out: &mut dyn reth_rlp::BufMut) {
 self.odd_y_parity.encode(out);
 self.r.encode(out);
 self.s.encode(out);
 }

 /// Decodes the `odd_y_parity`, `r`, `s` values without a RLP header.
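The legacy-versus-typed dispatch above rests on a single RLP fact: a legacy transaction is itself an RLP list, so its first byte is at least 0xc0, while EIP-2718 keeps type bytes below that range. A minimal standalone sketch of the rule (`WireFormat` and `classify` are illustrative names, not part of this patch):

```rust
/// 0xc0 is the RLP code for an empty list; list payloads start at or above it.
const EMPTY_LIST_CODE: u8 = 0xc0;

enum WireFormat {
    /// `rlp(legacy_tx)`: the payload itself is an RLP list.
    Legacy,
    /// `type || rlp(tx)`: an EIP-2718 envelope with the given type byte.
    Typed(u8),
}

fn classify(first_byte: u8) -> WireFormat {
    if first_byte >= EMPTY_LIST_CODE {
        WireFormat::Legacy
    } else {
        WireFormat::Typed(first_byte)
    }
}
```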
- pub(crate) fn decode(buf: &mut &[u8]) -> Result { + pub fn decode(buf: &mut &[u8]) -> Result { Ok(Signature { odd_y_parity: Decodable::decode(buf)?, r: Decodable::decode(buf)?, From d427ade178e13a2dd9770734b77f6941459537d8 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 17 Aug 2023 01:42:11 +0200 Subject: [PATCH 450/722] fix: propagate promoted transactions (#4236) --- crates/transaction-pool/src/pool/mod.rs | 71 ++++++++++++++----------- 1 file changed, 41 insertions(+), 30 deletions(-) diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 827963369010..7bb1e9f30bbd 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -322,8 +322,8 @@ where let hash = *added.hash(); // Notify about new pending transactions - if added.is_pending() { - self.on_new_pending_transaction(&added); + if let Some(pending) = added.as_pending() { + self.on_new_pending_transaction(pending); } // Notify tx event listeners @@ -394,8 +394,7 @@ where } /// Notify all listeners about a new pending transaction. - fn on_new_pending_transaction(&self, pending: &AddedTransaction) { - let tx_hash = *pending.hash(); + fn on_new_pending_transaction(&self, pending: &AddedPendingTransaction) { let propagate_allowed = pending.is_propagate_allowed(); let mut transaction_listeners = self.pending_transaction_listener.lock(); @@ -406,25 +405,29 @@ where return !listener.sender.is_closed() } - match listener.sender.try_send(tx_hash) { - Ok(()) => true, - Err(err) => { - if matches!(err, mpsc::error::TrySendError::Full(_)) { - debug!( - target: "txpool", - "[{:?}] failed to send pending tx; channel full", - tx_hash, - ); - true - } else { - false + // broadcast all pending transactions to the listener + for tx_hash in pending.pending_transactions() { + match listener.sender.try_send(tx_hash) { + Ok(()) => {} + Err(err) => { + return if matches!(err, mpsc::error::TrySendError::Full(_)) { + debug!( + target: "txpool", + "[{:?}] failed to send pending tx; channel full", + tx_hash, + ); + true + } else { + false + } } } } + true }); } - /// Notify all listeners about a new pending transaction. + /// Notify all listeners about a newly inserted pending transaction. fn on_new_transaction(&self, event: NewTransactionEvent) { let mut transaction_listeners = self.transaction_listener.lock(); @@ -455,7 +458,7 @@ where discarded.iter().for_each(|tx| listener.discarded(tx)); } - /// Fire events for the newly added transaction. + /// Fire events for the newly added transaction if there are any. fn notify_event_listeners(&self, tx: &AddedTransaction) { let mut listener = self.event_listener.write(); @@ -601,12 +604,24 @@ pub struct AddedPendingTransaction { transaction: Arc>, /// Replaced transaction. replaced: Option>>, - /// transactions promoted to the ready queue - promoted: Vec, + /// transactions promoted to the pending queue + promoted: Vec, /// transaction that failed and became discarded discarded: Vec, } +impl AddedPendingTransaction { + /// Returns all transactions that were promoted to the pending pool + pub(crate) fn pending_transactions(&self) -> impl Iterator + '_ { + std::iter::once(self.transaction.hash()).chain(self.promoted.iter()).copied() + } + + /// Returns if the transaction should be propagated. 
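The `retain_mut` loop in the listener notification above folds two concerns into one pass: delivering every promoted hash and pruning listeners whose receivers are gone. A simplified sketch of that pattern, assuming tokio's `mpsc` and plain `u64` stand-ins for the pool's hash type:

```rust
use tokio::sync::mpsc::{error::TrySendError, Sender};

/// Deliver `hashes` to every listener; drop listeners whose channel is
/// closed, but keep ones that are merely full (they may drain later).
fn broadcast_pending(listeners: &mut Vec<Sender<u64>>, hashes: &[u64]) {
    listeners.retain_mut(|listener| {
        for &hash in hashes {
            match listener.try_send(hash) {
                Ok(()) => {}
                // channel full: stop sending to this listener for now, keep it
                Err(TrySendError::Full(_)) => return true,
                // receiver dropped: remove the listener
                Err(TrySendError::Closed(_)) => return false,
            }
        }
        true
    });
}
```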
+ pub(crate) fn is_propagate_allowed(&self) -> bool {
+ self.transaction.propagate
+ }
+}
+
 /// Represents a transaction that was added into the pool and its state
 #[derive(Debug, Clone)]
 pub enum AddedTransaction {
@@ -625,9 +640,12 @@ pub enum AddedTransaction {
 }

 impl AddedTransaction {
- /// Returns whether the transaction is pending
- pub(crate) fn is_pending(&self) -> bool {
- matches!(self, AddedTransaction::Pending(_))
+ /// Returns whether the transaction has been added to the pending pool.
+ pub(crate) fn as_pending(&self) -> Option<&AddedPendingTransaction> {
+ match self {
+ AddedTransaction::Pending(tx) => Some(tx),
+ _ => None,
+ }
 }

 /// Returns the hash of the transaction
@@ -637,13 +655,6 @@ impl AddedTransaction {
 AddedTransaction::Parked { transaction, .. } => transaction.hash(),
 }
 }
- /// Returns if the transaction should be propagated.
- pub(crate) fn is_propagate_allowed(&self) -> bool {
- match self {
- AddedTransaction::Pending(transaction) => transaction.transaction.propagate,
- AddedTransaction::Parked { transaction, .. } => transaction.propagate,
- }
- }

 /// Converts this type into the event type for listeners
 pub(crate) fn into_new_transaction_event(self) -> NewTransactionEvent<T::Transaction> {
From 75da6528a94be40930ae7a7f7d84955d4899cef0 Mon Sep 17 00:00:00 2001
From: Dan Cline <6798349+Rjected@users.noreply.github.com>
Date: Thu, 17 Aug 2023 06:11:26 -0400
Subject: [PATCH 451/722] fix: return None instead of BlockBodyIndicesNotFound
 in BlockReader (#4239)

---
 .../src/providers/database/provider.rs | 33 +++++++++++++++----
 1 file changed, 26 insertions(+), 7 deletions(-)

diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs
index 1d80bf4662f8..0ac58a3b7db2 100644
--- a/crates/storage/provider/src/providers/database/provider.rs
+++ b/crates/storage/provider/src/providers/database/provider.rs
@@ -938,14 +938,24 @@ impl<'this, TX: DbTx<'this>> BlockReader for DatabaseProvider<'this, TX> {
 }
 }

+ /// Returns the block with matching number from database.
+ ///
+ /// If the header for this block is not found, this returns `None`.
+ /// If the header is found, but the transactions either do not exist, or are not indexed, this
+ /// will return None.
 fn block(&self, id: BlockHashOrNumber) -> Result<Option<Block>> {
 if let Some(number) = self.convert_hash_or_number(id)? {
 if let Some(header) = self.header_by_number(number)? {
 let withdrawals = self.withdrawals_by_block(number.into(), header.timestamp)?;
 let ommers = self.ommers(number.into())?.unwrap_or_default();
- let transactions = self
- .transactions_by_block(number.into())?
- .ok_or(ProviderError::BlockBodyIndicesNotFound(number))?;
+ // If the body indices are not found, this means that the transactions either do not
+ // exist in the database yet, or they do exist but are not indexed.
+ // If they exist but are not indexed, we don't have enough
+ // information to return the block anyways, so we return `None`.
+ let transactions = match self.transactions_by_block(number.into())? {
+ Some(transactions) => transactions,
+ None => return Ok(None),
+ };

 return Ok(Some(Block { header, body: transactions, ommers, withdrawals }))
 }
@@ -986,7 +996,9 @@ impl<'this, TX: DbTx<'this>> BlockReader for DatabaseProvider<'this, TX> {
 /// **NOTE: The transactions have invalid hashes, since they would need to be calculated on the
 /// spot, and we want fast querying.**
 ///
- /// Returns `None` if block is not found.
+ /// If the header for this block is not found, this returns `None`.
+ /// If the header is found, but the transactions either do not exist, or are not indexed, this
+ /// will return None.
 fn block_with_senders(&self, block_number: BlockNumber) -> Result<Option<BlockWithSenders>> {
 let header = self
 .header_by_number(block_number)?
@@ -996,9 +1008,16 @@ impl<'this, TX: DbTx<'this>> BlockReader for DatabaseProvider<'this, TX> {
 let withdrawals = self.withdrawals_by_block(block_number.into(), header.timestamp)?;

 // Get the block body
- let body = self
- .block_body_indices(block_number)?
- .ok_or(ProviderError::BlockBodyIndicesNotFound(block_number))?;
+ //
+ // If the body indices are not found, this means that the transactions either do not exist
+ // in the database yet, or they do exist but are not indexed. If they exist but are not
+ // indexed, we don't have enough information to return the block anyways, so we return
+ // `None`.
+ let body = match self.block_body_indices(block_number)? {
+ Some(body) => body,
+ None => return Ok(None),
+ };
+
 let tx_range = body.tx_num_range();

 let (transactions, senders) = if tx_range.is_empty() {
From 2d7c4203c8c344003e09dc38daf774fba5b2d22d Mon Sep 17 00:00:00 2001
From: "Protocolwhisper.eth" <57886661+protocolwhisper@users.noreply.github.com>
Date: Thu, 17 Aug 2023 05:19:39 -0500
Subject: [PATCH 452/722] (fix): Impl. Lagged error in pool update channel
 (#4242)

---
 crates/transaction-pool/src/pool/best.rs | 31 +++++++++++++++---------
 1 file changed, 19 insertions(+), 12 deletions(-)

diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs
index 61b47d44f2e4..7beb11e8ec0a 100644
--- a/crates/transaction-pool/src/pool/best.rs
+++ b/crates/transaction-pool/src/pool/best.rs
@@ -7,7 +7,7 @@ use std::{
 collections::{BTreeMap, BTreeSet, HashSet},
 sync::Arc,
 };
-use tokio::sync::broadcast::Receiver;
+use tokio::sync::broadcast::{error::TryRecvError, Receiver};
 use tracing::debug;

 /// An iterator that returns transactions that can be executed on the current state (*best*
@@ -86,17 +86,24 @@ impl<T: TransactionOrdering> BestTransactions<T> {
 /// Non-blocking read on the new pending transactions subscription channel
 fn try_recv(&mut self) -> Option<Arc<ValidPoolTransaction<T::Transaction>>> {
- match self.new_transaction_reciever.try_recv() {
- Ok(tx) => Some(tx),
- // note TryRecvError::Lagged can be returned here, which is an error that attempts to
- // correct itself on consecutive try_recv() attempts
-
- // the cost of ignoring this error is allowing old transactions to get
- // overwritten after the chan buffer size is met
-
- // this case is still better than the existing iterator behavior where no new
- // pending txs are surfaced to consumers
- Err(_) => None,
+ loop {
+ match self.new_transaction_reciever.try_recv() {
+ Ok(tx) => return Some(tx),
+ // note TryRecvError::Lagged can be returned here, which is an error that attempts
+ // to correct itself on consecutive try_recv() attempts
+
+ // the cost of ignoring this error is allowing old transactions to get
+ // overwritten after the chan buffer size is met
+ Err(TryRecvError::Lagged(_)) => {
+ // Handle the case where the receiver lagged too far behind; the
+ // `Lagged` payload carries the number of skipped messages.
+ continue
+ }
+
+ // this case is still better than the existing iterator behavior where no new
+ // pending txs are surfaced to consumers
+ Err(_) => return None,
+ }
+ }
 }
From 639a6eac1797c87206ab4cd8a6dafef9a137264a Mon Sep 17 00:00:00 2001
From: Dan Cline <6798349+Rjected@users.noreply.github.com>
Date: Thu, 17 Aug 2023 06:23:37 -0400
Subject: [PATCH 453/722] feat: refactor PooledTransactionsElement into typed
 variants (#4241)

---
 crates/primitives/src/transaction/eip1559.rs | 60 ++++-
 crates/primitives/src/transaction/eip2930.rs | 58 ++++-
 crates/primitives/src/transaction/eip4844.rs | 108 ++++--
 crates/primitives/src/transaction/legacy.rs | 43 +++-
 crates/primitives/src/transaction/mod.rs | 251 ++++---------
 crates/primitives/src/transaction/pooled.rs | 197 +++++++++++++--
 6 files changed, 467 insertions(+), 250 deletions(-)

diff --git a/crates/primitives/src/transaction/eip1559.rs b/crates/primitives/src/transaction/eip1559.rs
index 3a8dcec7a8a2..31072cd326b3 100644
--- a/crates/primitives/src/transaction/eip1559.rs
+++ b/crates/primitives/src/transaction/eip1559.rs
@@ -1,7 +1,7 @@
 use super::access_list::AccessList;
-use crate::{Bytes, ChainId, TransactionKind};
+use crate::{Bytes, ChainId, Signature, TransactionKind, TxType};
 use reth_codecs::{main_codec, Compact};
-use reth_rlp::{Decodable, DecodeError};
+use reth_rlp::{length_of_length, Decodable, DecodeError, Encodable, Header};
 use std::mem;

 /// A transaction with a priority fee ([EIP-1559](https://eips.ethereum.org/EIPS/eip-1559)).
@@ -111,6 +111,62 @@ impl TxEip1559 {
 })
 }

+ /// Outputs the length of the transaction's fields, without a RLP header.
+ pub(crate) fn fields_len(&self) -> usize {
+ let mut len = 0;
+ len += self.chain_id.length();
+ len += self.nonce.length();
+ len += self.max_priority_fee_per_gas.length();
+ len += self.max_fee_per_gas.length();
+ len += self.gas_limit.length();
+ len += self.to.length();
+ len += self.value.length();
+ len += self.input.0.length();
+ len += self.access_list.length();
+ len
+ }
+
+ /// Encodes only the transaction's fields into the desired buffer, without a RLP header.
+ pub(crate) fn encode_fields(&self, out: &mut dyn bytes::BufMut) {
+ self.chain_id.encode(out);
+ self.nonce.encode(out);
+ self.max_priority_fee_per_gas.encode(out);
+ self.max_fee_per_gas.encode(out);
+ self.gas_limit.encode(out);
+ self.to.encode(out);
+ self.value.encode(out);
+ self.input.0.encode(out);
+ self.access_list.encode(out);
+ }
+
+ /// Inner encoding function that is used for both rlp [`Encodable`] trait and for calculating
+ /// hash that for eip2718 does not require rlp header
+ pub(crate) fn encode_with_signature(
+ &self,
+ signature: &Signature,
+ out: &mut dyn bytes::BufMut,
+ with_header: bool,
+ ) {
+ let payload_length = self.fields_len() + signature.payload_len();
+ if with_header {
+ Header {
+ list: false,
+ payload_length: 1 + length_of_length(payload_length) + payload_length,
+ }
+ .encode(out);
+ }
+ out.put_u8(self.tx_type() as u8);
+ let header = Header { list: true, payload_length };
+ header.encode(out);
+ self.encode_fields(out);
+ signature.encode(out);
+ }
+
+ /// Get transaction type
+ pub(crate) fn tx_type(&self) -> TxType {
+ TxType::EIP1559
+ }
+
 /// Calculates a heuristic for the in-memory size of the [TxEip1559] transaction.
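The header arithmetic in `encode_with_signature` above deserves a closer look: for RLP contexts the envelope `type || rlp([fields.., signature])` is itself wrapped as an RLP string, so the outer header must account for the type byte plus the inner list. A sketch of the computation, assuming `length_of_length` from `reth_rlp` returns the size of the RLP header needed for a payload of the given length:

```rust
use reth_rlp::length_of_length;

/// Total bytes of the enveloped encoding `outer header || type || rlp-list`
/// for an inner list of `payload_length` bytes (fields plus signature).
fn enveloped_len(payload_length: usize) -> usize {
    // string payload = 1 type byte + inner list header + inner list payload
    let string_payload = 1 + length_of_length(payload_length) + payload_length;
    length_of_length(string_payload) + string_payload
}
```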
#[inline] pub fn size(&self) -> usize { diff --git a/crates/primitives/src/transaction/eip2930.rs b/crates/primitives/src/transaction/eip2930.rs index 16029ac97dd3..074b2fc5b36e 100644 --- a/crates/primitives/src/transaction/eip2930.rs +++ b/crates/primitives/src/transaction/eip2930.rs @@ -1,7 +1,7 @@ use super::access_list::AccessList; -use crate::{Bytes, ChainId, TransactionKind}; +use crate::{Bytes, ChainId, Signature, TransactionKind, TxType}; use reth_codecs::{main_codec, Compact}; -use reth_rlp::{Decodable, DecodeError}; +use reth_rlp::{length_of_length, Decodable, DecodeError, Encodable, Header}; use std::mem; /// Transaction with an [`AccessList`] ([EIP-2930](https://eips.ethereum.org/EIPS/eip-2930)). @@ -91,6 +91,60 @@ impl TxEip2930 { access_list: Decodable::decode(buf)?, }) } + + /// Outputs the length of the transaction's fields, without a RLP header. + pub(crate) fn fields_len(&self) -> usize { + let mut len = 0; + len += self.chain_id.length(); + len += self.nonce.length(); + len += self.gas_price.length(); + len += self.gas_limit.length(); + len += self.to.length(); + len += self.value.length(); + len += self.input.0.length(); + len += self.access_list.length(); + len + } + + /// Encodes only the transaction's fields into the desired buffer, without a RLP header. + pub(crate) fn encode_fields(&self, out: &mut dyn bytes::BufMut) { + self.chain_id.encode(out); + self.nonce.encode(out); + self.gas_price.encode(out); + self.gas_limit.encode(out); + self.to.encode(out); + self.value.encode(out); + self.input.0.encode(out); + self.access_list.encode(out); + } + + /// Inner encoding function that is used for both rlp [`Encodable`] trait and for calculating + /// hash that for eip2718 does not require rlp header + pub(crate) fn encode_with_signature( + &self, + signature: &Signature, + out: &mut dyn bytes::BufMut, + with_header: bool, + ) { + let payload_length = self.fields_len() + signature.payload_len(); + if with_header { + Header { + list: false, + payload_length: 1 + length_of_length(payload_length) + payload_length, + } + .encode(out); + } + out.put_u8(self.tx_type() as u8); + let header = Header { list: true, payload_length }; + header.encode(out); + self.encode_fields(out); + signature.encode(out); + } + + /// Get transaction type + pub(crate) fn tx_type(&self) -> TxType { + TxType::EIP2930 + } } #[cfg(test)] diff --git a/crates/primitives/src/transaction/eip4844.rs b/crates/primitives/src/transaction/eip4844.rs index b09e019a76b8..31573d4919f5 100644 --- a/crates/primitives/src/transaction/eip4844.rs +++ b/crates/primitives/src/transaction/eip4844.rs @@ -1,15 +1,16 @@ use super::access_list::AccessList; use crate::{ constants::eip4844::DATA_GAS_PER_BLOB, + keccak256, kzg::{ self, Blob, Bytes48, KzgCommitment, KzgProof, KzgSettings, BYTES_PER_BLOB, BYTES_PER_COMMITMENT, BYTES_PER_PROOF, }, kzg_to_versioned_hash, Bytes, ChainId, Signature, Transaction, TransactionKind, - TransactionSigned, TransactionSignedNoHash, TxType, EIP4844_TX_TYPE_ID, H256, + TransactionSigned, TxHash, TxType, EIP4844_TX_TYPE_ID, H256, }; use reth_codecs::{main_codec, Compact}; -use reth_rlp::{Decodable, DecodeError, Encodable, Header}; +use reth_rlp::{length_of_length, Decodable, DecodeError, Encodable, Header}; use serde::{Deserialize, Serialize}; use std::{mem, ops::Deref}; @@ -142,6 +143,38 @@ impl TxEip4844 { }) } + /// Outputs the length of the transaction's fields, without a RLP header. 
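Each `fields_len`/`encode_fields` pair introduced by this patch carries an implicit contract: the length function must predict exactly how many bytes the encoder writes, because RLP headers are computed from it before encoding. A test-style sketch of that invariant for the EIP-2930 type (crate-internal visibility assumed, since both methods are `pub(crate)`):

```rust
/// Test-style helper: `encode_fields` must write exactly `fields_len` bytes,
/// otherwise every RLP header derived from `fields_len` would be wrong.
fn assert_fields_len_matches(tx: &TxEip2930) {
    let mut buf = Vec::new();
    tx.encode_fields(&mut buf);
    assert_eq!(buf.len(), tx.fields_len());
}
```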
+ pub(crate) fn fields_len(&self) -> usize { + let mut len = 0; + len += self.chain_id.length(); + len += self.nonce.length(); + len += self.gas_limit.length(); + len += self.max_fee_per_gas.length(); + len += self.max_priority_fee_per_gas.length(); + len += self.to.length(); + len += self.value.length(); + len += self.access_list.length(); + len += self.blob_versioned_hashes.length(); + len += self.max_fee_per_blob_gas.length(); + len += self.input.0.length(); + len + } + + /// Encodes only the transaction's fields into the desired buffer, without a RLP header. + pub(crate) fn encode_fields(&self, out: &mut dyn bytes::BufMut) { + self.chain_id.encode(out); + self.nonce.encode(out); + self.max_priority_fee_per_gas.encode(out); + self.max_fee_per_gas.encode(out); + self.gas_limit.encode(out); + self.to.encode(out); + self.value.encode(out); + self.input.0.encode(out); + self.access_list.encode(out); + self.max_fee_per_blob_gas.encode(out); + self.blob_versioned_hashes.encode(out); + } + /// Calculates a heuristic for the in-memory size of the [TxEip4844] transaction. #[inline] pub fn size(&self) -> usize { @@ -157,6 +190,34 @@ impl TxEip4844 { self.blob_versioned_hashes.capacity() * mem::size_of::() + // blob hashes size mem::size_of::() // max_fee_per_data_gas } + + /// Inner encoding function that is used for both rlp [`Encodable`] trait and for calculating + /// hash that for eip2718 does not require rlp header + pub(crate) fn encode_with_signature( + &self, + signature: &Signature, + out: &mut dyn bytes::BufMut, + with_header: bool, + ) { + let payload_length = self.fields_len() + signature.payload_len(); + if with_header { + Header { + list: false, + payload_length: 1 + length_of_length(payload_length) + payload_length, + } + .encode(out); + } + out.put_u8(self.tx_type() as u8); + let header = Header { list: true, payload_length }; + header.encode(out); + self.encode_fields(out); + signature.encode(out); + } + + /// Get transaction type + pub(crate) fn tx_type(&self) -> TxType { + TxType::EIP4844 + } } /// An error that can occur when validating a [BlobTransaction]. @@ -185,8 +246,12 @@ impl From for BlobTransactionValidationError { /// which should always construct the [TransactionSigned] with an EIP-4844 transaction. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct BlobTransaction { + /// The transaction hash. + pub hash: TxHash, /// The transaction payload. - pub transaction: TransactionSigned, + pub transaction: TxEip4844, + /// The transaction signature. + pub signature: Signature, /// The transaction's blob sidecar. pub sidecar: BlobTransactionSidecar, } @@ -207,14 +272,7 @@ impl BlobTransaction { &self, proof_settings: &KzgSettings, ) -> Result { - let inner_tx = match &self.transaction.transaction { - Transaction::Eip4844(blob_tx) => blob_tx, - non_blob_tx => { - return Err(BlobTransactionValidationError::NotBlobTransaction( - non_blob_tx.tx_type(), - )) - } - }; + let inner_tx = &self.transaction; // Ensure the versioned hashes and commitments have the same length if inner_tx.blob_versioned_hashes.len() != self.sidecar.commitments.len() { @@ -257,7 +315,13 @@ impl BlobTransaction { /// Splits the [BlobTransaction] into its [TransactionSigned] and [BlobTransactionSidecar] /// components. 
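The validation above checks each `blob_versioned_hashes` entry against its KZG commitment. Under EIP-4844 that mapping is SHA-256 of the commitment with the first byte overwritten by the version tag; a sketch using the `sha2` crate for illustration:

```rust
use sha2::{Digest, Sha256};

const VERSIONED_HASH_VERSION_KZG: u8 = 0x01;

/// EIP-4844 versioned hash: sha256(commitment) with byte 0 set to 0x01.
fn versioned_hash(commitment: &[u8; 48]) -> [u8; 32] {
    let mut hash: [u8; 32] = Sha256::digest(commitment).into();
    hash[0] = VERSIONED_HASH_VERSION_KZG;
    hash
}
```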
pub fn into_parts(self) -> (TransactionSigned, BlobTransactionSidecar) { - (self.transaction, self.sidecar) + let transaction = TransactionSigned { + transaction: Transaction::Eip4844(self.transaction), + hash: self.hash, + signature: self.signature, + }; + + (transaction, self.sidecar) } /// Encodes the [BlobTransaction] fields as RLP, with a tx type. If `with_header` is `false`, @@ -303,8 +367,7 @@ impl BlobTransaction { // its list header. let tx_header = Header { list: true, - payload_length: self.transaction.fields_len() + - self.transaction.signature.payload_len(), + payload_length: self.transaction.fields_len() + self.signature.payload_len(), }; let tx_length = tx_header.length() + tx_header.payload_length; @@ -365,8 +428,7 @@ impl BlobTransaction { // its list header. let tx_header = Header { list: true, - payload_length: self.transaction.fields_len() + - self.transaction.signature.payload_len(), + payload_length: self.transaction.fields_len() + self.signature.payload_len(), }; let tx_length = tx_header.length() + tx_header.payload_length; @@ -402,14 +464,11 @@ impl BlobTransaction { } // inner transaction - let transaction = Transaction::Eip4844(TxEip4844::decode_inner(data)?); + let transaction = TxEip4844::decode_inner(data)?; // signature let signature = Signature::decode(data)?; - // construct the tx now that we've decoded the fields in order - let tx_no_hash = TransactionSignedNoHash { transaction, signature }; - // All that's left are the blobs, commitments, and proofs let sidecar = BlobTransactionSidecar::decode_inner(data)?; @@ -427,10 +486,13 @@ impl BlobTransaction { // Because the pooled transaction encoding is different than the hash encoding for // EIP-4844 transactions, we do not use the original buffer to calculate the hash. // - // Instead, we use `TransactionSignedNoHash` which will encode the transaction internally. - let signed_tx = tx_no_hash.with_hash(); + // Instead, we use `encode_with_signature`, which RLP encodes the transaction with a + // signature for hashing without a header. We then hash the result. + let mut buf = Vec::new(); + transaction.encode_with_signature(&signature, &mut buf, false); + let hash = keccak256(&buf); - Ok(Self { transaction: signed_tx, sidecar }) + Ok(Self { transaction, hash, signature, sidecar }) } } diff --git a/crates/primitives/src/transaction/legacy.rs b/crates/primitives/src/transaction/legacy.rs index cd324732bd5e..ad9d4b141fe3 100644 --- a/crates/primitives/src/transaction/legacy.rs +++ b/crates/primitives/src/transaction/legacy.rs @@ -1,5 +1,6 @@ -use crate::{Bytes, ChainId, TransactionKind}; +use crate::{Bytes, ChainId, Signature, TransactionKind, TxType}; use reth_codecs::{main_codec, Compact}; +use reth_rlp::{Encodable, Header}; use std::mem; /// Legacy transaction. @@ -56,6 +57,46 @@ impl TxLegacy { mem::size_of::() + // value self.input.len() // input } + + /// Outputs the length of the transaction's fields, without a RLP header or length of the + /// eip155 fields. + pub(crate) fn fields_len(&self) -> usize { + let mut len = 0; + len += self.nonce.length(); + len += self.gas_price.length(); + len += self.gas_limit.length(); + len += self.to.length(); + len += self.value.length(); + len += self.input.0.length(); + len + } + + /// Encodes only the transaction's fields into the desired buffer, without a RLP header or + /// eip155 fields. 
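`decode_inner` above recomputes the hash from a fresh encoding because the pooled wire format, which carries blobs, is not the canonical hash preimage. For a typed transaction the preimage is the bare EIP-2718 envelope, i.e. the type byte followed by the RLP list of fields and signature; a sketch of that rule with a hypothetical helper:

```rust
use reth_primitives::{keccak256, H256};

/// Hash preimage for a typed transaction: the type byte followed by the
/// RLP list of fields and signature (`rlp_payload`), with no outer header.
fn typed_tx_hash(tx_type: u8, rlp_payload: &[u8]) -> H256 {
    let mut buf = Vec::with_capacity(1 + rlp_payload.len());
    buf.push(tx_type);
    buf.extend_from_slice(rlp_payload);
    keccak256(&buf)
}
```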
+ pub(crate) fn encode_fields(&self, out: &mut dyn bytes::BufMut) { + self.nonce.encode(out); + self.gas_price.encode(out); + self.gas_limit.encode(out); + self.to.encode(out); + self.value.encode(out); + self.input.0.encode(out); + } + + /// Inner encoding function that is used for both rlp [`Encodable`] trait and for calculating + /// hash. + pub(crate) fn encode_with_signature(&self, signature: &Signature, out: &mut dyn bytes::BufMut) { + let payload_length = + self.fields_len() + signature.payload_len_with_eip155_chain_id(self.chain_id); + let header = Header { list: true, payload_length }; + header.encode(out); + self.encode_fields(out); + signature.encode_with_eip155_chain_id(out, self.chain_id); + } + + /// Get transaction type + pub(crate) fn tx_type(&self) -> TxType { + TxType::Legacy + } } #[cfg(test)] diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 7f67f0e1b024..e771c5ab2335 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -144,10 +144,10 @@ impl Transaction { /// Get transaction type pub fn tx_type(&self) -> TxType { match self { - Transaction::Legacy { .. } => TxType::Legacy, - Transaction::Eip2930 { .. } => TxType::EIP2930, - Transaction::Eip1559 { .. } => TxType::EIP1559, - Transaction::Eip4844 { .. } => TxType::EIP4844, + Transaction::Legacy(legacy_tx) => legacy_tx.tx_type(), + Transaction::Eip2930(access_list_tx) => access_list_tx.tx_type(), + Transaction::Eip1559(dynamic_fee_tx) => dynamic_fee_tx.tx_type(), + Transaction::Eip4844(blob_tx) => blob_tx.tx_type(), } } @@ -345,184 +345,22 @@ impl Transaction { /// Outputs the length of the transaction's fields, without a RLP header or length of the /// eip155 fields. - pub fn fields_len(&self) -> usize { + pub(crate) fn fields_len(&self) -> usize { match self { - Transaction::Legacy(TxLegacy { - chain_id: _, - nonce, - gas_price, - gas_limit, - to, - value, - input, - }) => { - let mut len = 0; - len += nonce.length(); - len += gas_price.length(); - len += gas_limit.length(); - len += to.length(); - len += value.length(); - len += input.0.length(); - len - } - Transaction::Eip2930(TxEip2930 { - chain_id, - nonce, - gas_price, - gas_limit, - to, - value, - input, - access_list, - }) => { - let mut len = 0; - len += chain_id.length(); - len += nonce.length(); - len += gas_price.length(); - len += gas_limit.length(); - len += to.length(); - len += value.length(); - len += input.0.length(); - len += access_list.length(); - len - } - Transaction::Eip1559(TxEip1559 { - chain_id, - nonce, - gas_limit, - max_fee_per_gas, - max_priority_fee_per_gas, - to, - value, - input, - access_list, - }) => { - let mut len = 0; - len += chain_id.length(); - len += nonce.length(); - len += max_priority_fee_per_gas.length(); - len += max_fee_per_gas.length(); - len += gas_limit.length(); - len += to.length(); - len += value.length(); - len += input.0.length(); - len += access_list.length(); - len - } - Transaction::Eip4844(TxEip4844 { - chain_id, - nonce, - gas_limit, - max_fee_per_gas, - max_priority_fee_per_gas, - to, - value, - access_list, - blob_versioned_hashes, - max_fee_per_blob_gas, - input, - }) => { - let mut len = 0; - len += chain_id.length(); - len += nonce.length(); - len += gas_limit.length(); - len += max_fee_per_gas.length(); - len += max_priority_fee_per_gas.length(); - len += to.length(); - len += value.length(); - len += access_list.length(); - len += blob_versioned_hashes.length(); - len += 
max_fee_per_blob_gas.length(); - len += input.0.length(); - len - } + Transaction::Legacy(legacy_tx) => legacy_tx.fields_len(), + Transaction::Eip2930(access_list_tx) => access_list_tx.fields_len(), + Transaction::Eip1559(dynamic_fee_tx) => dynamic_fee_tx.fields_len(), + Transaction::Eip4844(blob_tx) => blob_tx.fields_len(), } } /// Encodes only the transaction's fields into the desired buffer, without a RLP header. - pub fn encode_fields(&self, out: &mut dyn bytes::BufMut) { + pub(crate) fn encode_fields(&self, out: &mut dyn bytes::BufMut) { match self { - Transaction::Legacy(TxLegacy { - chain_id: _, - nonce, - gas_price, - gas_limit, - to, - value, - input, - }) => { - nonce.encode(out); - gas_price.encode(out); - gas_limit.encode(out); - to.encode(out); - value.encode(out); - input.0.encode(out); - } - Transaction::Eip2930(TxEip2930 { - chain_id, - nonce, - gas_price, - gas_limit, - to, - value, - input, - access_list, - }) => { - chain_id.encode(out); - nonce.encode(out); - gas_price.encode(out); - gas_limit.encode(out); - to.encode(out); - value.encode(out); - input.0.encode(out); - access_list.encode(out); - } - Transaction::Eip1559(TxEip1559 { - chain_id, - nonce, - gas_limit, - max_fee_per_gas, - max_priority_fee_per_gas, - to, - value, - input, - access_list, - }) => { - chain_id.encode(out); - nonce.encode(out); - max_priority_fee_per_gas.encode(out); - max_fee_per_gas.encode(out); - gas_limit.encode(out); - to.encode(out); - value.encode(out); - input.0.encode(out); - access_list.encode(out); - } - Transaction::Eip4844(TxEip4844 { - chain_id, - nonce, - gas_limit, - max_fee_per_gas, - max_priority_fee_per_gas, - to, - value, - access_list, - blob_versioned_hashes, - max_fee_per_blob_gas, - input, - }) => { - chain_id.encode(out); - nonce.encode(out); - max_priority_fee_per_gas.encode(out); - max_fee_per_gas.encode(out); - gas_limit.encode(out); - to.encode(out); - value.encode(out); - input.0.encode(out); - access_list.encode(out); - max_fee_per_blob_gas.encode(out); - blob_versioned_hashes.encode(out); - } + Transaction::Legacy(legacy_tx) => legacy_tx.encode_fields(out), + Transaction::Eip2930(access_list_tx) => access_list_tx.encode_fields(out), + Transaction::Eip1559(dynamic_fee_tx) => dynamic_fee_tx.encode_fields(out), + Transaction::Eip4844(blob_tx) => blob_tx.encode_fields(out), } } @@ -541,29 +379,18 @@ impl Transaction { with_header: bool, ) { match self { - Transaction::Legacy(TxLegacy { chain_id, .. 
}) => { + Transaction::Legacy(legacy_tx) => { // do nothing w/ with_header - let payload_length = - self.fields_len() + signature.payload_len_with_eip155_chain_id(*chain_id); - let header = Header { list: true, payload_length }; - header.encode(out); - self.encode_fields(out); - signature.encode_with_eip155_chain_id(out, *chain_id); + legacy_tx.encode_with_signature(signature, out) } - _ => { - let payload_length = self.fields_len() + signature.payload_len(); - if with_header { - Header { - list: false, - payload_length: 1 + length_of_length(payload_length) + payload_length, - } - .encode(out); - } - out.put_u8(self.tx_type() as u8); - let header = Header { list: true, payload_length }; - header.encode(out); - self.encode_fields(out); - signature.encode(out); + Transaction::Eip2930(access_list_tx) => { + access_list_tx.encode_with_signature(signature, out, with_header) + } + Transaction::Eip1559(dynamic_fee_tx) => { + dynamic_fee_tx.encode_with_signature(signature, out, with_header) + } + Transaction::Eip4844(blob_tx) => { + blob_tx.encode_with_signature(signature, out, with_header) } } } @@ -1058,20 +885,20 @@ impl TransactionSigned { mem::size_of::() + self.transaction.size() + self.signature.size() } - /// Decodes legacy transaction from the data buffer. + /// Decodes legacy transaction from the data buffer into a tuple. /// /// This expects `rlp(legacy_tx)` // TODO: make buf advancement semantics consistent with `decode_enveloped_typed_transaction`, // so decoding methods do not need to manually advance the buffer - pub fn decode_rlp_legacy_transaction( + pub(crate) fn decode_rlp_legacy_transaction_tuple( data: &mut &[u8], - ) -> Result { + ) -> Result<(TxLegacy, TxHash, Signature), DecodeError> { // keep this around, so we can use it to calculate the hash let original_encoding = *data; let header = Header::decode(data)?; - let mut transaction = Transaction::Legacy(TxLegacy { + let mut transaction = TxLegacy { nonce: Decodable::decode(data)?, gas_price: Decodable::decode(data)?, gas_limit: Decodable::decode(data)?, @@ -1079,15 +906,27 @@ impl TransactionSigned { value: Decodable::decode(data)?, input: Bytes(Decodable::decode(data)?), chain_id: None, - }); + }; let (signature, extracted_id) = Signature::decode_with_eip155_chain_id(data)?; - if let Some(id) = extracted_id { - transaction.set_chain_id(id); - } + transaction.chain_id = extracted_id; let tx_length = header.payload_length + header.length(); let hash = keccak256(&original_encoding[..tx_length]); - let signed = TransactionSigned { transaction, hash, signature }; + Ok((transaction, hash, signature)) + } + + /// Decodes legacy transaction from the data buffer. + /// + /// This expects `rlp(legacy_tx)` + // TODO: make buf advancement semantics consistent with `decode_enveloped_typed_transaction`, + // so decoding methods do not need to manually advance the buffer + pub fn decode_rlp_legacy_transaction( + data: &mut &[u8], + ) -> Result { + let (transaction, hash, signature) = + TransactionSigned::decode_rlp_legacy_transaction_tuple(data)?; + let signed = + TransactionSigned { transaction: Transaction::Legacy(transaction), hash, signature }; Ok(signed) } diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index 44299f7a560b..da4fcf47c12f 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -1,5 +1,9 @@ -//! Includes the -use crate::{BlobTransaction, Bytes, TransactionSigned, EIP4844_TX_TYPE_ID}; +//! 
Defines the types for blob transactions, legacy, and other EIP-2718 transactions included in a +//! response to `GetPooledTransactions`. +use crate::{ + BlobTransaction, Bytes, Signature, Transaction, TransactionSigned, TxEip1559, TxEip2930, + TxHash, TxLegacy, EIP4844_TX_TYPE_ID, +}; use bytes::Buf; use reth_rlp::{Decodable, DecodeError, Encodable, Header, EMPTY_LIST_CODE}; use serde::{Deserialize, Serialize}; @@ -9,10 +13,35 @@ use serde::{Deserialize, Serialize}; // TODO: redo arbitrary for this encoding - the previous encoding was incorrect #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub enum PooledTransactionsElement { + /// A legacy transaction + Legacy { + /// The inner transaction + transaction: TxLegacy, + /// The signature + signature: Signature, + /// The hash of the transaction + hash: TxHash, + }, + /// An EIP-2930 typed transaction + Eip2930 { + /// The inner transaction + transaction: TxEip2930, + /// The signature + signature: Signature, + /// The hash of the transaction + hash: TxHash, + }, + /// An EIP-1559 typed transaction + Eip1559 { + /// The inner transaction + transaction: TxEip1559, + /// The signature + signature: Signature, + /// The hash of the transaction + hash: TxHash, + }, /// A blob transaction, which includes the transaction, blob data, commitments, and proofs. BlobTransaction(BlobTransaction), - /// A non-4844 signed transaction. - Transaction(TransactionSigned), } impl PooledTransactionsElement { @@ -34,7 +63,10 @@ impl PooledTransactionsElement { // Check if the tx is a list - tx types are less than EMPTY_LIST_CODE (0xc0) if data[0] >= EMPTY_LIST_CODE { // decode as legacy transaction - Ok(Self::Transaction(TransactionSigned::decode_rlp_legacy_transaction(&mut data)?)) + let (transaction, hash, signature) = + TransactionSigned::decode_rlp_legacy_transaction_tuple(&mut data)?; + + Ok(Self::Legacy { transaction, signature, hash }) } else { // decode the type byte, only decode BlobTransaction if it is a 4844 transaction let tx_type = *data.first().ok_or(DecodeError::InputTooShort)?; @@ -60,7 +92,27 @@ impl PooledTransactionsElement { // DO NOT advance the buffer for the type, since we want the enveloped decoding to // decode it again and advance the buffer on its own. let typed_tx = TransactionSigned::decode_enveloped_typed_transaction(&mut data)?; - Ok(PooledTransactionsElement::Transaction(typed_tx)) + + // because we checked the tx type, we can be sure that the transaction is not a + // blob transaction or legacy + match typed_tx.transaction { + Transaction::Legacy(_) => Err(DecodeError::Custom( + "legacy transactions should not be a result of EIP-2718 decoding", + )), + Transaction::Eip4844(_) => Err(DecodeError::Custom( + "EIP-4844 transactions can only be decoded with transaction type 0x03", + )), + Transaction::Eip2930(tx) => Ok(PooledTransactionsElement::Eip2930 { + transaction: tx, + signature: typed_tx.signature, + hash: typed_tx.hash, + }), + Transaction::Eip1559(tx) => Ok(PooledTransactionsElement::Eip1559 { + transaction: tx, + signature: typed_tx.signature, + hash: typed_tx.hash, + }), + } } } } @@ -68,8 +120,20 @@ impl PooledTransactionsElement { /// Returns the inner [TransactionSigned]. 
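With four variants and two distinct wire shapes (legacy list versus typed envelope), the essential property of the `Encodable`/`Decodable` pair above is that they are inverses. A test-style round-trip sketch, assuming a populated `element`:

```rust
use reth_rlp::{Decodable, Encodable};

fn assert_roundtrip(element: PooledTransactionsElement) {
    let mut buf = Vec::new();
    element.encode(&mut buf);
    let decoded = PooledTransactionsElement::decode(&mut buf.as_slice())
        .expect("decoding an encoded element must succeed");
    assert_eq!(element, decoded);
}
```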
pub fn into_transaction(self) -> TransactionSigned { match self { - Self::Transaction(tx) => tx, - Self::BlobTransaction(blob_tx) => blob_tx.transaction, + Self::Legacy { transaction, signature, hash } => { + TransactionSigned { transaction: Transaction::Legacy(transaction), signature, hash } + } + Self::Eip2930 { transaction, signature, hash } => TransactionSigned { + transaction: Transaction::Eip2930(transaction), + signature, + hash, + }, + Self::Eip1559 { transaction, signature, hash } => TransactionSigned { + transaction: Transaction::Eip1559(transaction), + signature, + hash, + }, + Self::BlobTransaction(blob_tx) => blob_tx.into_parts().0, } } } @@ -78,7 +142,39 @@ impl Encodable for PooledTransactionsElement { /// Encodes an enveloped post EIP-4844 [PooledTransactionsElement]. fn encode(&self, out: &mut dyn bytes::BufMut) { match self { - Self::Transaction(tx) => tx.encode(out), + Self::Legacy { transaction, signature, hash } => { + // construct signed transaction + let signed_tx = TransactionSigned { + transaction: Transaction::Legacy(transaction.clone()), + signature: *signature, + hash: *hash, + }; + + // encode signed transaction + signed_tx.encode(out); + } + Self::Eip2930 { transaction, signature, hash } => { + // construct signed transaction + let signed_tx = TransactionSigned { + transaction: Transaction::Eip2930(transaction.clone()), + signature: *signature, + hash: *hash, + }; + + // encode signed transaction + signed_tx.encode(out); + } + Self::Eip1559 { transaction, signature, hash } => { + // construct signed transaction + let signed_tx = TransactionSigned { + transaction: Transaction::Eip1559(transaction.clone()), + signature: *signature, + hash: *hash, + }; + + // encode signed transaction + signed_tx.encode(out); + } Self::BlobTransaction(blob_tx) => { // The inner encoding is used with `with_header` set to true, making the final // encoding: @@ -90,7 +186,36 @@ impl Encodable for PooledTransactionsElement { fn length(&self) -> usize { match self { - Self::Transaction(tx) => tx.length(), + Self::Legacy { transaction, signature, hash } => { + // construct signed transaction + let signed_tx = TransactionSigned { + transaction: Transaction::Legacy(transaction.clone()), + signature: *signature, + hash: *hash, + }; + + signed_tx.length() + } + Self::Eip2930 { transaction, signature, hash } => { + // construct signed transaction + let signed_tx = TransactionSigned { + transaction: Transaction::Eip2930(transaction.clone()), + signature: *signature, + hash: *hash, + }; + + signed_tx.length() + } + Self::Eip1559 { transaction, signature, hash } => { + // construct signed transaction + let signed_tx = TransactionSigned { + transaction: Transaction::Eip1559(transaction.clone()), + signature: *signature, + hash: *hash, + }; + + signed_tx.length() + } Self::BlobTransaction(blob_tx) => { // the encoding uses a header, so we set `with_header` to true blob_tx.payload_len_with_type(true) @@ -129,14 +254,14 @@ impl Decodable for PooledTransactionsElement { // Check if the tx is a list if header.list { // decode as legacy transaction - let legacy_tx = - TransactionSigned::decode_rlp_legacy_transaction(&mut original_encoding)?; + let (transaction, hash, signature) = + TransactionSigned::decode_rlp_legacy_transaction_tuple(&mut original_encoding)?; // advance the buffer based on how far `decode_rlp_legacy_transaction` advanced the // buffer *buf = original_encoding; - Ok(PooledTransactionsElement::Transaction(legacy_tx)) + Ok(Self::Legacy { transaction, signature, hash }) } else { // 
decode the type byte, only decode BlobTransaction if it is a 4844 transaction let tx_type = *buf.first().ok_or(DecodeError::InputTooShort)?; @@ -162,7 +287,27 @@ impl Decodable for PooledTransactionsElement { // DO NOT advance the buffer for the type, since we want the enveloped decoding to // decode it again and advance the buffer on its own. let typed_tx = TransactionSigned::decode_enveloped_typed_transaction(buf)?; - Ok(PooledTransactionsElement::Transaction(typed_tx)) + + // because we checked the tx type, we can be sure that the transaction is not a + // blob transaction or legacy + match typed_tx.transaction { + Transaction::Legacy(_) => Err(DecodeError::Custom( + "legacy transactions should not be a result of EIP-2718 decoding", + )), + Transaction::Eip4844(_) => Err(DecodeError::Custom( + "EIP-4844 transactions can only be decoded with transaction type 0x03", + )), + Transaction::Eip2930(tx) => Ok(PooledTransactionsElement::Eip2930 { + transaction: tx, + signature: typed_tx.signature, + hash: typed_tx.hash, + }), + Transaction::Eip1559(tx) => Ok(PooledTransactionsElement::Eip1559 { + transaction: tx, + signature: typed_tx.signature, + hash: typed_tx.hash, + }), + } } } } @@ -171,8 +316,28 @@ impl Decodable for PooledTransactionsElement { impl From for PooledTransactionsElement { /// Converts from a [TransactionSigned] to a [PooledTransactionsElement]. /// - /// NOTE: This will always return a [PooledTransactionsElement::Transaction] variant. + /// NOTE: For EIP-4844 transactions, this will return an empty sidecar. fn from(tx: TransactionSigned) -> Self { - Self::Transaction(tx) + let TransactionSigned { transaction, signature, hash } = tx; + match transaction { + Transaction::Legacy(tx) => { + PooledTransactionsElement::Legacy { transaction: tx, signature, hash } + } + Transaction::Eip2930(tx) => { + PooledTransactionsElement::Eip2930 { transaction: tx, signature, hash } + } + Transaction::Eip1559(tx) => { + PooledTransactionsElement::Eip1559 { transaction: tx, signature, hash } + } + Transaction::Eip4844(tx) => { + PooledTransactionsElement::BlobTransaction(BlobTransaction { + transaction: tx, + signature, + hash, + // This is empty - just for the conversion! + sidecar: Default::default(), + }) + } + } } } From e948928224f340d5d6b8f2dae0f2a51c084de124 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Thu, 17 Aug 2023 13:46:51 +0200 Subject: [PATCH 454/722] feat(doc): enhance `append_blocks_with_post_state` doc (#4195) --- crates/storage/provider/src/traits/block.rs | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index 1c45db5da720..182d7c57982a 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -244,8 +244,21 @@ pub trait BlockWriter: Send + Sync { senders: Option>, ) -> Result; - /// Append blocks and insert its post state. - /// This will insert block data to all related tables and will update pipeline progress. + /// Appends a batch of sealed blocks to the blockchain, including sender information, and + /// updates the post-state. + /// + /// Inserts the blocks into the database and updates the state with + /// provided `PostState`. + /// + /// # Parameters + /// + /// - `blocks`: Vector of `SealedBlockWithSenders` instances to append. + /// - `state`: Post-state information to update after appending. 
+ /// + /// # Returns + /// + /// Returns `Ok(())` on success, or an error if any operation fails. + fn append_blocks_with_post_state( &self, blocks: Vec, From ca99ee2ec95cf30f3b56a700765d0cffd1ad5753 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 17 Aug 2023 17:19:39 +0200 Subject: [PATCH 455/722] fix: broadcast promoted transactions (#4248) --- crates/transaction-pool/src/pool/mod.rs | 99 +++++++++++++++++++--- crates/transaction-pool/src/pool/txpool.rs | 41 ++++++--- 2 files changed, 115 insertions(+), 25 deletions(-) diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 7bb1e9f30bbd..5056a3905487 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -269,11 +269,15 @@ where last_seen_block_number: number, pending_basefee: pending_block_base_fee, }; + + // update the pool let outcome = self.pool.write().on_canonical_state_change( block_info, mined_transactions, changed_senders, ); + + // notify listeners about updates self.notify_on_new_state(outcome); } @@ -285,7 +289,8 @@ where let UpdateOutcome { promoted, discarded } = self.pool.write().update_accounts(changed_senders); let mut listener = self.event_listener.write(); - promoted.iter().for_each(|tx| listener.pending(tx, None)); + + promoted.iter().for_each(|tx| listener.pending(tx.hash(), None)); discarded.iter().for_each(|tx| listener.discarded(tx)); } @@ -406,7 +411,7 @@ where } // broadcast all pending transactions to the listener - for tx_hash in pending.pending_transactions() { + for tx_hash in pending.pending_transactions(listener.kind) { match listener.sender.try_send(tx_hash) { Ok(()) => {} Err(err) => { @@ -448,13 +453,39 @@ where } /// Notifies transaction listeners about changes after a block was processed. - fn notify_on_new_state(&self, outcome: OnNewCanonicalStateOutcome) { + fn notify_on_new_state(&self, outcome: OnNewCanonicalStateOutcome) { + // notify about promoted pending transactions + { + let mut transaction_listeners = self.pending_transaction_listener.lock(); + transaction_listeners.retain_mut(|listener| { + // broadcast all pending transactions to the listener + for tx_hash in outcome.pending_transactions(listener.kind) { + match listener.sender.try_send(tx_hash) { + Ok(()) => {} + Err(err) => { + return if matches!(err, mpsc::error::TrySendError::Full(_)) { + debug!( + target: "txpool", + "[{:?}] failed to send pending tx; channel full", + tx_hash, + ); + true + } else { + false + } + } + } + } + true + }); + } + let OnNewCanonicalStateOutcome { mined, promoted, discarded, block_hash } = outcome; let mut listener = self.event_listener.write(); mined.iter().for_each(|tx| listener.mined(tx, block_hash)); - promoted.iter().for_each(|tx| listener.pending(tx, None)); + promoted.iter().for_each(|tx| listener.pending(tx.hash(), None)); discarded.iter().for_each(|tx| listener.discarded(tx)); } @@ -467,7 +498,7 @@ where let AddedPendingTransaction { transaction, promoted, discarded, replaced } = tx; listener.pending(transaction.hash(), replaced.clone()); - promoted.iter().for_each(|tx| listener.pending(tx, None)); + promoted.iter().for_each(|tx| listener.pending(tx.hash(), None)); discarded.iter().for_each(|tx| listener.discarded(tx)); } AddedTransaction::Parked { transaction, replaced, .. } => { @@ -605,15 +636,23 @@ pub struct AddedPendingTransaction { /// Replaced transaction. 
replaced: Option>>, /// transactions promoted to the pending queue - promoted: Vec, + promoted: Vec>>, /// transaction that failed and became discarded discarded: Vec, } impl AddedPendingTransaction { - /// Returns all transactions that were promoted to the pending pool - pub(crate) fn pending_transactions(&self) -> impl Iterator + '_ { - std::iter::once(self.transaction.hash()).chain(self.promoted.iter()).copied() + /// Returns all transactions that were promoted to the pending pool and adhere to the given + /// [PendingTransactionListenerKind]. + /// + /// If the kind is [PendingTransactionListenerKind::PropagateOnly], then only transactions that + /// are allowed to be propagated are returned. + pub(crate) fn pending_transactions( + &self, + kind: PendingTransactionListenerKind, + ) -> impl Iterator + '_ { + let iter = std::iter::once(&self.transaction).chain(self.promoted.iter()); + PendingTransactionIter { kind, iter } } /// Returns if the transaction should be propagated. @@ -622,6 +661,29 @@ impl AddedPendingTransaction { } } +pub(crate) struct PendingTransactionIter { + kind: PendingTransactionListenerKind, + iter: Iter, +} + +impl<'a, Iter, T> Iterator for PendingTransactionIter +where + Iter: Iterator>>, + T: PoolTransaction + 'a, +{ + type Item = H256; + + fn next(&mut self) -> Option { + loop { + let next = self.iter.next()?; + if self.kind.is_propagate_only() && !next.propagate { + continue + } + return Some(*next.hash()) + } + } +} + /// Represents a transaction that was added into the pool and its state #[derive(Debug, Clone)] pub enum AddedTransaction { @@ -671,13 +733,28 @@ impl AddedTransaction { /// Contains all state changes after a [`CanonicalStateUpdate`] was processed #[derive(Debug)] -pub(crate) struct OnNewCanonicalStateOutcome { +pub(crate) struct OnNewCanonicalStateOutcome { /// Hash of the block. pub(crate) block_hash: H256, /// All mined transactions. pub(crate) mined: Vec, /// Transactions promoted to the ready queue. - pub(crate) promoted: Vec, + pub(crate) promoted: Vec>>, /// transaction that were discarded during the update pub(crate) discarded: Vec, } + +impl OnNewCanonicalStateOutcome { + /// Returns all transactions that were promoted to the pending pool and adhere to the given + /// [PendingTransactionListenerKind]. + /// + /// If the kind is [PendingTransactionListenerKind::PropagateOnly], then only transactions that + /// are allowed to be propagated are returned. + pub(crate) fn pending_transactions( + &self, + kind: PendingTransactionListenerKind, + ) -> impl Iterator + '_ { + let iter = self.promoted.iter(); + PendingTransactionIter { kind, iter } + } +} diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 0aa7f84be017..8f8ba2de0f3d 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -266,7 +266,7 @@ impl TxPool { pub(crate) fn update_accounts( &mut self, changed_senders: HashMap, - ) -> UpdateOutcome { + ) -> UpdateOutcome { // track changed accounts self.sender_info.extend(changed_senders.clone()); // Apply the state changes to the total set of transactions which triggers sub-pool updates. 
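The `PendingTransactionIter` introduced above encodes one policy: a `PropagateOnly` listener must never observe hashes of transactions that opted out of propagation, such as locally submitted transactions. Reduced to its core, the filter behaves like this sketch, where `propagate_only` stands in for the listener-kind check:

```rust
struct PendingTx {
    hash: u64, // stand-in for H256
    propagate: bool,
}

/// Yield hashes of promoted transactions, skipping non-propagatable ones
/// when the listener only wants transactions it may gossip.
fn pending_hashes(propagate_only: bool, promoted: &[PendingTx]) -> Vec<u64> {
    promoted
        .iter()
        .filter(|tx| !propagate_only || tx.propagate)
        .map(|tx| tx.hash)
        .collect()
}
```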
@@ -287,7 +287,7 @@ impl TxPool { block_info: BlockInfo, mined_transactions: Vec, changed_senders: HashMap, - ) -> OnNewCanonicalStateOutcome { + ) -> OnNewCanonicalStateOutcome { // update block info let block_hash = block_info.last_seen_block_hash; self.all_transactions.set_block_info(block_info); @@ -409,7 +409,7 @@ impl TxPool { /// Maintenance task to apply a series of updates. /// /// This will move/discard the given transaction according to the `PoolUpdate` - fn process_updates(&mut self, updates: Vec) -> UpdateOutcome { + fn process_updates(&mut self, updates: Vec) -> UpdateOutcome { let mut outcome = UpdateOutcome::default(); for update in updates { let PoolUpdate { id, hash, current, destination } = update; @@ -422,9 +422,11 @@ impl TxPool { } Destination::Pool(move_to) => { debug_assert!(!move_to.eq(¤t), "destination must be different"); - self.move_transaction(current, move_to, &id); + let moved = self.move_transaction(current, move_to, &id); if matches!(move_to, SubPool::Pending) { - outcome.promoted.push(hash); + if let Some(tx) = moved { + outcome.promoted.push(tx); + } } } } @@ -436,10 +438,15 @@ impl TxPool { /// /// This will remove the given transaction from one sub-pool and insert it into the other /// sub-pool. - fn move_transaction(&mut self, from: SubPool, to: SubPool, id: &TransactionId) { - if let Some(tx) = self.remove_from_subpool(from, id) { - self.add_transaction_to_subpool(to, tx); - } + fn move_transaction( + &mut self, + from: SubPool, + to: SubPool, + id: &TransactionId, + ) -> Option>> { + let tx = self.remove_from_subpool(from, id)?; + self.add_transaction_to_subpool(to, tx.clone()); + Some(tx) } /// Removes and returns all matching transactions from the pool. @@ -1324,14 +1331,20 @@ impl PoolInternalTransaction { } /// Tracks the result after updating the pool -#[derive(Default, Debug)] -pub(crate) struct UpdateOutcome { - /// transactions promoted to the ready queue - pub(crate) promoted: Vec, - /// transaction that failed and became discarded +#[derive(Debug)] +pub(crate) struct UpdateOutcome { + /// transactions promoted to the pending pool + pub(crate) promoted: Vec>>, + /// transaction that failed and were discarded pub(crate) discarded: Vec, } +impl Default for UpdateOutcome { + fn default() -> Self { + Self { promoted: vec![], discarded: vec![] } + } +} + /// Represents the outcome of a prune pub struct PruneResult { /// A list of added transactions that a pruned marker satisfied From a5b777a65fe13a212f30acd2d7ad8336fa202b5f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 17 Aug 2023 18:38:59 +0200 Subject: [PATCH 456/722] feat: add blob store service (#4191) Co-authored-by: Bjerg --- crates/net/network/src/transactions.rs | 5 +- .../src/blobstore/maintain.rs | 27 ++++ crates/transaction-pool/src/blobstore/mem.rs | 126 ++++++++++++++++++ crates/transaction-pool/src/blobstore/mod.rs | 66 +++++++++ crates/transaction-pool/src/blobstore/noop.rs | 40 ++++++ crates/transaction-pool/src/lib.rs | 1 + crates/transaction-pool/src/traits.rs | 4 +- crates/transaction-pool/src/validate/mod.rs | 1 + 8 files changed, 268 insertions(+), 2 deletions(-) create mode 100644 crates/transaction-pool/src/blobstore/maintain.rs create mode 100644 crates/transaction-pool/src/blobstore/mem.rs create mode 100644 crates/transaction-pool/src/blobstore/mod.rs create mode 100644 crates/transaction-pool/src/blobstore/noop.rs diff --git a/crates/net/network/src/transactions.rs b/crates/net/network/src/transactions.rs index f5cc1487dd8c..56d43dbb5e28 100644 --- 
a/crates/net/network/src/transactions.rs +++ b/crates/net/network/src/transactions.rs @@ -183,6 +183,7 @@ where response: oneshot::Sender>, ) { if let Some(peer) = self.peers.get_mut(&peer_id) { + // TODO softResponseLimit 2 * 1024 * 1024 let transactions = self .pool .get_all(request.0) @@ -237,6 +238,8 @@ where /// /// The message for new pooled hashes depends on the negotiated version of the stream. /// See [NewPooledTransactionHashes](NewPooledTransactionHashes) + /// + /// TODO add note that this never broadcasts full 4844 transactions fn propagate_transactions( &mut self, to_propagate: Vec, @@ -803,7 +806,7 @@ impl Future for GetPooledTxRequestFut { struct Peer { /// Keeps track of transactions that we know the peer has seen. transactions: LruCache, - /// A communication channel directly to the session task. + /// A communication channel directly to the peer's session task. request_tx: PeerRequestSender, /// negotiated version of the session. version: EthVersion, diff --git a/crates/transaction-pool/src/blobstore/maintain.rs b/crates/transaction-pool/src/blobstore/maintain.rs new file mode 100644 index 000000000000..cfc4c8fc68c1 --- /dev/null +++ b/crates/transaction-pool/src/blobstore/maintain.rs @@ -0,0 +1,27 @@ +//! Support for maintaining the blob pool. + +use crate::blobstore::BlobStore; +use reth_primitives::H256; +use std::collections::BTreeMap; + +/// The type that is used to maintain the blob store and discard finalized transactions. +#[derive(Debug)] +#[allow(unused)] +pub struct BlobStoreMaintainer { + /// The blob store that holds all the blob data. + store: S, + /// Keeps track of the blob transactions that are in blocks. + blob_txs_in_blocks: BTreeMap>, +} + +impl BlobStoreMaintainer { + /// Creates a new blob store maintenance instance. + pub fn new(store: S) -> Self { + Self { store, blob_txs_in_blocks: Default::default() } + } +} + +impl BlobStoreMaintainer { + /// Invoked when a block is finalized. + pub fn on_finalized(&mut self, _block_number: u64) {} +} diff --git a/crates/transaction-pool/src/blobstore/mem.rs b/crates/transaction-pool/src/blobstore/mem.rs new file mode 100644 index 000000000000..6d1dcb76aa1a --- /dev/null +++ b/crates/transaction-pool/src/blobstore/mem.rs @@ -0,0 +1,126 @@ +use crate::blobstore::{BlobStore, BlobStoreError, BlobTransactionSidecar}; +use parking_lot::RwLock; +use reth_primitives::H256; +use std::{ + collections::HashMap, + sync::{atomic::AtomicUsize, Arc}, +}; + +/// An in-memory blob store. +#[derive(Clone, Debug, Default)] +pub struct InMemoryBlobStore { + inner: Arc, +} + +#[derive(Debug, Default)] +struct InMemoryBlobStoreInner { + /// Storage for all blob data. 
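BlobStoreMaintainer::on_finalized is still a stub here; the BTreeMap index only suggests the intended shape. The following is an assumed sketch, not the patch's implementation: on finalization, drain every tracked entry at or below the finalized height so the corresponding sidecars can be deleted from the store.

use std::collections::BTreeMap;

struct Maintainer {
    // block number -> hashes of blob transactions mined in that block
    blob_txs_in_blocks: BTreeMap<u64, Vec<u64>>,
}

impl Maintainer {
    // Hypothetical eviction step: returns the hashes whose sidecars are no
    // longer needed. split_off keeps the retained keys sorted in O(log n).
    fn on_finalized(&mut self, finalized: u64) -> Vec<u64> {
        let retained = self.blob_txs_in_blocks.split_off(&(finalized + 1));
        let drained = std::mem::replace(&mut self.blob_txs_in_blocks, retained);
        drained.into_values().flatten().collect()
    }
}

fn main() {
    let mut m = Maintainer {
        blob_txs_in_blocks: BTreeMap::from([(1, vec![10]), (2, vec![20]), (3, vec![30])]),
    };
    assert_eq!(m.on_finalized(2), vec![10, 20]);
    assert_eq!(m.blob_txs_in_blocks.len(), 1);
}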
+ store: RwLock>, + size: AtomicUsize, +} + +impl InMemoryBlobStoreInner { + fn add_size(&self, add: usize) { + self.size.fetch_add(add, std::sync::atomic::Ordering::Relaxed); + } + + fn sub_size(&self, sub: usize) { + self.size.fetch_sub(sub, std::sync::atomic::Ordering::Relaxed); + } + + fn update_size(&self, add: usize, sub: usize) { + if add > sub { + self.add_size(add - sub); + } else { + self.sub_size(sub - add); + } + } +} + +impl BlobStore for InMemoryBlobStore { + fn insert(&self, tx: H256, data: BlobTransactionSidecar) -> Result<(), BlobStoreError> { + let mut store = self.inner.store.write(); + let (add, sub) = insert_size(&mut store, tx, data); + self.inner.update_size(add, sub); + Ok(()) + } + + fn insert_all(&self, txs: Vec<(H256, BlobTransactionSidecar)>) -> Result<(), BlobStoreError> { + if txs.is_empty() { + return Ok(()) + } + let mut store = self.inner.store.write(); + let mut total_add = 0; + let mut total_sub = 0; + for (tx, data) in txs { + let (add, sub) = insert_size(&mut store, tx, data); + total_add += add; + total_sub += sub; + } + self.inner.update_size(total_add, total_sub); + Ok(()) + } + + fn delete(&self, tx: H256) -> Result<(), BlobStoreError> { + let mut store = self.inner.store.write(); + let sub = remove_size(&mut store, &tx); + self.inner.sub_size(sub); + Ok(()) + } + + fn delete_all(&self, txs: Vec) -> Result<(), BlobStoreError> { + if txs.is_empty() { + return Ok(()) + } + let mut store = self.inner.store.write(); + let mut total_sub = 0; + for tx in txs { + total_sub += remove_size(&mut store, &tx); + } + self.inner.sub_size(total_sub); + Ok(()) + } + + // Retrieves the decoded blob data for the given transaction hash. + fn get(&self, tx: H256) -> Result, BlobStoreError> { + let store = self.inner.store.write(); + Ok(store.get(&tx).cloned()) + } + + fn get_all( + &self, + txs: Vec, + ) -> Result, BlobStoreError> { + let mut items = Vec::with_capacity(txs.len()); + let store = self.inner.store.write(); + for tx in txs { + if let Some(item) = store.get(&tx) { + items.push((tx, item.clone())); + } + } + + Ok(items) + } + + fn data_size_hint(&self) -> Option { + Some(self.inner.size.load(std::sync::atomic::Ordering::Relaxed)) + } +} + +/// Removes the given blob from the store and returns the size of the blob that was removed. +#[inline] +fn remove_size(store: &mut HashMap, tx: &H256) -> usize { + store.remove(tx).map(|rem| rem.size()).unwrap_or_default() +} + +/// Inserts the given blob into the store and returns the size of the blob that was (added,removed) +#[inline] +fn insert_size( + store: &mut HashMap, + tx: H256, + blob: BlobTransactionSidecar, +) -> (usize, usize) { + let add = blob.size(); + let sub = store.insert(tx, blob).map(|rem| rem.size()).unwrap_or_default(); + (add, sub) +} diff --git a/crates/transaction-pool/src/blobstore/mod.rs b/crates/transaction-pool/src/blobstore/mod.rs new file mode 100644 index 000000000000..0bdd14218048 --- /dev/null +++ b/crates/transaction-pool/src/blobstore/mod.rs @@ -0,0 +1,66 @@ +//! Storage for blob data of EIP4844 transactions. + +use reth_primitives::{BlobTransactionSidecar, H256}; +mod maintain; +mod mem; +mod noop; + +pub use maintain::BlobStoreMaintainer; +pub use mem::InMemoryBlobStore; +pub use noop::NoopBlobStore; + +/// A blob store that can be used to store blob data of EIP4844 transactions. +/// +/// This type is responsible for keeping track of blob data until it is no longer needed (after +/// finalization). +/// +/// Note: this is Clone because it is expected to be wrapped in an Arc. 
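The (add, sub) pair returned by insert_size exists because inserting over an existing hash replaces a blob, so the tracked total must move by the delta rather than grow monotonically. A self-contained sketch of that accounting, simplified to byte-vector blobs and a plain usize in place of the store's RwLock plus AtomicUsize:

use std::collections::HashMap;

struct Store {
    blobs: HashMap<u64, Vec<u8>>,
    size: usize,
}

impl Store {
    fn insert(&mut self, key: u64, blob: Vec<u8>) {
        let add = blob.len();
        // HashMap::insert returns the evicted value on replacement; without
        // subtracting it, the tracked size would drift upward on re-inserts.
        let sub = self.blobs.insert(key, blob).map(|old| old.len()).unwrap_or_default();
        if add > sub {
            self.size += add - sub;
        } else {
            self.size -= sub - add;
        }
    }
}

fn main() {
    let mut store = Store { blobs: HashMap::new(), size: 0 };
    store.insert(1, vec![0; 100]);
    store.insert(1, vec![0; 40]); // replacement: tracked size shrinks by 60
    assert_eq!(store.size, 40);
}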
+pub trait BlobStore: Send + Sync + 'static { + /// Inserts the blob sidecar into the store + fn insert(&self, tx: H256, data: BlobTransactionSidecar) -> Result<(), BlobStoreError>; + + /// Inserts multiple blob sidecars into the store + fn insert_all(&self, txs: Vec<(H256, BlobTransactionSidecar)>) -> Result<(), BlobStoreError>; + + /// Deletes the blob sidecar from the store + fn delete(&self, tx: H256) -> Result<(), BlobStoreError>; + + /// Deletes multiple blob sidecars from the store + fn delete_all(&self, txs: Vec) -> Result<(), BlobStoreError>; + + /// Retrieves the decoded blob data for the given transaction hash. + fn get(&self, tx: H256) -> Result, BlobStoreError>; + + /// Retrieves all decoded blob data for the given transaction hashes. + /// + /// This only returns the blobs that were found in the store. + /// If there's no blob it will not be returned. + fn get_all( + &self, + txs: Vec, + ) -> Result, BlobStoreError>; + + /// Data size of all transactions in the blob store. + fn data_size_hint(&self) -> Option; +} + +/// Error variants that can occur when interacting with a blob store. +#[derive(Debug, thiserror::Error)] +pub enum BlobStoreError { + /// Failed to decode the stored blob data. + #[error("failed to decode blob data: {0}")] + DecodeError(#[from] reth_rlp::DecodeError), + /// Other implementation specific error. + #[error(transparent)] + Other(Box), +} + +#[cfg(test)] +mod tests { + use super::*; + + #[allow(unused)] + struct DynStore { + store: Box, + } +} diff --git a/crates/transaction-pool/src/blobstore/noop.rs b/crates/transaction-pool/src/blobstore/noop.rs new file mode 100644 index 000000000000..d21bf59ef183 --- /dev/null +++ b/crates/transaction-pool/src/blobstore/noop.rs @@ -0,0 +1,40 @@ +use crate::blobstore::{BlobStore, BlobStoreError, BlobTransactionSidecar}; +use reth_primitives::H256; + +/// A blobstore implementation that does nothing +#[derive(Clone, Copy, Debug, PartialOrd, PartialEq, Default)] +#[non_exhaustive] +pub struct NoopBlobStore; + +impl BlobStore for NoopBlobStore { + fn insert(&self, _tx: H256, _data: BlobTransactionSidecar) -> Result<(), BlobStoreError> { + Ok(()) + } + + fn insert_all(&self, _txs: Vec<(H256, BlobTransactionSidecar)>) -> Result<(), BlobStoreError> { + Ok(()) + } + + fn delete(&self, _tx: H256) -> Result<(), BlobStoreError> { + Ok(()) + } + + fn delete_all(&self, _txs: Vec) -> Result<(), BlobStoreError> { + Ok(()) + } + + fn get(&self, _tx: H256) -> Result, BlobStoreError> { + Ok(None) + } + + fn get_all( + &self, + _txs: Vec, + ) -> Result, BlobStoreError> { + Ok(vec![]) + } + + fn data_size_hint(&self) -> Option { + Some(0) + } +} diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 8d039f66a9d5..4d78f6aba580 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -182,6 +182,7 @@ pub mod noop; pub mod pool; pub mod validate; +pub mod blobstore; mod config; mod identifier; mod ordering; diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 511ddc9a8307..2e869e671e1e 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -247,7 +247,8 @@ pub trait TransactionPool: Send + Sync + Clone { /// Returns all transactions objects for the given hashes. 
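The DynStore type in the test module hints at the two ways a BlobStore gets consumed: as a generic parameter (static dispatch, monomorphized per store) or boxed behind dyn (one vtable call, type erased). A small sketch of both call styles over a simplified stand-in trait:

trait Store: Send + Sync + 'static {
    fn data_size_hint(&self) -> Option<usize>;
}

struct Noop;

impl Store for Noop {
    fn data_size_hint(&self) -> Option<usize> {
        Some(0)
    }
}

// Static dispatch: compiled separately for each concrete store type.
fn report_generic<S: Store>(store: &S) -> usize {
    store.data_size_hint().unwrap_or_default()
}

// Dynamic dispatch: works with any store chosen at runtime.
fn report_dyn(store: &dyn Store) -> usize {
    store.data_size_hint().unwrap_or_default()
}

fn main() {
    let boxed: Box<dyn Store> = Box::new(Noop);
    assert_eq!(report_generic(&Noop), 0);
    assert_eq!(report_dyn(&*boxed), 0);
}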
/// - /// This adheres to the expected behavior of [`GetPooledTransactions`](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#getpooledtransactions-0x09): + /// TODO(mattsse): this will no longer be accurate and we need a new function specifically for + /// pooled txs. This adheres to the expected behavior of [`GetPooledTransactions`](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#getpooledtransactions-0x09): /// The transactions must be in the same order as in the request, but it is OK to skip transactions /// which are not available. fn get_all(&self, txs: Vec<TxHash>) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>; @@ -576,6 +577,7 @@ pub struct PooledTransaction { /// For EIP-1559 transactions: `max_fee_per_gas * gas_limit + tx_value`. /// For legacy transactions: `gas_price * gas_limit + tx_value`. pub(crate) cost: U256, + // TODO optional sidecar } impl PooledTransaction { diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 3c758bd749f2..989f7cc78d3f 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -33,6 +33,7 @@ pub enum TransactionValidationOutcome { /// Current nonce of the sender. state_nonce: u64, /// Validated transaction. + // TODO add enum type for blob, regular? transaction: T, /// Whether to propagate the transaction to the network. propagate: bool, From e6f471ddcfc9ee5fb875e411aee35d30fb3c4d82 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 17 Aug 2023 18:39:10 +0200 Subject: [PATCH 457/722] feat: add 4844 config and basic checks (#4245) --- crates/primitives/src/transaction/error.rs | 7 ++- crates/rpc/rpc/src/eth/error.rs | 7 ++- crates/transaction-pool/src/error.rs | 3 +- crates/transaction-pool/src/validate/eth.rs | 48 +++++++++++++++++++-- 4 files changed, 54 insertions(+), 11 deletions(-) diff --git a/crates/primitives/src/transaction/error.rs b/crates/primitives/src/transaction/error.rs index e3860ab3af8c..b45ac8cb08b7 100644 --- a/crates/primitives/src/transaction/error.rs +++ b/crates/primitives/src/transaction/error.rs @@ -20,11 +20,14 @@ pub enum InvalidTransactionError { #[error("Transaction's chain ID does not match.")] ChainIdMismatch, /// The transaction requires EIP-2930 which is not enabled currently. - #[error("EIP-2930 transactions are not valid before Berlin.")] + #[error("EIP-2930 transactions are disabled.")] Eip2930Disabled, /// The transaction requires EIP-1559 which is not enabled currently. - #[error("EIP-1559 transactions are not valid before London.")] + #[error("EIP-1559 transactions are disabled.")] Eip1559Disabled, + /// The transaction requires EIP-4844 which is not enabled currently. + #[error("EIP-4844 transactions are disabled.")] + Eip4844Disabled, /// Thrown if a transaction is not supported in the current network configuration.
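The cached cost field is a direct application of the formulas in its doc comment. A sketch of the computation with plain integers; the pool stores the result as U256 precisely because this product can overflow narrower types:

// Worst-case funds the sender must hold for the transaction to be executable.
// EIP-1559: fee_per_gas = max_fee_per_gas; legacy: fee_per_gas = gas_price.
fn cost(fee_per_gas: u128, gas_limit: u128, value: u128) -> u128 {
    fee_per_gas * gas_limit + value
}

fn main() {
    // 2 gwei max fee, 21_000 gas, 1 wei of value transferred.
    assert_eq!(cost(2_000_000_000, 21_000, 1), 42_000_000_000_001);
}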
#[error("Transaction type not supported")] TxTypeNotSupported, diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index d4c8813f5dd3..b3cb1f6302ef 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -381,10 +381,9 @@ impl From for RpcInvalidTransactionErr RpcInvalidTransactionError::OldLegacyChainId } InvalidTransactionError::ChainIdMismatch => RpcInvalidTransactionError::InvalidChainId, - InvalidTransactionError::Eip2930Disabled => { - RpcInvalidTransactionError::TxTypeNotSupported - } - InvalidTransactionError::Eip1559Disabled => { + InvalidTransactionError::Eip2930Disabled | + InvalidTransactionError::Eip1559Disabled | + InvalidTransactionError::Eip4844Disabled => { RpcInvalidTransactionError::TxTypeNotSupported } InvalidTransactionError::TxTypeNotSupported => { diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs index 75b2f66316f2..12ccfbebe5ce 100644 --- a/crates/transaction-pool/src/error.rs +++ b/crates/transaction-pool/src/error.rs @@ -175,7 +175,8 @@ impl InvalidPoolTransactionError { false } InvalidTransactionError::Eip2930Disabled | - InvalidTransactionError::Eip1559Disabled => { + InvalidTransactionError::Eip1559Disabled | + InvalidTransactionError::Eip4844Disabled => { // settings false } diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 168546a71ae0..7bf3ecb94cd5 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -11,7 +11,7 @@ use crate::{ }; use reth_primitives::{ constants::ETHEREUM_BLOCK_GAS_LIMIT, ChainSpec, InvalidTransactionError, EIP1559_TX_TYPE_ID, - EIP2930_TX_TYPE_ID, LEGACY_TX_TYPE_ID, + EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, }; use reth_provider::{AccountReader, StateProviderFactory}; use reth_tasks::TaskSpawner; @@ -128,10 +128,14 @@ pub struct EthTransactionValidatorBuilder { chain_spec: Arc, /// Fork indicator whether we are in the Shanghai stage. shanghai: bool, + /// Fork indicator whether we are in the Cancun hardfork. + cancun: bool, /// Fork indicator whether we are using EIP-2718 type transactions. eip2718: bool, /// Fork indicator whether we are using EIP-1559 type transactions. eip1559: bool, + /// Fork indicator whether we are using EIP-4844 blob transactions. + eip4844: bool, /// The current max gas limit block_gas_limit: u64, /// Minimum priority fee to enforce for acceptance into the pool. @@ -157,9 +161,24 @@ impl EthTransactionValidatorBuilder { additional_tasks: 1, // default to true, can potentially take this as a param in the future propagate_local_transactions: true, + + // TODO: can hard enable by default once transitioned + cancun: false, + eip4844: false, } } + /// Disables the Cancun fork. + pub fn no_cancun(self) -> Self { + self.set_cancun(false) + } + + /// Set the Cancun fork. + pub fn set_cancun(mut self, cancun: bool) -> Self { + self.cancun = cancun; + self + } + /// Disables the Shanghai fork. 
pub fn no_shanghai(self) -> Self { self.set_shanghai(false) @@ -238,8 +257,10 @@ impl EthTransactionValidatorBuilder { let Self { chain_spec, shanghai, + cancun, eip2718, eip1559, + eip4844, block_gas_limit, minimum_priority_fee, additional_tasks, @@ -252,6 +273,8 @@ impl EthTransactionValidatorBuilder { shanghai, eip2718, eip1559, + cancun, + eip4844, block_gas_limit, minimum_priority_fee, propagate_local_transactions, @@ -290,18 +313,22 @@ struct EthTransactionValidatorInner { client: Client, /// Fork indicator whether we are in the Shanghai stage. shanghai: bool, + /// Fork indicator whether we are in the Cancun hardfork. + cancun: bool, /// Fork indicator whether we are using EIP-2718 type transactions. eip2718: bool, /// Fork indicator whether we are using EIP-1559 type transactions. eip1559: bool, + /// Fork indicator whether we are using EIP-4844 blob transactions. + eip4844: bool, /// The current max gas limit block_gas_limit: u64, /// Minimum priority fee to enforce for acceptance into the pool. minimum_priority_fee: Option, - /// Marker for the transaction type - _marker: PhantomData, /// Toggle to determine if a local transaction should be propagated propagate_local_transactions: bool, + /// Marker for the transaction type + _marker: PhantomData, } // === impl EthTransactionValidatorInner === @@ -340,7 +367,6 @@ where ) } } - EIP1559_TX_TYPE_ID => { // Reject dynamic fee transactions until EIP-1559 activates. if !self.eip1559 { @@ -350,6 +376,15 @@ where ) } } + EIP4844_TX_TYPE_ID => { + // Reject blob transactions. + if !self.eip4844 { + return TransactionValidationOutcome::Invalid( + transaction, + InvalidTransactionError::Eip4844Disabled.into(), + ) + } + } _ => { return TransactionValidationOutcome::Invalid( @@ -414,6 +449,11 @@ where } } + // blob tx checks + if self.cancun { + // TODO: implement blob tx checks + } + let account = match self .client .latest() From 60ad6b2f5413ff000bd43b3dad3839164decad89 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 17 Aug 2023 13:15:24 -0400 Subject: [PATCH 458/722] perf: remove clones in PooledTransaction encoding (#4251) --- crates/primitives/src/transaction/eip1559.rs | 8 +++ crates/primitives/src/transaction/eip2930.rs | 8 +++ crates/primitives/src/transaction/eip4844.rs | 8 +++ crates/primitives/src/transaction/legacy.rs | 10 ++- crates/primitives/src/transaction/mod.rs | 18 ++--- crates/primitives/src/transaction/pooled.rs | 74 +++++--------------- 6 files changed, 57 insertions(+), 69 deletions(-) diff --git a/crates/primitives/src/transaction/eip1559.rs b/crates/primitives/src/transaction/eip1559.rs index 31072cd326b3..c446da2e1607 100644 --- a/crates/primitives/src/transaction/eip1559.rs +++ b/crates/primitives/src/transaction/eip1559.rs @@ -162,6 +162,14 @@ impl TxEip1559 { signature.encode(out); } + /// Output the length of the RLP signed transaction encoding. This encodes with a RLP header. 
+ pub(crate) fn payload_len_with_signature(&self, signature: &Signature) -> usize { + let payload_length = self.fields_len() + signature.payload_len(); + // 'transaction type byte length' + 'header length' + 'payload length' + let len = 1 + length_of_length(payload_length) + payload_length; + length_of_length(len) + len + } + /// Get transaction type pub(crate) fn tx_type(&self) -> TxType { TxType::EIP1559 diff --git a/crates/primitives/src/transaction/eip2930.rs b/crates/primitives/src/transaction/eip2930.rs index 074b2fc5b36e..3f2a8f8fa6b7 100644 --- a/crates/primitives/src/transaction/eip2930.rs +++ b/crates/primitives/src/transaction/eip2930.rs @@ -141,6 +141,14 @@ impl TxEip2930 { signature.encode(out); } + /// Output the length of the RLP signed transaction encoding. This encodes with a RLP header. + pub(crate) fn payload_len_with_signature(&self, signature: &Signature) -> usize { + let payload_length = self.fields_len() + signature.payload_len(); + // 'transaction type byte length' + 'header length' + 'payload length' + let len = 1 + length_of_length(payload_length) + payload_length; + length_of_length(len) + len + } + /// Get transaction type pub(crate) fn tx_type(&self) -> TxType { TxType::EIP2930 diff --git a/crates/primitives/src/transaction/eip4844.rs b/crates/primitives/src/transaction/eip4844.rs index 31573d4919f5..90246031e500 100644 --- a/crates/primitives/src/transaction/eip4844.rs +++ b/crates/primitives/src/transaction/eip4844.rs @@ -214,6 +214,14 @@ impl TxEip4844 { signature.encode(out); } + /// Output the length of the RLP signed transaction encoding. This encodes with a RLP header. + pub(crate) fn payload_len_with_signature(&self, signature: &Signature) -> usize { + let payload_length = self.fields_len() + signature.payload_len(); + // 'transaction type byte length' + 'header length' + 'payload length' + let len = 1 + length_of_length(payload_length) + payload_length; + length_of_length(len) + len + } + /// Get transaction type pub(crate) fn tx_type(&self) -> TxType { TxType::EIP4844 diff --git a/crates/primitives/src/transaction/legacy.rs b/crates/primitives/src/transaction/legacy.rs index ad9d4b141fe3..fcbb627268b4 100644 --- a/crates/primitives/src/transaction/legacy.rs +++ b/crates/primitives/src/transaction/legacy.rs @@ -1,6 +1,6 @@ use crate::{Bytes, ChainId, Signature, TransactionKind, TxType}; use reth_codecs::{main_codec, Compact}; -use reth_rlp::{Encodable, Header}; +use reth_rlp::{length_of_length, Encodable, Header}; use std::mem; /// Legacy transaction. @@ -93,6 +93,14 @@ impl TxLegacy { signature.encode_with_eip155_chain_id(out, self.chain_id); } + /// Output the length of the RLP signed transaction encoding. + pub(crate) fn payload_len_with_signature(&self, signature: &Signature) -> usize { + let payload_length = + self.fields_len() + signature.payload_len_with_eip155_chain_id(self.chain_id); + // 'header length' + 'payload length' + length_of_length(payload_length) + payload_length + } + /// Get transaction type pub(crate) fn tx_type(&self) -> TxType { TxType::Legacy diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index e771c5ab2335..962bd7e5bcdc 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -847,19 +847,15 @@ impl TransactionSigned { /// Output the length of the encode_inner(out, true). Note to assume that `with_header` is only /// `true`. 
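The repeated 1 + length_of_length(payload_length) + payload_length arithmetic falls out of the RLP header rules: a header is one byte for payloads under 56 bytes, otherwise one byte plus the minimal big-endian encoding of the length. A sketch of the helper and the envelope math, assuming those standard RLP rules:

// Bytes an RLP header occupies for a payload of the given length.
fn length_of_length(payload_len: usize) -> usize {
    if payload_len < 56 {
        1 // short form: a single prefix byte
    } else {
        // long form: prefix byte + minimal big-endian bytes of the length
        1 + (usize::BITS as usize / 8 - payload_len.leading_zeros() as usize / 8)
    }
}

// Mirrors payload_len_with_signature for typed (EIP-2718) transactions:
// type byte + inner list header + payload, wrapped in an outer header.
fn typed_tx_encoded_len(fields_and_sig_len: usize) -> usize {
    let inner = 1 + length_of_length(fields_and_sig_len) + fields_and_sig_len;
    length_of_length(inner) + inner
}

fn main() {
    assert_eq!(length_of_length(55), 1);
    assert_eq!(length_of_length(56), 2);
    // 100 payload bytes: 1 type byte + 2-byte header + 100 = 103,
    // wrapped in a 2-byte outer header, 105 total.
    assert_eq!(typed_tx_encoded_len(100), 105);
}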
pub(crate) fn payload_len_inner(&self) -> usize { - match self.transaction { - Transaction::Legacy(TxLegacy { chain_id, .. }) => { - let payload_length = self.transaction.fields_len() + - self.signature.payload_len_with_eip155_chain_id(chain_id); - // 'header length' + 'payload length' - length_of_length(payload_length) + payload_length + match &self.transaction { + Transaction::Legacy(legacy_tx) => legacy_tx.payload_len_with_signature(&self.signature), + Transaction::Eip2930(access_list_tx) => { + access_list_tx.payload_len_with_signature(&self.signature) } - _ => { - let payload_length = self.transaction.fields_len() + self.signature.payload_len(); - // 'transaction type byte length' + 'header length' + 'payload length' - let len = 1 + length_of_length(payload_length) + payload_length; - length_of_length(len) + len + Transaction::Eip1559(dynamic_fee_tx) => { + dynamic_fee_tx.payload_len_with_signature(&self.signature) } + Transaction::Eip4844(blob_tx) => blob_tx.payload_len_with_signature(&self.signature), } } diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index da4fcf47c12f..ebaf5ce144de 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -142,38 +142,16 @@ impl Encodable for PooledTransactionsElement { /// Encodes an enveloped post EIP-4844 [PooledTransactionsElement]. fn encode(&self, out: &mut dyn bytes::BufMut) { match self { - Self::Legacy { transaction, signature, hash } => { - // construct signed transaction - let signed_tx = TransactionSigned { - transaction: Transaction::Legacy(transaction.clone()), - signature: *signature, - hash: *hash, - }; - - // encode signed transaction - signed_tx.encode(out); + Self::Legacy { transaction, signature, .. } => { + transaction.encode_with_signature(signature, out) } - Self::Eip2930 { transaction, signature, hash } => { - // construct signed transaction - let signed_tx = TransactionSigned { - transaction: Transaction::Eip2930(transaction.clone()), - signature: *signature, - hash: *hash, - }; - - // encode signed transaction - signed_tx.encode(out); + Self::Eip2930 { transaction, signature, .. } => { + // encodes with header + transaction.encode_with_signature(signature, out, true) } - Self::Eip1559 { transaction, signature, hash } => { - // construct signed transaction - let signed_tx = TransactionSigned { - transaction: Transaction::Eip1559(transaction.clone()), - signature: *signature, - hash: *hash, - }; - - // encode signed transaction - signed_tx.encode(out); + Self::Eip1559 { transaction, signature, .. } => { + // encodes with header + transaction.encode_with_signature(signature, out, true) } Self::BlobTransaction(blob_tx) => { // The inner encoding is used with `with_header` set to true, making the final @@ -186,35 +164,17 @@ impl Encodable for PooledTransactionsElement { fn length(&self) -> usize { match self { - Self::Legacy { transaction, signature, hash } => { - // construct signed transaction - let signed_tx = TransactionSigned { - transaction: Transaction::Legacy(transaction.clone()), - signature: *signature, - hash: *hash, - }; - - signed_tx.length() + Self::Legacy { transaction, signature, .. 
} => { + // method computes the payload len with a RLP header + transaction.payload_len_with_signature(signature) } - Self::Eip2930 { transaction, signature, hash } => { - // construct signed transaction - let signed_tx = TransactionSigned { - transaction: Transaction::Eip2930(transaction.clone()), - signature: *signature, - hash: *hash, - }; - - signed_tx.length() + Self::Eip2930 { transaction, signature, .. } => { + // method computes the payload len with a RLP header + transaction.payload_len_with_signature(signature) } - Self::Eip1559 { transaction, signature, hash } => { - // construct signed transaction - let signed_tx = TransactionSigned { - transaction: Transaction::Eip1559(transaction.clone()), - signature: *signature, - hash: *hash, - }; - - signed_tx.length() + Self::Eip1559 { transaction, signature, .. } => { + // method computes the payload len with a RLP header + transaction.payload_len_with_signature(signature) } Self::BlobTransaction(blob_tx) => { // the encoding uses a header, so we set `with_header` to true From 0bcd388a2058f86ca8f560ce79b3a83ddb84fb68 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 17 Aug 2023 20:43:28 +0200 Subject: [PATCH 459/722] chore: rename transaction (#4252) --- crates/transaction-pool/src/lib.rs | 15 +++++++++------ crates/transaction-pool/src/noop.rs | 12 ++++++------ crates/transaction-pool/src/traits.rs | 14 +++++++------- examples/network-txpool.rs | 4 ++-- 4 files changed, 24 insertions(+), 21 deletions(-) diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 4d78f6aba580..e78a68533a3f 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -165,9 +165,9 @@ pub use crate::{ }, traits::{ AllPoolTransactions, BestTransactions, BlockInfo, CanonicalStateUpdate, ChangedAccount, - NewTransactionEvent, PendingTransactionListenerKind, PoolSize, PoolTransaction, - PooledTransaction, PropagateKind, PropagatedTransactions, TransactionOrigin, - TransactionPool, TransactionPoolExt, + EthPooledTransaction, NewTransactionEvent, PendingTransactionListenerKind, PoolSize, + PoolTransaction, PropagateKind, PropagatedTransactions, TransactionOrigin, TransactionPool, + TransactionPoolExt, }, validate::{ EthTransactionValidator, TransactionValidationOutcome, TransactionValidator, @@ -262,12 +262,15 @@ where } impl - Pool, CoinbaseTipOrdering> + Pool< + EthTransactionValidator, + CoinbaseTipOrdering, + > where Client: StateProviderFactory + Clone + 'static, { /// Returns a new [Pool] that uses the default [EthTransactionValidator] when validating - /// [PooledTransaction]s and ords via [CoinbaseTipOrdering] + /// [EthPooledTransaction]s and ords via [CoinbaseTipOrdering] /// /// # Example /// @@ -284,7 +287,7 @@ where /// # } /// ``` pub fn eth_pool( - validator: EthTransactionValidator, + validator: EthTransactionValidator, config: PoolConfig, ) -> Self { Self::new(validator, CoinbaseTipOrdering::default(), config) diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index d9c26214a516..2f17b3c43b76 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -5,8 +5,8 @@ use crate::{ error::PoolError, traits::PendingTransactionListenerKind, AllPoolTransactions, - AllTransactionsEvents, BestTransactions, BlockInfo, NewTransactionEvent, PoolResult, PoolSize, - PoolTransaction, PooledTransaction, PropagatedTransactions, TransactionEvents, + AllTransactionsEvents, BestTransactions, BlockInfo, EthPooledTransaction, 
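The point of the pooled.rs change above: length() and encode() previously rebuilt an owned TransactionSigned, cloning the entire transaction body, only to borrow it again for measurement or encoding. Computing from borrowed fields gives the same bytes with no allocation. A stripped-down sketch of the before and after shapes:

#[derive(Clone)]
struct Body {
    data: Vec<u8>,
}

struct Signed {
    body: Body,
    sig: [u8; 65],
}

impl Signed {
    fn length(&self) -> usize {
        self.body.data.len() + self.sig.len()
    }
}

// Before: allocate a whole Signed (cloning the body) just to call length().
fn length_via_clone(body: &Body, sig: &[u8; 65]) -> usize {
    Signed { body: body.clone(), sig: *sig }.length()
}

// After: the same number computed from borrows, no clone, no allocation.
fn length_borrowed(body: &Body, sig: &[u8; 65]) -> usize {
    body.data.len() + sig.len()
}

fn main() {
    let body = Body { data: vec![0u8; 32] };
    let sig = [0u8; 65];
    assert_eq!(length_via_clone(&body, &sig), length_borrowed(&body, &sig));
}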
NewTransactionEvent, + PoolResult, PoolSize, PoolTransaction, PropagatedTransactions, TransactionEvents, TransactionOrigin, TransactionPool, TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, }; @@ -24,7 +24,7 @@ pub struct NoopTransactionPool; #[async_trait::async_trait] impl TransactionPool for NoopTransactionPool { - type Transaction = PooledTransaction; + type Transaction = EthPooledTransaction; fn pool_size(&self) -> PoolSize { Default::default() @@ -212,16 +212,16 @@ impl Default for MockTransactionValidator { #[derive(Debug, Clone, thiserror::Error)] #[error("Can't insert transaction into the noop pool that does nothing.")] pub struct NoopInsertError { - tx: PooledTransaction, + tx: EthPooledTransaction, } impl NoopInsertError { - fn new(tx: PooledTransaction) -> Self { + fn new(tx: EthPooledTransaction) -> Self { Self { tx } } /// Returns the transaction that failed to be inserted. - pub fn into_inner(self) -> PooledTransaction { + pub fn into_inner(self) -> EthPooledTransaction { self.tx } } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 2e869e671e1e..4d740da9ab53 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -565,12 +565,12 @@ pub trait PoolTransaction: fn chain_id(&self) -> Option; } -/// The default [PoolTransaction] for the [Pool](crate::Pool). +/// The default [PoolTransaction] for the [Pool](crate::Pool) for Ethereum. /// /// This type is essentially a wrapper around [TransactionSignedEcRecovered] with additional fields /// derived from the transaction that are frequently used by the pools for ordering. #[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct PooledTransaction { +pub struct EthPooledTransaction { /// EcRecovered transaction info pub(crate) transaction: TransactionSignedEcRecovered, @@ -580,7 +580,7 @@ pub struct PooledTransaction { // TODO optional sidecar } -impl PooledTransaction { +impl EthPooledTransaction { /// Create new instance of [Self]. pub fn new(transaction: TransactionSignedEcRecovered) -> Self { let gas_cost = match &transaction.transaction { @@ -600,7 +600,7 @@ impl PooledTransaction { } } -impl PoolTransaction for PooledTransaction { +impl PoolTransaction for EthPooledTransaction { /// Returns hash of the transaction. 
fn hash(&self) -> &TxHash { self.transaction.hash_ref() @@ -696,13 +696,13 @@ impl PoolTransaction for PooledTransaction { } } -impl FromRecoveredTransaction for PooledTransaction { +impl FromRecoveredTransaction for EthPooledTransaction { fn from_recovered_transaction(tx: TransactionSignedEcRecovered) -> Self { - PooledTransaction::new(tx) + EthPooledTransaction::new(tx) } } -impl IntoRecoveredTransaction for PooledTransaction { +impl IntoRecoveredTransaction for EthPooledTransaction { fn to_recovered_transaction(&self) -> TransactionSignedEcRecovered { self.transaction.clone() } diff --git a/examples/network-txpool.rs b/examples/network-txpool.rs index afabcb17f1af..afb854460ccb 100644 --- a/examples/network-txpool.rs +++ b/examples/network-txpool.rs @@ -10,7 +10,7 @@ use reth_network::{config::rng_secret_key, NetworkConfig, NetworkManager}; use reth_provider::test_utils::NoopProvider; use reth_transaction_pool::{ - CoinbaseTipOrdering, PoolTransaction, PooledTransaction, TransactionOrigin, TransactionPool, + CoinbaseTipOrdering, EthPooledTransaction, PoolTransaction, TransactionOrigin, TransactionPool, TransactionValidationOutcome, TransactionValidator, }; @@ -68,7 +68,7 @@ struct OkValidator; #[async_trait::async_trait] impl TransactionValidator for OkValidator { - type Transaction = PooledTransaction; + type Transaction = EthPooledTransaction; async fn validate_transaction( &self, From f53fdd5329762c5ef8b715fefd1710d52ea78d81 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 17 Aug 2023 21:57:40 +0200 Subject: [PATCH 460/722] feat: add blob sidecar extraction to tx validation (#4254) --- crates/transaction-pool/src/noop.rs | 12 ++-- crates/transaction-pool/src/pool/mod.rs | 15 +++- crates/transaction-pool/src/validate/eth.rs | 6 +- crates/transaction-pool/src/validate/mod.rs | 77 +++++++++++++++++++-- examples/network-txpool.rs | 6 +- 5 files changed, 99 insertions(+), 17 deletions(-) diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index 2f17b3c43b76..0c0aa860662d 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -4,11 +4,11 @@ //! to be generic over it. 
use crate::{ - error::PoolError, traits::PendingTransactionListenerKind, AllPoolTransactions, - AllTransactionsEvents, BestTransactions, BlockInfo, EthPooledTransaction, NewTransactionEvent, - PoolResult, PoolSize, PoolTransaction, PropagatedTransactions, TransactionEvents, - TransactionOrigin, TransactionPool, TransactionValidationOutcome, TransactionValidator, - ValidPoolTransaction, + error::PoolError, traits::PendingTransactionListenerKind, validate::ValidTransaction, + AllPoolTransactions, AllTransactionsEvents, BestTransactions, BlockInfo, EthPooledTransaction, + NewTransactionEvent, PoolResult, PoolSize, PoolTransaction, PropagatedTransactions, + TransactionEvents, TransactionOrigin, TransactionPool, TransactionValidationOutcome, + TransactionValidator, ValidPoolTransaction, }; use reth_primitives::{Address, TxHash}; use std::{collections::HashSet, marker::PhantomData, sync::Arc}; @@ -184,7 +184,7 @@ impl TransactionValidator for MockTransactionValidator { TransactionValidationOutcome::Valid { balance: Default::default(), state_nonce: 0, - transaction, + transaction: ValidTransaction::Valid(transaction), propagate: match origin { TransactionOrigin::External => true, TransactionOrigin::Local => self.propagate_local, diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 5056a3905487..940fff3d0b79 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -96,7 +96,9 @@ mod events; pub use events::{FullTransactionEvent, TransactionEvent}; mod listener; -use crate::{pool::txpool::UpdateOutcome, traits::PendingTransactionListenerKind}; +use crate::{ + pool::txpool::UpdateOutcome, traits::PendingTransactionListenerKind, validate::ValidTransaction, +}; pub use listener::{AllTransactionsEvents, TransactionEvents}; mod best; @@ -314,6 +316,17 @@ where let transaction_id = TransactionId::new(sender_id, transaction.nonce()); let encoded_length = transaction.encoded_length(); + let (transaction, _maybe_sidecar) = match transaction { + ValidTransaction::Valid(tx) => (tx, None), + ValidTransaction::ValidWithSidecar { transaction, sidecar } => { + debug_assert!( + transaction.is_eip4844(), + "validator returned sidecar for non EIP-4844 transaction" + ); + (transaction, Some(sidecar)) + } + }; + let tx = ValidPoolTransaction { transaction, transaction_id, diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 7bf3ecb94cd5..0c0e5e3f2757 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -4,8 +4,8 @@ use crate::{ error::InvalidPoolTransactionError, traits::{PoolTransaction, TransactionOrigin}, validate::{ - task::ValidationJobSender, TransactionValidatorError, ValidationTask, MAX_INIT_CODE_SIZE, - TX_MAX_SIZE, + task::ValidationJobSender, TransactionValidatorError, ValidTransaction, ValidationTask, + MAX_INIT_CODE_SIZE, TX_MAX_SIZE, }, TransactionValidationOutcome, TransactionValidator, }; @@ -499,7 +499,7 @@ where TransactionValidationOutcome::Valid { balance: account.balance, state_nonce: account.nonce, - transaction, + transaction: ValidTransaction::Valid(transaction), // by this point assume all external transactions should be propagated propagate: match origin { TransactionOrigin::External => true, diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 989f7cc78d3f..37bc883ca1f6 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ 
b/crates/transaction-pool/src/validate/mod.rs @@ -6,7 +6,8 @@ use crate::{ traits::{PoolTransaction, TransactionOrigin}, }; use reth_primitives::{ - Address, IntoRecoveredTransaction, TransactionKind, TransactionSignedEcRecovered, TxHash, U256, + Address, BlobTransactionSidecar, IntoRecoveredTransaction, TransactionKind, + TransactionSignedEcRecovered, TxHash, H256, U256, }; use std::{fmt, time::Instant}; @@ -32,9 +33,13 @@ pub enum TransactionValidationOutcome { balance: U256, /// Current nonce of the sender. state_nonce: u64, - /// Validated transaction. - // TODO add enum type for blob,regular? - transaction: T, + /// The validated transaction. + /// + /// See also [ValidTransaction]. + /// + /// If this is a _new_ EIP-4844 blob transaction, then this must contain the extracted + /// sidecar. + transaction: ValidTransaction, /// Whether to propagate the transaction to the network. propagate: bool, }, @@ -56,6 +61,65 @@ impl TransactionValidationOutcome { } } +/// A wrapper type for a transaction that is valid and has an optional extracted EIP-4844 blob +/// transaction sidecar. +/// +/// If this is provided, then the sidecar will be temporarily stored in the blob store until the +/// transaction is finalized. +/// +/// Note: Since blob transactions can be re-injected without their sidecar (after reorg), the +/// validator can omit the sidecar if it is still in the blob store and return a +/// [ValidTransaction::Valid] instead. +#[derive(Debug)] +pub enum ValidTransaction { + /// A valid transaction without a sidecar. + Valid(T), + /// A valid transaction for which a sidecar should be stored. + /// + /// Caution: The [TransactionValidator] must ensure that this is only returned for EIP-4844 + /// transactions. + ValidWithSidecar { + /// The valid EIP-4844 transaction. + transaction: T, + /// The extracted sidecar of that transaction + sidecar: BlobTransactionSidecar, + }, +} + +impl ValidTransaction { + #[inline] + pub(crate) fn transaction(&self) -> &T { + match self { + Self::Valid(transaction) => transaction, + Self::ValidWithSidecar { transaction, .. } => transaction, + } + } + + /// Returns the address of that transaction. + #[inline] + pub(crate) fn sender(&self) -> Address { + self.transaction().sender() + } + + /// Returns the hash of the transaction. + #[inline] + pub(crate) fn hash(&self) -> &H256 { + self.transaction().hash() + } + + /// Returns the length of the rlp encoded object + #[inline] + pub(crate) fn encoded_length(&self) -> usize { + self.transaction().encoded_length() + } + + /// Returns the nonce of the transaction. + #[inline] + pub(crate) fn nonce(&self) -> u64 { + self.transaction().nonce() + } +} + /// Provides support for validating transaction at any given state of the chain #[async_trait::async_trait] pub trait TransactionValidator: Send + Sync { @@ -113,6 +177,11 @@ pub trait TransactionValidator: Send + Sync { } /// A valid transaction in the pool. +/// +/// This is used as the internal representation of a transaction inside the pool. +/// +/// For EIP-4844 blob transactions this will _not_ contain the blob sidecar which is stored +/// separately in the [BlobStore](crate::blobstore::BlobStore). 
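ValidTransaction is a plain two-variant enum: the accessors see through both variants, and the pool splits the wrapper apart on insertion, sending the transaction to the sub-pools and the sidecar to the blob store. A self-contained sketch of that pattern with stand-in types:

struct Sidecar {
    blobs: usize,
}

enum Valid<T> {
    Plain(T),
    WithSidecar { tx: T, sidecar: Sidecar },
}

impl<T> Valid<T> {
    // Accessors dispatch to the inner transaction either way.
    fn tx(&self) -> &T {
        match self {
            Valid::Plain(tx) => tx,
            Valid::WithSidecar { tx, .. } => tx,
        }
    }

    // The pool's insertion step: transaction one way, sidecar the other.
    fn split(self) -> (T, Option<Sidecar>) {
        match self {
            Valid::Plain(tx) => (tx, None),
            Valid::WithSidecar { tx, sidecar } => (tx, Some(sidecar)),
        }
    }
}

fn main() {
    let v = Valid::WithSidecar { tx: "blob tx", sidecar: Sidecar { blobs: 2 } };
    assert_eq!(*v.tx(), "blob tx");
    let (tx, sidecar) = v.split();
    assert_eq!(tx, "blob tx");
    assert_eq!(sidecar.map(|s| s.blobs), Some(2));
}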
pub struct ValidPoolTransaction { /// The transaction pub transaction: T, diff --git a/examples/network-txpool.rs b/examples/network-txpool.rs index afb854460ccb..84e605b883db 100644 --- a/examples/network-txpool.rs +++ b/examples/network-txpool.rs @@ -10,8 +10,8 @@ use reth_network::{config::rng_secret_key, NetworkConfig, NetworkManager}; use reth_provider::test_utils::NoopProvider; use reth_transaction_pool::{ - CoinbaseTipOrdering, EthPooledTransaction, PoolTransaction, TransactionOrigin, TransactionPool, - TransactionValidationOutcome, TransactionValidator, + validate::ValidTransaction, CoinbaseTipOrdering, EthPooledTransaction, PoolTransaction, + TransactionOrigin, TransactionPool, TransactionValidationOutcome, TransactionValidator, }; #[tokio::main] @@ -79,7 +79,7 @@ impl TransactionValidator for OkValidator { TransactionValidationOutcome::Valid { balance: transaction.cost(), state_nonce: transaction.nonce(), - transaction, + transaction: ValidTransaction::Valid(transaction), propagate: false, } } From 5039b3b582a19d5d38daf343dbd3826702b8e335 Mon Sep 17 00:00:00 2001 From: "Supernovahs.eth" <91280922+supernovahs@users.noreply.github.com> Date: Fri, 18 Aug 2023 16:55:37 +0530 Subject: [PATCH 461/722] debug_backtraceAt (#4232) Co-authored-by: Matthias Seitz --- Cargo.lock | 1 + crates/rpc/rpc-api/src/debug.rs | 6 ++++++ crates/rpc/rpc/Cargo.toml | 1 + crates/rpc/rpc/src/debug.rs | 4 ++++ 4 files changed, 12 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index c22e923952df..969ed1ddb6b6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5986,6 +5986,7 @@ dependencies = [ "hyper", "jsonrpsee", "jsonwebtoken", + "lazy_static", "pin-project", "rand 0.8.5", "rayon", diff --git a/crates/rpc/rpc-api/src/debug.rs b/crates/rpc/rpc-api/src/debug.rs index 6fba56c83d78..91377017425c 100644 --- a/crates/rpc/rpc-api/src/debug.rs +++ b/crates/rpc/rpc-api/src/debug.rs @@ -123,4 +123,10 @@ pub trait DebugApi { opts: Option, state_override: Option, ) -> RpcResult>; + + /// Sets the logging backtrace location. When a backtrace location is set and a log message is + /// emitted at that location, the stack of the goroutine executing the log statement will + /// be printed to stderr. 
+ #[method(name = "backtraceAt")] + async fn debug_backtrace_at(&self, location: &str) -> RpcResult<()>; } diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 756fefd8a1e3..3be464d0afba 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -25,6 +25,7 @@ reth-tasks.workspace = true reth-metrics.workspace = true reth-consensus-common = { path = "../../consensus/common" } reth-rpc-types-compat.workspace = true +lazy_static = "*" # eth revm = { workspace = true, features = [ diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 147c2a9f359c..aac4f176efdf 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -643,6 +643,10 @@ where Ok(res.into()) } + async fn debug_backtrace_at(&self, _location: &str) -> RpcResult<()> { + Ok(()) + } + /// Handler for `debug_getRawBlock` async fn raw_block(&self, block_id: BlockId) -> RpcResult { let block = self.inner.provider.block_by_id(block_id).to_rpc_result()?; From efab153cd9c4c7b6349eb46a5b9b5532ab14eb4f Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Fri, 18 Aug 2023 13:28:09 +0200 Subject: [PATCH 462/722] feat(transaction-pool): make `EthTransactionValidator` generic over Validator (#4258) Co-authored-by: Matthias Seitz --- bin/reth/src/node/mod.rs | 4 +- crates/transaction-pool/src/lib.rs | 26 ++-- crates/transaction-pool/src/validate/eth.rs | 127 +++--------------- crates/transaction-pool/src/validate/mod.rs | 4 +- crates/transaction-pool/src/validate/task.rs | 130 ++++++++++++++++++- examples/network-txpool.rs | 4 +- 6 files changed, 163 insertions(+), 132 deletions(-) diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 0837cfe291ba..986f3012f82c 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -65,7 +65,7 @@ use reth_stages::{ MetricEventsSender, MetricsListener, }; use reth_tasks::TaskExecutor; -use reth_transaction_pool::{EthTransactionValidator, TransactionPool}; +use reth_transaction_pool::{TransactionPool, TransactionValidationTaskExecutor}; use secp256k1::SecretKey; use std::{ net::{Ipv4Addr, SocketAddr, SocketAddrV4}, @@ -263,7 +263,7 @@ impl NodeCommand { let blockchain_db = BlockchainProvider::new(factory, blockchain_tree.clone())?; let transaction_pool = reth_transaction_pool::Pool::eth_pool( - EthTransactionValidator::with_additional_tasks( + TransactionValidationTaskExecutor::eth_with_additional_tasks( blockchain_db.clone(), Arc::clone(&self.chain), ctx.task_executor.clone(), diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index e78a68533a3f..fb267c117f9d 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -94,10 +94,10 @@ //! use reth_primitives::MAINNET; //! use reth_provider::{ChainSpecProvider, StateProviderFactory}; //! use reth_tasks::TokioTaskExecutor; -//! use reth_transaction_pool::{EthTransactionValidator, Pool, TransactionPool}; +//! use reth_transaction_pool::{TransactionValidationTaskExecutor, Pool, TransactionPool}; //! async fn t(client: C) where C: StateProviderFactory + ChainSpecProvider + Clone + 'static{ //! let pool = Pool::eth_pool( -//! EthTransactionValidator::new(client, MAINNET.clone(), TokioTaskExecutor::default()), +//! TransactionValidationTaskExecutor::eth(client, MAINNET.clone(), TokioTaskExecutor::default()), //! Default::default(), //! ); //! let mut transactions = pool.pending_transactions_listener(); @@ -119,14 +119,14 @@ //! 
use reth_primitives::MAINNET; //! use reth_provider::{BlockReaderIdExt, CanonStateNotification, ChainSpecProvider, StateProviderFactory}; //! use reth_tasks::TokioTaskExecutor; -//! use reth_transaction_pool::{EthTransactionValidator, Pool}; +//! use reth_transaction_pool::{TransactionValidationTaskExecutor, Pool}; //! use reth_transaction_pool::maintain::maintain_transaction_pool_future; //! async fn t(client: C, stream: St) //! where C: StateProviderFactory + BlockReaderIdExt + ChainSpecProvider + Clone + 'static, //! St: Stream + Send + Unpin + 'static, //! { //! let pool = Pool::eth_pool( -//! EthTransactionValidator::new(client.clone(), MAINNET.clone(), TokioTaskExecutor::default()), +//! TransactionValidationTaskExecutor::eth(client.clone(), MAINNET.clone(), TokioTaskExecutor::default()), //! Default::default(), //! ); //! @@ -170,8 +170,8 @@ pub use crate::{ TransactionPoolExt, }, validate::{ - EthTransactionValidator, TransactionValidationOutcome, TransactionValidator, - ValidPoolTransaction, + EthTransactionValidator, TransactionValidationOutcome, TransactionValidationTaskExecutor, + TransactionValidator, ValidPoolTransaction, }, }; @@ -263,14 +263,14 @@ where impl Pool< - EthTransactionValidator, + TransactionValidationTaskExecutor>, CoinbaseTipOrdering, > where Client: StateProviderFactory + Clone + 'static, { - /// Returns a new [Pool] that uses the default [EthTransactionValidator] when validating - /// [EthPooledTransaction]s and ords via [CoinbaseTipOrdering] + /// Returns a new [Pool] that uses the default [TransactionValidationTaskExecutor] when + /// validating [EthPooledTransaction]s and ords via [CoinbaseTipOrdering] /// /// # Example /// @@ -278,16 +278,18 @@ where /// use reth_provider::StateProviderFactory; /// use reth_primitives::MAINNET; /// use reth_tasks::TokioTaskExecutor; - /// use reth_transaction_pool::{EthTransactionValidator, Pool}; + /// use reth_transaction_pool::{TransactionValidationTaskExecutor, Pool}; /// # fn t(client: C) where C: StateProviderFactory + Clone + 'static{ /// let pool = Pool::eth_pool( - /// EthTransactionValidator::new(client, MAINNET.clone(), TokioTaskExecutor::default()), + /// TransactionValidationTaskExecutor::eth(client, MAINNET.clone(), TokioTaskExecutor::default()), /// Default::default(), /// ); /// # } /// ``` pub fn eth_pool( - validator: EthTransactionValidator, + validator: TransactionValidationTaskExecutor< + EthTransactionValidator, + >, config: PoolConfig, ) -> Self { Self::new(validator, CoinbaseTipOrdering::default(), config) diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 0c0e5e3f2757..837dee8a6400 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -3,11 +3,8 @@ use crate::{ error::InvalidPoolTransactionError, traits::{PoolTransaction, TransactionOrigin}, - validate::{ - task::ValidationJobSender, TransactionValidatorError, ValidTransaction, ValidationTask, - MAX_INIT_CODE_SIZE, TX_MAX_SIZE, - }, - TransactionValidationOutcome, TransactionValidator, + validate::{ValidTransaction, ValidationTask, MAX_INIT_CODE_SIZE, TX_MAX_SIZE}, + TransactionValidationOutcome, TransactionValidationTaskExecutor, TransactionValidator, }; use reth_primitives::{ constants::ETHEREUM_BLOCK_GAS_LIMIT, ChainSpec, InvalidTransactionError, EIP1559_TX_TYPE_ID, @@ -16,113 +13,16 @@ use reth_primitives::{ use reth_provider::{AccountReader, StateProviderFactory}; use reth_tasks::TaskSpawner; use std::{marker::PhantomData, 
sync::Arc}; -use tokio::sync::{oneshot, Mutex}; +use tokio::sync::Mutex; -/// A [TransactionValidator] implementation that validates ethereum transaction. -/// -/// This validator is non-blocking, all validation work is done in a separate task. -#[derive(Debug, Clone)] +/// Validator for Ethereum transactions. +#[derive(Debug)] pub struct EthTransactionValidator { /// The type that performs the actual validation. - inner: Arc>, - /// The sender half to validation tasks that perform the actual validation. - to_validation_task: Arc>, -} - -// === impl EthTransactionValidator === - -impl EthTransactionValidator<(), ()> { - /// Convenience method to create a [EthTransactionValidatorBuilder] - pub fn builder(chain_spec: Arc) -> EthTransactionValidatorBuilder { - EthTransactionValidatorBuilder::new(chain_spec) - } + pub inner: Arc>, } -impl EthTransactionValidator { - /// Creates a new instance for the given [ChainSpec] - /// - /// This will spawn a single validation tasks that performs the actual validation. - /// See [EthTransactionValidator::with_additional_tasks] - pub fn new(client: Client, chain_spec: Arc, tasks: T) -> Self - where - T: TaskSpawner, - { - Self::with_additional_tasks(client, chain_spec, tasks, 0) - } - - /// Creates a new instance for the given [ChainSpec] - /// - /// By default this will enable support for: - /// - shanghai - /// - eip1559 - /// - eip2930 - /// - /// This will always spawn a validation task that performs the actual validation. It will spawn - /// `num_additional_tasks` additional tasks. - pub fn with_additional_tasks( - client: Client, - chain_spec: Arc, - tasks: T, - num_additional_tasks: usize, - ) -> Self - where - T: TaskSpawner, - { - EthTransactionValidatorBuilder::new(chain_spec) - .with_additional_tasks(num_additional_tasks) - .build(client, tasks) - } - - /// Returns the configured chain id - pub fn chain_id(&self) -> u64 { - self.inner.chain_id() - } -} - -#[async_trait::async_trait] -impl TransactionValidator for EthTransactionValidator -where - Client: StateProviderFactory + Clone + 'static, - Tx: PoolTransaction + 'static, -{ - type Transaction = Tx; - - async fn validate_transaction( - &self, - origin: TransactionOrigin, - transaction: Self::Transaction, - ) -> TransactionValidationOutcome { - let hash = *transaction.hash(); - let (tx, rx) = oneshot::channel(); - { - let to_validation_task = self.to_validation_task.clone(); - let to_validation_task = to_validation_task.lock().await; - let validator = Arc::clone(&self.inner); - let res = to_validation_task - .send(Box::pin(async move { - let res = validator.validate_transaction(origin, transaction).await; - let _ = tx.send(res); - })) - .await; - if res.is_err() { - return TransactionValidationOutcome::Error( - hash, - Box::new(TransactionValidatorError::ValidationServiceUnreachable), - ) - } - } - - match rx.await { - Ok(res) => res, - Err(_) => TransactionValidationOutcome::Error( - hash, - Box::new(TransactionValidatorError::ValidationServiceUnreachable), - ), - } - } -} - -/// A builder for [EthTransactionValidator] +/// A builder for [TransactionValidationTaskExecutor] #[derive(Debug, Clone)] pub struct EthTransactionValidatorBuilder { chain_spec: Arc, @@ -241,7 +141,7 @@ impl EthTransactionValidatorBuilder { self } - /// Builds a [EthTransactionValidator] + /// Builds a [TransactionValidationTaskExecutor] /// /// The validator will spawn `additional_tasks` additional tasks for validation. 
/// @@ -250,7 +150,7 @@ impl EthTransactionValidatorBuilder { self, client: Client, tasks: T, - ) -> EthTransactionValidator + ) -> TransactionValidationTaskExecutor> where T: TaskSpawner, { @@ -300,13 +200,16 @@ impl EthTransactionValidatorBuilder { let to_validation_task = Arc::new(Mutex::new(tx)); - EthTransactionValidator { inner: Arc::new(inner), to_validation_task } + TransactionValidationTaskExecutor { + validator: EthTransactionValidator { inner: Arc::new(inner) }, + to_validation_task, + } } } /// A [TransactionValidator] implementation that validates ethereum transaction. #[derive(Debug, Clone)] -struct EthTransactionValidatorInner { +pub struct EthTransactionValidatorInner { /// Spec of the chain chain_spec: Arc, /// This type fetches account info from the db @@ -335,7 +238,7 @@ struct EthTransactionValidatorInner { impl EthTransactionValidatorInner { /// Returns the configured chain id - fn chain_id(&self) -> u64 { + pub fn chain_id(&self) -> u64 { self.chain_spec.chain().id() } } diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 37bc883ca1f6..1ec2bd07f4eb 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -19,7 +19,7 @@ mod task; pub use eth::{EthTransactionValidator, EthTransactionValidatorBuilder}; /// A spawnable task that performs transaction validation. -pub use task::ValidationTask; +pub use task::{TransactionValidationTaskExecutor, ValidationTask}; /// Validation constants. pub use constants::{MAX_CODE_SIZE, MAX_INIT_CODE_SIZE, TX_MAX_SIZE, TX_SLOT_SIZE}; @@ -150,7 +150,7 @@ pub trait TransactionValidator: Send + Sync { /// example nonce or balance changes. Hence, any validation checks must be applied in this /// function. /// - /// See [EthTransactionValidator] for a reference implementation. + /// See [TransactionValidationTaskExecutor] for a reference implementation. async fn validate_transaction( &self, origin: TransactionOrigin, diff --git a/crates/transaction-pool/src/validate/task.rs b/crates/transaction-pool/src/validate/task.rs index 80c477802f2d..7e219157a30e 100644 --- a/crates/transaction-pool/src/validate/task.rs +++ b/crates/transaction-pool/src/validate/task.rs @@ -1,9 +1,19 @@ //! A validation service for transactions. -use crate::validate::TransactionValidatorError; +use crate::{ + validate::{EthTransactionValidatorBuilder, TransactionValidatorError}, + EthTransactionValidator, PoolTransaction, TransactionOrigin, TransactionValidationOutcome, + TransactionValidator, +}; use futures_util::{lock::Mutex, StreamExt}; +use reth_primitives::ChainSpec; +use reth_provider::StateProviderFactory; +use reth_tasks::TaskSpawner; use std::{future::Future, pin::Pin, sync::Arc}; -use tokio::sync::mpsc; +use tokio::{ + sync, + sync::{mpsc, oneshot}, +}; use tokio_stream::wrappers::ReceiverStream; /// A service that performs validation jobs. @@ -60,3 +70,119 @@ impl ValidationJobSender { self.tx.send(job).await.map_err(|_| TransactionValidatorError::ValidationServiceUnreachable) } } + +/// A [TransactionValidator] implementation that validates ethereum transaction. +/// +/// This validator is non-blocking, all validation work is done in a separate task. +#[derive(Debug, Clone)] +pub struct TransactionValidationTaskExecutor { + /// The validator that will validate transactions on a separate task. + pub validator: V, + /// The sender half to validation tasks that perform the actual validation. 
+ pub to_validation_task: Arc>, +} + +// === impl TransactionValidationTaskExecutor === + +impl TransactionValidationTaskExecutor<()> { + /// Convenience method to create a [EthTransactionValidatorBuilder] + pub fn eth_builder(chain_spec: Arc) -> EthTransactionValidatorBuilder { + EthTransactionValidatorBuilder::new(chain_spec) + } +} + +impl TransactionValidationTaskExecutor> { + /// Creates a new instance for the given [ChainSpec] + /// + /// This will spawn a single validation tasks that performs the actual validation. + /// See [TransactionValidationTaskExecutor::eth_with_additional_tasks] + pub fn eth(client: Client, chain_spec: Arc, tasks: T) -> Self + where + T: TaskSpawner, + { + Self::eth_with_additional_tasks(client, chain_spec, tasks, 0) + } + + /// Creates a new instance for the given [ChainSpec] + /// + /// By default this will enable support for: + /// - shanghai + /// - eip1559 + /// - eip2930 + /// + /// This will always spawn a validation task that performs the actual validation. It will spawn + /// `num_additional_tasks` additional tasks. + pub fn eth_with_additional_tasks( + client: Client, + chain_spec: Arc, + tasks: T, + num_additional_tasks: usize, + ) -> Self + where + T: TaskSpawner, + { + EthTransactionValidatorBuilder::new(chain_spec) + .with_additional_tasks(num_additional_tasks) + .build::(client, tasks) + } + + /// Returns the configured chain id + pub fn chain_id(&self) -> u64 { + self.validator.inner.chain_id() + } +} + +impl TransactionValidationTaskExecutor { + /// Creates a new executor instance with the given validator for transaction validation. + /// + /// Initializes the executor with the provided validator and sets up communication for + /// validation tasks. + pub fn new(validator: V) -> Self { + let (tx, _) = ValidationTask::new(); + Self { validator, to_validation_task: Arc::new(sync::Mutex::new(tx)) } + } +} + +#[async_trait::async_trait] +impl TransactionValidator + for TransactionValidationTaskExecutor> +where + Client: StateProviderFactory + Clone + 'static, + Tx: PoolTransaction + Clone + 'static, +{ + type Transaction = Tx; + + async fn validate_transaction( + &self, + origin: TransactionOrigin, + transaction: Self::Transaction, + ) -> TransactionValidationOutcome { + let hash = *transaction.hash(); + let (tx, rx) = oneshot::channel(); + { + let to_validation_task = self.to_validation_task.clone(); + let to_validation_task = to_validation_task.lock().await; + let validator = Arc::clone(&self.validator.inner); + let res = to_validation_task + .send(Box::pin(async move { + let res = validator.validate_transaction(origin, transaction).await; + let _ = tx.send(res); + })) + .await; + if res.is_err() { + return TransactionValidationOutcome::Error( + hash, + Box::new(TransactionValidatorError::ValidationServiceUnreachable), + ) + } + } + + match rx.await { + Ok(res) => res, + Err(_) => TransactionValidationOutcome::Error( + hash, + Box::new(TransactionValidatorError::ValidationServiceUnreachable), + ), + } + } +} diff --git a/examples/network-txpool.rs b/examples/network-txpool.rs index 84e605b883db..23580d1457b7 100644 --- a/examples/network-txpool.rs +++ b/examples/network-txpool.rs @@ -57,8 +57,8 @@ async fn main() -> eyre::Result<()> { /// A transaction validator that determines all transactions to be valid. /// /// An actual validator impl like -/// [EthTransactionValidator](reth_transaction_pool::EthTransactionValidator) would require up to -/// date db access. 
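The executor's mechanism, distilled: each validation request becomes a boxed future sent over an mpsc channel to a worker task, and the caller awaits its own oneshot for the outcome; a dropped oneshot is what surfaces as ValidationServiceUnreachable. A self-contained sketch of that job-offloading pattern, assuming tokio with the rt and macros features and a string result standing in for the validation outcome:

use std::{future::Future, pin::Pin};
use tokio::sync::{mpsc, oneshot};

type Job = Pin<Box<dyn Future<Output = ()> + Send>>;

#[tokio::main]
async fn main() {
    let (job_tx, mut job_rx) = mpsc::channel::<Job>(8);

    // Worker task: drains the queue and drives each job to completion.
    let worker = tokio::spawn(async move {
        while let Some(job) = job_rx.recv().await {
            job.await;
        }
    });

    // Caller side: package the work together with a oneshot for the result.
    let (res_tx, res_rx) = oneshot::channel();
    let job: Job = Box::pin(async move {
        let outcome = "validated"; // stand-in for the actual validation work
        let _ = res_tx.send(outcome);
    });
    job_tx.send(job).await.expect("worker alive");

    assert_eq!(res_rx.await.unwrap(), "validated");

    drop(job_tx); // closing the channel lets the worker loop exit
    worker.await.unwrap();
}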
+/// [TransactionValidationTaskExecutor](reth_transaction_pool::TransactionValidationTaskExecutor) +/// would require up to date db access. /// /// CAUTION: This validator is not safe to use since it doesn't actually validate the transaction's /// properties such as chain id, balance, nonce, etc. From 290474565067b8e25a111140794f79fb127f0ab2 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Fri, 18 Aug 2023 15:57:28 +0100 Subject: [PATCH 463/722] chore: bump `metrics` (#4265) --- Cargo.lock | 102 ++++++------------ bin/reth/Cargo.toml | 7 +- bin/reth/src/prometheus_exporter.rs | 2 +- crates/blockchain-tree/Cargo.toml | 7 +- crates/blockchain-tree/src/metrics.rs | 2 +- crates/consensus/beacon/Cargo.toml | 5 +- .../beacon/src/engine/invalid_headers.rs | 2 +- crates/consensus/beacon/src/engine/metrics.rs | 2 +- crates/metrics/Cargo.toml | 2 +- crates/metrics/metrics-derive/Cargo.toml | 2 +- crates/net/downloaders/Cargo.toml | 5 +- crates/net/downloaders/src/metrics.rs | 2 +- crates/net/eth-wire/Cargo.toml | 3 + crates/net/eth-wire/src/p2pstream.rs | 2 +- crates/net/network/Cargo.toml | 5 +- crates/net/network/src/metrics.rs | 2 +- crates/payload/basic/Cargo.toml | 5 +- crates/payload/basic/src/metrics.rs | 5 +- crates/payload/builder/Cargo.toml | 5 +- crates/payload/builder/src/metrics.rs | 2 +- crates/prune/Cargo.toml | 3 + crates/prune/src/lib.rs | 2 +- crates/rpc/rpc-builder/Cargo.toml | 5 +- crates/rpc/rpc-builder/src/metrics.rs | 2 +- crates/rpc/rpc/Cargo.toml | 6 +- crates/rpc/rpc/src/eth/cache/metrics.rs | 5 +- crates/stages/Cargo.toml | 5 +- crates/stages/src/lib.rs | 2 +- crates/stages/src/metrics/sync_metrics.rs | 5 +- crates/storage/db/Cargo.toml | 5 +- .../storage/db/src/implementation/mdbx/tx.rs | 2 +- crates/tasks/Cargo.toml | 7 +- crates/tasks/src/metrics.rs | 5 +- crates/transaction-pool/Cargo.toml | 5 +- crates/transaction-pool/src/metrics.rs | 2 +- 35 files changed, 113 insertions(+), 117 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 969ed1ddb6b6..319973a585c8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3823,15 +3823,6 @@ dependencies = [ "linked-hash-map", ] -[[package]] -name = "mach" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa" -dependencies = [ - "libc", -] - [[package]] name = "mach2" version = "0.4.1" @@ -3892,17 +3883,6 @@ dependencies = [ "autocfg", ] -[[package]] -name = "metrics" -version = "0.20.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b9b8653cec6897f73b519a43fba5ee3d50f62fe9af80b428accdcc093b4a849" -dependencies = [ - "ahash 0.7.6", - "metrics-macros 0.6.0", - "portable-atomic 0.3.20", -] - [[package]] name = "metrics" version = "0.21.1" @@ -3910,40 +3890,28 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fde3af1a009ed76a778cb84fdef9e7dbbdf5775ae3e4cc1f434a6a307f6f76c5" dependencies = [ "ahash 0.8.3", - "metrics-macros 0.7.0", - "portable-atomic 1.4.2", + "metrics-macros", + "portable-atomic", ] [[package]] name = "metrics-exporter-prometheus" -version = "0.11.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8603921e1f54ef386189335f288441af761e0fc61bcb552168d9cedfe63ebc70" +checksum = "8a4964177ddfdab1e3a2b37aec7cf320e14169abb0ed73999f558136409178d5" dependencies = [ + "base64 0.21.2", "hyper", "indexmap 1.9.3", "ipnet", - "metrics 0.20.1", + "metrics", "metrics-util", - "parking_lot 0.12.1", - 
"portable-atomic 0.3.20", "quanta", "thiserror", "tokio", "tracing", ] -[[package]] -name = "metrics-macros" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "731f8ecebd9f3a4aa847dfe75455e4757a45da40a7793d2f0b1f9b6ed18b23f3" -dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 1.0.109", -] - [[package]] name = "metrics-macros" version = "0.7.0" @@ -3963,7 +3931,7 @@ checksum = "1c93f6ad342d3f7bc14724147e2dbc6eb6fdbe5a832ace16ea23b73618e8cc17" dependencies = [ "libproc", "mach2", - "metrics 0.21.1", + "metrics", "once_cell", "procfs", "rlimit", @@ -3972,20 +3940,18 @@ dependencies = [ [[package]] name = "metrics-util" -version = "0.14.0" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d24dc2dbae22bff6f1f9326ffce828c9f07ef9cc1e8002e5279f845432a30a" +checksum = "111cb375987443c3de8d503580b536f77dc8416d32db62d9456db5d93bd7ac47" dependencies = [ "aho-corasick 0.7.20", "crossbeam-epoch", "crossbeam-utils", - "hashbrown 0.12.3", + "hashbrown 0.13.2", "indexmap 1.9.3", - "metrics 0.20.1", + "metrics", "num_cpus", "ordered-float", - "parking_lot 0.12.1", - "portable-atomic 0.3.20", "quanta", "radix_trie", "sketches-ddsketch", @@ -4360,9 +4326,9 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "ordered-float" -version = "2.10.0" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7940cf2ca942593318d07fcf2596cdca60a85c9e7fab408a5e21a4f9dcd40d87" +checksum = "7417b1484e3641a8791af3c3123cdc083ac60a0d262a2f281b6125d58917caf4" dependencies = [ "num-traits", ] @@ -4689,15 +4655,6 @@ dependencies = [ "universal-hash", ] -[[package]] -name = "portable-atomic" -version = "0.3.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e30165d31df606f5726b090ec7592c308a0eaf61721ff64c9a3018e344a8753e" -dependencies = [ - "portable-atomic 1.4.2", -] - [[package]] name = "portable-atomic" version = "1.4.2" @@ -4926,16 +4883,16 @@ dependencies = [ [[package]] name = "quanta" -version = "0.10.1" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e31331286705f455e56cca62e0e717158474ff02b7936c1fa596d983f4ae27" +checksum = "a17e662a7a8291a865152364c20c7abc5e60486ab2001e8ec10b24862de0b9ab" dependencies = [ "crossbeam-utils", "libc", - "mach", + "mach2", "once_cell", "raw-cpuid", - "wasi 0.10.2+wasi-snapshot-preview1", + "wasi 0.11.0+wasi-snapshot-preview1", "web-sys", "winapi", ] @@ -5254,6 +5211,7 @@ dependencies = [ "hyper", "jemalloc-ctl", "jemallocator", + "metrics", "metrics-exporter-prometheus", "metrics-process", "metrics-util", @@ -5327,6 +5285,7 @@ version = "0.1.0-alpha.6" dependencies = [ "futures-core", "futures-util", + "metrics", "reth-metrics", "reth-payload-builder", "reth-primitives", @@ -5346,6 +5305,7 @@ version = "0.1.0-alpha.6" dependencies = [ "assert_matches", "futures", + "metrics", "reth-blockchain-tree", "reth-consensus-common", "reth-db", @@ -5376,6 +5336,7 @@ dependencies = [ "assert_matches", "linked_hash_set", "lru 0.10.1", + "metrics", "parking_lot 0.12.1", "reth-db", "reth-interfaces", @@ -5444,6 +5405,7 @@ dependencies = [ "futures", "heapless", "iai", + "metrics", "modular-bitfield", "page_size", "parity-scale-codec", @@ -5527,6 +5489,7 @@ dependencies = [ "futures", "futures-util", "itertools 0.11.0", + "metrics", "pin-project", "rayon", "reth-db", @@ -5586,6 +5549,7 @@ dependencies = [ "futures", "hex", 
"hex-literal 0.3.4", + "metrics", "pin-project", "proptest", "proptest-derive", @@ -5689,7 +5653,7 @@ name = "reth-metrics" version = "0.1.0-alpha.6" dependencies = [ "futures", - "metrics 0.20.1", + "metrics", "reth-metrics-derive", "tokio", ] @@ -5698,7 +5662,7 @@ dependencies = [ name = "reth-metrics-derive" version = "0.1.0-alpha.6" dependencies = [ - "metrics 0.20.1", + "metrics", "once_cell", "proc-macro2 1.0.66", "quote 1.0.32", @@ -5749,6 +5713,7 @@ dependencies = [ "humantime-serde", "linked-hash-map", "linked_hash_set", + "metrics", "parking_lot 0.12.1", "pin-project", "rand 0.8.5", @@ -5799,6 +5764,7 @@ name = "reth-payload-builder" version = "0.1.0-alpha.6" dependencies = [ "futures-util", + "metrics", "reth-interfaces", "reth-metrics", "reth-primitives", @@ -5892,6 +5858,7 @@ version = "0.1.0-alpha.6" dependencies = [ "assert_matches", "itertools 0.11.0", + "metrics", "rayon", "reth-db", "reth-interfaces", @@ -5987,6 +5954,7 @@ dependencies = [ "jsonrpsee", "jsonwebtoken", "lazy_static", + "metrics", "pin-project", "rand 0.8.5", "rayon", @@ -6050,6 +6018,7 @@ version = "0.1.0-alpha.6" dependencies = [ "hyper", "jsonrpsee", + "metrics", "reth-beacon-consensus", "reth-interfaces", "reth-ipc", @@ -6131,6 +6100,7 @@ dependencies = [ "criterion", "futures-util", "itertools 0.11.0", + "metrics", "num-traits", "paste", "pin-project", @@ -6163,6 +6133,7 @@ version = "0.1.0-alpha.6" dependencies = [ "dyn-clone", "futures-util", + "metrics", "reth-metrics", "thiserror", "tokio", @@ -6192,6 +6163,7 @@ dependencies = [ "criterion", "fnv", "futures-util", + "metrics", "parking_lot 0.12.1", "paste", "proptest", @@ -8196,12 +8168,6 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" -[[package]] -name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 24e35172428a..9666b44f295b 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -38,7 +38,6 @@ reth-net-nat = { path = "../../crates/net/nat" } reth-payload-builder.workspace = true reth-basic-payload-builder = { path = "../../crates/payload/basic" } reth-discv4 = { path = "../../crates/net/discv4" } -reth-metrics.workspace = true reth-prune = { path = "../../crates/prune" } reth-trie = { path = "../../crates/trie" } @@ -58,9 +57,11 @@ confy = "0.5" toml = { version = "0.7", features = ["display"] } # metrics -metrics-exporter-prometheus = "0.11.0" -metrics-util = "0.14.0" +metrics-exporter-prometheus = "0.12.1" +metrics-util = "0.15.0" metrics-process = "1.0.9" +reth-metrics.workspace = true +metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation # test vectors generation proptest.workspace = true diff --git a/bin/reth/src/prometheus_exporter.rs b/bin/reth/src/prometheus_exporter.rs index c1cea799d918..cea3c0633bff 100644 --- a/bin/reth/src/prometheus_exporter.rs +++ b/bin/reth/src/prometheus_exporter.rs @@ -7,7 +7,7 @@ use hyper::{ use metrics_exporter_prometheus::{PrometheusBuilder, PrometheusHandle}; use metrics_util::layers::{PrefixLayer, Stack}; use reth_db::{database::Database, tables, DatabaseEnv}; -use reth_metrics::metrics::{self, absolute_counter, describe_counter, Unit}; +use 
reth_metrics::metrics::{absolute_counter, describe_counter, Unit}; use std::{convert::Infallible, net::SocketAddr, sync::Arc}; pub(crate) trait Hook: Fn() + Send + Sync {} diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index aacd3b9b380e..12dbd33d2136 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -18,7 +18,6 @@ normal = [ reth-primitives.workspace = true reth-interfaces.workspace = true reth-db = { path = "../storage/db" } -reth-metrics = { workspace = true, features = ["common"] } reth-provider.workspace = true reth-stages = { path = "../stages" } @@ -27,7 +26,11 @@ parking_lot.workspace = true lru = "0.10" tracing.workspace = true -# mics +# metrics +reth-metrics = { workspace = true, features = ["common"] } +metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation + +# misc aquamarine = "0.3.0" linked_hash_set = "0.1.4" diff --git a/crates/blockchain-tree/src/metrics.rs b/crates/blockchain-tree/src/metrics.rs index b49ad3c5b921..acc82bb644ef 100644 --- a/crates/blockchain-tree/src/metrics.rs +++ b/crates/blockchain-tree/src/metrics.rs @@ -1,5 +1,5 @@ use reth_metrics::{ - metrics::{self, Counter, Gauge}, + metrics::{Counter, Gauge}, Metrics, }; diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 6b23c39642b1..4d44fdc28b57 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -18,7 +18,6 @@ reth-provider.workspace = true reth-rpc-types.workspace = true reth-tasks.workspace = true reth-payload-builder.workspace = true -reth-metrics.workspace = true reth-prune = { path = "../../prune" } # async @@ -26,6 +25,10 @@ tokio = { workspace = true, features = ["sync"] } tokio-stream.workspace = true futures.workspace = true +# metrics +reth-metrics.workspace = true +metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation + # misc tracing.workspace = true thiserror.workspace = true diff --git a/crates/consensus/beacon/src/engine/invalid_headers.rs b/crates/consensus/beacon/src/engine/invalid_headers.rs index 6006c047adae..251f8db18c0c 100644 --- a/crates/consensus/beacon/src/engine/invalid_headers.rs +++ b/crates/consensus/beacon/src/engine/invalid_headers.rs @@ -1,5 +1,5 @@ use reth_metrics::{ - metrics::{self, Counter, Gauge}, + metrics::{Counter, Gauge}, Metrics, }; use reth_primitives::{Header, SealedHeader, H256}; diff --git a/crates/consensus/beacon/src/engine/metrics.rs b/crates/consensus/beacon/src/engine/metrics.rs index 66a95dc578ad..6daae69eaa73 100644 --- a/crates/consensus/beacon/src/engine/metrics.rs +++ b/crates/consensus/beacon/src/engine/metrics.rs @@ -1,5 +1,5 @@ use reth_metrics::{ - metrics::{self, Counter, Gauge, Histogram}, + metrics::{Counter, Gauge, Histogram}, Metrics, }; diff --git a/crates/metrics/Cargo.toml b/crates/metrics/Cargo.toml index 63a14cc8344d..dbc9ab908109 100644 --- a/crates/metrics/Cargo.toml +++ b/crates/metrics/Cargo.toml @@ -13,7 +13,7 @@ description = "reth metrics utilities" reth-metrics-derive = { path = "./metrics-derive" } # metrics -metrics = "0.20.1" +metrics = "0.21.1" # async tokio = { workspace = true, features = ["full"], optional = true } diff --git a/crates/metrics/metrics-derive/Cargo.toml b/crates/metrics/metrics-derive/Cargo.toml index 23e35c3954b7..4a2b7c2c7b52 100644 --- a/crates/metrics/metrics-derive/Cargo.toml +++ b/crates/metrics/metrics-derive/Cargo.toml @@ -18,6 +18,6 @@ regex = "1.6.0" once_cell = 
"1.17.0" [dev-dependencies] -metrics = "0.20.1" +metrics = "0.21.1" trybuild = "1.0" serial_test = "0.10" diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index 281dbfc6c784..aecd48abdaa6 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -14,7 +14,6 @@ reth-interfaces.workspace = true reth-primitives.workspace = true reth-db = { path = "../../storage/db" } reth-tasks.workspace = true -reth-metrics.workspace = true # async futures.workspace = true @@ -24,6 +23,10 @@ tokio = { workspace = true, features = ["sync"] } tokio-stream.workspace = true tokio-util = { workspace = true, features = ["codec"] } +# metrics +reth-metrics.workspace = true +metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation + # misc tracing.workspace = true rayon.workspace = true diff --git a/crates/net/downloaders/src/metrics.rs b/crates/net/downloaders/src/metrics.rs index a227f38f8b7b..629243ee66b4 100644 --- a/crates/net/downloaders/src/metrics.rs +++ b/crates/net/downloaders/src/metrics.rs @@ -1,6 +1,6 @@ use reth_interfaces::p2p::error::DownloadError; use reth_metrics::{ - metrics::{self, Counter, Gauge}, + metrics::{Counter, Gauge}, Metrics, }; diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index d3042c94feee..d44ee3b68608 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -24,7 +24,10 @@ reth-rlp = { workspace = true, features = [ "ethereum-types", "smol_str", ] } + +# metrics reth-metrics.workspace = true +metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation # used for Chain and builders ethers-core = { workspace = true, default-features = false } diff --git a/crates/net/eth-wire/src/p2pstream.rs b/crates/net/eth-wire/src/p2pstream.rs index bc1e34a48381..8b2b9e0fac45 100644 --- a/crates/net/eth-wire/src/p2pstream.rs +++ b/crates/net/eth-wire/src/p2pstream.rs @@ -9,7 +9,7 @@ use crate::{ use futures::{Sink, SinkExt, StreamExt}; use pin_project::pin_project; use reth_codecs::derive_arbitrary; -use reth_metrics::metrics::{self, counter}; +use reth_metrics::metrics::counter; use reth_primitives::{ bytes::{Buf, BufMut, Bytes, BytesMut}, hex, diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index 438b96cf9602..e46386f29741 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -31,7 +31,6 @@ reth-rlp-derive = { path = "../../rlp/rlp-derive" } reth-tasks.workspace = true reth-transaction-pool.workspace = true reth-provider.workspace = true -reth-metrics = { workspace = true, features = ["common"] } reth-rpc-types.workspace = true # async/futures @@ -46,6 +45,10 @@ serde = { workspace = true, optional = true } humantime-serde = { version = "1.1", optional = true } serde_json = { workspace = true, optional = true } +# metrics +reth-metrics = { workspace = true, features = ["common"] } +metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation + # misc auto_impl = "1" aquamarine = "0.3.0" diff --git a/crates/net/network/src/metrics.rs b/crates/net/network/src/metrics.rs index 9c527ea01f0a..7245624ca17b 100644 --- a/crates/net/network/src/metrics.rs +++ b/crates/net/network/src/metrics.rs @@ -1,6 +1,6 @@ use reth_eth_wire::DisconnectReason; use reth_metrics::{ - metrics::{self, Counter, Gauge}, + metrics::{Counter, Gauge}, Metrics, }; diff --git a/crates/payload/basic/Cargo.toml b/crates/payload/basic/Cargo.toml 
index 874c9842d830..da976dba7090 100644 --- a/crates/payload/basic/Cargo.toml +++ b/crates/payload/basic/Cargo.toml @@ -17,7 +17,6 @@ reth-rlp.workspace = true reth-provider.workspace = true reth-payload-builder.workspace = true reth-tasks.workspace = true -reth-metrics.workspace = true ## ethereum revm.workspace = true @@ -27,5 +26,9 @@ tokio = { workspace = true, features = ["sync", "time"] } futures-core = "0.3" futures-util.workspace = true +# metrics +reth-metrics.workspace = true +metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation + ## misc tracing.workspace = true diff --git a/crates/payload/basic/src/metrics.rs b/crates/payload/basic/src/metrics.rs index 560db7b3e162..c7d9bf7968ce 100644 --- a/crates/payload/basic/src/metrics.rs +++ b/crates/payload/basic/src/metrics.rs @@ -1,9 +1,6 @@ //! Metrics for the payload builder impl -use reth_metrics::{ - metrics::{self, Counter}, - Metrics, -}; +use reth_metrics::{metrics::Counter, Metrics}; /// Transaction pool metrics #[derive(Metrics)] diff --git a/crates/payload/builder/Cargo.toml b/crates/payload/builder/Cargo.toml index 664fae7039a2..1a807017fa22 100644 --- a/crates/payload/builder/Cargo.toml +++ b/crates/payload/builder/Cargo.toml @@ -15,7 +15,6 @@ reth-rpc-types.workspace = true reth-rlp.workspace = true reth-interfaces.workspace = true reth-revm-primitives = { path = "../../revm/revm-primitives" } -reth-metrics.workspace = true ## ethereum revm-primitives.workspace = true @@ -25,6 +24,10 @@ tokio = { workspace = true, features = ["sync"] } tokio-stream.workspace = true futures-util.workspace = true +## metrics +reth-metrics.workspace = true +metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation + ## misc thiserror.workspace = true sha2 = { version = "0.10", default-features = false } diff --git a/crates/payload/builder/src/metrics.rs b/crates/payload/builder/src/metrics.rs index c2d042d730ac..a400b74fbf02 100644 --- a/crates/payload/builder/src/metrics.rs +++ b/crates/payload/builder/src/metrics.rs @@ -1,7 +1,7 @@ //! Payload builder service metrics. 
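The recurring Cargo.toml comment is the crux of this patch: the `#[derive(Metrics)]` expansion resolves the metrics crate via the absolute `::metrics` path, which only works when `metrics` is a direct dependency of the expanding crate, hence the new entries and the dropped `self` re-imports. A hedged sketch of the post-change pattern (scope and field names are illustrative):

```rust
use reth_metrics::{metrics::Counter, Metrics};

/// Example metrics; the derive registers each field under the scope and emits
/// code that refers to the metrics crate as `::metrics`.
#[derive(Metrics)]
#[metrics(scope = "example")]
struct ExampleMetrics {
    /// Number of payload jobs started.
    jobs_started: Counter,
}
```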
use reth_metrics::{ - metrics::{self, Counter, Gauge}, + metrics::{Counter, Gauge}, Metrics, }; diff --git a/crates/prune/Cargo.toml b/crates/prune/Cargo.toml index 4ef111c6cda0..79bc0aa95469 100644 --- a/crates/prune/Cargo.toml +++ b/crates/prune/Cargo.toml @@ -16,7 +16,10 @@ reth-primitives.workspace = true reth-db.workspace = true reth-provider.workspace = true reth-interfaces.workspace = true + +# metrics reth-metrics.workspace = true +metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation # misc tracing.workspace = true diff --git a/crates/prune/src/lib.rs b/crates/prune/src/lib.rs index b6264e47ceb3..56999c50c135 100644 --- a/crates/prune/src/lib.rs +++ b/crates/prune/src/lib.rs @@ -2,6 +2,6 @@ mod error; mod metrics; mod pruner; +use crate::metrics::Metrics; pub use error::PrunerError; -use metrics::Metrics; pub use pruner::{BatchSizes, Pruner, PrunerResult, PrunerWithResult}; diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index e7f7cf826d55..072cdbb3a29f 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -21,7 +21,6 @@ reth-rpc-engine-api = { path = "../rpc-engine-api" } reth-rpc-types.workspace = true reth-tasks.workspace = true reth-transaction-pool.workspace = true -reth-metrics = { workspace = true, features = ["common"] } # rpc/net jsonrpsee = { workspace = true, features = ["server"] } @@ -29,6 +28,10 @@ tower-http = { version = "0.4", features = ["full"] } tower = { version = "0.4", features = ["full"] } hyper = "0.14" +# metrics +reth-metrics = { workspace = true, features = ["common"] } +metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation + # misc strum = { workspace = true, features = ["derive"] } serde = { workspace = true, features = ["derive"] } diff --git a/crates/rpc/rpc-builder/src/metrics.rs b/crates/rpc/rpc-builder/src/metrics.rs index ee3ed62328a8..9560bc363708 100644 --- a/crates/rpc/rpc-builder/src/metrics.rs +++ b/crates/rpc/rpc-builder/src/metrics.rs @@ -3,7 +3,7 @@ use jsonrpsee::{ server::logger::{HttpRequest, Logger, MethodKind, Params, TransportProtocol}, }; use reth_metrics::{ - metrics::{self, Counter, Histogram}, + metrics::{Counter, Histogram}, Metrics, }; use std::{net::SocketAddr, time::Instant}; diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 3be464d0afba..bc3df6431c4d 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -22,7 +22,6 @@ reth-network-api.workspace = true reth-rpc-engine-api = { path = "../rpc-engine-api" } reth-revm = { path = "../../revm" } reth-tasks.workspace = true -reth-metrics.workspace = true reth-consensus-common = { path = "../../consensus/common" } reth-rpc-types-compat.workspace = true lazy_static = "*" @@ -52,6 +51,11 @@ tokio-util = "0.7" pin-project.workspace = true rayon.workspace = true +# metrics +reth-metrics.workspace = true +metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation + +# misc bytes.workspace = true secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } serde = { workspace = true, features = ["derive"] } diff --git a/crates/rpc/rpc/src/eth/cache/metrics.rs b/crates/rpc/rpc/src/eth/cache/metrics.rs index eb1f092e8094..34a41e3c5111 100644 --- a/crates/rpc/rpc/src/eth/cache/metrics.rs +++ b/crates/rpc/rpc/src/eth/cache/metrics.rs @@ -1,7 +1,4 @@ -use reth_metrics::{ - metrics::{self, Gauge}, - Metrics, -}; +use 
reth_metrics::{metrics::Gauge, Metrics}; #[derive(Metrics)] #[metrics(scope = "rpc.eth_cache")] diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index 960ca15aa572..98846248b375 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -21,7 +21,6 @@ reth-interfaces.workspace = true reth-db = { path = "../storage/db" } reth-codecs = { path = "../storage/codecs" } reth-provider.workspace = true -reth-metrics.workspace = true reth-trie = { path = "../trie" } # async @@ -37,6 +36,10 @@ tracing.workspace = true # io serde.workspace = true +# metrics +reth-metrics.workspace = true +metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation + # misc thiserror.workspace = true aquamarine = "0.3.0" diff --git a/crates/stages/src/lib.rs b/crates/stages/src/lib.rs index ebfbac6f7934..6b988366e1c7 100644 --- a/crates/stages/src/lib.rs +++ b/crates/stages/src/lib.rs @@ -81,7 +81,7 @@ pub mod stages; pub mod sets; +pub use crate::metrics::*; pub use error::*; -pub use metrics::*; pub use pipeline::*; pub use stage::*; diff --git a/crates/stages/src/metrics/sync_metrics.rs b/crates/stages/src/metrics/sync_metrics.rs index 93b1c86eeac6..c242a922791e 100644 --- a/crates/stages/src/metrics/sync_metrics.rs +++ b/crates/stages/src/metrics/sync_metrics.rs @@ -1,7 +1,4 @@ -use reth_metrics::{ - metrics::{self, Gauge}, - Metrics, -}; +use reth_metrics::{metrics::Gauge, Metrics}; use reth_primitives::stage::StageId; use std::collections::HashMap; diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index 19b7a8a509e5..433a0ccee9a9 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -14,7 +14,6 @@ reth-primitives.workspace = true reth-interfaces.workspace = true reth-codecs = { path = "../codecs" } reth-libmdbx = { path = "../libmdbx-rs", optional = true, features = ["return-borrowed"] } -reth-metrics.workspace = true # codecs serde = { workspace = true, default-features = false } @@ -31,6 +30,10 @@ secp256k1 = { workspace = true, default-features = false, features = [ ], optional = true } modular-bitfield = "0.11.2" +# metrics +reth-metrics.workspace = true +metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation + # misc bytes.workspace = true page_size = "0.4.2" diff --git a/crates/storage/db/src/implementation/mdbx/tx.rs b/crates/storage/db/src/implementation/mdbx/tx.rs index 6e8558726c80..052333399f14 100644 --- a/crates/storage/db/src/implementation/mdbx/tx.rs +++ b/crates/storage/db/src/implementation/mdbx/tx.rs @@ -10,7 +10,7 @@ use crate::{ use parking_lot::RwLock; use reth_interfaces::db::DatabaseWriteOperation; use reth_libmdbx::{ffi::DBI, EnvironmentKind, Transaction, TransactionKind, WriteFlags, RW}; -use reth_metrics::metrics::{self, histogram}; +use reth_metrics::metrics::histogram; use std::{marker::PhantomData, str::FromStr, sync::Arc, time::Instant}; /// Wrapper for the libmdbx transaction. 
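With the direct dependency in place, recording through the re-export is unchanged; a hedged sketch (the metric name is illustrative, not the one used in `tx.rs`):

```rust
use reth_metrics::metrics::histogram;
use std::time::Instant;

fn timed_operation() {
    let start = Instant::now();
    // ... perform the database operation ...
    // `histogram!` is the metrics 0.21 macro re-exported by reth-metrics.
    histogram!("example.operation.duration_seconds", start.elapsed().as_secs_f64());
}
```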
diff --git a/crates/tasks/Cargo.toml b/crates/tasks/Cargo.toml index 7a754014bac8..fa9f52b33ed7 100644 --- a/crates/tasks/Cargo.toml +++ b/crates/tasks/Cargo.toml @@ -15,13 +15,14 @@ tokio = { workspace = true, features = ["sync", "rt"] } tracing-futures = "0.2" futures-util.workspace = true +## metrics +reth-metrics.workspace = true +metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation + ## misc tracing.workspace = true thiserror.workspace = true dyn-clone = "1.0" -## rpc/metrics -reth-metrics.workspace = true - [dev-dependencies] tokio = { workspace = true, features = ["sync", "rt", "rt-multi-thread", "time", "macros"] } diff --git a/crates/tasks/src/metrics.rs b/crates/tasks/src/metrics.rs index 6d4924045b41..5fa6c252f657 100644 --- a/crates/tasks/src/metrics.rs +++ b/crates/tasks/src/metrics.rs @@ -1,8 +1,5 @@ //! Task Executor Metrics -use reth_metrics::{ - metrics::{self, Counter}, - Metrics, -}; +use reth_metrics::{metrics::Counter, Metrics}; /// Task Executor Metrics #[derive(Metrics, Clone)] diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 6a2704d8af38..ecea45a1c419 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -22,7 +22,6 @@ reth-primitives.workspace = true reth-provider.workspace = true reth-interfaces.workspace = true reth-rlp.workspace = true -reth-metrics.workspace = true reth-tasks.workspace = true # async/futures @@ -32,6 +31,10 @@ parking_lot.workspace = true tokio = { workspace = true, default-features = false, features = ["sync"] } tokio-stream.workspace = true +# metrics +reth-metrics.workspace = true +metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation + # misc aquamarine = "0.3.0" thiserror.workspace = true diff --git a/crates/transaction-pool/src/metrics.rs b/crates/transaction-pool/src/metrics.rs index 1a29e4b75f33..574043aa2a8b 100644 --- a/crates/transaction-pool/src/metrics.rs +++ b/crates/transaction-pool/src/metrics.rs @@ -1,7 +1,7 @@ //! Transaction pool metrics. 
use reth_metrics::{ - metrics::{self, Counter, Gauge}, + metrics::{Counter, Gauge}, Metrics, }; From 849a47efb86e995ca4be22041581371f50156024 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 18 Aug 2023 17:07:30 +0200 Subject: [PATCH 464/722] fix: prevent node info zero address (#4268) --- crates/net/network/src/network.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 1175eea863e8..42fe5b4e5cf0 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -186,7 +186,17 @@ impl PeersInfo for NetworkHandle { fn local_node_record(&self) -> NodeRecord { let id = *self.peer_id(); - let socket_addr = *self.inner.listener_address.lock(); + let mut socket_addr = *self.inner.listener_address.lock(); + + if socket_addr.ip().is_unspecified() { + // zero address is invalid + if socket_addr.ip().is_ipv4() { + socket_addr.set_ip(std::net::IpAddr::V4(std::net::Ipv4Addr::LOCALHOST)); + } else { + socket_addr.set_ip(std::net::IpAddr::V6(std::net::Ipv6Addr::LOCALHOST)); + } + } + NodeRecord::new(socket_addr, id) } } From 466934c8f966237c5f1e5925e25ca77a8833137a Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 18 Aug 2023 18:32:01 +0300 Subject: [PATCH 465/722] feat(trie): account proofs (#4249) --- Cargo.lock | 1 + crates/primitives/src/trie/nodes/mod.rs | 2 +- crates/trie/Cargo.toml | 1 + crates/trie/src/errors.rs | 17 ++ crates/trie/src/lib.rs | 5 +- crates/trie/src/proof.rs | 329 ++++++++++++++++++++++++ 6 files changed, 353 insertions(+), 2 deletions(-) create mode 100644 crates/trie/src/proof.rs diff --git a/Cargo.lock b/Cargo.lock index 319973a585c8..2b3866cf227e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6188,6 +6188,7 @@ dependencies = [ "criterion", "derive_more", "hex", + "pretty_assertions", "proptest", "reth-db", "reth-interfaces", diff --git a/crates/primitives/src/trie/nodes/mod.rs b/crates/primitives/src/trie/nodes/mod.rs index df38b58faadc..c4807444d838 100644 --- a/crates/primitives/src/trie/nodes/mod.rs +++ b/crates/primitives/src/trie/nodes/mod.rs @@ -15,7 +15,7 @@ pub use self::{ /// The range of valid child indexes. pub const CHILD_INDEX_RANGE: Range = 0..16; -/// Given an RLP encoded node, returns either RLP(Node) or RLP(keccak(RLP(node))) +/// Given an RLP encoded node, returns either RLP(node) or RLP(keccak(RLP(node))) fn rlp_node(rlp: &[u8]) -> Vec { if rlp.len() < H256::len_bytes() { rlp.to_vec() diff --git a/crates/trie/Cargo.toml b/crates/trie/Cargo.toml index 3c37d4295e8e..ae1840a60617 100644 --- a/crates/trie/Cargo.toml +++ b/crates/trie/Cargo.toml @@ -45,6 +45,7 @@ proptest.workspace = true tokio = { workspace = true, default-features = false, features = ["sync", "rt", "macros"] } tokio-stream.workspace = true criterion = "0.5" +pretty_assertions = "1.3.0" [features] test-utils = ["triehash"] diff --git a/crates/trie/src/errors.rs b/crates/trie/src/errors.rs index 6b742318a16a..9999de96e6fc 100644 --- a/crates/trie/src/errors.rs +++ b/crates/trie/src/errors.rs @@ -1,3 +1,4 @@ +use reth_primitives::H256; use thiserror::Error; /// State root error. @@ -27,3 +28,19 @@ pub enum StorageRootError { #[error(transparent)] DB(#[from] reth_db::DatabaseError), } + +/// Proof error. +#[derive(Error, PartialEq, Eq, Clone, Debug)] +pub enum ProofError { + /// Leaf account missing + #[error( + "Expected leaf account with key greater or equal to {0:?} is missing from the database" + )] + LeafAccountMissing(H256), + /// Storage root error. 
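+ /// (emitted when recomputing an account's storage root during proof restoration fails)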
+ #[error(transparent)]
+ StorageRootError(#[from] StorageRootError),
+ /// Internal database error.
+ #[error(transparent)]
+ DB(#[from] reth_db::DatabaseError),
+}
diff --git a/crates/trie/src/lib.rs b/crates/trie/src/lib.rs
index 507fb950c6d9..ae89671def47 100644
--- a/crates/trie/src/lib.rs
+++ b/crates/trie/src/lib.rs
@@ -36,7 +36,10 @@ pub mod hashed_cursor;
 pub mod walker;
 
 mod errors;
-pub use errors::{StateRootError, StorageRootError};
+pub use errors::*;
+
+/// Merkle proof generation.
+pub mod proof;
 
 /// The implementation of the Merkle Patricia Trie.
 mod trie;
diff --git a/crates/trie/src/proof.rs b/crates/trie/src/proof.rs
new file mode 100644
index 000000000000..1450843d8c43
--- /dev/null
+++ b/crates/trie/src/proof.rs
@@ -0,0 +1,329 @@
+use crate::{
+ account::EthAccount,
+ hashed_cursor::{HashedAccountCursor, HashedCursorFactory},
+ prefix_set::PrefixSet,
+ trie_cursor::{AccountTrieCursor, TrieCursor},
+ walker::TrieWalker,
+ ProofError, StorageRoot,
+};
+use reth_db::{cursor::DbCursorRO, tables, transaction::DbTx};
+use reth_primitives::{
+ keccak256,
+ trie::{
+ nodes::{rlp_hash, BranchNode, LeafNode, CHILD_INDEX_RANGE},
+ BranchNodeCompact, HashBuilder, Nibbles,
+ },
+ Address, Bytes, H256,
+};
+use reth_rlp::Encodable;
+
+/// A struct for generating Merkle proofs.
+///
+/// Proof generation starts by acquiring the trie walker and restoring the root node in the trie.
+/// The root node is restored from its immediate children, which are stored in the database.
+///
+/// Upon encountering the child of the root node that matches the prefix of the requested account's
+/// hashed key, the proof generator traverses the path down to the leaf node (excluded, as we don't
+/// store leaf nodes in the database). The proof generator stops traversing the path upon
+/// encountering a branch node with no children matching the hashed key.
+///
+/// After traversing the branch node path, the proof generator attempts to restore the leaf node of
+/// the target account by looking up the target account info.
+/// If the leaf node exists, we encode it and add it to the proof, thus proving **inclusion**.
+/// If the leaf node does not exist, we return the proof as is, thus proving **exclusion**.
+///
+/// After traversing the path, the proof generator continues to restore the root node of the trie
+/// until completion. The root node is then inserted at the start of the proof.
+pub struct Proof<'a, 'b, TX, H> {
+ /// A reference to the database transaction.
+ tx: &'a TX,
+ /// The factory for hashed cursors.
+ hashed_cursor_factory: &'b H,
+}
+
+impl<'a, 'tx, TX> Proof<'a, 'a, TX, TX>
+where
+ TX: DbTx<'tx> + HashedCursorFactory<'a>,
+{
+ /// Create a new [Proof] instance.
+ pub fn new(tx: &'a TX) -> Self {
+ Self { tx, hashed_cursor_factory: tx }
+ }
+
+ /// Generate an account proof from intermediate nodes.
+ pub fn account_proof(&self, address: Address) -> Result<Vec<Bytes>, ProofError> {
+ let hashed_address = keccak256(address);
+ let target_nibbles = Nibbles::unpack(hashed_address);
+
+ let mut proof_restorer = ProofRestorer::new(self.hashed_cursor_factory)?;
+ let mut trie_cursor =
+ AccountTrieCursor::new(self.tx.cursor_read::<tables::AccountsTrie>()?);
+
+ // Create the walker and immediately advance it from the root key.
+ let mut walker = TrieWalker::new(&mut trie_cursor, PrefixSet::default());
+ walker.advance()?;
+
+ // Create a hash builder to rebuild the root node since it is not available in the database.
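+ // (Only the root's immediate children are persisted in `tables::AccountsTrie`;
+ // the root itself must be re-derived from them, as the type-level docs note.)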
+ let mut root_node_hash_builder = HashBuilder::default(); + + let mut proofs: Vec = Vec::new(); + while let Some(key) = walker.key() { + if target_nibbles.has_prefix(&key) { + debug_assert!(proofs.is_empty(), "Prefix must match a single key"); + proofs = self.traverse_path(walker.cursor, &mut proof_restorer, hashed_address)?; + } + + let value = walker.hash().unwrap(); + let is_in_db_trie = walker.children_are_in_trie(); + root_node_hash_builder.add_branch(key.clone(), value, is_in_db_trie); + walker.advance()?; + } + + // TODO: This is a hack to retrieve the root node from the hash builder. + // We should find a better way. + root_node_hash_builder.set_updates(true); + let _ = root_node_hash_builder.root(); + let (_, mut updates) = root_node_hash_builder.split(); + let root_node = updates.remove(&Nibbles::default()).expect("root node is present"); + + // Restore the root node RLP and prepend it to the proofs result + let root_node_rlp = proof_restorer.restore_branch_node(&Nibbles::default(), root_node)?; + proofs.insert(0, root_node_rlp); + + Ok(proofs) + } + + fn traverse_path>( + &self, + trie_cursor: &mut AccountTrieCursor, + proof_restorer: &mut ProofRestorer<'a, 'a, TX, TX>, + hashed_address: H256, + ) -> Result, ProofError> { + let mut intermediate_proofs = Vec::new(); + + let target = Nibbles::unpack(hashed_address); + let mut current_prefix = target.slice(0, 1); + while let Some((_, node)) = + trie_cursor.seek_exact(current_prefix.hex_data.to_vec().into())? + { + let branch_node_rlp = proof_restorer.restore_branch_node(¤t_prefix, node)?; + intermediate_proofs.push(branch_node_rlp); + + if current_prefix.len() < target.len() { + current_prefix.extend([target.0[current_prefix.len()]]); + } + } + + if let Some(leaf_node_rlp) = + proof_restorer.restore_target_leaf_node(hashed_address, current_prefix.len())? + { + intermediate_proofs.push(leaf_node_rlp); + } + + Ok(intermediate_proofs) + } +} + +struct ProofRestorer<'a, 'b, TX, H> +where + H: HashedCursorFactory<'b>, +{ + /// A reference to the database transaction. + tx: &'a TX, + /// The factory for hashed cursors. + hashed_cursor_factory: &'b H, + /// The hashed account cursor. 
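+ /// (used to seek accounts by hashed key when restoring branch and leaf nodes)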
+ hashed_account_cursor: H::AccountCursor, + /// Pre-allocated buffer for account RLP encoding + account_rlp_buf: Vec, + /// Pre-allocated buffer for branch/leaf node RLP encoding + node_rlp_buf: Vec, +} + +impl<'a, 'tx, TX> ProofRestorer<'a, 'a, TX, TX> +where + TX: DbTx<'tx> + HashedCursorFactory<'a>, +{ + fn new(tx: &'a TX) -> Result { + let hashed_account_cursor = tx.hashed_account_cursor()?; + Ok(Self { + tx, + hashed_cursor_factory: tx, + hashed_account_cursor, + account_rlp_buf: Vec::with_capacity(128), + node_rlp_buf: Vec::with_capacity(128), + }) + } + + fn restore_branch_node( + &mut self, + prefix: &Nibbles, + node: BranchNodeCompact, + ) -> Result { + let mut hash_idx = 0; + let mut branch_node_stack = Vec::with_capacity(node.state_mask.count_ones() as usize); + + for child in CHILD_INDEX_RANGE.filter(|ch| node.state_mask.is_bit_set(*ch)) { + if node.hash_mask.is_bit_set(child) { + branch_node_stack.push(rlp_hash(node.hashes[hash_idx])); + hash_idx += 1; + } else { + let child_key = prefix.join(&Nibbles::from_hex(Vec::from([child]))); + let mut child_key_to_seek = child_key.pack(); + child_key_to_seek.resize(32, 0); + + let leaf_node_rlp = + self.restore_leaf_node(H256::from_slice(&child_key_to_seek), child_key.len())?; + branch_node_stack.push(leaf_node_rlp.to_vec()); + } + } + + self.node_rlp_buf.clear(); + BranchNode::new(&branch_node_stack).rlp(node.state_mask, &mut self.node_rlp_buf); + Ok(Bytes::from(self.node_rlp_buf.as_slice())) + } + + /// Restore leaf node. + /// The leaf nodes are always encoded as `RLP(node) or RLP(keccak(RLP(node)))`. + fn restore_leaf_node(&mut self, seek_key: H256, slice_at: usize) -> Result { + let (hashed_address, account) = self + .hashed_account_cursor + .seek(seek_key)? + .ok_or(ProofError::LeafAccountMissing(seek_key))?; + + // Restore account's storage root. + let storage_root = StorageRoot::new_hashed(self.tx, hashed_address) + .with_hashed_cursor_factory(self.hashed_cursor_factory) + .root()?; + + self.account_rlp_buf.clear(); + EthAccount::from(account).with_storage_root(storage_root).encode(&mut self.account_rlp_buf); + + let leaf_node_key = Nibbles::unpack(hashed_address).slice_from(slice_at); + let leaf_node = LeafNode::new(&leaf_node_key, &self.account_rlp_buf); + + self.node_rlp_buf.clear(); + Ok(Bytes::from(leaf_node.rlp(&mut self.node_rlp_buf))) + } + + /// Restore target leaf node. + /// The target node has to have an exactly matching key and is always encoded as `RLP(node)`. + /// The target node might be missing from the trie. + fn restore_target_leaf_node( + &mut self, + seek_key: H256, + slice_at: usize, + ) -> Result, ProofError> { + let (hashed_address, account) = match self.hashed_account_cursor.seek(seek_key)? { + Some(entry) if entry.0 == seek_key => entry, + _ => return Ok(None), + }; + + // Restore account's storage root. 
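+ // (the account RLP embedded in the leaf commits to the storage root, so it must
+ // be recomputed before the account can be re-encoded)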
+ let storage_root = StorageRoot::new_hashed(self.tx, hashed_address) + .with_hashed_cursor_factory(self.hashed_cursor_factory) + .root()?; + + self.account_rlp_buf.clear(); + EthAccount::from(account).with_storage_root(storage_root).encode(&mut self.account_rlp_buf); + + let leaf_node_key = Nibbles::unpack(hashed_address).slice_from(slice_at); + let leaf_node = LeafNode::new(&leaf_node_key, &self.account_rlp_buf); + + self.node_rlp_buf.clear(); + leaf_node.rlp(&mut self.node_rlp_buf); + Ok(Some(Bytes::from(self.node_rlp_buf.as_slice()))) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::StateRoot; + use reth_db::{database::Database, test_utils::create_test_rw_db}; + use reth_primitives::{ChainSpec, StorageEntry, MAINNET}; + use reth_provider::{HashingWriter, ProviderFactory}; + use std::{str::FromStr, sync::Arc}; + + fn insert_genesis( + db: DB, + chain_spec: Arc, + ) -> reth_interfaces::Result<()> { + let provider_factory = ProviderFactory::new(db, chain_spec.clone()); + let mut provider = provider_factory.provider_rw()?; + + // Hash accounts and insert them into hashing table. + let genesis = chain_spec.genesis(); + let alloc_accounts = + genesis.alloc.clone().into_iter().map(|(addr, account)| (addr, Some(account.into()))); + provider.insert_account_for_hashing(alloc_accounts).unwrap(); + + let alloc_storage = genesis.alloc.clone().into_iter().filter_map(|(addr, account)| { + // Only return `Some` if there is storage. + account.storage.map(|storage| { + ( + addr, + storage + .into_iter() + .map(|(key, value)| StorageEntry { key, value: value.into() }), + ) + }) + }); + provider.insert_storage_for_hashing(alloc_storage)?; + + let (_, updates) = StateRoot::new(provider.tx_ref()) + .root_with_updates() + .map_err(Into::::into)?; + updates.flush(provider.tx_mut())?; + + provider.commit()?; + + Ok(()) + } + + #[test] + fn genesis_account_proof() { + // Create test database and insert genesis accounts. + let db = create_test_rw_db(); + insert_genesis(db.clone(), MAINNET.clone()).unwrap(); + + // Address from mainnet genesis allocation. 
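+ // (the keccak256 below is the hashed key whose nibble path the proof must cover)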
+ // keccak256 - `0xcf67b71c90b0d523dd5004cf206f325748da347685071b34812e21801f5270c4` + let target = Address::from_str("0x000d836201318ec6899a67540690382780743280").unwrap(); + + // `cast proof 0x000d836201318ec6899a67540690382780743280 --block 0` + let expected_account_proof = [ + "0xf90211a090dcaf88c40c7bbc95a912cbdde67c175767b31173df9ee4b0d733bfdd511c43a0babe369f6b12092f49181ae04ca173fb68d1a5456f18d20fa32cba73954052bda0473ecf8a7e36a829e75039a3b055e51b8332cbf03324ab4af2066bbd6fbf0021a0bbda34753d7aa6c38e603f360244e8f59611921d9e1f128372fec0d586d4f9e0a04e44caecff45c9891f74f6a2156735886eedf6f1a733628ebc802ec79d844648a0a5f3f2f7542148c973977c8a1e154c4300fec92f755f7846f1b734d3ab1d90e7a0e823850f50bf72baae9d1733a36a444ab65d0a6faaba404f0583ce0ca4dad92da0f7a00cbe7d4b30b11faea3ae61b7f1f2b315b61d9f6bd68bfe587ad0eeceb721a07117ef9fc932f1a88e908eaead8565c19b5645dc9e5b1b6e841c5edbdfd71681a069eb2de283f32c11f859d7bcf93da23990d3e662935ed4d6b39ce3673ec84472a0203d26456312bbc4da5cd293b75b840fc5045e493d6f904d180823ec22bfed8ea09287b5c21f2254af4e64fca76acc5cd87399c7f1ede818db4326c98ce2dc2208a06fc2d754e304c48ce6a517753c62b1a9c1d5925b89707486d7fc08919e0a94eca07b1c54f15e299bd58bdfef9741538c7828b5d7d11a489f9c20d052b3471df475a051f9dd3739a927c89e357580a4c97b40234aa01ed3d5e0390dc982a7975880a0a089d613f26159af43616fd9455bb461f4869bfede26f2130835ed067a8b967bfb80", + "0xf90211a0dae48f5b47930c28bb116fbd55e52cd47242c71bf55373b55eb2805ee2e4a929a00f1f37f337ec800e2e5974e2e7355f10f1a4832b39b846d916c3597a460e0676a0da8f627bb8fbeead17b318e0a8e4f528db310f591bb6ab2deda4a9f7ca902ab5a0971c662648d58295d0d0aa4b8055588da0037619951217c22052802549d94a2fa0ccc701efe4b3413fd6a61a6c9f40e955af774649a8d9fd212d046a5a39ddbb67a0d607cdb32e2bd635ee7f2f9e07bc94ddbd09b10ec0901b66628e15667aec570ba05b89203dc940e6fa70ec19ad4e01d01849d3a5baa0a8f9c0525256ed490b159fa0b84227d48df68aecc772939a59afa9e1a4ab578f7b698bdb1289e29b6044668ea0fd1c992070b94ace57e48cbf6511a16aa770c645f9f5efba87bbe59d0a042913a0e16a7ccea6748ae90de92f8aef3b3dc248a557b9ac4e296934313f24f7fced5fa042373cf4a00630d94de90d0a23b8f38ced6b0f7cb818b8925fee8f0c2a28a25aa05f89d2161c1741ff428864f7889866484cef622de5023a46e795dfdec336319fa07597a017664526c8c795ce1da27b8b72455c49657113e0455552dbc068c5ba31a0d5be9089012fda2c585a1b961e988ea5efcd3a06988e150a8682091f694b37c5a0f7b0352e38c315b2d9a14d51baea4ddee1770974c806e209355233c3c89dce6ea049bf6e8df0acafd0eff86defeeb305568e44d52d2235cf340ae15c6034e2b24180", + "0xf901f1a0cf67e0f5d5f8d70e53a6278056a14ddca46846f5ef69c7bde6810d058d4a9eda80a06732ada65afd192197fe7ce57792a7f25d26978e64e954b7b84a1f7857ac279da05439f8d011683a6fc07efb90afca198fd7270c795c835c7c85d91402cda992eaa0449b93033b6152d289045fdb0bf3f44926f831566faa0e616b7be1abaad2cb2da031be6c3752bcd7afb99b1bb102baf200f8567c394d464315323a363697646616a0a40e3ed11d906749aa501279392ffde868bd35102db41364d9c601fd651f974aa0044bfa4fe8dd1a58e6c7144da79326e94d1331c0b00373f6ae7f3662f45534b7a098005e3e48db68cb1dc9b9f034ff74d2392028ddf718b0f2084133017da2c2e7a02a62bc40414ee95b02e202a9e89babbabd24bef0abc3fc6dcd3e9144ceb0b725a0239facd895bbf092830390a8676f34b35b29792ae561f196f86614e0448a5792a0a4080f88925daff6b4ce26d188428841bd65655d8e93509f2106020e76d41eefa04918987904be42a6894256ca60203283d1b89139cf21f09f5719c44b8cdbb8f7a06201fc3ef0827e594d953b5e3165520af4fceb719e11cc95fd8d3481519bfd8ca05d0e353d596bd725b09de49c01ede0f29023f0153d7b6d401556aeb525b2959ba0cd367d0679950e9c5f2aa4298fd4b081ade2ea429d71ff390c50f8520e16e30880", + 
"0xf87180808080808080a0dbee8b33c73b86df839f309f7ac92eee19836e08b39302ffa33921b3c6a09f66a06068b283d51aeeee682b8fb5458354315d0b91737441ede5e137c18b4775174a8080808080a0fe7779c7d58c2fda43eba0a6644043c86ebb9ceb4836f89e30831f23eb059ece8080", + "0xf8719f20b71c90b0d523dd5004cf206f325748da347685071b34812e21801f5270c4b84ff84d80890ad78ebc5ac6200000a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" + ].into_iter().map(Bytes::from_str).collect::, _>>().unwrap(); + + let tx = db.tx().unwrap(); + let proof = Proof::new(&tx).account_proof(target).unwrap(); + pretty_assertions::assert_eq!(proof, expected_account_proof); + } + + #[test] + fn genesis_account_proof_nonexistent() { + // Create test database and insert genesis accounts. + let db = create_test_rw_db(); + insert_genesis(db.clone(), MAINNET.clone()).unwrap(); + + // Address that does not exist in mainnet genesis allocation. + // keccak256 - `0x18f415ffd7f66bb1924d90f0e82fb79ca8c6d8a3473cd9a95446a443b9db1761` + let target = Address::from_str("0x000d836201318ec6899a67540690382780743281").unwrap(); + + // `cast proof 0x000d836201318ec6899a67540690382780743281 --block 0` + let expected_account_proof = [ + "0xf90211a090dcaf88c40c7bbc95a912cbdde67c175767b31173df9ee4b0d733bfdd511c43a0babe369f6b12092f49181ae04ca173fb68d1a5456f18d20fa32cba73954052bda0473ecf8a7e36a829e75039a3b055e51b8332cbf03324ab4af2066bbd6fbf0021a0bbda34753d7aa6c38e603f360244e8f59611921d9e1f128372fec0d586d4f9e0a04e44caecff45c9891f74f6a2156735886eedf6f1a733628ebc802ec79d844648a0a5f3f2f7542148c973977c8a1e154c4300fec92f755f7846f1b734d3ab1d90e7a0e823850f50bf72baae9d1733a36a444ab65d0a6faaba404f0583ce0ca4dad92da0f7a00cbe7d4b30b11faea3ae61b7f1f2b315b61d9f6bd68bfe587ad0eeceb721a07117ef9fc932f1a88e908eaead8565c19b5645dc9e5b1b6e841c5edbdfd71681a069eb2de283f32c11f859d7bcf93da23990d3e662935ed4d6b39ce3673ec84472a0203d26456312bbc4da5cd293b75b840fc5045e493d6f904d180823ec22bfed8ea09287b5c21f2254af4e64fca76acc5cd87399c7f1ede818db4326c98ce2dc2208a06fc2d754e304c48ce6a517753c62b1a9c1d5925b89707486d7fc08919e0a94eca07b1c54f15e299bd58bdfef9741538c7828b5d7d11a489f9c20d052b3471df475a051f9dd3739a927c89e357580a4c97b40234aa01ed3d5e0390dc982a7975880a0a089d613f26159af43616fd9455bb461f4869bfede26f2130835ed067a8b967bfb80", + "0xf90211a0586b1ddec8db4824154209d355a1989b6c43aa69aba36e9d70c9faa53e7452baa0f86db47d628c73764d74b9ccaed73b8486d97a7731d57008fc9efaf417411860a0d9faed7b9ea107b5d98524246c977e782377f976e34f70717e8b1207f2f9b981a00218f59ccedf797c95e27c56405b9bf16845050fb43e773b66b26bc6992744f5a0dbf396f480c4e024156644adea7c331688d03742369e9d87ab8913bc439ff975a0aced524f39b22c62a5be512ddbca89f0b89b47c311065ccf423dee7013c7ea83a0c06b05f80b237b403adc019c0bc95b5de935021b14a75cbc18509eec60dfd83aa085339d45c4a52b7d523c301701f1ab339964e9c907440cff0a871c98dcf8811ea03ae9f6b8e227ec9be9461f0947b01696f78524c4519a6dee9fba14d209952cf9a0af17f551f9fa1ba4be41d0b342b160e2e8468d7e98a65a2dbf9d5fe5d6928024a0b850ac3bc03e9a309cc59ce5f1ab8db264870a7a22786081753d1db91897b8e6a09e796a4904bd78cb2655b5f346c94350e2d5f0dbf2bc00ac00871cd7ba46b241a0f6f0377427b900529caf32abf32ba1eb93f5f70153aa50b90bf55319a434c252a0725eaf27c8ee07e9b2511a6d6a0d71c649d855e8a9ed26e667903e2e94ae47cba0e4139fb48aa1a524d47f6e0df80314b88b52202d7e853da33c276aa8572283a8a05e9003d54a45935fdebae3513dc7cd16626dc05e1d903ae7f47f1a35aa6e234580", + 
"0xf901d1a0b7c55b381eb205712a2f5d1b7d6309ac725da79ab159cb77dc2783af36e6596da0b3b48aa390e0f3718b486ccc32b01682f92819e652315c1629058cd4d9bb1545a0e3c0cc68af371009f14416c27e17f05f4f696566d2ba45362ce5711d4a01d0e4a0bad1e085e431b510508e2a9e3712633a414b3fe6fd358635ab206021254c1e10a0f8407fe8d5f557b9e012d52e688139bd932fec40d48630d7ff4204d27f8cc68da08c6ca46eff14ad4950e65469c394ca9d6b8690513b1c1a6f91523af00082474c80a0630c034178cb1290d4d906edf28688804d79d5e37a3122c909adab19ac7dc8c5a059f6d047c5d1cc75228c4517a537763cb410c38554f273e5448a53bc3c7166e7a0d842f53ce70c3aad1e616fa6485d3880d15c936fcc306ec14ae35236e5a60549a0218ee2ee673c69b4e1b953194b2568157a69085b86e4f01644fa06ab472c6cf9a016a35a660ea496df7c0da646378bfaa9562f401e42a5c2fe770b7bbe22433585a0dd0fbbe227a4d50868cdbb3107573910fd97131ea8d835bef81d91a2fc30b175a06aafa3d78cf179bf055bd5ec629be0ff8352ce0aec9125a4d75be3ee7eb71f10a01d6817ef9f64fcbb776ff6df0c83138dcd2001bd752727af3e60f4afc123d8d58080" + ].into_iter().map(Bytes::from_str).collect::, _>>().unwrap(); + + let tx = db.tx().unwrap(); + let proof = Proof::new(&tx).account_proof(target).unwrap(); + pretty_assertions::assert_eq!(proof, expected_account_proof); + } +} From 890eacbe54dc942bfa0252cb014061ced20efd67 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 18 Aug 2023 17:39:06 +0200 Subject: [PATCH 466/722] fix: record geth selfdestructs (#4264) --- .../revm-inspectors/src/tracing/builder/geth.rs | 14 +++++++++++++- crates/revm/revm-inspectors/src/tracing/types.rs | 15 +++++++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/crates/revm/revm-inspectors/src/tracing/builder/geth.rs b/crates/revm/revm-inspectors/src/tracing/builder/geth.rs index 91db8c2af2d3..255cfdba96b4 100644 --- a/crates/revm/revm-inspectors/src/tracing/builder/geth.rs +++ b/crates/revm/revm-inspectors/src/tracing/builder/geth.rs @@ -118,7 +118,13 @@ impl GethTraceBuilder { let include_logs = opts.with_log.unwrap_or_default(); // first fill up the root let main_trace_node = &self.nodes[0]; - let root_call_frame = main_trace_node.geth_empty_call_frame(include_logs); + let mut root_call_frame = main_trace_node.geth_empty_call_frame(include_logs); + + // selfdestructs are not recorded as individual call traces but are derived from + // the call trace and are added as additional `CallFrame` objects to the parent call + if let Some(selfdestruct) = main_trace_node.geth_selfdestruct_call_trace() { + root_call_frame.calls.push(selfdestruct); + } if opts.only_top_call.unwrap_or_default() { return root_call_frame @@ -129,7 +135,13 @@ impl GethTraceBuilder { // so we can populate the call frame tree by walking up the call tree let mut call_frames = Vec::with_capacity(self.nodes.len()); call_frames.push((0, root_call_frame)); + for (idx, trace) in self.nodes.iter().enumerate().skip(1) { + // selfdestructs are not recorded as individual call traces but are derived from + // the call trace and are added as additional `CallFrame` objects to the parent call + if let Some(selfdestruct) = trace.geth_selfdestruct_call_trace() { + call_frames.last_mut().expect("not empty").1.calls.push(selfdestruct); + } call_frames.push((idx, trace.geth_empty_call_frame(include_logs))); } diff --git a/crates/revm/revm-inspectors/src/tracing/types.rs b/crates/revm/revm-inspectors/src/tracing/types.rs index 552e7d33a0c7..6c877830541f 100644 --- a/crates/revm/revm-inspectors/src/tracing/types.rs +++ b/crates/revm/revm-inspectors/src/tracing/types.rs @@ -386,6 +386,21 @@ impl CallTraceNode { } } + /// If the trace is a selfdestruct, returns the 
`CallFrame` for a geth call trace.
+ pub(crate) fn geth_selfdestruct_call_trace(&self) -> Option<CallFrame> {
+ if self.is_selfdestruct() {
+ Some(CallFrame {
+ typ: "SELFDESTRUCT".to_string(),
+ from: self.trace.caller,
+ to: self.trace.selfdestruct_refund_target,
+ value: Some(self.trace.value),
+ ..Default::default()
+ })
+ } else {
+ None
+ }
+ }
+
 /// If the trace is a selfdestruct, returns the `TransactionTrace` for a parity trace.
 pub(crate) fn parity_selfdestruct_trace(
 &self,
From 24632aca6f98cda6c8d425c7a0da4b563787c174 Mon Sep 17 00:00:00 2001
From: joshieDo <93316087+joshieDo@users.noreply.github.com>
Date: Fri, 18 Aug 2023 16:58:07 +0100
Subject: [PATCH 467/722] feat(pruner): prune receipts based on log emitters
 during live sync (#4140)

---
 crates/primitives/src/prune/mod.rs            |  47 ++++-
 crates/prune/src/pruner.rs                    | 165 ++++++++++++++++++
 crates/storage/provider/src/post_state/mod.rs |   2 +-
 .../src/providers/database/provider.rs        |  13 +-
 4 files changed, 217 insertions(+), 10 deletions(-)

diff --git a/crates/primitives/src/prune/mod.rs b/crates/primitives/src/prune/mod.rs
index bec2bfa26254..5359c3d9c72f 100644
--- a/crates/primitives/src/prune/mod.rs
+++ b/crates/primitives/src/prune/mod.rs
@@ -24,24 +24,61 @@ impl ContractLogsPruneConfig {
 /// Given the `tip` block number, consolidates the structure so it can easily be queried for
 /// filtering across a range of blocks.
 ///
- /// The [`BlockNumber`] key of the map should be viewed as `PruneMode::Before(block)`.
+ /// Example:
+ ///
+ /// `{ addrA: Before(872), addrB: Before(500), addrC: Distance(128) }`
+ ///
+ /// for `tip: 1000`, gets transformed to a map such as:
+ ///
+ /// `{ 500: [addrB], 872: [addrA, addrC] }`
+ ///
+ /// The [`BlockNumber`] key of the new map should be viewed as `PruneMode::Before(block)`, which
+ /// makes the previous result equivalent to
+ ///
+ /// `{ Before(500): [addrB], Before(872): [addrA, addrC] }`
 pub fn group_by_block(
 &self,
 tip: BlockNumber,
+ pruned_block: Option<BlockNumber>,
 ) -> Result<BTreeMap<BlockNumber, Vec<&Address>>, PrunePartError> {
 let mut map = BTreeMap::new();
+ let pruned_block = pruned_block.unwrap_or_default();
+
 for (address, mode) in self.0.iter() {
 // Getting `None` means that there is nothing to prune yet, so we need to include it in
 // the BTreeMap (block = 0), otherwise it will be excluded.
 // Reminder that this BTreeMap works as an inclusion list that excludes (prunes) all
 // other receipts.
- let block = mode
- .prune_target_block(tip, MINIMUM_PRUNING_DISTANCE, PrunePart::ContractLogs)?
- .map(|(block, _)| block)
- .unwrap_or_default();
+ let block = (pruned_block + 1).max(
+ mode.prune_target_block(tip, MINIMUM_PRUNING_DISTANCE, PrunePart::ContractLogs)?
+ .map(|(block, _)| block)
+ .unwrap_or_default(),
+ );
 
 map.entry(block).or_insert_with(Vec::new).push(address)
 }
 Ok(map)
 }
+
+ /// Returns the lowest block where we start filtering logs which use `PruneMode::Distance(_)`.
+ pub fn lowest_block_with_distance(
+ &self,
+ tip: BlockNumber,
+ pruned_block: Option<BlockNumber>,
+ ) -> Result<Option<BlockNumber>, PrunePartError> {
+ let pruned_block = pruned_block.unwrap_or_default();
+ let mut lowest = None;
+
+ for (_, mode) in self.0.iter() {
+ if let PruneMode::Distance(_) = mode {
+ if let Some((block, _)) =
+ mode.prune_target_block(tip, MINIMUM_PRUNING_DISTANCE, PrunePart::ContractLogs)?
+ {
+ lowest = Some(lowest.unwrap_or(u64::MAX).min(block));
+ }
+ }
+ }
+
+ Ok(lowest.map(|lowest| lowest.max(pruned_block)))
+ }
 }
diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs
index e39cd5b4c29d..5ae773c12a39 100644
--- a/crates/prune/src/pruner.rs
+++ b/crates/prune/src/pruner.rs
@@ -13,6 +13,7 @@ use reth_db::{
 };
 use reth_primitives::{
 BlockNumber, ChainSpec, PruneCheckpoint, PruneMode, PruneModes, PrunePart, TxNumber,
+ MINIMUM_PRUNING_DISTANCE,
 };
 use reth_provider::{
 BlockReader, DatabaseProviderRW, ProviderFactory, PruneCheckpointReader, PruneCheckpointWriter,
@@ -102,6 +103,15 @@ impl Pruner {
 .record(part_start.elapsed())
 }
 
+ if !self.modes.contract_logs_filter.is_empty() {
+ let part_start = Instant::now();
+ self.prune_receipts_by_logs(&provider, tip_block_number)?;
+ self.metrics
+ .get_prune_part_metrics(PrunePart::ContractLogs)
+ .duration_seconds
+ .record(part_start.elapsed())
+ }
+
 if let Some((to_block, prune_mode)) =
 self.modes.prune_target_block_transaction_lookup(tip_block_number)?
 {
@@ -251,6 +261,7 @@ impl Pruner {
 "Pruned receipts"
 );
 },
+ |_| false,
 )?;
 
 provider.save_prune_checkpoint(
@@ -258,6 +269,160 @@ impl Pruner {
 PruneCheckpoint { block_number: to_block, prune_mode },
 )?;
 
+ // `PrunePart::Receipts` overrides `PrunePart::ContractLogs`, so we can preemptively
+ // limit their pruning start point.
+ provider.save_prune_checkpoint(
+ PrunePart::ContractLogs,
+ PruneCheckpoint { block_number: to_block, prune_mode },
+ )?;
+
+ Ok(())
+ }
+
+ /// Prune receipts up to the provided block by filtering logs. Works as an inclusion list, and
+ /// removes every receipt not belonging to it.
+ #[instrument(level = "trace", skip(self, provider), target = "pruner")]
+ fn prune_receipts_by_logs(
+ &self,
+ provider: &DatabaseProviderRW<'_, DB>,
+ tip_block_number: BlockNumber,
+ ) -> PrunerResult {
+ // Contract log filtering removes every receipt possible except the ones in the list. So,
+ // for the other receipts it's as if they had a `PruneMode::Distance()` of 128.
+ let to_block = PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)
+ .prune_target_block(
+ tip_block_number,
+ MINIMUM_PRUNING_DISTANCE,
+ PrunePart::ContractLogs,
+ )?
+ .map(|(bn, _)| bn)
+ .unwrap_or_default();
+
+ // Figure out what receipts have already been pruned, so we can have an accurate
+ // `address_filter`
+ let pruned = provider
+ .get_prune_checkpoint(PrunePart::ContractLogs)?
+ .map(|checkpoint| checkpoint.block_number);
+
+ let address_filter =
+ self.modes.contract_logs_filter.group_by_block(tip_block_number, pruned)?;
+
+ // Splits all transactions into different block ranges. Each block range will have its own
+ // filter address list and will check it while going through the table
+ //
+ // Example:
+ // For an `address_filter` such as:
+ // { block9: [a1, a2], block20: [a3, a4, a5] }
+ //
+ // The following structures will be created in the exact order as shown:
+ // `block_ranges`: [
+ // (block0, block8, 0 addresses),
+ // (block9, block19, 2 addresses),
+ // (block20, to_block, 5 addresses)
+ // ]
+ // `filtered_addresses`: [a1, a2, a3, a4, a5]
+ //
+ // The first range will delete all receipts between block0 - block8
+ // The second range will delete all receipts between block9 - block19, except the ones with
+ // emitter logs from these addresses: [a1, a2].
+ // The third range will delete all receipts between block20 - to_block, except the ones with + // emitter logs from these addresses: [a1, a2, a3, a4, a5] + let mut block_ranges = vec![]; + let mut blocks_iter = address_filter.iter().peekable(); + let mut filtered_addresses = vec![]; + + while let Some((start_block, addresses)) = blocks_iter.next() { + filtered_addresses.extend_from_slice(addresses); + + // This will clear all receipts before the first appearance of a contract log + if block_ranges.is_empty() { + block_ranges.push((0, *start_block - 1, 0)); + } + + let end_block = + blocks_iter.peek().map(|(next_block, _)| *next_block - 1).unwrap_or(to_block); + + // Addresses in lower block ranges, are still included in the inclusion list for future + // ranges. + block_ranges.push((*start_block, end_block, filtered_addresses.len())); + } + + for (start_block, end_block, num_addresses) in block_ranges { + let range = match self.get_next_tx_num_range_from_checkpoint( + provider, + PrunePart::ContractLogs, + end_block, + )? { + Some(range) => range, + None => { + trace!( + target: "pruner", + block_range = format!("{start_block}..={end_block}"), + "No receipts to prune." + ); + continue + } + }; + + let total = range.clone().count(); + let mut processed = 0; + + provider.prune_table_with_iterator_in_batches::( + range, + self.batch_sizes.receipts, + |rows| { + processed += rows; + trace!( + target: "pruner", + %rows, + block_range = format!("{start_block}..={end_block}"), + progress = format!("{:.1}%", 100.0 * processed as f64 / total as f64), + "Pruned receipts" + ); + }, + |receipt| { + num_addresses > 0 && + receipt.logs.iter().any(|log| { + filtered_addresses[..num_addresses].contains(&&log.address) + }) + }, + )?; + + // If this is the last block range, avoid writing an unused checkpoint + if end_block != to_block { + // This allows us to query for the transactions in the next block range with + // [`get_next_tx_num_range_from_checkpoint`]. It's just a temporary intermediate + // checkpoint, which should be adjusted in the end. + provider.save_prune_checkpoint( + PrunePart::ContractLogs, + PruneCheckpoint { + block_number: end_block, + prune_mode: PruneMode::Before(end_block + 1), + }, + )?; + } + } + + // If there are contracts using `PruneMode::Distance(_)` there will be receipts before + // `to_block` that become eligible to be pruned in future runs. Therefore, our + // checkpoint is not actually `to_block`, but the `lowest_block_with_distance` from any + // contract. This ensures that in future pruner runs we can + // prune all these receipts between the previous `lowest_block_with_distance` and the new + // one using `get_next_tx_num_range_from_checkpoint`. + let checkpoint_block = self + .modes + .contract_logs_filter + .lowest_block_with_distance(tip_block_number, pruned)? 
+ .unwrap_or(to_block); + + provider.save_prune_checkpoint( + PrunePart::ContractLogs, + PruneCheckpoint { + block_number: checkpoint_block - 1, + prune_mode: PruneMode::Before(checkpoint_block), + }, + )?; + Ok(()) } diff --git a/crates/storage/provider/src/post_state/mod.rs b/crates/storage/provider/src/post_state/mod.rs index 4032531fff38..aa1cb7489d37 100644 --- a/crates/storage/provider/src/post_state/mod.rs +++ b/crates/storage/provider/src/post_state/mod.rs @@ -662,7 +662,7 @@ impl PostState { let contract_log_pruner = self .prune_modes .contract_logs_filter - .group_by_block(tip) + .group_by_block(tip, None) .map_err(|e| Error::Custom(e.to_string()))?; // Empty implies that there is going to be diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 0ac58a3b7db2..b013ee697737 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -629,27 +629,32 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> DatabaseProvider<'this, TX> { &self, keys: impl IntoIterator, ) -> std::result::Result { - self.prune_table_with_iterator_in_batches::(keys, usize::MAX, |_| {}) + self.prune_table_with_iterator_in_batches::(keys, usize::MAX, |_| {}, |_| false) } /// Prune the table for the specified pre-sorted key iterator, calling `chunk_callback` after /// every `batch_size` pruned rows with number of total rows pruned. /// + /// `skip_filter` can be used to skip pruning certain elements. + /// /// Returns number of rows pruned. pub fn prune_table_with_iterator_in_batches( &self, keys: impl IntoIterator, batch_size: usize, mut batch_callback: impl FnMut(usize), + skip_filter: impl Fn(&T::Value) -> bool, ) -> std::result::Result { let mut cursor = self.tx.cursor_write::()?; let mut deleted = 0; for key in keys { - if cursor.seek_exact(key)?.is_some() { - cursor.delete_current()?; + if let Some((_, value)) = cursor.seek_exact(key)? 
{ + if !skip_filter(&value) { + cursor.delete_current()?; + deleted += 1; + } } - deleted += 1; if deleted % batch_size == 0 { batch_callback(deleted); From 8516fefa28d0235b93d0aa4a13a5ceb84e543193 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 18 Aug 2023 19:45:33 +0200 Subject: [PATCH 468/722] feat: integrate blobstore in pool (#4266) --- bin/reth/src/node/mod.rs | 5 +- crates/transaction-pool/src/lib.rs | 43 +++++++++--- crates/transaction-pool/src/metrics.rs | 10 +++ crates/transaction-pool/src/pool/mod.rs | 67 ++++++++++++++++--- crates/transaction-pool/src/test_utils/mod.rs | 9 +-- examples/network-txpool.rs | 6 +- 6 files changed, 114 insertions(+), 26 deletions(-) diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 986f3012f82c..0b1b98cc718e 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -65,7 +65,9 @@ use reth_stages::{ MetricEventsSender, MetricsListener, }; use reth_tasks::TaskExecutor; -use reth_transaction_pool::{TransactionPool, TransactionValidationTaskExecutor}; +use reth_transaction_pool::{ + blobstore::InMemoryBlobStore, TransactionPool, TransactionValidationTaskExecutor, +}; use secp256k1::SecretKey; use std::{ net::{Ipv4Addr, SocketAddr, SocketAddrV4}, @@ -269,6 +271,7 @@ impl NodeCommand { ctx.task_executor.clone(), 1, ), + InMemoryBlobStore::default(), self.txpool.pool_config(), ); info!(target: "reth::cli", "Transaction pool initialized"); diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index fb267c117f9d..154cabe64416 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -29,6 +29,7 @@ //! - providing existing transactions //! - ordering and providing the best transactions for block production //! - monitoring memory footprint and enforce pool size limits +//! - storing blob data for transactions in a separate blobstore on insertion //! //! ## Assumptions //! @@ -86,6 +87,13 @@ //! that provides the `TransactionPool` interface. //! //! +//! ## Blob Transactions +//! +//! Blob transaction can be quite large hence they are stored in a separate blobstore. The pool is +//! responsible for inserting blob data for new transactions into the blobstore. +//! See also [ValidTransaction](validate::ValidTransaction) +//! +//! //! ## Examples //! //! Listen for new transactions and print them: @@ -95,9 +103,11 @@ //! use reth_provider::{ChainSpecProvider, StateProviderFactory}; //! use reth_tasks::TokioTaskExecutor; //! use reth_transaction_pool::{TransactionValidationTaskExecutor, Pool, TransactionPool}; +//! use reth_transaction_pool::blobstore::InMemoryBlobStore; //! async fn t(client: C) where C: StateProviderFactory + ChainSpecProvider + Clone + 'static{ //! let pool = Pool::eth_pool( //! TransactionValidationTaskExecutor::eth(client, MAINNET.clone(), TokioTaskExecutor::default()), +//! InMemoryBlobStore::default(), //! Default::default(), //! ); //! let mut transactions = pool.pending_transactions_listener(); @@ -120,6 +130,7 @@ //! use reth_provider::{BlockReaderIdExt, CanonStateNotification, ChainSpecProvider, StateProviderFactory}; //! use reth_tasks::TokioTaskExecutor; //! use reth_transaction_pool::{TransactionValidationTaskExecutor, Pool}; +//! use reth_transaction_pool::blobstore::InMemoryBlobStore; //! use reth_transaction_pool::maintain::maintain_transaction_pool_future; //! async fn t(client: C, stream: St) //! where C: StateProviderFactory + BlockReaderIdExt + ChainSpecProvider + Clone + 'static, @@ -127,6 +138,7 @@ //! { //! 
let pool = Pool::eth_pool( //! TransactionValidationTaskExecutor::eth(client.clone(), MAINNET.clone(), TokioTaskExecutor::default()), +//! InMemoryBlobStore::default(), //! Default::default(), //! ); //! @@ -151,6 +163,7 @@ use std::{ use tokio::sync::mpsc::Receiver; use tracing::{instrument, trace}; +use crate::blobstore::BlobStore; pub use crate::{ config::{ PoolConfig, PriceBumpConfig, SubPoolLimit, DEFAULT_PRICE_BUMP, REPLACE_BLOB_PRICE_BUMP, @@ -194,25 +207,26 @@ pub mod test_utils; /// A shareable, generic, customizable `TransactionPool` implementation. #[derive(Debug)] -pub struct Pool { +pub struct Pool { /// Arc'ed instance of the pool internals - pool: Arc>, + pool: Arc>, } // === impl Pool === -impl Pool +impl Pool where V: TransactionValidator, T: TransactionOrdering::Transaction>, + S: BlobStore, { /// Create a new transaction pool instance. - pub fn new(validator: V, ordering: T, config: PoolConfig) -> Self { - Self { pool: Arc::new(PoolInner::new(validator, ordering, config)) } + pub fn new(validator: V, ordering: T, blob_store: S, config: PoolConfig) -> Self { + Self { pool: Arc::new(PoolInner::new(validator, ordering, blob_store, config)) } } /// Returns the wrapped pool. - pub(crate) fn inner(&self) -> &PoolInner { + pub(crate) fn inner(&self) -> &PoolInner { &self.pool } @@ -261,13 +275,15 @@ where } } -impl +impl Pool< TransactionValidationTaskExecutor>, CoinbaseTipOrdering, + S, > where Client: StateProviderFactory + Clone + 'static, + S: BlobStore, { /// Returns a new [Pool] that uses the default [TransactionValidationTaskExecutor] when /// validating [EthPooledTransaction]s and ords via [CoinbaseTipOrdering] @@ -279,9 +295,11 @@ where /// use reth_primitives::MAINNET; /// use reth_tasks::TokioTaskExecutor; /// use reth_transaction_pool::{TransactionValidationTaskExecutor, Pool}; + /// use reth_transaction_pool::blobstore::InMemoryBlobStore; /// # fn t(client: C) where C: StateProviderFactory + Clone + 'static{ /// let pool = Pool::eth_pool( /// TransactionValidationTaskExecutor::eth(client, MAINNET.clone(), TokioTaskExecutor::default()), + /// InMemoryBlobStore::default(), /// Default::default(), /// ); /// # } @@ -290,18 +308,20 @@ where validator: TransactionValidationTaskExecutor< EthTransactionValidator, >, + blob_store: S, config: PoolConfig, ) -> Self { - Self::new(validator, CoinbaseTipOrdering::default(), config) + Self::new(validator, CoinbaseTipOrdering::default(), blob_store, config) } } /// implements the `TransactionPool` interface for various transaction pool API consumers. 
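A minimal sketch of wiring the new `S: BlobStore` parameter through the generic constructor; `validator` here is a placeholder for any `TransactionValidator` implementation and is not a type from this patch:

    use reth_transaction_pool::{blobstore::InMemoryBlobStore, CoinbaseTipOrdering, Pool, PoolConfig};

    // Pool<V, T, S>: validation, ordering and blob storage are chosen independently.
    let pool = Pool::new(
        validator,                      // V: TransactionValidator (placeholder)
        CoinbaseTipOrdering::default(), // T: TransactionOrdering
        InMemoryBlobStore::default(),   // S: BlobStore, the new parameter
        PoolConfig::default(),
    );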
#[async_trait::async_trait] -impl TransactionPool for Pool +impl TransactionPool for Pool where V: TransactionValidator, T: TransactionOrdering::Transaction>, + S: BlobStore, { type Transaction = T::Transaction; @@ -440,10 +460,11 @@ where } } -impl TransactionPoolExt for Pool +impl TransactionPoolExt for Pool where V: TransactionValidator, T: TransactionOrdering::Transaction>, + S: BlobStore, { #[instrument(skip(self), target = "txpool")] fn set_block_info(&self, info: BlockInfo) { @@ -460,7 +481,7 @@ where } } -impl Clone for Pool { +impl Clone for Pool { fn clone(&self) -> Self { Self { pool: Arc::clone(&self.pool) } } diff --git a/crates/transaction-pool/src/metrics.rs b/crates/transaction-pool/src/metrics.rs index 574043aa2a8b..85c3d707f60c 100644 --- a/crates/transaction-pool/src/metrics.rs +++ b/crates/transaction-pool/src/metrics.rs @@ -38,6 +38,16 @@ pub struct TxPoolMetrics { pub(crate) performed_state_updates: Counter, } +/// Transaction pool blobstore metrics +#[derive(Metrics)] +#[metrics(scope = "transaction_pool")] +pub struct BlobStoreMetrics { + /// Number of failed inserts into the blobstore + pub(crate) blobstore_failed_inserts: Counter, + /// Number of failed deletes into the blobstore + pub(crate) blobstore_failed_deletes: Counter, +} + /// Transaction pool maintenance metrics #[derive(Metrics)] #[metrics(scope = "transaction_pool")] diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 940fff3d0b79..1e70b4528373 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -82,7 +82,7 @@ use crate::{ }; use best::BestTransactions; use parking_lot::{Mutex, RwLock}; -use reth_primitives::{Address, TxHash, H256}; +use reth_primitives::{Address, BlobTransactionSidecar, TxHash, H256}; use std::{ collections::{HashMap, HashSet}, fmt, @@ -90,14 +90,15 @@ use std::{ time::Instant, }; use tokio::sync::mpsc; -use tracing::{debug, trace}; +use tracing::{debug, trace, warn}; mod events; pub use events::{FullTransactionEvent, TransactionEvent}; mod listener; use crate::{ - pool::txpool::UpdateOutcome, traits::PendingTransactionListenerKind, validate::ValidTransaction, + blobstore::BlobStore, metrics::BlobStoreMetrics, pool::txpool::UpdateOutcome, + traits::PendingTransactionListenerKind, validate::ValidTransaction, }; pub use listener::{AllTransactionsEvents, TransactionEvents}; @@ -110,11 +111,16 @@ pub mod txpool; mod update; /// Transaction pool internals. -pub struct PoolInner { +pub struct PoolInner +where + T: TransactionOrdering, +{ /// Internal mapping of addresses to plain ints. identifiers: RwLock, /// Transaction validation. validator: V, + /// Storage for blob transactions + blob_store: S, /// The internal pool that manages all transactions. pool: RwLock>, /// Pool settings. @@ -125,17 +131,20 @@ pub struct PoolInner { pending_transaction_listener: Mutex>, /// Listeners for new transactions added to the pool. transaction_listener: Mutex>>>, + /// Metrics for the blob store + blob_store_metrics: BlobStoreMetrics, } // === impl PoolInner === -impl PoolInner +impl PoolInner where V: TransactionValidator, T: TransactionOrdering::Transaction>, + S: BlobStore, { /// Create a new transaction pool instance. 
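As a side note, the derived metrics are plain `metrics` counters; a minimal sketch of bumping the new counters from inside the crate (mirroring the blob-store error paths added further down in this patch; the fields are `pub(crate)`, so this only compiles within `reth_transaction_pool`):

    // The Metrics derive registers the counters under the "transaction_pool" scope.
    let metrics = BlobStoreMetrics::default();
    metrics.blobstore_failed_inserts.increment(1);
    metrics.blobstore_failed_deletes.increment(1);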
-    pub(crate) fn new(validator: V, ordering: T, config: PoolConfig) -> Self {
+    pub(crate) fn new(validator: V, ordering: T, blob_store: S, config: PoolConfig) -> Self {
         Self {
             identifiers: Default::default(),
             validator,
@@ -144,6 +153,8 @@ where
             pending_transaction_listener: Default::default(),
             transaction_listener: Default::default(),
             config,
+            blob_store,
+            blob_store_metrics: Default::default(),
         }
     }
 
@@ -316,7 +327,8 @@ where
         let transaction_id = TransactionId::new(sender_id, transaction.nonce());
         let encoded_length = transaction.encoded_length();
 
-        let (transaction, _maybe_sidecar) = match transaction {
+        // split the valid transaction and the blob sidecar if it has any
+        let (transaction, maybe_sidecar) = match transaction {
             ValidTransaction::Valid(tx) => (tx, None),
             ValidTransaction::ValidWithSidecar { transaction, sidecar } => {
                 debug_assert!(
@@ -339,6 +351,16 @@ where
         let added = self.pool.write().add_transaction(tx, balance, state_nonce)?;
         let hash = *added.hash();
 
+        // transaction was successfully inserted into the pool
+        if let Some(sidecar) = maybe_sidecar {
+            // store the sidecar in the blob store
+            self.insert_blob(hash, sidecar);
+        }
+        if let Some(replaced) = added.replaced_blob_transaction() {
+            // delete the replaced transaction from the blob store
+            self.delete_blob(replaced);
+        }
+
         // Notify about new pending transactions
         if let Some(pending) = added.as_pending() {
             self.on_new_pending_transaction(pending);
@@ -625,9 +647,25 @@ where
     pub(crate) fn discard_worst(&self) -> HashSet<TxHash> {
         self.pool.write().discard_worst().into_iter().map(|tx| *tx.hash()).collect()
     }
+
+    /// Inserts a blob sidecar into the blob store
+    fn insert_blob(&self, hash: TxHash, blob: BlobTransactionSidecar) {
+        if let Err(err) = self.blob_store.insert(hash, blob) {
+            warn!(target: "txpool", ?err, "[{:?}] failed to insert blob", hash);
+            self.blob_store_metrics.blobstore_failed_inserts.increment(1);
+        }
+    }
+
+    /// Delete a blob from the blob store
+    fn delete_blob(&self, blob: TxHash) {
+        if let Err(err) = self.blob_store.delete(blob) {
+            warn!(target: "txpool", ?err, "[{:?}] failed to delete blob", blob);
+            self.blob_store_metrics.blobstore_failed_deletes.increment(1);
+        }
+    }
 }
 
-impl<V, T: TransactionOrdering> fmt::Debug for PoolInner<V, T> {
+impl<V, T: TransactionOrdering, S> fmt::Debug for PoolInner<V, T, S> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         f.debug_struct("PoolInner").field("config", &self.config).finish_non_exhaustive()
     }
@@ -723,6 +761,19 @@ impl<T: PoolTransaction> AddedTransaction<T> {
         }
     }
 
+    /// Returns the replaced transaction if there was one
+    pub(crate) fn replaced(&self) -> Option<&Arc<ValidPoolTransaction<T>>> {
+        match self {
+            AddedTransaction::Pending(tx) => tx.replaced.as_ref(),
+            AddedTransaction::Parked { replaced, .. } => replaced.as_ref(),
+        }
+    }
+
+    /// Returns the hash of the replaced transaction if it is a blob transaction.
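+    ///
+    /// The pool uses this after a successful insert to evict the replaced
+    /// transaction's sidecar from the blob store, so the store only keeps
+    /// sidecars for transactions that are currently in the pool.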
+ pub(crate) fn replaced_blob_transaction(&self) -> Option { + self.replaced().filter(|tx| tx.transaction.is_eip4844()).map(|tx| *tx.transaction.hash()) + } + /// Returns the hash of the transaction pub(crate) fn hash(&self) -> &TxHash { match self { diff --git a/crates/transaction-pool/src/test_utils/mod.rs b/crates/transaction-pool/src/test_utils/mod.rs index e93aabdb6cef..064e867b3537 100644 --- a/crates/transaction-pool/src/test_utils/mod.rs +++ b/crates/transaction-pool/src/test_utils/mod.rs @@ -5,15 +5,16 @@ mod mock; mod pool; use crate::{ - noop::MockTransactionValidator, Pool, PoolTransaction, TransactionOrigin, - TransactionValidationOutcome, TransactionValidator, + blobstore::InMemoryBlobStore, noop::MockTransactionValidator, Pool, PoolTransaction, + TransactionOrigin, TransactionValidationOutcome, TransactionValidator, }; use async_trait::async_trait; pub use mock::*; use std::{marker::PhantomData, sync::Arc}; /// A [Pool] used for testing -pub type TestPool = Pool, MockOrdering>; +pub type TestPool = + Pool, MockOrdering, InMemoryBlobStore>; /// Returns a new [Pool] used for testing purposes pub fn testing_pool() -> TestPool { @@ -23,5 +24,5 @@ pub fn testing_pool() -> TestPool { pub fn testing_pool_with_validator( validator: MockTransactionValidator, ) -> TestPool { - Pool::new(validator, MockOrdering::default(), Default::default()) + Pool::new(validator, MockOrdering::default(), InMemoryBlobStore::default(), Default::default()) } diff --git a/examples/network-txpool.rs b/examples/network-txpool.rs index 23580d1457b7..08d69e964cf4 100644 --- a/examples/network-txpool.rs +++ b/examples/network-txpool.rs @@ -10,8 +10,9 @@ use reth_network::{config::rng_secret_key, NetworkConfig, NetworkManager}; use reth_provider::test_utils::NoopProvider; use reth_transaction_pool::{ - validate::ValidTransaction, CoinbaseTipOrdering, EthPooledTransaction, PoolTransaction, - TransactionOrigin, TransactionPool, TransactionValidationOutcome, TransactionValidator, + blobstore::InMemoryBlobStore, validate::ValidTransaction, CoinbaseTipOrdering, + EthPooledTransaction, PoolTransaction, TransactionOrigin, TransactionPool, + TransactionValidationOutcome, TransactionValidator, }; #[tokio::main] @@ -25,6 +26,7 @@ async fn main() -> eyre::Result<()> { let pool = reth_transaction_pool::Pool::new( OkValidator::default(), CoinbaseTipOrdering::default(), + InMemoryBlobStore::default(), Default::default(), ); From 6077edf42aa5a498030d69c72f0a41481406cdd1 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 18 Aug 2023 19:57:42 +0200 Subject: [PATCH 469/722] feat: add recovered wrapper type and eth pool conversions (#4267) Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> --- crates/primitives/src/lib.rs | 8 +- crates/primitives/src/transaction/eip1559.rs | 25 ++++- crates/primitives/src/transaction/eip2930.rs | 25 ++++- crates/primitives/src/transaction/eip4844.rs | 23 +++++ crates/primitives/src/transaction/legacy.rs | 56 +++++++++++- crates/primitives/src/transaction/mod.rs | 96 +++++--------------- crates/primitives/src/transaction/pooled.rs | 90 +++++++++++++++++- crates/transaction-pool/src/traits.rs | 55 +++++++++-- 8 files changed, 288 insertions(+), 90 deletions(-) diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 8d0ffb023615..fd973ff74191 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -92,10 +92,10 @@ pub use transaction::{ util::secp256k1::{public_key_to_address, recover_signer, sign_message}, AccessList, 
    AccessListItem, AccessListWithGasUsed, BlobTransaction, BlobTransactionSidecar,
     FromRecoveredTransaction, IntoRecoveredTransaction, InvalidTransactionError,
-    PooledTransactionsElement, Signature, Transaction, TransactionKind, TransactionMeta,
-    TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxEip1559, TxEip2930,
-    TxEip4844, TxLegacy, TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID,
-    LEGACY_TX_TYPE_ID,
+    PooledTransactionsElement, PooledTransactionsElementEcRecovered, Signature, Transaction,
+    TransactionKind, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered,
+    TransactionSignedNoHash, TxEip1559, TxEip2930, TxEip4844, TxLegacy, TxType, EIP1559_TX_TYPE_ID,
+    EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID,
 };
 pub use withdrawal::Withdrawal;
diff --git a/crates/primitives/src/transaction/eip1559.rs b/crates/primitives/src/transaction/eip1559.rs
index c446da2e1607..4bc5cc4754b8 100644
--- a/crates/primitives/src/transaction/eip1559.rs
+++ b/crates/primitives/src/transaction/eip1559.rs
@@ -1,5 +1,6 @@
 use super::access_list::AccessList;
-use crate::{Bytes, ChainId, Signature, TransactionKind, TxType};
+use crate::{keccak256, Bytes, ChainId, Signature, TransactionKind, TxType, H256};
+use bytes::BytesMut;
 use reth_codecs::{main_codec, Compact};
 use reth_rlp::{length_of_length, Decodable, DecodeError, Encodable, Header};
 use std::mem;
@@ -188,6 +189,28 @@ impl TxEip1559 {
         self.access_list.size() + // access_list
         self.input.len() // input
     }
+
+    /// Encodes the EIP-1559 transaction in RLP for signing.
+    pub(crate) fn encode_for_signing(&self, out: &mut dyn bytes::BufMut) {
+        out.put_u8(self.tx_type() as u8);
+        Header { list: true, payload_length: self.fields_len() }.encode(out);
+        self.encode_fields(out);
+    }
+
+    /// Outputs the length of the signature RLP encoding for the transaction.
+    pub(crate) fn payload_len_for_signature(&self) -> usize {
+        let payload_length = self.fields_len();
+        // 'transaction type byte length' + 'header length' + 'payload length'
+        1 + length_of_length(payload_length) + payload_length
+    }
+
+    /// Outputs the signature hash of the transaction by first encoding without a signature, then
+    /// hashing.
+    pub(crate) fn signature_hash(&self) -> H256 {
+        let mut buf = BytesMut::with_capacity(self.payload_len_for_signature());
+        self.encode_for_signing(&mut buf);
+        keccak256(&buf)
+    }
 }
 
 #[cfg(test)]
diff --git a/crates/primitives/src/transaction/eip2930.rs b/crates/primitives/src/transaction/eip2930.rs
index 3f2a8f8fa6b7..78e187889105 100644
--- a/crates/primitives/src/transaction/eip2930.rs
+++ b/crates/primitives/src/transaction/eip2930.rs
@@ -1,5 +1,6 @@
 use super::access_list::AccessList;
-use crate::{Bytes, ChainId, Signature, TransactionKind, TxType};
+use crate::{keccak256, Bytes, ChainId, Signature, TransactionKind, TxType, H256};
+use bytes::BytesMut;
 use reth_codecs::{main_codec, Compact};
 use reth_rlp::{length_of_length, Decodable, DecodeError, Encodable, Header};
 use std::mem;
@@ -153,6 +154,28 @@ impl TxEip2930 {
     pub(crate) fn tx_type(&self) -> TxType {
         TxType::EIP2930
     }
+
+    /// Encodes the EIP-2930 transaction in RLP for signing.
+    pub(crate) fn encode_for_signing(&self, out: &mut dyn bytes::BufMut) {
+        out.put_u8(self.tx_type() as u8);
+        Header { list: true, payload_length: self.fields_len() }.encode(out);
+        self.encode_fields(out);
+    }
+
+    /// Outputs the length of the signature RLP encoding for the transaction.
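+    ///
+    /// Worked example with illustrative numbers: a 120-byte field payload takes a
+    /// 2-byte RLP list header (`0xf8 0x78`), so the full signing payload is
+    /// `1 (type byte) + 2 (header) + 120 = 123` bytes.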
+    pub(crate) fn payload_len_for_signature(&self) -> usize {
+        let payload_length = self.fields_len();
+        // 'transaction type byte length' + 'header length' + 'payload length'
+        1 + length_of_length(payload_length) + payload_length
+    }
+
+    /// Outputs the signature hash of the transaction by first encoding without a signature, then
+    /// hashing.
+    pub(crate) fn signature_hash(&self) -> H256 {
+        let mut buf = BytesMut::with_capacity(self.payload_len_for_signature());
+        self.encode_for_signing(&mut buf);
+        keccak256(&buf)
+    }
 }
 
 #[cfg(test)]
diff --git a/crates/primitives/src/transaction/eip4844.rs b/crates/primitives/src/transaction/eip4844.rs
index 90246031e500..3092f284f171 100644
--- a/crates/primitives/src/transaction/eip4844.rs
+++ b/crates/primitives/src/transaction/eip4844.rs
@@ -9,6 +9,7 @@ use crate::{
     kzg_to_versioned_hash, Bytes, ChainId, Signature, Transaction, TransactionKind,
     TransactionSigned, TxHash, TxType, EIP4844_TX_TYPE_ID, H256,
 };
+use bytes::BytesMut;
 use reth_codecs::{main_codec, Compact};
 use reth_rlp::{length_of_length, Decodable, DecodeError, Encodable, Header};
 use serde::{Deserialize, Serialize};
@@ -226,6 +227,28 @@ impl TxEip4844 {
     pub(crate) fn tx_type(&self) -> TxType {
         TxType::EIP4844
     }
+
+    /// Encodes the EIP-4844 transaction in RLP for signing.
+    pub(crate) fn encode_for_signing(&self, out: &mut dyn bytes::BufMut) {
+        out.put_u8(self.tx_type() as u8);
+        Header { list: true, payload_length: self.fields_len() }.encode(out);
+        self.encode_fields(out);
+    }
+
+    /// Outputs the length of the signature RLP encoding for the transaction.
+    pub(crate) fn payload_len_for_signature(&self) -> usize {
+        let payload_length = self.fields_len();
+        // 'transaction type byte length' + 'header length' + 'payload length'
+        1 + length_of_length(payload_length) + payload_length
+    }
+
+    /// Outputs the signature hash of the transaction by first encoding without a signature, then
+    /// hashing.
+    pub(crate) fn signature_hash(&self) -> H256 {
+        let mut buf = BytesMut::with_capacity(self.payload_len_for_signature());
+        self.encode_for_signing(&mut buf);
+        keccak256(&buf)
+    }
 }
 
 /// An error that can occur when validating a [BlobTransaction].
diff --git a/crates/primitives/src/transaction/legacy.rs b/crates/primitives/src/transaction/legacy.rs
index fcbb627268b4..ec61490a54c5 100644
--- a/crates/primitives/src/transaction/legacy.rs
+++ b/crates/primitives/src/transaction/legacy.rs
@@ -1,4 +1,5 @@
-use crate::{Bytes, ChainId, Signature, TransactionKind, TxType};
+use crate::{keccak256, Bytes, ChainId, Signature, TransactionKind, TxType, H256};
+use bytes::BytesMut;
 use reth_codecs::{main_codec, Compact};
 use reth_rlp::{length_of_length, Encodable, Header};
 use std::mem;
@@ -105,6 +106,59 @@ impl TxLegacy {
     pub(crate) fn tx_type(&self) -> TxType {
         TxType::Legacy
     }
+
+    /// Encodes EIP-155 arguments into the desired buffer. Only encodes values for legacy
+    /// transactions.
+    pub(crate) fn encode_eip155_fields(&self, out: &mut dyn bytes::BufMut) {
+        // if this is a legacy transaction without a chain ID, it must be pre-EIP-155
+        // and does not need to encode the chain ID for the signature hash encoding
+        if let Some(id) = self.chain_id {
+            // EIP-155 encodes the chain ID and two zeroes
+            id.encode(out);
+            0x00u8.encode(out);
+            0x00u8.encode(out);
+        }
+    }
+
+    /// Outputs the length of EIP-155 fields. Only outputs a non-zero value for EIP-155 legacy
+    /// transactions.
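+    ///
+    /// For example, with `chain_id = 1` this is `1` byte for the RLP-encoded id plus
+    /// `2` bytes for the two zero placeholders, i.e. `3` bytes in total.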
+ pub(crate) fn eip155_fields_len(&self) -> usize { + if let Some(id) = self.chain_id { + // EIP-155 encodes the chain ID and two zeroes, so we add 2 to the length of the chain + // ID to get the length of all 3 fields + // len(chain_id) + (0x00) + (0x00) + id.length() + 2 + } else { + // this is either a pre-EIP-155 legacy transaction or a typed transaction + 0 + } + } + + /// Encodes the legacy transaction in RLP for signing, including the EIP-155 fields if possible. + pub(crate) fn encode_for_signing(&self, out: &mut dyn bytes::BufMut) { + Header { list: true, payload_length: self.fields_len() + self.eip155_fields_len() } + .encode(out); + self.encode_fields(out); + self.encode_eip155_fields(out); + } + + /// Outputs the length of the signature RLP encoding for the transaction, including the length + /// of the EIP-155 fields if possible. + pub(crate) fn payload_len_for_signature(&self) -> usize { + let payload_length = self.fields_len() + self.eip155_fields_len(); + // 'header length' + 'payload length' + length_of_length(payload_length) + payload_length + } + + /// Outputs the signature hash of the transaction by first encoding without a signature, then + /// hashing. + /// + /// See [Self::encode_for_signing] for more information on the encoding format. + pub(crate) fn signature_hash(&self) -> H256 { + let mut buf = BytesMut::with_capacity(self.payload_len_for_signature()); + self.encode_for_signing(&mut buf); + keccak256(&buf) + } } #[cfg(test)] diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 962bd7e5bcdc..115bc83aea1f 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -10,9 +10,7 @@ pub use meta::TransactionMeta; use once_cell::sync::Lazy; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; use reth_codecs::{add_arbitrary_tests, derive_arbitrary, Compact}; -use reth_rlp::{ - length_of_length, Decodable, DecodeError, Encodable, Header, EMPTY_LIST_CODE, EMPTY_STRING_CODE, -}; +use reth_rlp::{Decodable, DecodeError, Encodable, Header, EMPTY_LIST_CODE, EMPTY_STRING_CODE}; use serde::{Deserialize, Serialize}; pub use signature::Signature; use std::mem; @@ -24,7 +22,7 @@ pub use eip1559::TxEip1559; pub use eip2930::TxEip2930; pub use eip4844::{BlobTransaction, BlobTransactionSidecar, TxEip4844}; pub use legacy::TxLegacy; -pub use pooled::PooledTransactionsElement; +pub use pooled::{PooledTransactionsElement, PooledTransactionsElementEcRecovered}; mod access_list; mod eip1559; @@ -100,9 +98,12 @@ impl Transaction { /// Heavy operation that return signature hash over rlp encoded transaction. /// It is only for signature signing or signer recovery. pub fn signature_hash(&self) -> H256 { - let mut buf = BytesMut::new(); - self.encode(&mut buf); - keccak256(&buf) + match self { + Transaction::Legacy(tx) => tx.signature_hash(), + Transaction::Eip2930(tx) => tx.signature_hash(), + Transaction::Eip1559(tx) => tx.signature_hash(), + Transaction::Eip4844(tx) => tx.signature_hash(), + } } /// Get chain_id. @@ -316,54 +317,6 @@ impl Transaction { } } - /// Encodes EIP-155 arguments into the desired buffer. Only encodes values for legacy - /// transactions. - pub(crate) fn encode_eip155_fields(&self, out: &mut dyn bytes::BufMut) { - // if this is a legacy transaction without a chain ID, it must be pre-EIP-155 - // and does not need to encode the chain ID for the signature hash encoding - if let Transaction::Legacy(TxLegacy { chain_id: Some(id), .. 
}) = self { - // EIP-155 encodes the chain ID and two zeroes - id.encode(out); - 0x00u8.encode(out); - 0x00u8.encode(out); - } - } - - /// Outputs the length of EIP-155 fields. Only outputs a non-zero value for EIP-155 legacy - /// transactions. - pub(crate) fn eip155_fields_len(&self) -> usize { - if let Transaction::Legacy(TxLegacy { chain_id: Some(id), .. }) = self { - // EIP-155 encodes the chain ID and two zeroes, so we add 2 to the length of the chain - // ID to get the length of all 3 fields - // len(chain_id) + (0x00) + (0x00) - id.length() + 2 - } else { - // this is either a pre-EIP-155 legacy transaction or a typed transaction - 0 - } - } - - /// Outputs the length of the transaction's fields, without a RLP header or length of the - /// eip155 fields. - pub(crate) fn fields_len(&self) -> usize { - match self { - Transaction::Legacy(legacy_tx) => legacy_tx.fields_len(), - Transaction::Eip2930(access_list_tx) => access_list_tx.fields_len(), - Transaction::Eip1559(dynamic_fee_tx) => dynamic_fee_tx.fields_len(), - Transaction::Eip4844(blob_tx) => blob_tx.fields_len(), - } - } - - /// Encodes only the transaction's fields into the desired buffer, without a RLP header. - pub(crate) fn encode_fields(&self, out: &mut dyn bytes::BufMut) { - match self { - Transaction::Legacy(legacy_tx) => legacy_tx.encode_fields(out), - Transaction::Eip2930(access_list_tx) => access_list_tx.encode_fields(out), - Transaction::Eip1559(dynamic_fee_tx) => dynamic_fee_tx.encode_fields(out), - Transaction::Eip4844(blob_tx) => blob_tx.encode_fields(out), - } - } - /// This encodes the transaction _without_ the signature, and is only suitable for creating a /// hash intended for signing. pub fn encode_without_signature(&self, out: &mut dyn bytes::BufMut) { @@ -496,32 +449,27 @@ impl Default for Transaction { impl Encodable for Transaction { fn encode(&self, out: &mut dyn bytes::BufMut) { match self { - Transaction::Legacy { .. } => { - Header { list: true, payload_length: self.fields_len() + self.eip155_fields_len() } - .encode(out); - self.encode_fields(out); - self.encode_eip155_fields(out); + Transaction::Legacy(legacy_tx) => { + legacy_tx.encode_for_signing(out); } - _ => { - out.put_u8(self.tx_type() as u8); - Header { list: true, payload_length: self.fields_len() }.encode(out); - self.encode_fields(out); + Transaction::Eip2930(access_list_tx) => { + access_list_tx.encode_for_signing(out); + } + Transaction::Eip1559(dynamic_fee_tx) => { + dynamic_fee_tx.encode_for_signing(out); + } + Transaction::Eip4844(blob_tx) => { + blob_tx.encode_for_signing(out); } } } fn length(&self) -> usize { match self { - Transaction::Legacy { .. 
} => { - let payload_length = self.fields_len() + self.eip155_fields_len(); - // 'header length' + 'payload length' - length_of_length(payload_length) + payload_length - } - _ => { - let payload_length = self.fields_len(); - // 'transaction type byte length' + 'header length' + 'payload length' - 1 + length_of_length(payload_length) + payload_length - } + Transaction::Legacy(legacy_tx) => legacy_tx.payload_len_for_signature(), + Transaction::Eip2930(access_list_tx) => access_list_tx.payload_len_for_signature(), + Transaction::Eip1559(dynamic_fee_tx) => dynamic_fee_tx.payload_len_for_signature(), + Transaction::Eip4844(blob_tx) => blob_tx.payload_len_for_signature(), } } } diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index ebaf5ce144de..bcc870a2c0f2 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -1,10 +1,11 @@ //! Defines the types for blob transactions, legacy, and other EIP-2718 transactions included in a //! response to `GetPooledTransactions`. use crate::{ - BlobTransaction, Bytes, Signature, Transaction, TransactionSigned, TxEip1559, TxEip2930, - TxHash, TxLegacy, EIP4844_TX_TYPE_ID, + Address, BlobTransaction, Bytes, Signature, Transaction, TransactionSigned, + TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxHash, TxLegacy, EIP4844_TX_TYPE_ID, H256, }; use bytes::Buf; +use derive_more::{AsRef, Deref}; use reth_rlp::{Decodable, DecodeError, Encodable, Header, EMPTY_LIST_CODE}; use serde::{Deserialize, Serialize}; @@ -45,6 +46,46 @@ pub enum PooledTransactionsElement { } impl PooledTransactionsElement { + /// Heavy operation that return signature hash over rlp encoded transaction. + /// It is only for signature signing or signer recovery. + pub fn signature_hash(&self) -> H256 { + match self { + Self::Legacy { transaction, .. } => transaction.signature_hash(), + Self::Eip2930 { transaction, .. } => transaction.signature_hash(), + Self::Eip1559 { transaction, .. } => transaction.signature_hash(), + Self::BlobTransaction(blob_tx) => blob_tx.transaction.signature_hash(), + } + } + + /// Returns the signature of the transaction. + pub fn signature(&self) -> &Signature { + match self { + Self::Legacy { signature, .. } => signature, + Self::Eip2930 { signature, .. } => signature, + Self::Eip1559 { signature, .. } => signature, + Self::BlobTransaction(blob_tx) => &blob_tx.signature, + } + } + + /// Recover signer from signature and hash. + /// + /// Returns `None` if the transaction's signature is invalid, see also [Self::recover_signer]. + pub fn recover_signer(&self) -> Option
{
+        let signature_hash = self.signature_hash();
+        self.signature().recover_signer(signature_hash)
+    }
+
+    /// Tries to recover the signer and returns a [`PooledTransactionsElementEcRecovered`].
+    ///
+    /// Returns `Err(Self)` if the transaction's signature is invalid, see also
+    /// [Self::recover_signer].
+    pub fn try_into_ecrecovered(self) -> Result<PooledTransactionsElementEcRecovered, Self> {
+        match self.recover_signer() {
+            None => Err(self),
+            Some(signer) => Ok(PooledTransactionsElementEcRecovered { transaction: self, signer }),
+        }
+    }
+
     /// Decodes the "raw" format of transaction (e.g. `eth_sendRawTransaction`).
     ///
     /// The raw transaction is either a legacy transaction or EIP-2718 typed transaction
@@ -117,6 +158,12 @@ impl PooledTransactionsElement {
         }
     }
 
+    /// Create [`TransactionSignedEcRecovered`] by converting this transaction into
+    /// [`TransactionSigned`] and [`Address`] of the signer.
+    pub fn into_ecrecovered_transaction(self, signer: Address) -> TransactionSignedEcRecovered {
+        TransactionSignedEcRecovered::from_signed_transaction(self.into_transaction(), signer)
+    }
+
     /// Returns the inner [TransactionSigned].
     pub fn into_transaction(self) -> TransactionSigned {
         match self {
@@ -301,3 +348,42 @@ impl From<TransactionSigned> for PooledTransactionsElement {
     }
 }
+
+/// A signed pooled transaction with recovered signer.
+#[derive(Debug, Clone, PartialEq, Eq, AsRef, Deref)]
+pub struct PooledTransactionsElementEcRecovered {
+    /// Signer of the transaction
+    signer: Address,
+    /// Signed transaction
+    #[deref]
+    #[as_ref]
+    transaction: PooledTransactionsElement,
+}
+
+// === impl PooledTransactionsElementEcRecovered ===
+
+impl PooledTransactionsElementEcRecovered {
+    /// Signer of transaction recovered from signature
+    pub fn signer(&self) -> Address {
+        self.signer
+    }
+
+    /// Transform back to [`PooledTransactionsElement`]
+    pub fn into_transaction(self) -> PooledTransactionsElement {
+        self.transaction
+    }
+
+    /// Dissolve Self into its components
+    pub fn into_components(self) -> (PooledTransactionsElement, Address) {
+        (self.transaction, self.signer)
+    }
+
+    /// Create [`TransactionSignedEcRecovered`] from [`PooledTransactionsElement`] and [`Address`]
+    /// of the signer.
+    pub fn from_signed_transaction(
+        transaction: PooledTransactionsElement,
+        signer: Address,
+    ) -> Self {
+        Self { transaction, signer }
+    }
+}
diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs
index 4d740da9ab53..2169c700391f 100644
--- a/crates/transaction-pool/src/traits.rs
+++ b/crates/transaction-pool/src/traits.rs
@@ -6,9 +6,9 @@ use crate::{
 };
 use futures_util::{ready, Stream};
 use reth_primitives::{
-    Address, FromRecoveredTransaction, IntoRecoveredTransaction, PeerId, Transaction,
-    TransactionKind, TransactionSignedEcRecovered, TxHash, EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID,
-    H256, U256,
+    Address, BlobTransactionSidecar, FromRecoveredTransaction, IntoRecoveredTransaction, PeerId,
+    PooledTransactionsElement, PooledTransactionsElementEcRecovered, Transaction, TransactionKind,
+    TransactionSignedEcRecovered, TxHash, EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, H256, U256,
 };
 use reth_rlp::Encodable;
 use std::{
@@ -569,7 +569,7 @@ pub trait PoolTransaction:
 ///
 /// This type is essentially a wrapper around [TransactionSignedEcRecovered] with additional fields
 /// derived from the transaction that are frequently used by the pools for ordering.
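A short sketch of the recovery round-trip these additions enable; `element` is assumed to be a `PooledTransactionsElement` decoded from a `GetPooledTransactions` response:

    match element.try_into_ecrecovered() {
        // valid signature: the recovered signer now travels with the element
        Ok(recovered) => {
            let signer = recovered.signer();
            // split it back apart whenever the raw element is needed again
            let (element, same_signer) = recovered.into_components();
            assert_eq!(signer, same_signer);
            let _ = element;
        }
        // invalid signature: the untouched element is handed back
        Err(element) => {
            let _ = element;
        }
    }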
-#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[derive(Debug, Clone, PartialEq, Eq)] pub struct EthPooledTransaction { /// EcRecovered transaction info pub(crate) transaction: TransactionSignedEcRecovered, @@ -577,21 +577,41 @@ pub struct EthPooledTransaction { /// For EIP-1559 transactions: `max_fee_per_gas * gas_limit + tx_value`. /// For legacy transactions: `gas_price * gas_limit + tx_value`. pub(crate) cost: U256, - // TODO optional sidecar + + /// The blob side car this transaction + pub(crate) blob_sidecar: EthBlobTransactionSidecar, +} + +/// Represents the blob sidecar of the [EthPooledTransaction]. +#[derive(Debug, Clone, PartialEq, Eq)] +pub(crate) enum EthBlobTransactionSidecar { + /// This transaction does not have a blob sidecar + None, + /// This transaction has a blob sidecar (EIP-4844) but it is missing + /// + /// It was either extracted after being inserted into the pool or re-injected after reorg + /// without the blob sidecar + Missing, + /// The eip-4844 transaction was pulled from the network and still has its blob sidecar + Present(BlobTransactionSidecar), } impl EthPooledTransaction { /// Create new instance of [Self]. pub fn new(transaction: TransactionSignedEcRecovered) -> Self { + let mut blob_sidecar = EthBlobTransactionSidecar::None; let gas_cost = match &transaction.transaction { Transaction::Legacy(t) => U256::from(t.gas_price) * U256::from(t.gas_limit), Transaction::Eip2930(t) => U256::from(t.gas_price) * U256::from(t.gas_limit), Transaction::Eip1559(t) => U256::from(t.max_fee_per_gas) * U256::from(t.gas_limit), - Transaction::Eip4844(t) => U256::from(t.max_fee_per_gas) * U256::from(t.gas_limit), + Transaction::Eip4844(t) => { + blob_sidecar = EthBlobTransactionSidecar::Missing; + U256::from(t.max_fee_per_gas) * U256::from(t.gas_limit) + } }; let cost = gas_cost + U256::from(transaction.value()); - Self { transaction, cost } + Self { transaction, cost, blob_sidecar } } /// Return the reference to the underlying transaction. @@ -600,6 +620,27 @@ impl EthPooledTransaction { } } +/// Conversion from the network transaction type to the pool transaction type. +impl From for EthPooledTransaction { + fn from(tx: PooledTransactionsElementEcRecovered) -> Self { + let (tx, signer) = tx.into_components(); + match tx { + PooledTransactionsElement::BlobTransaction(tx) => { + // include the blob sidecar + let (tx, blob) = tx.into_parts(); + let tx = TransactionSignedEcRecovered::from_signed_transaction(tx, signer); + let mut pooled = EthPooledTransaction::new(tx); + pooled.blob_sidecar = EthBlobTransactionSidecar::Present(blob); + pooled + } + tx => { + // no blob sidecar + EthPooledTransaction::new(tx.into_ecrecovered_transaction(signer)) + } + } + } +} + impl PoolTransaction for EthPooledTransaction { /// Returns hash of the transaction. 
fn hash(&self) -> &TxHash { From 9d46ab48637b96ef5b32fb013a5d920e793659cf Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 18 Aug 2023 14:29:53 -0400 Subject: [PATCH 470/722] feat: validate engine cancun fields based on method version (#4256) --- crates/rpc/rpc-engine-api/src/engine_api.rs | 112 ++++++++++++++++---- crates/rpc/rpc-engine-api/src/error.rs | 17 +++ crates/rpc/rpc-engine-api/src/lib.rs | 3 + crates/rpc/rpc-engine-api/src/payload.rs | 56 ++++++++++ 4 files changed, 166 insertions(+), 22 deletions(-) create mode 100644 crates/rpc/rpc-engine-api/src/payload.rs diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 56ddc71cdf90..057b3954ee25 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -1,4 +1,6 @@ -use crate::{EngineApiError, EngineApiMessageVersion, EngineApiResult}; +use crate::{ + payload::PayloadOrAttributes, EngineApiError, EngineApiMessageVersion, EngineApiResult, +}; use async_trait::async_trait; use jsonrpsee_core::RpcResult; use reth_beacon_consensus::BeaconConsensusEngineHandle; @@ -69,10 +71,9 @@ where &self, payload: ExecutionPayload, ) -> EngineApiResult { - self.validate_withdrawals_presence( + self.validate_version_specific_fields( EngineApiMessageVersion::V1, - payload.timestamp.as_u64(), - payload.withdrawals.is_some(), + PayloadOrAttributes::from_execution_payload(&payload, None), )?; Ok(self.inner.beacon_consensus.new_payload(payload).await?) } @@ -82,11 +83,26 @@ where &self, payload: ExecutionPayload, ) -> EngineApiResult { - self.validate_withdrawals_presence( + self.validate_version_specific_fields( EngineApiMessageVersion::V2, - payload.timestamp.as_u64(), - payload.withdrawals.is_some(), + PayloadOrAttributes::from_execution_payload(&payload, None), + )?; + Ok(self.inner.beacon_consensus.new_payload(payload).await?) + } + + /// See also + pub async fn new_payload_v3( + &self, + payload: ExecutionPayload, + _versioned_hashes: Vec, + parent_beacon_block_root: H256, + ) -> EngineApiResult { + self.validate_version_specific_fields( + EngineApiMessageVersion::V3, + PayloadOrAttributes::from_execution_payload(&payload, Some(parent_beacon_block_root)), )?; + + // TODO: validate versioned hashes and figure out what to do with parent_beacon_block_root Ok(self.inner.beacon_consensus.new_payload(payload).await?) } @@ -102,11 +118,7 @@ where payload_attrs: Option, ) -> EngineApiResult { if let Some(ref attrs) = payload_attrs { - self.validate_withdrawals_presence( - EngineApiMessageVersion::V1, - attrs.timestamp.as_u64(), - attrs.withdrawals.is_some(), - )?; + self.validate_version_specific_fields(EngineApiMessageVersion::V1, attrs.into())?; } Ok(self.inner.beacon_consensus.fork_choice_updated(state, payload_attrs).await?) } @@ -121,11 +133,7 @@ where payload_attrs: Option, ) -> EngineApiResult { if let Some(ref attrs) = payload_attrs { - self.validate_withdrawals_presence( - EngineApiMessageVersion::V2, - attrs.timestamp.as_u64(), - attrs.withdrawals.is_some(), - )?; + self.validate_version_specific_fields(EngineApiMessageVersion::V2, attrs.into())?; } Ok(self.inner.beacon_consensus.fork_choice_updated(state, payload_attrs).await?) 
} @@ -140,11 +148,7 @@ where payload_attrs: Option, ) -> EngineApiResult { if let Some(ref attrs) = payload_attrs { - self.validate_withdrawals_presence( - EngineApiMessageVersion::V3, - attrs.timestamp.as_u64(), - attrs.withdrawals.is_some(), - )?; + self.validate_version_specific_fields(EngineApiMessageVersion::V3, attrs.into())?; } Ok(self.inner.beacon_consensus.fork_choice_updated(state, payload_attrs).await?) @@ -353,6 +357,70 @@ where Ok(()) } + + /// Validate the presence of the `parentBeaconBlockRoot` field according to the payload + /// timestamp. + /// + /// After Cancun, `parentBeaconBlockRoot` field must be [Some]. + /// Before Cancun, `parentBeaconBlockRoot` field must be [None]. + /// + /// If the payload attribute's timestamp is before the Cancun fork and the engine API message + /// version is V3, then this will return [EngineApiError::UnsupportedFork]. + /// + /// If the engine API message version is V1 or V2, and the payload attribute's timestamp is + /// post-Cancun, then this will return [EngineApiError::NoParentBeaconBlockRootPostCancun]. + /// + /// Implements the following Engine API spec rule: + /// + /// * Client software MUST return `-38005: Unsupported fork` error if the timestamp of the + /// payload does not fall within the time frame of the Cancun fork. + fn validate_parent_beacon_block_root_presence( + &self, + version: EngineApiMessageVersion, + timestamp: u64, + has_parent_beacon_block_root: bool, + ) -> EngineApiResult<()> { + let is_cancun = self.inner.chain_spec.fork(Hardfork::Cancun).active_at_timestamp(timestamp); + + match version { + EngineApiMessageVersion::V1 | EngineApiMessageVersion::V2 => { + if has_parent_beacon_block_root { + return Err(EngineApiError::ParentBeaconBlockRootNotSupportedBeforeV3) + } + if is_cancun { + return Err(EngineApiError::NoParentBeaconBlockRootPostCancun) + } + } + EngineApiMessageVersion::V3 => { + if !is_cancun { + return Err(EngineApiError::UnsupportedFork) + } else if !has_parent_beacon_block_root { + return Err(EngineApiError::NoParentBeaconBlockRootPostCancun) + } + } + }; + + Ok(()) + } + + /// Validates the presence or exclusion of fork-specific fields based on the payload attributes + /// and the message version. + fn validate_version_specific_fields( + &self, + version: EngineApiMessageVersion, + payload_or_attrs: PayloadOrAttributes<'_>, + ) -> EngineApiResult<()> { + self.validate_withdrawals_presence( + version, + payload_or_attrs.timestamp(), + payload_or_attrs.withdrawals().is_some(), + )?; + self.validate_parent_beacon_block_root_presence( + version, + payload_or_attrs.timestamp(), + payload_or_attrs.parent_beacon_block_root().is_some(), + ) + } } #[async_trait] diff --git a/crates/rpc/rpc-engine-api/src/error.rs b/crates/rpc/rpc-engine-api/src/error.rs index 26c29916c300..a5bf6b111d3b 100644 --- a/crates/rpc/rpc-engine-api/src/error.rs +++ b/crates/rpc/rpc-engine-api/src/error.rs @@ -7,6 +7,8 @@ use thiserror::Error; /// The Engine API result type pub type EngineApiResult = Result; +/// Payload unsupported fork code. +pub const UNSUPPORTED_FORK_CODE: i32 = -38005; /// Payload unknown error code. pub const UNKNOWN_PAYLOAD_CODE: i32 = -38001; /// Request too large error code. 
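For reference, the checks in `validate_parent_beacon_block_root_presence` reduce to the following outcomes, written as `(version, Cancun active, root present) -> result`:

    (V1/V2, any, present) -> Err(ParentBeaconBlockRootNotSupportedBeforeV3)
    (V1/V2, yes, absent ) -> Err(NoParentBeaconBlockRootPostCancun)
    (V1/V2, no,  absent ) -> Ok(())
    (V3,    no,  any    ) -> Err(UnsupportedFork)
    (V3,    yes, absent ) -> Err(NoParentBeaconBlockRootPostCancun)
    (V3,    yes, present) -> Ok(())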
@@ -34,6 +36,10 @@ pub enum EngineApiError { /// requested number of items count: u64, }, + /// Thrown if `PayloadAttributes` provided in engine_forkchoiceUpdated before V3 contains a + /// parent beacon block root + #[error("parent beacon block root not supported before V3")] + ParentBeaconBlockRootNotSupportedBeforeV3, /// Thrown if engine_forkchoiceUpdatedV1 contains withdrawals #[error("withdrawals not supported in V1")] WithdrawalsNotSupportedInV1, @@ -43,6 +49,14 @@ pub enum EngineApiError { /// Thrown if engine_forkchoiceUpdated contains withdrawals before Shanghai #[error("withdrawals pre-shanghai")] HasWithdrawalsPreShanghai, + /// Thrown if the `PayloadAttributes` provided in engine_forkchoiceUpdated contains no parent + /// beacon block root after Cancun + #[error("no parent beacon block root post-cancun")] + NoParentBeaconBlockRootPostCancun, + /// Thrown if `PayloadAttributes` were provided with a timestamp, but the version of the engine + /// method called is meant for a fork that occurs after the provided timestamp. + #[error("unsupported fork")] + UnsupportedFork, /// Terminal total difficulty mismatch during transition configuration exchange. #[error( "Invalid transition terminal total difficulty. Execution: {execution}. Consensus: {consensus}" @@ -82,10 +96,13 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { let code = match error { EngineApiError::InvalidBodiesRange { .. } | EngineApiError::WithdrawalsNotSupportedInV1 | + EngineApiError::ParentBeaconBlockRootNotSupportedBeforeV3 | + EngineApiError::NoParentBeaconBlockRootPostCancun | EngineApiError::NoWithdrawalsPostShanghai | EngineApiError::HasWithdrawalsPreShanghai => INVALID_PARAMS_CODE, EngineApiError::UnknownPayload => UNKNOWN_PAYLOAD_CODE, EngineApiError::PayloadRequestTooLarge { .. } => REQUEST_TOO_LARGE_CODE, + EngineApiError::UnsupportedFork => UNSUPPORTED_FORK_CODE, // Error responses from the consensus engine EngineApiError::ForkChoiceUpdate(ref err) => match err { diff --git a/crates/rpc/rpc-engine-api/src/lib.rs b/crates/rpc/rpc-engine-api/src/lib.rs index ba36670c5aa3..92631faa10ab 100644 --- a/crates/rpc/rpc-engine-api/src/lib.rs +++ b/crates/rpc/rpc-engine-api/src/lib.rs @@ -20,6 +20,9 @@ mod engine_api; /// The Engine API message type. mod message; +/// An type representing either an execution payload or payload attributes. +mod payload; + /// Engine API error. mod error; diff --git a/crates/rpc/rpc-engine-api/src/payload.rs b/crates/rpc/rpc-engine-api/src/payload.rs new file mode 100644 index 000000000000..95db05a3f7a8 --- /dev/null +++ b/crates/rpc/rpc-engine-api/src/payload.rs @@ -0,0 +1,56 @@ +use reth_primitives::{Withdrawal, H256}; +use reth_rpc_types::engine::{ExecutionPayload, PayloadAttributes}; + +/// Either an [ExecutionPayload] or a [PayloadAttributes]. +pub(crate) enum PayloadOrAttributes<'a> { + /// An [ExecutionPayload] and optional parent beacon block root. + ExecutionPayload { + /// The inner execution payload + payload: &'a ExecutionPayload, + /// The parent beacon block root + parent_beacon_block_root: Option, + }, + /// A [PayloadAttributes]. + PayloadAttributes(&'a PayloadAttributes), +} + +impl<'a> PayloadOrAttributes<'a> { + /// Construct a [PayloadOrAttributes] from an [ExecutionPayload] and optional parent beacon + /// block root. 
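+    ///
+    /// For example, `new_payload_v3` above builds one via
+    /// `PayloadOrAttributes::from_execution_payload(&payload, Some(parent_beacon_block_root))`,
+    /// while the pre-Cancun `new_payload` endpoints pass `None` for the root.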
+ pub(crate) fn from_execution_payload( + payload: &'a ExecutionPayload, + parent_beacon_block_root: Option, + ) -> Self { + Self::ExecutionPayload { payload, parent_beacon_block_root } + } + + /// Return the withdrawals for the payload or attributes. + pub(crate) fn withdrawals(&self) -> &Option> { + match self { + Self::ExecutionPayload { payload, .. } => &payload.withdrawals, + Self::PayloadAttributes(attributes) => &attributes.withdrawals, + } + } + + /// Return the timestamp for the payload or attributes. + pub(crate) fn timestamp(&self) -> u64 { + match self { + Self::ExecutionPayload { payload, .. } => payload.timestamp.as_u64(), + Self::PayloadAttributes(attributes) => attributes.timestamp.as_u64(), + } + } + + /// Return the parent beacon block root for the payload or attributes. + pub(crate) fn parent_beacon_block_root(&self) -> Option { + match self { + Self::ExecutionPayload { parent_beacon_block_root, .. } => *parent_beacon_block_root, + Self::PayloadAttributes(attributes) => attributes.parent_beacon_block_root, + } + } +} + +impl<'a> From<&'a PayloadAttributes> for PayloadOrAttributes<'a> { + fn from(attributes: &'a PayloadAttributes) -> Self { + Self::PayloadAttributes(attributes) + } +} From 82a42c98a3143281aeaf065bade1638df6ec0d7f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 18 Aug 2023 20:43:16 +0200 Subject: [PATCH 471/722] feat: add TransactionPool blob getters (#4272) --- crates/transaction-pool/src/lib.rs | 15 +++++++++++++-- crates/transaction-pool/src/noop.rs | 23 +++++++++++++++++------ crates/transaction-pool/src/pool/mod.rs | 5 +++++ crates/transaction-pool/src/traits.rs | 15 +++++++++++++++ 4 files changed, 50 insertions(+), 8 deletions(-) diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 154cabe64416..296e676a0ca7 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -154,7 +154,7 @@ //! - `test-utils`: Export utilities for testing use crate::pool::PoolInner; use aquamarine as _; -use reth_primitives::{Address, TxHash, U256}; +use reth_primitives::{Address, BlobTransactionSidecar, TxHash, U256}; use reth_provider::StateProviderFactory; use std::{ collections::{HashMap, HashSet}, @@ -163,7 +163,7 @@ use std::{ use tokio::sync::mpsc::Receiver; use tracing::{instrument, trace}; -use crate::blobstore::BlobStore; +use crate::blobstore::{BlobStore, BlobStoreError}; pub use crate::{ config::{ PoolConfig, PriceBumpConfig, SubPoolLimit, DEFAULT_PRICE_BUMP, REPLACE_BLOB_PRICE_BUMP, @@ -458,6 +458,17 @@ where fn unique_senders(&self) -> HashSet
{ self.pool.unique_senders() } + + fn get_blob(&self, tx_hash: TxHash) -> Result, BlobStoreError> { + self.pool.blob_store().get(tx_hash) + } + + fn get_all_blobs( + &self, + tx_hashes: Vec, + ) -> Result, BlobStoreError> { + self.pool.blob_store().get_all(tx_hashes) + } } impl TransactionPoolExt for Pool diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index 0c0aa860662d..027c106e9994 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -4,13 +4,13 @@ //! to be generic over it. use crate::{ - error::PoolError, traits::PendingTransactionListenerKind, validate::ValidTransaction, - AllPoolTransactions, AllTransactionsEvents, BestTransactions, BlockInfo, EthPooledTransaction, - NewTransactionEvent, PoolResult, PoolSize, PoolTransaction, PropagatedTransactions, - TransactionEvents, TransactionOrigin, TransactionPool, TransactionValidationOutcome, - TransactionValidator, ValidPoolTransaction, + blobstore::BlobStoreError, error::PoolError, traits::PendingTransactionListenerKind, + validate::ValidTransaction, AllPoolTransactions, AllTransactionsEvents, BestTransactions, + BlockInfo, EthPooledTransaction, NewTransactionEvent, PoolResult, PoolSize, PoolTransaction, + PropagatedTransactions, TransactionEvents, TransactionOrigin, TransactionPool, + TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, }; -use reth_primitives::{Address, TxHash}; +use reth_primitives::{Address, BlobTransactionSidecar, TxHash}; use std::{collections::HashSet, marker::PhantomData, sync::Arc}; use tokio::sync::{mpsc, mpsc::Receiver}; @@ -162,6 +162,17 @@ impl TransactionPool for NoopTransactionPool { fn unique_senders(&self) -> HashSet
{ Default::default() } + + fn get_blob(&self, _tx_hash: TxHash) -> Result, BlobStoreError> { + Ok(None) + } + + fn get_all_blobs( + &self, + _tx_hashes: Vec, + ) -> Result, BlobStoreError> { + Ok(vec![]) + } } /// A [`TransactionValidator`] that does nothing. diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 1e70b4528373..3b633eec2283 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -158,6 +158,11 @@ where } } + /// Returns the configured blob store. + pub(crate) fn blob_store(&self) -> &S { + &self.blob_store + } + /// Returns stats about the size of the pool. pub(crate) fn size(&self) -> PoolSize { self.pool.read().size() diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 2169c700391f..cc7f1ae8a5d6 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -20,6 +20,7 @@ use std::{ }; use tokio::sync::mpsc::Receiver; +use crate::blobstore::BlobStoreError; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -266,6 +267,20 @@ pub trait TransactionPool: Send + Sync + Clone { /// Returns a set of all senders of transactions in the pool fn unique_senders(&self) -> HashSet
From cbf3eb4333120cd48acf4be9533b1fb375daa842 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Fri, 18 Aug 2023 22:52:49 +0200
Subject: [PATCH 472/722] feat: integrate blobstore in validator (#4273)

---
 bin/reth/src/node/mod.rs                     |   4 +-
 crates/transaction-pool/src/blobstore/mod.rs |   3 +-
 crates/transaction-pool/src/lib.rs           |  17 +-
 crates/transaction-pool/src/validate/eth.rs  | 380 ++++++++++---------
 crates/transaction-pool/src/validate/task.rs |  15 +-
 5 files changed, 220 insertions(+), 199 deletions(-)

diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs
index 0b1b98cc718e..06c90465d418 100644
--- a/bin/reth/src/node/mod.rs
+++ b/bin/reth/src/node/mod.rs
@@ -264,14 +264,16 @@ impl NodeCommand {
         let factory = ProviderFactory::new(Arc::clone(&db), Arc::clone(&self.chain));
         let blockchain_db = BlockchainProvider::new(factory, blockchain_tree.clone())?;
+        let blob_store = InMemoryBlobStore::default();
         let transaction_pool = reth_transaction_pool::Pool::eth_pool(
             TransactionValidationTaskExecutor::eth_with_additional_tasks(
                 blockchain_db.clone(),
                 Arc::clone(&self.chain),
+                blob_store.clone(),
                 ctx.task_executor.clone(),
                 1,
             ),
-            InMemoryBlobStore::default(),
+            blob_store,
             self.txpool.pool_config(),
         );
         info!(target: "reth::cli", "Transaction pool initialized");
diff --git a/crates/transaction-pool/src/blobstore/mod.rs b/crates/transaction-pool/src/blobstore/mod.rs
index 0bdd14218048..dcc6764389e6 100644
--- a/crates/transaction-pool/src/blobstore/mod.rs
+++ b/crates/transaction-pool/src/blobstore/mod.rs
@@ -1,6 +1,7 @@
 //! Storage for blob data of EIP4844 transactions.

 use reth_primitives::{BlobTransactionSidecar, H256};
+use std::fmt;
 mod maintain;
 mod mem;
 mod noop;
@@ -15,7 +16,7 @@
 /// finalization).
 ///
 /// Note: this is Clone because it is expected to be wrapped in an Arc.
-pub trait BlobStore: Send + Sync + 'static {
+pub trait BlobStore: fmt::Debug + Send + Sync + 'static {
     /// Inserts the blob sidecar into the store
     fn insert(&self, tx: H256, data: BlobTransactionSidecar) -> Result<(), BlobStoreError>;

diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs
index 296e676a0ca7..d7fc2f1de83e 100644
--- a/crates/transaction-pool/src/lib.rs
+++ b/crates/transaction-pool/src/lib.rs
@@ -105,9 +105,10 @@
 //! use reth_transaction_pool::{TransactionValidationTaskExecutor, Pool, TransactionPool};
 //! use reth_transaction_pool::blobstore::InMemoryBlobStore;
 //! async fn t<C>(client: C) where C: StateProviderFactory + ChainSpecProvider + Clone + 'static{
+//!     let blob_store = InMemoryBlobStore::default();
 //!     let pool = Pool::eth_pool(
-//!         TransactionValidationTaskExecutor::eth(client, MAINNET.clone(), TokioTaskExecutor::default()),
-//!         InMemoryBlobStore::default(),
+//!         TransactionValidationTaskExecutor::eth(client, MAINNET.clone(), blob_store.clone(), TokioTaskExecutor::default()),
+//!         blob_store,
 //!         Default::default(),
 //!     );
 //!   let mut transactions = pool.pending_transactions_listener();
@@ -136,9 +137,10 @@
 //!   where C: StateProviderFactory + BlockReaderIdExt + ChainSpecProvider + Clone + 'static,
 //!     St: Stream<Item = CanonStateNotification> + Send + Unpin + 'static,
 //! {
+//!     let blob_store = InMemoryBlobStore::default();
 //!     let pool = Pool::eth_pool(
-//!         TransactionValidationTaskExecutor::eth(client.clone(), MAINNET.clone(), TokioTaskExecutor::default()),
-//!         InMemoryBlobStore::default(),
+//!         TransactionValidationTaskExecutor::eth(client.clone(), MAINNET.clone(), blob_store.clone(), TokioTaskExecutor::default()),
+//!         blob_store,
 //!         Default::default(),
 //!     );
 //!
@@ -296,10 +298,11 @@
     /// use reth_tasks::TokioTaskExecutor;
     /// use reth_transaction_pool::{TransactionValidationTaskExecutor, Pool};
     /// use reth_transaction_pool::blobstore::InMemoryBlobStore;
-    /// # fn t<C>(client: C) where C: StateProviderFactory + Clone + 'static{
+    /// # fn t<C>(client: C) where C: StateProviderFactory + Clone + 'static {
+    /// let blob_store = InMemoryBlobStore::default();
     /// let pool = Pool::eth_pool(
-    ///     TransactionValidationTaskExecutor::eth(client, MAINNET.clone(), TokioTaskExecutor::default()),
-    ///     InMemoryBlobStore::default(),
+    ///     TransactionValidationTaskExecutor::eth(client, MAINNET.clone(), blob_store.clone(), TokioTaskExecutor::default()),
+    ///     blob_store,
     ///     Default::default(),
     /// );
     /// # }
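The new `fmt::Debug` supertrait on `BlobStore` is what allows the validator below to hold the store as a boxed trait object and still derive `Debug`. A self-contained sketch of that mechanism (using a stand-in trait, not the real one):

    use std::fmt;

    // Stand-in for the real BlobStore trait, which also has insert/get/... methods.
    trait BlobStore: fmt::Debug + Send + Sync + 'static {}

    // Deriving Debug here only compiles because the boxed trait object is
    // itself Debug; without the supertrait this derive would be rejected.
    #[derive(Debug)]
    struct ValidatorInner {
        blob_store: Box<dyn BlobStore>,
    }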
diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs
index 837dee8a6400..efb0b9e0a590 100644
--- a/crates/transaction-pool/src/validate/eth.rs
+++ b/crates/transaction-pool/src/validate/eth.rs
@@ -1,6 +1,7 @@
 //! Ethereum transaction validator.

 use crate::{
+    blobstore::BlobStore,
     error::InvalidPoolTransactionError,
     traits::{PoolTransaction, TransactionOrigin},
     validate::{ValidTransaction, ValidationTask, MAX_INIT_CODE_SIZE, TX_MAX_SIZE},
@@ -22,198 +23,16 @@ pub struct EthTransactionValidator<Client, Tx> {
     pub inner: Arc<EthTransactionValidatorInner<Client, Tx>>,
 }

-/// A builder for [TransactionValidationTaskExecutor]
-#[derive(Debug, Clone)]
-pub struct EthTransactionValidatorBuilder {
-    chain_spec: Arc<ChainSpec>,
-    /// Fork indicator whether we are in the Shanghai stage.
-    shanghai: bool,
-    /// Fork indicator whether we are in the Cancun hardfork.
-    cancun: bool,
-    /// Fork indicator whether we are using EIP-2718 type transactions.
-    eip2718: bool,
-    /// Fork indicator whether we are using EIP-1559 type transactions.
-    eip1559: bool,
-    /// Fork indicator whether we are using EIP-4844 blob transactions.
-    eip4844: bool,
-    /// The current max gas limit
-    block_gas_limit: u64,
-    /// Minimum priority fee to enforce for acceptance into the pool.
-    minimum_priority_fee: Option<u128>,
-    /// Determines how many additional tasks to spawn
-    ///
-    /// Default is 1
-    additional_tasks: usize,
-    /// Toggle to determine if a local transaction should be propagated
-    propagate_local_transactions: bool,
-}
-
-impl EthTransactionValidatorBuilder {
-    /// Creates a new builder for the given [ChainSpec]
-    pub fn new(chain_spec: Arc<ChainSpec>) -> Self {
-        Self {
-            chain_spec,
-            shanghai: true,
-            eip2718: true,
-            eip1559: true,
-            block_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT,
-            minimum_priority_fee: None,
-            additional_tasks: 1,
-            // default to true, can potentially take this as a param in the future
-            propagate_local_transactions: true,
-
-            // TODO: can hard enable by default once transitioned
-            cancun: false,
-            eip4844: false,
-        }
-    }
-
-    /// Disables the Cancun fork.
-    pub fn no_cancun(self) -> Self {
-        self.set_cancun(false)
-    }
-
-    /// Set the Cancun fork.
-    pub fn set_cancun(mut self, cancun: bool) -> Self {
-        self.cancun = cancun;
-        self
-    }
-
-    /// Disables the Shanghai fork.
-    pub fn no_shanghai(self) -> Self {
-        self.set_shanghai(false)
-    }
-
-    /// Set the Shanghai fork.
-    pub fn set_shanghai(mut self, shanghai: bool) -> Self {
-        self.shanghai = shanghai;
-        self
-    }
-
-    /// Disables the eip2718 support.
-    pub fn no_eip2718(self) -> Self {
-        self.set_eip2718(false)
-    }
-
-    /// Set eip2718 support.
-    pub fn set_eip2718(mut self, eip2718: bool) -> Self {
-        self.eip2718 = eip2718;
-        self
-    }
-
-    /// Disables the eip1559 support.
-    pub fn no_eip1559(self) -> Self {
-        self.set_eip1559(false)
-    }
-
-    /// Set the eip1559 support.
-    pub fn set_eip1559(mut self, eip1559: bool) -> Self {
-        self.eip1559 = eip1559;
-        self
-    }
-    /// Sets toggle to propagate transactions received locally by this client (e.g.
-    /// transactions from eth_sendTransaction to this node's RPC server)
-    ///
-    /// If set to false, only transactions received by network peers (via
-    /// p2p) will be marked as propagated in the local transaction pool and returned on a
-    /// GetPooledTransactions p2p request
-    pub fn set_propagate_local_transactions(mut self, propagate_local_txs: bool) -> Self {
-        self.propagate_local_transactions = propagate_local_txs;
-        self
-    }
-    /// Disables propagating transactions received locally by this client
-    ///
-    /// For more information, check docs for set_propagate_local_transactions
-    pub fn no_local_transaction_propagation(mut self) -> Self {
-        self.propagate_local_transactions = false;
-        self
-    }
-
-    /// Sets a minimum priority fee that's enforced for acceptance into the pool.
-    pub fn with_minimum_priority_fee(mut self, minimum_priority_fee: u128) -> Self {
-        self.minimum_priority_fee = Some(minimum_priority_fee);
-        self
-    }
-
-    /// Sets the number of additional tasks to spawn.
-    pub fn with_additional_tasks(mut self, additional_tasks: usize) -> Self {
-        self.additional_tasks = additional_tasks;
-        self
-    }
-
-    /// Builds a [TransactionValidationTaskExecutor]
-    ///
-    /// The validator will spawn `additional_tasks` additional tasks for validation.
-    ///
-    /// By default this will spawn 1 additional task.
-    pub fn build<Client, Tx, T>(
-        self,
-        client: Client,
-        tasks: T,
-    ) -> TransactionValidationTaskExecutor<EthTransactionValidator<Client, Tx>>
-    where
-        T: TaskSpawner,
-    {
-        let Self {
-            chain_spec,
-            shanghai,
-            cancun,
-            eip2718,
-            eip1559,
-            eip4844,
-            block_gas_limit,
-            minimum_priority_fee,
-            additional_tasks,
-            propagate_local_transactions,
-        } = self;
-
-        let inner = EthTransactionValidatorInner {
-            chain_spec,
-            client,
-            shanghai,
-            eip2718,
-            eip1559,
-            cancun,
-            eip4844,
-            block_gas_limit,
-            minimum_priority_fee,
-            propagate_local_transactions,
-            _marker: Default::default(),
-        };
-
-        let (tx, task) = ValidationTask::new();
-
-        // Spawn validation tasks, they are blocking because they perform db lookups
-        for _ in 0..additional_tasks {
-            let task = task.clone();
-            tasks.spawn_blocking(Box::pin(async move {
-                task.run().await;
-            }));
-        }
-
-        tasks.spawn_critical_blocking(
-            "transaction-validation-service",
-            Box::pin(async move {
-                task.run().await;
-            }),
-        );
-
-        let to_validation_task = Arc::new(Mutex::new(tx));
-
-        TransactionValidationTaskExecutor {
-            validator: EthTransactionValidator { inner: Arc::new(inner) },
-            to_validation_task,
-        }
-    }
-}
-
 /// A [TransactionValidator] implementation that validates Ethereum transactions.
-#[derive(Debug, Clone)]
+#[derive(Debug)]
 pub struct EthTransactionValidatorInner<Client, Tx> {
     /// Spec of the chain
     chain_spec: Arc<ChainSpec>,
     /// This type fetches account info from the db
     client: Client,
+    /// Blobstore used for fetching re-injected blob transactions.
+    #[allow(unused)]
+    blob_store: Box<dyn BlobStore>,
     /// Fork indicator whether we are in the Shanghai stage.
     shanghai: bool,
     /// Fork indicator whether we are in the Cancun hardfork.
@@ -412,3 +231,192 @@
         }
     }
 }
+
+/// A builder for [TransactionValidationTaskExecutor]
+#[derive(Debug, Clone)]
+pub struct EthTransactionValidatorBuilder {
+    chain_spec: Arc<ChainSpec>,
+    /// Fork indicator whether we are in the Shanghai stage.
+    shanghai: bool,
+    /// Fork indicator whether we are in the Cancun hardfork.
+    cancun: bool,
+    /// Fork indicator whether we are using EIP-2718 type transactions.
+    eip2718: bool,
+    /// Fork indicator whether we are using EIP-1559 type transactions.
+    eip1559: bool,
+    /// Fork indicator whether we are using EIP-4844 blob transactions.
+    eip4844: bool,
+    /// The current max gas limit
+    block_gas_limit: u64,
+    /// Minimum priority fee to enforce for acceptance into the pool.
+    minimum_priority_fee: Option<u128>,
+    /// Determines how many additional tasks to spawn
+    ///
+    /// Default is 1
+    additional_tasks: usize,
+    /// Toggle to determine if a local transaction should be propagated
+    propagate_local_transactions: bool,
+}
+
+impl EthTransactionValidatorBuilder {
+    /// Creates a new builder for the given [ChainSpec]
+    pub fn new(chain_spec: Arc<ChainSpec>) -> Self {
+        Self {
+            chain_spec,
+            shanghai: true,
+            eip2718: true,
+            eip1559: true,
+            block_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT,
+            minimum_priority_fee: None,
+            additional_tasks: 1,
+            // default to true, can potentially take this as a param in the future
+            propagate_local_transactions: true,
+
+            // TODO: can hard enable by default once transitioned
+            cancun: false,
+            eip4844: false,
+        }
+    }
+
+    /// Disables the Cancun fork.
+    pub fn no_cancun(self) -> Self {
+        self.set_cancun(false)
+    }
+
+    /// Set the Cancun fork.
+    pub fn set_cancun(mut self, cancun: bool) -> Self {
+        self.cancun = cancun;
+        self
+    }
+
+    /// Disables the Shanghai fork.
+    pub fn no_shanghai(self) -> Self {
+        self.set_shanghai(false)
+    }
+
+    /// Set the Shanghai fork.
+    pub fn set_shanghai(mut self, shanghai: bool) -> Self {
+        self.shanghai = shanghai;
+        self
+    }
+
+    /// Disables the eip2718 support.
+    pub fn no_eip2718(self) -> Self {
+        self.set_eip2718(false)
+    }
+
+    /// Set eip2718 support.
+    pub fn set_eip2718(mut self, eip2718: bool) -> Self {
+        self.eip2718 = eip2718;
+        self
+    }
+
+    /// Disables the eip1559 support.
+    pub fn no_eip1559(self) -> Self {
+        self.set_eip1559(false)
+    }
+
+    /// Set the eip1559 support.
+    pub fn set_eip1559(mut self, eip1559: bool) -> Self {
+        self.eip1559 = eip1559;
+        self
+    }
+    /// Sets toggle to propagate transactions received locally by this client (e.g.
+    /// transactions from eth_sendTransaction to this node's RPC server)
+    ///
+    /// If set to false, only transactions received by network peers (via
+    /// p2p) will be marked as propagated in the local transaction pool and returned on a
+    /// GetPooledTransactions p2p request
+    pub fn set_propagate_local_transactions(mut self, propagate_local_txs: bool) -> Self {
+        self.propagate_local_transactions = propagate_local_txs;
+        self
+    }
+    /// Disables propagating transactions received locally by this client
+    ///
+    /// For more information, check docs for set_propagate_local_transactions
+    pub fn no_local_transaction_propagation(mut self) -> Self {
+        self.propagate_local_transactions = false;
+        self
+    }
+
+    /// Sets a minimum priority fee that's enforced for acceptance into the pool.
+    pub fn with_minimum_priority_fee(mut self, minimum_priority_fee: u128) -> Self {
+        self.minimum_priority_fee = Some(minimum_priority_fee);
+        self
+    }
+
+    /// Sets the number of additional tasks to spawn.
+    pub fn with_additional_tasks(mut self, additional_tasks: usize) -> Self {
+        self.additional_tasks = additional_tasks;
+        self
+    }
+
+    /// Builds the [EthTransactionValidator] and spawns validation tasks via the
+    /// [TransactionValidationTaskExecutor]
+    ///
+    /// The validator will spawn `additional_tasks` additional tasks for validation.
+    ///
+    /// By default this will spawn 1 additional task.
+    pub fn build_with_tasks<Client, Tx, T, S>(
+        self,
+        client: Client,
+        tasks: T,
+        blob_store: S,
+    ) -> TransactionValidationTaskExecutor<EthTransactionValidator<Client, Tx>>
+    where
+        T: TaskSpawner,
+        S: BlobStore,
+    {
+        let Self {
+            chain_spec,
+            shanghai,
+            cancun,
+            eip2718,
+            eip1559,
+            eip4844,
+            block_gas_limit,
+            minimum_priority_fee,
+            additional_tasks,
+            propagate_local_transactions,
+        } = self;
+
+        let inner = EthTransactionValidatorInner {
+            chain_spec,
+            client,
+            shanghai,
+            eip2718,
+            eip1559,
+            cancun,
+            eip4844,
+            block_gas_limit,
+            minimum_priority_fee,
+            propagate_local_transactions,
+            blob_store: Box::new(blob_store),
+            _marker: Default::default(),
+        };
+
+        let (tx, task) = ValidationTask::new();
+
+        // Spawn validation tasks, they are blocking because they perform db lookups
+        for _ in 0..additional_tasks {
+            let task = task.clone();
+            tasks.spawn_blocking(Box::pin(async move {
+                task.run().await;
+            }));
+        }
+
+        tasks.spawn_critical_blocking(
+            "transaction-validation-service",
+            Box::pin(async move {
+                task.run().await;
+            }),
+        );
+
+        let to_validation_task = Arc::new(Mutex::new(tx));
+
+        TransactionValidationTaskExecutor {
+            validator: EthTransactionValidator { inner: Arc::new(inner) },
+            to_validation_task,
+        }
+    }
+}
diff --git a/crates/transaction-pool/src/validate/task.rs b/crates/transaction-pool/src/validate/task.rs
index 7e219157a30e..007aa2568a90 100644
--- a/crates/transaction-pool/src/validate/task.rs
+++ b/crates/transaction-pool/src/validate/task.rs
@@ -1,6 +1,7 @@
 //! A validation service for transactions.

 use crate::{
+    blobstore::BlobStore,
     validate::{EthTransactionValidatorBuilder, TransactionValidatorError},
     EthTransactionValidator, PoolTransaction, TransactionOrigin, TransactionValidationOutcome,
     TransactionValidator,
@@ -96,11 +97,16 @@ impl<Client, Tx> TransactionValidationTaskExecutor<EthTransactionValidator<Client, Tx>>
-    pub fn eth<T>(client: Client, chain_spec: Arc<ChainSpec>, tasks: T) -> Self
+    pub fn eth<T, S: BlobStore>(
+        client: Client,
+        chain_spec: Arc<ChainSpec>,
+        blob_store: S,
+        tasks: T,
+    ) -> Self
     where
         T: TaskSpawner,
     {
-        Self::eth_with_additional_tasks(client, chain_spec, tasks, 0)
+        Self::eth_with_additional_tasks(client, chain_spec, blob_store, tasks, 0)
     }

     /// Creates a new instance for the given [ChainSpec]
@@ -112,9 +118,10 @@ impl<Client, Tx> TransactionValidationTaskExecutor<EthTransactionValidator<Client, Tx>>
-    pub fn eth_with_additional_tasks<T>(
+    pub fn eth_with_additional_tasks<T, S: BlobStore>(
         client: Client,
         chain_spec: Arc<ChainSpec>,
+        blob_store: S,
         tasks: T,
         num_additional_tasks: usize,
     ) -> Self
@@ -123,7 +130,7 @@ impl<Client, Tx> TransactionValidationTaskExecutor<EthTransactionValidator<Client, Tx>>
-            .build::<Client, Tx, T>(client, tasks)
+            .build_with_tasks::<Client, Tx, T, S>(client, tasks, blob_store)
     }

     /// Returns the configured chain id
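With patch 472 applied, the same blob store instance is shared between the validator and the pool, as the updated doc examples above show. Restated as a free-standing sketch (bounds and imports exactly as in those doc examples):

    use reth_primitives::MAINNET;
    use reth_provider::StateProviderFactory;
    use reth_tasks::TokioTaskExecutor;
    use reth_transaction_pool::{blobstore::InMemoryBlobStore, Pool, TransactionValidationTaskExecutor};

    fn build_pool<C: StateProviderFactory + Clone + 'static>(client: C) {
        let blob_store = InMemoryBlobStore::default();
        // The validator keeps one handle (so it can fetch re-injected blob
        // sidecars); the clone becomes the pool's own sidecar storage.
        let validator = TransactionValidationTaskExecutor::eth(
            client,
            MAINNET.clone(),
            blob_store.clone(),
            TokioTaskExecutor::default(),
        );
        let _pool = Pool::eth_pool(validator, blob_store, Default::default());
    }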
From 2abfe2317173921edbbd119c76f8f2051673b884 Mon Sep 17 00:00:00 2001
From: ftupas <35031356+ftupas@users.noreply.github.com>
Date: Fri, 18 Aug 2023 23:37:52 +0200
Subject: [PATCH 473/722] dev: use U64 for `transaction_index` (#4261)

Co-authored-by: Matthias Seitz
---
 crates/rpc/rpc-types/src/eth/transaction/receipt.rs | 2 +-
 crates/rpc/rpc/src/eth/api/transactions.rs          | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/crates/rpc/rpc-types/src/eth/transaction/receipt.rs b/crates/rpc/rpc-types/src/eth/transaction/receipt.rs
index 9e0f2250f218..b4a9fef21ad0 100644
--- a/crates/rpc/rpc-types/src/eth/transaction/receipt.rs
+++ b/crates/rpc/rpc-types/src/eth/transaction/receipt.rs
@@ -9,7 +9,7 @@ pub struct TransactionReceipt {
     /// Transaction Hash.
     pub transaction_hash: Option<H256>,
     /// Index within the block.
-    pub transaction_index: Option<U256>,
+    pub transaction_index: U64,
     /// Hash of the block this transaction was included within.
     pub block_hash: Option<H256>,
     /// Number of the block this transaction was included within.
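The field narrows from an optional 256-bit integer to a plain `U64`: a receipt only exists for an included transaction, whose block-local index always fits in 64 bits. The RPC side can then fill the field with a plain `From` conversion, as the next hunk does with `meta.index.into()`; a tiny sketch of that conversion (the `U64` path is assumed from this crate's imports):

    use reth_primitives::U64;

    fn rpc_index(index: u64) -> U64 {
        // `meta.index` is a u64; `into` lifts it into the RPC integer type.
        index.into()
    }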
diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 770a9e8c1e15..87d72789267a 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -859,7 +859,7 @@ pub(crate) fn build_transaction_receipt_with_block_receipts( let mut res_receipt = TransactionReceipt { transaction_hash: Some(meta.tx_hash), - transaction_index: Some(U256::from(meta.index)), + transaction_index: meta.index.into(), block_hash: Some(meta.block_hash), block_number: Some(U256::from(meta.block_number)), from: transaction.signer(), From 65126033483de03a6468c4bba61db37437a52bf1 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 20 Aug 2023 12:36:06 +0200 Subject: [PATCH 474/722] chore(deps): weekly `cargo update` (#4281) Co-authored-by: github-merge-queue --- Cargo.lock | 358 ++++++++++++++++++++++++++--------------------------- 1 file changed, 179 insertions(+), 179 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2b3866cf227e..299c165541d4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -230,9 +230,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.74" +version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c6f84b74db2535ebae81eede2f39b947dcbf01d093ae5f791e5dd414a1bf289" +checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" [[package]] name = "aquamarine" @@ -244,7 +244,7 @@ dependencies = [ "itertools 0.10.5", "proc-macro-error", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -301,7 +301,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" dependencies = [ - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -311,7 +311,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" dependencies = [ - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -323,7 +323,7 @@ checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" dependencies = [ "num-bigint", "num-traits", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -336,7 +336,7 @@ dependencies = [ "num-bigint", "num-traits", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -434,8 +434,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -487,7 +487,7 @@ checksum = "fee3da8ef1276b0bee5dd1c7258010d8fffd31801447323115a25560e1327b89" dependencies = [ "proc-macro-error", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -594,7 +594,7 @@ dependencies = [ "lazycell", "peeking_take_while", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "regex", "rustc-hash", "shlex", @@ -614,11 +614,11 @@ dependencies = [ "log", "peeking_take_while", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "regex", "rustc-hash", "shlex", - "syn 2.0.28", + "syn 2.0.29", "which", ] @@ -636,11 +636,11 @@ dependencies = [ "peeking_take_while", "prettyplease", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "regex", "rustc-hash", "shlex", - "syn 2.0.28", + "syn 2.0.29", ] 
[[package]] @@ -723,7 +723,7 @@ dependencies = [ [[package]] name = "boa_ast" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#06bb71aa50e7163f6b38767190ba30177086685f" +source = "git+https://github.com/boa-dev/boa#b204dcb0014fe825a06a2683b01a29b886436c46" dependencies = [ "bitflags 2.4.0", "boa_interner", @@ -736,7 +736,7 @@ dependencies = [ [[package]] name = "boa_engine" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#06bb71aa50e7163f6b38767190ba30177086685f" +source = "git+https://github.com/boa-dev/boa#b204dcb0014fe825a06a2683b01a29b886436c46" dependencies = [ "bitflags 2.4.0", "boa_ast", @@ -774,7 +774,7 @@ dependencies = [ [[package]] name = "boa_gc" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#06bb71aa50e7163f6b38767190ba30177086685f" +source = "git+https://github.com/boa-dev/boa#b204dcb0014fe825a06a2683b01a29b886436c46" dependencies = [ "boa_macros", "boa_profiler", @@ -785,7 +785,7 @@ dependencies = [ [[package]] name = "boa_icu_provider" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#06bb71aa50e7163f6b38767190ba30177086685f" +source = "git+https://github.com/boa-dev/boa#b204dcb0014fe825a06a2683b01a29b886436c46" dependencies = [ "icu_collections", "icu_normalizer", @@ -798,7 +798,7 @@ dependencies = [ [[package]] name = "boa_interner" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#06bb71aa50e7163f6b38767190ba30177086685f" +source = "git+https://github.com/boa-dev/boa#b204dcb0014fe825a06a2683b01a29b886436c46" dependencies = [ "boa_gc", "boa_macros", @@ -813,18 +813,18 @@ dependencies = [ [[package]] name = "boa_macros" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#06bb71aa50e7163f6b38767190ba30177086685f" +source = "git+https://github.com/boa-dev/boa#b204dcb0014fe825a06a2683b01a29b886436c46" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", "synstructure 0.13.0", ] [[package]] name = "boa_parser" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#06bb71aa50e7163f6b38767190ba30177086685f" +source = "git+https://github.com/boa-dev/boa#b204dcb0014fe825a06a2683b01a29b886436c46" dependencies = [ "bitflags 2.4.0", "boa_ast", @@ -844,7 +844,7 @@ dependencies = [ [[package]] name = "boa_profiler" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#06bb71aa50e7163f6b38767190ba30177086685f" +source = "git+https://github.com/boa-dev/boa#b204dcb0014fe825a06a2683b01a29b886436c46" [[package]] name = "boyer-moore-magiclen" @@ -1083,9 +1083,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.3.21" +version = "4.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c27cdf28c0f604ba3f512b0c9a409f8de8513e4816705deb0498b627e7c3a3fd" +checksum = "03aef18ddf7d879c15ce20f04826ef8418101c7e528014c3eeea13321047dca3" dependencies = [ "clap_builder", "clap_derive", @@ -1094,9 +1094,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.3.21" +version = "4.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08a9f1ab5e9f01a9b81f202e8562eb9a10de70abf9eaeac1be465c28b75aa4aa" +checksum = "f8ce6fffb678c9b80a70b6b6de0aad31df727623a70fd9a842c30cd573e2fa98" dependencies = [ "anstream", "anstyle", @@ -1112,8 +1112,8 @@ checksum = "54a9bb5758fc5dfe728d1019941681eccaf0cf8a4189b692a0ee2f2ecf90a050" dependencies = [ "heck", "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -1135,9 +1135,9 @@ 
dependencies = [ "convert_case 0.6.0", "parity-scale-codec", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "serde", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -1283,9 +1283,9 @@ checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpp_demangle" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee34052ee3d93d6d8f3e6f81d85c47921f6653a19a7b70e939e3e602d893a674" +checksum = "7e8227005286ec39567949b33df9896bcadfa6051bccca2488129f108ca23119" dependencies = [ "cfg-if", ] @@ -1520,8 +1520,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -1553,7 +1553,7 @@ dependencies = [ "fnv", "ident_case", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "strsim 0.9.3", "syn 1.0.109", ] @@ -1567,9 +1567,9 @@ dependencies = [ "fnv", "ident_case", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "strsim 0.10.0", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -1579,7 +1579,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9b5a2f4ac4969822c62224815d069952656cadc7084fdca9751e6d959189b72" dependencies = [ "darling_core 0.10.2", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -1590,8 +1590,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ "darling_core 0.20.3", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -1664,7 +1664,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -1675,8 +1675,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53e0efad4403bfc52dc201159c4b842a246a14b98c64b55dfd0f2d89729dfeb8" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -1688,7 +1688,7 @@ dependencies = [ "darling 0.10.2", "derive_builder_core", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -1700,7 +1700,7 @@ checksum = "2791ea3e372c8495c0bc2033991d76b512cd799d07491fbd6890124db9458bef" dependencies = [ "darling 0.10.2", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -1712,7 +1712,7 @@ checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case 0.4.0", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "rustc_version 0.4.0", "syn 1.0.109", ] @@ -1849,8 +1849,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -1879,9 +1879,9 @@ checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" [[package]] name = "dyn-clone" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "304e6508efa593091e97a9abbc10f90aa7ca635b6d2784feff3c89d41dd12272" +checksum = 
"bbfc4744c1b8f2a09adc0e55242f60b1af195d88596bd8700be74418c056c555" [[package]] name = "ecdsa" @@ -1929,7 +1929,7 @@ checksum = "079044df30bb07de7d846d41a184c4b00e66ebdac93ee459253474f3a47e50ae" dependencies = [ "enum-ordinalize", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -2044,7 +2044,7 @@ checksum = "570d109b813e904becc80d8d5da38376818a143348413f7149f1340fe04754d4" dependencies = [ "heck", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -2056,7 +2056,7 @@ checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" dependencies = [ "heck", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -2069,8 +2069,8 @@ dependencies = [ "num-bigint", "num-traits", "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -2080,8 +2080,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b893c4eb2dc092c811165f84dc7447fae16fb66521717968c34c509b39b1a5c5" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -2225,11 +2225,11 @@ dependencies = [ "hex", "prettyplease", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "regex", "serde", "serde_json", - "syn 2.0.28", + "syn 2.0.29", "toml 0.7.6", "walkdir", ] @@ -2245,9 +2245,9 @@ dependencies = [ "ethers-core", "hex", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "serde_json", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -2273,7 +2273,7 @@ dependencies = [ "serde", "serde_json", "strum 0.25.0", - "syn 2.0.28", + "syn 2.0.29", "tempfile", "thiserror", "tiny-keccak", @@ -2614,8 +2614,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -3195,7 +3195,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd8b728b9421e93eff1d9f8681101b78fa745e0748c95c655c83f337044a7e10" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -3283,7 +3283,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -3303,7 +3303,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b139284b5cf57ecfa712bcc66950bb635b31aff41c188e8a4cfc758eca374a3f" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", ] [[package]] @@ -3588,7 +3588,7 @@ dependencies = [ "heck", "proc-macro-crate", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -3919,8 +3919,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -3935,7 +3935,7 @@ dependencies = [ "once_cell", "procfs", "rlimit", - "windows 0.51.0", + "windows 0.51.1", ] [[package]] @@ -4023,7 +4023,7 @@ checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" dependencies = [ "cfg-if", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -4044,7 +4044,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "5a7d5f7076603ebc68de2dc6a650ec331a062a13abaa346975be747bbfa4b789" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -4231,8 +4231,8 @@ checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" dependencies = [ "proc-macro-crate", "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -4243,8 +4243,8 @@ checksum = "56ea360eafe1022f7cc56cd7b869ed57330fb2453d0c7831d99b74c65d2f5597" dependencies = [ "proc-macro-crate", "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -4308,7 +4308,7 @@ checksum = "003b2be5c6c53c1cfeb0a238b8a1c3915cd410feb684457a36c10038f764bb1c" dependencies = [ "bytes", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -4326,9 +4326,9 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "ordered-float" -version = "3.8.0" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7417b1484e3641a8791af3c3123cdc083ac60a0d262a2f281b6125d58917caf4" +checksum = "126d3e6f3926bfb0fb24495b4f4da50626f547e54956594748e3d8882a0320b4" dependencies = [ "num-traits", ] @@ -4372,7 +4372,7 @@ checksum = "2a296c3079b5fefbc499e1de58dc26c09b1b9a5952d26694ee89f04a43ebbb3e" dependencies = [ "proc-macro-crate", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -4435,7 +4435,7 @@ dependencies = [ "libc", "redox_syscall 0.3.5", "smallvec 1.11.0", - "windows-targets 0.48.2", + "windows-targets 0.48.5", ] [[package]] @@ -4533,8 +4533,8 @@ dependencies = [ "phf_generator", "phf_shared", "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -4562,8 +4562,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -4747,7 +4747,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62" dependencies = [ "proc-macro2 1.0.66", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -4782,7 +4782,7 @@ checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", "version_check", ] @@ -4794,7 +4794,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "version_check", ] @@ -4923,9 +4923,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.32" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f3b39ccfb720540debaa0164757101c08ecb8d326b15358ce76a62c7e85965" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ "proc-macro2 1.0.66", ] @@ -5665,10 +5665,10 @@ dependencies = [ "metrics", "once_cell", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "regex", "serial_test 0.10.0", - "syn 2.0.28", + "syn 2.0.29", "trybuild", ] @@ -5934,8 +5934,8 @@ name = "reth-rlp-derive" version = "0.1.0-alpha.6" 
dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -6333,7 +6333,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -6547,7 +6547,7 @@ checksum = "912e55f6d20e0e80d63733872b40e1227c0bce1e1ab81ba67d696339bfd7fd29" dependencies = [ "proc-macro-crate", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -6722,8 +6722,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aafe972d60b0b9bee71a91b92fee2d4fb3c9d7e8f6b179aa99f27203d99a4816" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -6782,8 +6782,8 @@ checksum = "881b6f881b17d13214e5d494c939ebab463d01264ce1811e9d4ac3a882e7695f" dependencies = [ "darling 0.20.3", "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -6821,7 +6821,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b64f9e531ce97c88b4778aad0ceee079216071cffec6ac9b904277f8f92e7fe3" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -6832,8 +6832,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -7172,7 +7172,7 @@ checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "rustversion", "syn 1.0.109", ] @@ -7185,9 +7185,9 @@ checksum = "ad8d03b598d3d0fff69bf533ee3ef19b8eeb342729596df84bcc7e1f96ec4059" dependencies = [ "heck", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "rustversion", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -7269,18 +7269,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.28" +version = "2.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04361975b3f5e348b2189d8dc55bc942f278b2d482a6a0365de5bdd62d351567" +checksum = "c324c494eba9d92503e6f1ef2e6df781e78f6a7705a0202d9801b198807d518a" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "unicode-ident", ] @@ -7291,7 +7291,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", "unicode-xid 0.2.4", ] @@ -7303,8 +7303,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "285ba80e733fac80aa4270fbcdf83772a79b80aa35c97075320abfee4a915b06" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", "unicode-xid 0.2.4", ] @@ -7316,9 +7316,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.7.1" +version = "3.8.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc02fddf48964c42031a0b3fe0428320ecf3a73c401040fc0096f97794310651" +checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" dependencies = [ "cfg-if", "fastrand 2.0.0", @@ -7362,7 +7362,7 @@ checksum = "0f0528a7ad0bc85f826aa831434a37833aea622a5ae155f5b5dd431b25244213" dependencies = [ "cargo_metadata", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "serde", "strum_macros 0.25.2", ] @@ -7378,9 +7378,9 @@ dependencies = [ "itertools 0.10.5", "lazy_static", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "subprocess", - "syn 2.0.28", + "syn 2.0.29", "test-fuzz-internal", "toolchain_find", ] @@ -7407,22 +7407,22 @@ checksum = "aac81b6fd6beb5884b0cf3321b8117e6e5d47ecb6fc89f414cfdcca8b2fe2dd8" [[package]] name = "thiserror" -version = "1.0.46" +version = "1.0.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9207952ae1a003f42d3d5e892dac3c6ba42aa6ac0c79a6a91a2b5cb4253e75c" +checksum = "97a802ec30afc17eee47b2855fc72e0c4cd62be9b4efe6591edde0ec5bd68d8f" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.46" +version = "1.0.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1728216d3244de4f14f14f8c15c79be1a7c67867d28d69b719690e2a19fb445" +checksum = "6bb623b56e39ab7dcd4b1b98bb6c8f8d907ed255b18de254088016b27a8ee19b" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -7511,9 +7511,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.31.0" +version = "1.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40de3a2ba249dcb097e01be5e67a5ff53cf250397715a071a81543e8a832a920" +checksum = "17ed6077ed6cd6c74735e21f37eb16dc3935f96878b1fe961074089cc80893f9" dependencies = [ "backtrace", "bytes", @@ -7535,8 +7535,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -7739,8 +7739,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -7913,9 +7913,9 @@ checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "trybuild" -version = "1.0.82" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a84e0202ea606ba5ebee8507ab2bfbe89b98551ed9b8f0be198109275cff284b" +checksum = "6df60d81823ed9c520ee897489573da4b1d79ffbe006b8134f46de1a1aa03555" dependencies = [ "basic-toml", "glob", @@ -8195,8 +8195,8 @@ dependencies = [ "log", "once_cell", "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", "wasm-bindgen-shared", ] @@ -8218,7 +8218,7 @@ version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ - "quote 1.0.32", + "quote 1.0.33", "wasm-bindgen-macro-support", ] @@ -8229,8 +8229,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8317,26 +8317,26 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows-targets 0.48.2", + "windows-targets 0.48.5", ] [[package]] name = "windows" -version = "0.51.0" +version = "0.51.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9763fb813068e9f4ab70a92a0c6ad61ff6b342f693b1ed0e5387c854386e670" +checksum = "ca229916c5ee38c2f2bc1e9d8f04df975b4bd93f9955dc69fabb5d91270045c9" dependencies = [ "windows-core", - "windows-targets 0.48.2", + "windows-targets 0.48.5", ] [[package]] name = "windows-core" -version = "0.51.0" +version = "0.51.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b81650771e76355778637954dc9d7eb8d991cd89ad64ba26f21eeb3c22d8d836" +checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" dependencies = [ - "windows-targets 0.48.2", + "windows-targets 0.48.5", ] [[package]] @@ -8354,7 +8354,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.48.2", + "windows-targets 0.48.5", ] [[package]] @@ -8374,17 +8374,17 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.48.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1eeca1c172a285ee6c2c84c341ccea837e7c01b12fbb2d0fe3c9e550ce49ec8" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ - "windows_aarch64_gnullvm 0.48.2", - "windows_aarch64_msvc 0.48.2", - "windows_i686_gnu 0.48.2", - "windows_i686_msvc 0.48.2", - "windows_x86_64_gnu 0.48.2", - "windows_x86_64_gnullvm 0.48.2", - "windows_x86_64_msvc 0.48.2", + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", ] [[package]] @@ -8395,9 +8395,9 @@ checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.48.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b10d0c968ba7f6166195e13d593af609ec2e3d24f916f081690695cf5eaffb2f" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_msvc" @@ -8407,9 +8407,9 @@ checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" -version = "0.48.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "571d8d4e62f26d4932099a9efe89660e8bd5087775a2ab5cdd8b747b811f1058" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_i686_gnu" @@ -8419,9 +8419,9 @@ checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_gnu" -version = "0.48.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2229ad223e178db5fbbc8bd8d3835e51e566b8474bfca58d2e6150c48bb723cd" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_msvc" @@ -8431,9 +8431,9 @@ checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" -version = "0.48.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "600956e2d840c194eedfc5d18f8242bc2e17c7775b6684488af3a9fff6fe3287" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_x86_64_gnu" @@ -8443,9 +8443,9 @@ checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnu" -version = "0.48.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea99ff3f8b49fb7a8e0d305e5aec485bd068c2ba691b6e277d29eaeac945868a" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnullvm" @@ -8455,9 +8455,9 @@ checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_gnullvm" -version = "0.48.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f1a05a1ece9a7a0d5a7ccf30ba2c33e3a61a30e042ffd247567d1de1d94120d" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_msvc" @@ -8467,15 +8467,15 @@ checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "windows_x86_64_msvc" -version = "0.48.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d419259aba16b663966e29e6d7c6ecfa0bb8425818bb96f6f1f3c3eb71a6e7b9" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "winnow" -version = "0.5.11" +version = "0.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e461589e194280efaa97236b73623445efa195aa633fd7004f39805707a9d53" +checksum = "d09770118a7eb1ccaf4a594a221334119a44a814fcb0d31c5b85e83e97227a97" dependencies = [ "memchr", ] @@ -8579,7 +8579,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af46c169923ed7516eef0aa32b56d2651b229f57458ebe46b49ddd6efef5b7a2" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", "synstructure 0.12.6", ] @@ -8600,7 +8600,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4eae7c1f7d4b8eafce526bc0771449ddc2f250881ae31c50d22c032b5a1c499" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", "synstructure 0.12.6", ] @@ -8621,8 +8621,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -8643,7 +8643,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "486558732d5dde10d0f8cb2936507c1bb21bc539d924c949baf5f36a58e51bac" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", "synstructure 0.12.6", ] From 9ebeca3befca1dd92060a9f2ccd2761fe7a4e22b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 20 Aug 2023 13:07:32 +0200 Subject: [PATCH 475/722] chore: simplify builder 
fn (#4284)

---
 crates/net/network/src/config.rs    | 14 ++++++++------
 crates/net/network/src/lib.rs       |  4 ++--
 crates/net/network/src/manager.rs   |  2 +-
 crates/net/network/tests/it/geth.rs |  2 +-
 examples/network-txpool.rs          |  3 +--
 examples/network.rs                 |  3 +--
 6 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs
index 9da1eed9345a..0b093e8f1250 100644
--- a/crates/net/network/src/config.rs
+++ b/crates/net/network/src/config.rs
@@ -76,16 +76,18 @@ pub struct NetworkConfig<C> {
 // === impl NetworkConfig ===

-impl<C> NetworkConfig<C> {
-    /// Create a new instance with all mandatory fields set, the rest filled with defaults.
-    pub fn new(client: C, secret_key: SecretKey) -> Self {
-        Self::builder(secret_key).build(client)
-    }
-
+impl NetworkConfig<()> {
     /// Convenience method for creating the corresponding builder type
     pub fn builder(secret_key: SecretKey) -> NetworkConfigBuilder {
         NetworkConfigBuilder::new(secret_key)
     }
+}
+
+impl<C> NetworkConfig<C> {
+    /// Create a new instance with all mandatory fields set, the rest filled with defaults.
+    pub fn new(client: C, secret_key: SecretKey) -> Self {
+        NetworkConfig::builder(secret_key).build(client)
+    }

     /// Sets the config to use for the discovery v4 protocol.
     pub fn set_discovery_v4(mut self, discovery_config: Discv4Config) -> Self {
diff --git a/crates/net/network/src/lib.rs b/crates/net/network/src/lib.rs
index e8a7b6760a9d..968377d5e9e0 100644
--- a/crates/net/network/src/lib.rs
+++ b/crates/net/network/src/lib.rs
@@ -71,7 +71,7 @@
 //!     // The key that's used for encrypting sessions and to identify our node.
 //!     let local_key = rng_secret_key();
 //!
-//!     let config = NetworkConfig::<NoopProvider>::builder(local_key).boot_nodes(
+//!     let config = NetworkConfig::builder(local_key).boot_nodes(
 //!         mainnet_nodes()
 //!     ).build(client);
 //!
@@ -101,7 +101,7 @@
 //!     let local_key = rng_secret_key();
 //!
 //!     let config =
-//!         NetworkConfig::<NoopProvider>::builder(local_key).boot_nodes(mainnet_nodes()).build(client.clone());
+//!         NetworkConfig::builder(local_key).boot_nodes(mainnet_nodes()).build(client.clone());
 //!
 //!     // create the network instance
 //!     let (handle, network, transactions, request_handler) = NetworkManager::builder(config)
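The builder no longer needs the client type up front: `builder` now hangs off `NetworkConfig<()>`, and the concrete provider type is only fixed when `build(client)` is called. A sketch of the resulting call shape (paths as used in the examples below):

    use reth_network::{config::rng_secret_key, NetworkConfig};
    use reth_provider::test_utils::NoopProvider;

    fn configure() {
        let client = NoopProvider::default();
        let local_key = rng_secret_key();
        // No `NetworkConfig::<C>::builder` turbofish needed anymore; the
        // client generic is inferred when the builder is finalized.
        let _config = NetworkConfig::builder(local_key).build(client);
    }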
let (handle, network, transactions, request_handler) = NetworkManager::builder(config) diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index d52fa7824ffd..a6ff002e1e73 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -271,7 +271,7 @@ where /// let local_key = rng_secret_key(); /// /// let config = - /// NetworkConfig::::builder(local_key).boot_nodes(mainnet_nodes()).build(client.clone()); + /// NetworkConfig::builder(local_key).boot_nodes(mainnet_nodes()).build(client.clone()); /// /// // create the network instance /// let (handle, network, transactions, request_handler) = NetworkManager::builder(config) diff --git a/crates/net/network/tests/it/geth.rs b/crates/net/network/tests/it/geth.rs index 3c9f8d41ff2c..a21637ecf091 100644 --- a/crates/net/network/tests/it/geth.rs +++ b/crates/net/network/tests/it/geth.rs @@ -32,7 +32,7 @@ async fn can_peer_with_geth() { "setting up reth networking stack in keepalive test" ); - let config = NetworkConfig::>::builder(secret_key) + let config = NetworkConfig::builder(secret_key) .listener_addr(reth_p2p) .discovery_addr(reth_disc) .chain_spec(chainspec) diff --git a/examples/network-txpool.rs b/examples/network-txpool.rs index 08d69e964cf4..d9c61636cbe1 100644 --- a/examples/network-txpool.rs +++ b/examples/network-txpool.rs @@ -34,8 +34,7 @@ async fn main() -> eyre::Result<()> { let local_key = rng_secret_key(); // Configure the network - let config = - NetworkConfig::::builder(local_key).mainnet_boot_nodes().build(client); + let config = NetworkConfig::builder(local_key).mainnet_boot_nodes().build(client); // create the network instance let (_handle, network, txpool, _) = diff --git a/examples/network.rs b/examples/network.rs index 09a20f3118e8..8fe6a6a8ec78 100644 --- a/examples/network.rs +++ b/examples/network.rs @@ -19,8 +19,7 @@ async fn main() -> eyre::Result<()> { let local_key = rng_secret_key(); // Configure the network - let config = - NetworkConfig::::builder(local_key).mainnet_boot_nodes().build(client); + let config = NetworkConfig::builder(local_key).mainnet_boot_nodes().build(client); // create the network instance let network = NetworkManager::new(config).await?; From 5c881933280c54d21931eedc0a0a9e7faca10b93 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 20 Aug 2023 14:09:13 +0200 Subject: [PATCH 476/722] chore: rename test mod to make clippy happy (#4285) --- crates/net/network/tests/it/clique/{clique.rs => geth.rs} | 0 crates/net/network/tests/it/clique/mod.rs | 4 ++-- 2 files changed, 2 insertions(+), 2 deletions(-) rename crates/net/network/tests/it/clique/{clique.rs => geth.rs} (100%) diff --git a/crates/net/network/tests/it/clique/clique.rs b/crates/net/network/tests/it/clique/geth.rs similarity index 100% rename from crates/net/network/tests/it/clique/clique.rs rename to crates/net/network/tests/it/clique/geth.rs diff --git a/crates/net/network/tests/it/clique/mod.rs b/crates/net/network/tests/it/clique/mod.rs index fd635c3cab2c..a8b2b8894db9 100644 --- a/crates/net/network/tests/it/clique/mod.rs +++ b/crates/net/network/tests/it/clique/mod.rs @@ -1,5 +1,5 @@ -pub mod clique; pub mod clique_middleware; +mod geth; -pub use clique::CliqueGethInstance; pub use clique_middleware::{CliqueError, CliqueMiddleware, CliqueMiddlewareError}; +pub use geth::CliqueGethInstance; From 7f9116b74734e61c70cf9408374e48459f47345a Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Mon, 21 Aug 2023 12:33:50 +0300 Subject: [PATCH 477/722] dep: move `metrics` to 
workspace (#4289) --- Cargo.toml | 1 + bin/reth/Cargo.toml | 2 +- crates/blockchain-tree/Cargo.toml | 2 +- crates/consensus/beacon/Cargo.toml | 2 +- crates/metrics/Cargo.toml | 2 +- crates/metrics/metrics-derive/Cargo.toml | 2 +- crates/net/downloaders/Cargo.toml | 2 +- crates/net/eth-wire/Cargo.toml | 2 +- crates/net/network/Cargo.toml | 2 +- crates/payload/basic/Cargo.toml | 2 +- crates/payload/builder/Cargo.toml | 2 +- crates/prune/Cargo.toml | 2 +- crates/rpc/rpc-builder/Cargo.toml | 2 +- crates/rpc/rpc/Cargo.toml | 2 +- crates/stages/Cargo.toml | 2 +- crates/storage/db/Cargo.toml | 2 +- crates/tasks/Cargo.toml | 2 +- crates/transaction-pool/Cargo.toml | 2 +- 18 files changed, 18 insertions(+), 17 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index a18a52a0d32b..2bc3bf236cfe 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -124,6 +124,7 @@ strum = "0.25" rayon = "1.7" itertools = "0.11" parking_lot = "0.12" +metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation ### proc-macros proc-macro2 = "1.0" diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 9666b44f295b..d09d0b9666ce 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -61,7 +61,7 @@ metrics-exporter-prometheus = "0.12.1" metrics-util = "0.15.0" metrics-process = "1.0.9" reth-metrics.workspace = true -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true # test vectors generation proptest.workspace = true diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index 12dbd33d2136..e18e2562eeaf 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -28,7 +28,7 @@ tracing.workspace = true # metrics reth-metrics = { workspace = true, features = ["common"] } -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true # misc aquamarine = "0.3.0" diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 4d44fdc28b57..09e9ecc5f013 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -27,7 +27,7 @@ futures.workspace = true # metrics reth-metrics.workspace = true -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true # misc tracing.workspace = true diff --git a/crates/metrics/Cargo.toml b/crates/metrics/Cargo.toml index dbc9ab908109..8a71b0ef5183 100644 --- a/crates/metrics/Cargo.toml +++ b/crates/metrics/Cargo.toml @@ -13,7 +13,7 @@ description = "reth metrics utilities" reth-metrics-derive = { path = "./metrics-derive" } # metrics -metrics = "0.21.1" +metrics.workspace = true # async tokio = { workspace = true, features = ["full"], optional = true } diff --git a/crates/metrics/metrics-derive/Cargo.toml b/crates/metrics/metrics-derive/Cargo.toml index 4a2b7c2c7b52..a5e85a8422b3 100644 --- a/crates/metrics/metrics-derive/Cargo.toml +++ b/crates/metrics/metrics-derive/Cargo.toml @@ -18,6 +18,6 @@ regex = "1.6.0" once_cell = "1.17.0" [dev-dependencies] -metrics = "0.21.1" +metrics.workspace = true trybuild = "1.0" serial_test = "0.10" diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index aecd48abdaa6..ccc803c8d469 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -25,7 +25,7 @@ tokio-util = { workspace = true, features = ["codec"] } # metrics reth-metrics.workspace = true -metrics = 
"0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true # misc tracing.workspace = true diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index d44ee3b68608..84e62184f882 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -27,7 +27,7 @@ reth-rlp = { workspace = true, features = [ # metrics reth-metrics.workspace = true -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true # used for Chain and builders ethers-core = { workspace = true, default-features = false } diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index e46386f29741..a4acb5f06589 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -47,7 +47,7 @@ serde_json = { workspace = true, optional = true } # metrics reth-metrics = { workspace = true, features = ["common"] } -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true # misc auto_impl = "1" diff --git a/crates/payload/basic/Cargo.toml b/crates/payload/basic/Cargo.toml index da976dba7090..5af296de3e3c 100644 --- a/crates/payload/basic/Cargo.toml +++ b/crates/payload/basic/Cargo.toml @@ -28,7 +28,7 @@ futures-util.workspace = true # metrics reth-metrics.workspace = true -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true ## misc tracing.workspace = true diff --git a/crates/payload/builder/Cargo.toml b/crates/payload/builder/Cargo.toml index 1a807017fa22..7fe3b992feaa 100644 --- a/crates/payload/builder/Cargo.toml +++ b/crates/payload/builder/Cargo.toml @@ -26,7 +26,7 @@ futures-util.workspace = true ## metrics reth-metrics.workspace = true -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true ## misc thiserror.workspace = true diff --git a/crates/prune/Cargo.toml b/crates/prune/Cargo.toml index 79bc0aa95469..be5346d025a5 100644 --- a/crates/prune/Cargo.toml +++ b/crates/prune/Cargo.toml @@ -19,7 +19,7 @@ reth-interfaces.workspace = true # metrics reth-metrics.workspace = true -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true # misc tracing.workspace = true diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index 072cdbb3a29f..c9246376a539 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -30,7 +30,7 @@ hyper = "0.14" # metrics reth-metrics = { workspace = true, features = ["common"] } -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true # misc strum = { workspace = true, features = ["derive"] } diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index bc3df6431c4d..44760ecd91a3 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -53,7 +53,7 @@ rayon.workspace = true # metrics reth-metrics.workspace = true -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true # misc bytes.workspace = true diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index 98846248b375..7ae6f5fca871 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -38,7 +38,7 @@ serde.workspace = true # metrics reth-metrics.workspace = 
true -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true # misc thiserror.workspace = true diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index 433a0ccee9a9..f4bcd553aa78 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -32,7 +32,7 @@ modular-bitfield = "0.11.2" # metrics reth-metrics.workspace = true -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true # misc bytes.workspace = true diff --git a/crates/tasks/Cargo.toml b/crates/tasks/Cargo.toml index fa9f52b33ed7..eaff30d28cff 100644 --- a/crates/tasks/Cargo.toml +++ b/crates/tasks/Cargo.toml @@ -17,7 +17,7 @@ futures-util.workspace = true ## metrics reth-metrics.workspace = true -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true ## misc tracing.workspace = true diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index ecea45a1c419..9354ce2d1a86 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -33,7 +33,7 @@ tokio-stream.workspace = true # metrics reth-metrics.workspace = true -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true # misc aquamarine = "0.3.0" From b710e57f9ade775cc27e8983f3cfc092c62a2853 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 21 Aug 2023 14:10:15 +0200 Subject: [PATCH 478/722] feat: add load_trusted_setup_from_bytes (#4290) --- crates/primitives/src/constants/eip4844.rs | 44 +++++++++++++++++++--- 1 file changed, 38 insertions(+), 6 deletions(-) diff --git a/crates/primitives/src/constants/eip4844.rs b/crates/primitives/src/constants/eip4844.rs index 602229d2cc85..69f6cbce76bf 100644 --- a/crates/primitives/src/constants/eip4844.rs +++ b/crates/primitives/src/constants/eip4844.rs @@ -28,15 +28,47 @@ pub const TARGET_BLOBS_PER_BLOCK: u64 = TARGET_DATA_GAS_PER_BLOCK / DATA_GAS_PER /// Used to determine the price for next data blob pub const BLOB_GASPRICE_UPDATE_FRACTION: u64 = 3_338_477u64; // 3338477 +/// Commitment version of a KZG commitment +pub const VERSIONED_HASH_VERSION_KZG: u8 = 0x01; + /// KZG Trusted setup raw -const TRUSTED_SETUP_RAW: &str = include_str!("../../res/eip4844/trusted_setup.txt"); +const TRUSTED_SETUP_RAW: &[u8] = include_bytes!("../../res/eip4844/trusted_setup.txt"); /// KZG trusted setup pub static KZG_TRUSTED_SETUP: Lazy> = Lazy::new(|| { - let mut file = tempfile::NamedTempFile::new().unwrap(); - file.write_all(TRUSTED_SETUP_RAW.as_bytes()).unwrap(); - Arc::new(KzgSettings::load_trusted_setup_file(file.path().into()).unwrap()) + Arc::new( + load_trusted_setup_from_bytes(TRUSTED_SETUP_RAW).expect("Failed to load trusted setup"), + ) }); -/// Commitment version of a KZG commitment -pub const VERSIONED_HASH_VERSION_KZG: u8 = 0x01; +/// Loads the trusted setup parameters from the given bytes and returns the [KzgSettings]. +/// +/// This creates a temp file to store the bytes and then loads the [KzgSettings] from the file via +/// [KzgSettings::load_trusted_setup_file]. 
+pub fn load_trusted_setup_from_bytes(bytes: &[u8]) -> Result { + let mut file = tempfile::NamedTempFile::new().map_err(LoadKzgSettingsError::TempFileErr)?; + file.write_all(bytes).map_err(LoadKzgSettingsError::TempFileErr)?; + KzgSettings::load_trusted_setup_file(file.path().into()).map_err(LoadKzgSettingsError::KzgError) +} + +/// Error type for loading the trusted setup. +#[derive(Debug, thiserror::Error)] +pub enum LoadKzgSettingsError { + /// Failed to create temp file to store bytes for loading [KzgSettings] via + /// [KzgSettings::load_trusted_setup_file]. + #[error("Failed to setup temp file: {0:?}")] + TempFileErr(#[from] std::io::Error), + /// Kzg error + #[error("Kzg error: {0:?}")] + KzgError(c_kzg::Error), +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn ensure_load_kzg_settings() { + let _settings = Arc::clone(&KZG_TRUSTED_SETUP); + } +} From b13424aa215d87db9cd8aa3eb1b88e6b6c500033 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 21 Aug 2023 14:35:02 +0200 Subject: [PATCH 479/722] chore: reorder receipt fields (#4291) --- .../rpc-types/src/eth/transaction/receipt.rs | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/crates/rpc/rpc-types/src/eth/transaction/receipt.rs b/crates/rpc/rpc-types/src/eth/transaction/receipt.rs index b4a9fef21ad0..e8339a9dbbe5 100644 --- a/crates/rpc/rpc-types/src/eth/transaction/receipt.rs +++ b/crates/rpc/rpc-types/src/eth/transaction/receipt.rs @@ -14,32 +14,32 @@ pub struct TransactionReceipt { pub block_hash: Option, /// Number of the block this transaction was included within. pub block_number: Option, - /// Address of the sender - pub from: Address, - /// Address of the receiver. null when its a contract creation transaction. - pub to: Option
,
     /// Cumulative gas used within the block after this was executed.
     pub cumulative_gas_used: U256,
     /// Gas used by this transaction alone.
     pub gas_used: Option<U256>,
+    /// The price paid post-execution by the transaction (i.e. base fee + priority fee). Both
+    /// fields in 1559-style transactions are maximums (max fee + max priority fee), the amount
+    /// that's actually paid by users can only be determined post-execution
+    pub effective_gas_price: U128,
+    /// Address of the sender
+    pub from: Address,
+    /// Address of the receiver. null when it's a contract creation transaction.
+    pub to: Option<Address>
,
     /// Contract address created, or None if not a deployment.
     pub contract_address: Option<Address>
,
     /// Logs emitted by this transaction.
     pub logs: Vec<Log>,
+    /// Logs bloom
+    pub logs_bloom: Bloom,
     /// The post-transaction stateroot (pre Byzantium)
     ///
     /// EIP-98 makes this field optional; if it's missing then skip serializing it
     #[serde(skip_serializing_if = "Option::is_none", rename = "root")]
     pub state_root: Option<H256>,
-    /// Logs bloom
-    pub logs_bloom: Bloom,
     /// Status: either 1 (success) or 0 (failure). Only present after activation of EIP-658
     #[serde(skip_serializing_if = "Option::is_none", rename = "status")]
     pub status_code: Option<U64>,
-    /// The price paid post-execution by the transaction (i.e. base fee + priority fee). Both
-    /// fields in 1559-style transactions are maximums (max fee + max priority fee), the amount
-    /// that's actually paid by users can only be determined post-execution
-    pub effective_gas_price: U128,
     /// EIP-2718 Transaction type, Some(1) for AccessList transaction, None for Legacy
     #[serde(rename = "type")]
     pub transaction_type: U8,

From e45a0d3e4349dd9559ac981c4a32a5ee833ea6c2 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Mon, 21 Aug 2023 14:35:21 +0200
Subject: [PATCH 480/722] feat: integrate kzg setting in validator (#4286)

---
 crates/transaction-pool/src/validate/eth.rs | 24 ++++++++++++++++++---
 1 file changed, 21 insertions(+), 3 deletions(-)

diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs
index efb0b9e0a590..de33fa16d806 100644
--- a/crates/transaction-pool/src/validate/eth.rs
+++ b/crates/transaction-pool/src/validate/eth.rs
@@ -8,8 +8,10 @@ use crate::{
     TransactionValidationOutcome, TransactionValidationTaskExecutor, TransactionValidator,
 };
 use reth_primitives::{
-    constants::ETHEREUM_BLOCK_GAS_LIMIT, ChainSpec, InvalidTransactionError, EIP1559_TX_TYPE_ID,
-    EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID,
+    constants::{eip4844::KZG_TRUSTED_SETUP, ETHEREUM_BLOCK_GAS_LIMIT},
+    kzg::KzgSettings,
+    ChainSpec, InvalidTransactionError, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID,
+    LEGACY_TX_TYPE_ID,
 };
 use reth_provider::{AccountReader, StateProviderFactory};
 use reth_tasks::TaskSpawner;
@@ -49,6 +51,9 @@ pub struct EthTransactionValidatorInner {
     minimum_priority_fee: Option,
     /// Toggle to determine if a local transaction should be propagated
     propagate_local_transactions: bool,
+    /// Stores the setup and parameters needed for validating KZG proofs.
+    #[allow(unused)]
+    kzg_settings: Arc<KzgSettings>,
     /// Marker for the transaction type
     _marker: PhantomData,
 }
@@ -173,7 +178,7 @@ where
         // blob tx checks
         if self.cancun {
-            // TODO: implement blob tx checks
+            // TODO: validate blob txs, if missing try load from blob store
         }

         let account = match self
@@ -256,6 +261,9 @@ pub struct EthTransactionValidatorBuilder {
     additional_tasks: usize,
     /// Toggle to determine if a local transaction should be propagated
     propagate_local_transactions: bool,
+
+    /// Stores the setup and parameters needed for validating KZG proofs.
+    kzg_settings: Arc<KzgSettings>,
 }

 impl EthTransactionValidatorBuilder {
@@ -271,6 +279,7 @@ impl EthTransactionValidatorBuilder {
             additional_tasks: 1,
             // default to true, can potentially take this as a param in the future
             propagate_local_transactions: true,
+            kzg_settings: Arc::clone(&KZG_TRUSTED_SETUP),

             // TODO: can hard enable by default once transitioned
             cancun: false,
@@ -321,6 +330,13 @@ impl EthTransactionValidatorBuilder {
         self.eip1559 = eip1559;
         self
     }
+
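To make the new builder wiring concrete, below is a self-contained sketch of the pattern this patch applies: an expensive-to-load settings object shared through `Arc`, with a builder override that falls back to a default (reth falls back to the global `KZG_TRUSTED_SETUP`). The `Settings`, `load_settings_from_bytes`, and `ValidatorBuilder` names are invented for this sketch only; they are not reth or c-kzg APIs.

```rust
use std::sync::Arc;

/// Stand-in for `KzgSettings`: expensive to construct, cheap to share.
struct Settings {
    points: Vec<u8>,
}

/// Stand-in for `load_trusted_setup_from_bytes`; real code parses a trusted setup.
fn load_settings_from_bytes(bytes: &[u8]) -> std::io::Result<Settings> {
    Ok(Settings { points: bytes.to_vec() })
}

#[derive(Default)]
struct ValidatorBuilder {
    settings: Option<Arc<Settings>>,
}

impl ValidatorBuilder {
    /// Mirrors `kzg_settings()`: overrides the default trusted setup.
    fn settings(mut self, settings: Arc<Settings>) -> Self {
        self.settings = Some(settings);
        self
    }

    fn build(self) -> Validator {
        // Falls back to a (here: empty) default, the way the reth builder
        // falls back to the lazily loaded global `KZG_TRUSTED_SETUP`.
        let settings = self.settings.unwrap_or_else(|| Arc::new(Settings { points: Vec::new() }));
        Validator { settings }
    }
}

struct Validator {
    settings: Arc<Settings>,
}

fn main() -> std::io::Result<()> {
    // Load once, then share across any number of validators without re-parsing.
    let shared = Arc::new(load_settings_from_bytes(b"trusted setup bytes")?);
    let v1 = ValidatorBuilder::default().settings(Arc::clone(&shared)).build();
    let v2 = ValidatorBuilder::default().settings(shared).build();
    assert_eq!(v1.settings.points, v2.settings.points);
    Ok(())
}
```

+    /// Sets the [KzgSettings] to use for validating KZG proofs.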
+ pub fn kzg_settings(mut self, kzg_settings: Arc) -> Self { + self.kzg_settings = kzg_settings; + self + } + /// Sets toggle to propagate transactions received locally by this client (e.g /// transactions from eth_Sendtransaction to this nodes' RPC server) /// @@ -378,6 +394,7 @@ impl EthTransactionValidatorBuilder { minimum_priority_fee, additional_tasks, propagate_local_transactions, + kzg_settings, } = self; let inner = EthTransactionValidatorInner { @@ -392,6 +409,7 @@ impl EthTransactionValidatorBuilder { minimum_priority_fee, propagate_local_transactions, blob_store: Box::new(blob_store), + kzg_settings, _marker: Default::default(), }; From 252315426007265e412591213c2497bf567975ae Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 21 Aug 2023 14:35:59 +0200 Subject: [PATCH 481/722] feat: add additional conversion trait for pooled tx element (#4279) --- crates/primitives/src/lib.rs | 11 ++++++----- crates/primitives/src/transaction/mod.rs | 10 ++++++++++ crates/primitives/src/transaction/pooled.rs | 6 ++++++ crates/transaction-pool/src/test_utils/mock.rs | 14 ++++++++++---- crates/transaction-pool/src/traits.rs | 18 +++++++++++++++--- 5 files changed, 47 insertions(+), 12 deletions(-) diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index fd973ff74191..b2ff09300f43 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -91,11 +91,12 @@ pub use storage::StorageEntry; pub use transaction::{ util::secp256k1::{public_key_to_address, recover_signer, sign_message}, AccessList, AccessListItem, AccessListWithGasUsed, BlobTransaction, BlobTransactionSidecar, - FromRecoveredTransaction, IntoRecoveredTransaction, InvalidTransactionError, - PooledTransactionsElement, PooledTransactionsElementEcRecovered, Signature, Transaction, - TransactionKind, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, - TransactionSignedNoHash, TxEip1559, TxEip2930, TxEip4844, TxLegacy, TxType, EIP1559_TX_TYPE_ID, - EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, + FromRecoveredPooledTransaction, FromRecoveredTransaction, IntoRecoveredTransaction, + InvalidTransactionError, PooledTransactionsElement, PooledTransactionsElementEcRecovered, + Signature, Transaction, TransactionKind, TransactionMeta, TransactionSigned, + TransactionSignedEcRecovered, TransactionSignedNoHash, TxEip1559, TxEip2930, TxEip4844, + TxLegacy, TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, + LEGACY_TX_TYPE_ID, }; pub use withdrawal::Withdrawal; diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 115bc83aea1f..e65297ea72dd 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1089,6 +1089,16 @@ impl FromRecoveredTransaction for TransactionSignedEcRecovered { } } +/// A transaction type that can be created from a [`PooledTransactionsElementEcRecovered`] +/// transaction. +/// +/// This is a conversion trait that'll ensure transactions received via P2P can be converted to the +/// transaction type that the transaction pool uses. +pub trait FromRecoveredPooledTransaction { + /// Converts to this type from the given [`PooledTransactionsElementEcRecovered`]. 
+    fn from_recovered_transaction(tx: PooledTransactionsElementEcRecovered) -> Self;
+}
+
 /// The inverse of [`FromRecoveredTransaction`] that ensures the transaction can be sent over the
 /// network
 pub trait IntoRecoveredTransaction {

diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs
index bcc870a2c0f2..a293b0b67aef 100644
--- a/crates/primitives/src/transaction/pooled.rs
+++ b/crates/primitives/src/transaction/pooled.rs
@@ -373,6 +373,12 @@ impl PooledTransactionsElementEcRecovered {
         self.transaction
     }

+    /// Transform back to [`TransactionSignedEcRecovered`]
+    pub fn into_ecrecovered_transaction(self) -> TransactionSignedEcRecovered {
+        let (tx, signer) = self.into_components();
+        tx.into_ecrecovered_transaction(signer)
+    }
+
     /// Dissolve Self into its components
     pub fn into_components(self) -> (PooledTransactionsElement, Address) {
         (self.transaction, self.signer)

diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs
index 19d663593caf..b9acbfe7a411 100644
--- a/crates/transaction-pool/src/test_utils/mock.rs
+++ b/crates/transaction-pool/src/test_utils/mock.rs
@@ -12,10 +12,10 @@ use rand::{
     prelude::Distribution,
 };
 use reth_primitives::{
-    constants::MIN_PROTOCOL_BASE_FEE, hex, Address, FromRecoveredTransaction,
-    IntoRecoveredTransaction, Signature, Transaction, TransactionKind, TransactionSigned,
-    TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxEip4844, TxHash, TxLegacy, TxType, H256,
-    U128, U256,
+    constants::MIN_PROTOCOL_BASE_FEE, hex, Address, FromRecoveredPooledTransaction,
+    FromRecoveredTransaction, IntoRecoveredTransaction, PooledTransactionsElementEcRecovered,
+    Signature, Transaction, TransactionKind, TransactionSigned, TransactionSignedEcRecovered,
+    TxEip1559, TxEip2930, TxEip4844, TxHash, TxLegacy, TxType, H256, U128, U256,
 };
 use std::{ops::Range, sync::Arc, time::Instant};

@@ -523,6 +523,12 @@ impl FromRecoveredTransaction for MockTransaction {
     }
 }

+impl FromRecoveredPooledTransaction for MockTransaction {
+    fn from_recovered_transaction(tx: PooledTransactionsElementEcRecovered) -> Self {
+        FromRecoveredTransaction::from_recovered_transaction(tx.into_ecrecovered_transaction())
+    }
+}
+
 impl IntoRecoveredTransaction for MockTransaction {
     fn to_recovered_transaction(&self) -> TransactionSignedEcRecovered {
         let tx = Transaction::Legacy(TxLegacy {

diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs
index cc7f1ae8a5d6..eb41edd4b7d9 100644
--- a/crates/transaction-pool/src/traits.rs
+++ b/crates/transaction-pool/src/traits.rs
@@ -6,8 +6,9 @@ use crate::{
 };
 use futures_util::{ready, Stream};
 use reth_primitives::{
-    Address, BlobTransactionSidecar, FromRecoveredTransaction, IntoRecoveredTransaction, PeerId,
-    PooledTransactionsElement, PooledTransactionsElementEcRecovered, Transaction, TransactionKind,
+    Address, BlobTransactionSidecar, FromRecoveredPooledTransaction, FromRecoveredTransaction,
+    IntoRecoveredTransaction, PeerId, PooledTransactionsElement,
+    PooledTransactionsElementEcRecovered, Transaction, TransactionKind,
     TransactionSignedEcRecovered, TxHash, EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, H256, U256,
 };
 use reth_rlp::Encodable;
@@ -511,7 +512,12 @@ impl BestTransactions for std::iter::Empty {
 /// Trait for transaction types used inside the pool
 pub trait PoolTransaction:
-    fmt::Debug + Send + Sync + FromRecoveredTransaction + IntoRecoveredTransaction
+    fmt::Debug
+    + Send
+    + Sync
+    + FromRecoveredPooledTransaction
+    + 
FromRecoveredTransaction + + IntoRecoveredTransaction { /// Hash of the transaction. fn hash(&self) -> &TxHash; @@ -758,6 +764,12 @@ impl FromRecoveredTransaction for EthPooledTransaction { } } +impl FromRecoveredPooledTransaction for EthPooledTransaction { + fn from_recovered_transaction(tx: PooledTransactionsElementEcRecovered) -> Self { + EthPooledTransaction::from(tx) + } +} + impl IntoRecoveredTransaction for EthPooledTransaction { fn to_recovered_transaction(&self) -> TransactionSignedEcRecovered { self.transaction.clone() From 566e244e32fe9f3fd23cad3958ba2cd81687e33d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 21 Aug 2023 14:41:21 +0200 Subject: [PATCH 482/722] feat: add blob store canon tracker (#4278) --- crates/primitives/src/transaction/mod.rs | 24 +++++ .../src/blobstore/maintain.rs | 27 ------ crates/transaction-pool/src/blobstore/mem.rs | 21 +++- crates/transaction-pool/src/blobstore/mod.rs | 13 ++- crates/transaction-pool/src/blobstore/noop.rs | 4 + .../transaction-pool/src/blobstore/tracker.rs | 95 +++++++++++++++++++ crates/transaction-pool/src/lib.rs | 8 ++ crates/transaction-pool/src/maintain.rs | 15 +++ crates/transaction-pool/src/metrics.rs | 4 + crates/transaction-pool/src/pool/mod.rs | 21 +++- crates/transaction-pool/src/traits.rs | 6 ++ 11 files changed, 201 insertions(+), 37 deletions(-) delete mode 100644 crates/transaction-pool/src/blobstore/maintain.rs create mode 100644 crates/transaction-pool/src/blobstore/tracker.rs diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index e65297ea72dd..97afe6c79a81 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -388,6 +388,30 @@ impl Transaction { Transaction::Eip4844(tx) => tx.size(), } } + + /// Returns true if the transaction is a legacy transaction. + #[inline] + pub fn is_legacy(&self) -> bool { + matches!(self, Transaction::Legacy(_)) + } + + /// Returns true if the transaction is an EIP-2930 transaction. + #[inline] + pub fn is_eip2930(&self) -> bool { + matches!(self, Transaction::Eip2930(_)) + } + + /// Returns true if the transaction is an EIP-1559 transaction. + #[inline] + pub fn is_eip1559(&self) -> bool { + matches!(self, Transaction::Eip1559(_)) + } + + /// Returns true if the transaction is an EIP-4844 transaction. + #[inline] + pub fn is_eip4844(&self) -> bool { + matches!(self, Transaction::Eip4844(_)) + } } impl Compact for Transaction { diff --git a/crates/transaction-pool/src/blobstore/maintain.rs b/crates/transaction-pool/src/blobstore/maintain.rs deleted file mode 100644 index cfc4c8fc68c1..000000000000 --- a/crates/transaction-pool/src/blobstore/maintain.rs +++ /dev/null @@ -1,27 +0,0 @@ -//! Support for maintaining the blob pool. - -use crate::blobstore::BlobStore; -use reth_primitives::H256; -use std::collections::BTreeMap; - -/// The type that is used to maintain the blob store and discard finalized transactions. -#[derive(Debug)] -#[allow(unused)] -pub struct BlobStoreMaintainer { - /// The blob store that holds all the blob data. - store: S, - /// Keeps track of the blob transactions that are in blocks. - blob_txs_in_blocks: BTreeMap>, -} - -impl BlobStoreMaintainer { - /// Creates a new blob store maintenance instance. - pub fn new(store: S) -> Self { - Self { store, blob_txs_in_blocks: Default::default() } - } -} - -impl BlobStoreMaintainer { - /// Invoked when a block is finalized. 
- pub fn on_finalized(&mut self, _block_number: u64) {} -} diff --git a/crates/transaction-pool/src/blobstore/mem.rs b/crates/transaction-pool/src/blobstore/mem.rs index 6d1dcb76aa1a..187b9026f0b3 100644 --- a/crates/transaction-pool/src/blobstore/mem.rs +++ b/crates/transaction-pool/src/blobstore/mem.rs @@ -16,16 +16,17 @@ pub struct InMemoryBlobStore { struct InMemoryBlobStoreInner { /// Storage for all blob data. store: RwLock>, - size: AtomicUsize, + data_size: AtomicUsize, + num_blobs: AtomicUsize, } impl InMemoryBlobStoreInner { fn add_size(&self, add: usize) { - self.size.fetch_add(add, std::sync::atomic::Ordering::Relaxed); + self.data_size.fetch_add(add, std::sync::atomic::Ordering::Relaxed); } fn sub_size(&self, sub: usize) { - self.size.fetch_sub(sub, std::sync::atomic::Ordering::Relaxed); + self.data_size.fetch_sub(sub, std::sync::atomic::Ordering::Relaxed); } fn update_size(&self, add: usize, sub: usize) { @@ -35,6 +36,10 @@ impl InMemoryBlobStoreInner { self.sub_size(sub - add); } } + + fn update_len(&self, len: usize) { + self.num_blobs.store(len, std::sync::atomic::Ordering::Relaxed); + } } impl BlobStore for InMemoryBlobStore { @@ -42,6 +47,7 @@ impl BlobStore for InMemoryBlobStore { let mut store = self.inner.store.write(); let (add, sub) = insert_size(&mut store, tx, data); self.inner.update_size(add, sub); + self.inner.update_len(store.len()); Ok(()) } @@ -58,6 +64,7 @@ impl BlobStore for InMemoryBlobStore { total_sub += sub; } self.inner.update_size(total_add, total_sub); + self.inner.update_len(store.len()); Ok(()) } @@ -65,6 +72,7 @@ impl BlobStore for InMemoryBlobStore { let mut store = self.inner.store.write(); let sub = remove_size(&mut store, &tx); self.inner.sub_size(sub); + self.inner.update_len(store.len()); Ok(()) } @@ -78,6 +86,7 @@ impl BlobStore for InMemoryBlobStore { total_sub += remove_size(&mut store, &tx); } self.inner.sub_size(total_sub); + self.inner.update_len(store.len()); Ok(()) } @@ -103,7 +112,11 @@ impl BlobStore for InMemoryBlobStore { } fn data_size_hint(&self) -> Option { - Some(self.inner.size.load(std::sync::atomic::Ordering::Relaxed)) + Some(self.inner.data_size.load(std::sync::atomic::Ordering::Relaxed)) + } + + fn blobs_len(&self) -> usize { + self.inner.num_blobs.load(std::sync::atomic::Ordering::Relaxed) } } diff --git a/crates/transaction-pool/src/blobstore/mod.rs b/crates/transaction-pool/src/blobstore/mod.rs index dcc6764389e6..bf0db1046af3 100644 --- a/crates/transaction-pool/src/blobstore/mod.rs +++ b/crates/transaction-pool/src/blobstore/mod.rs @@ -1,14 +1,14 @@ //! Storage for blob data of EIP4844 transactions. +pub use mem::InMemoryBlobStore; +pub use noop::NoopBlobStore; use reth_primitives::{BlobTransactionSidecar, H256}; use std::fmt; -mod maintain; +pub use tracker::BlobStoreCanonTracker; + mod mem; mod noop; - -pub use maintain::BlobStoreMaintainer; -pub use mem::InMemoryBlobStore; -pub use noop::NoopBlobStore; +mod tracker; /// A blob store that can be used to store blob data of EIP4844 transactions. /// @@ -43,6 +43,9 @@ pub trait BlobStore: fmt::Debug + Send + Sync + 'static { /// Data size of all transactions in the blob store. fn data_size_hint(&self) -> Option; + + /// How many blobs are in the blob store. + fn blobs_len(&self) -> usize; } /// Error variants that can occur when interacting with a blob store. 
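A short aside on the bookkeeping above: the store pairs a locked map with relaxed atomic counters, so `data_size_hint()` and the new `blobs_len()` can answer without taking the lock. Below is a self-contained toy sketch of that pattern; `ToyBlobStore`, `TxHash`, and `Sidecar` are simplified stand-ins invented for this sketch (std's `RwLock` instead of `parking_lot`, byte vectors instead of real sidecars), not the actual reth types.

```rust
use std::collections::HashMap;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::RwLock;

/// Simplified stand-ins for the transaction hash key and the blob sidecar payload.
type TxHash = [u8; 32];
type Sidecar = Vec<u8>;

/// Toy blob store: the map holds the sidecars while two atomics track total
/// payload bytes and entry count, so the size hints never need the write lock.
#[derive(Default)]
struct ToyBlobStore {
    store: RwLock<HashMap<TxHash, Sidecar>>,
    data_size: AtomicUsize,
    num_blobs: AtomicUsize,
}

impl ToyBlobStore {
    fn insert(&self, tx: TxHash, data: Sidecar) {
        let mut store = self.store.write().unwrap();
        let add = data.len();
        // Replacing an existing entry must subtract the old payload size.
        let sub = store.insert(tx, data).map_or(0, |old| old.len());
        if add >= sub {
            self.data_size.fetch_add(add - sub, Ordering::Relaxed);
        } else {
            self.data_size.fetch_sub(sub - add, Ordering::Relaxed);
        }
        self.num_blobs.store(store.len(), Ordering::Relaxed);
    }

    fn delete(&self, tx: &TxHash) {
        let mut store = self.store.write().unwrap();
        if let Some(old) = store.remove(tx) {
            self.data_size.fetch_sub(old.len(), Ordering::Relaxed);
        }
        self.num_blobs.store(store.len(), Ordering::Relaxed);
    }

    fn data_size_hint(&self) -> Option<usize> {
        Some(self.data_size.load(Ordering::Relaxed))
    }

    fn blobs_len(&self) -> usize {
        self.num_blobs.load(Ordering::Relaxed)
    }
}

fn main() {
    let store = ToyBlobStore::default();
    store.insert([1; 32], vec![0u8; 128]);
    store.insert([2; 32], vec![0u8; 64]);
    store.delete(&[1; 32]);
    assert_eq!(store.data_size_hint(), Some(64));
    assert_eq!(store.blobs_len(), 1);
}
```

`Relaxed` ordering suffices here because the counters are advisory hints; they are only ever updated while the write lock is held, so they stay in step with the map.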
diff --git a/crates/transaction-pool/src/blobstore/noop.rs b/crates/transaction-pool/src/blobstore/noop.rs index d21bf59ef183..3cb30a22e9e9 100644 --- a/crates/transaction-pool/src/blobstore/noop.rs +++ b/crates/transaction-pool/src/blobstore/noop.rs @@ -37,4 +37,8 @@ impl BlobStore for NoopBlobStore { fn data_size_hint(&self) -> Option { Some(0) } + + fn blobs_len(&self) -> usize { + 0 + } } diff --git a/crates/transaction-pool/src/blobstore/tracker.rs b/crates/transaction-pool/src/blobstore/tracker.rs new file mode 100644 index 000000000000..0d1f783331a7 --- /dev/null +++ b/crates/transaction-pool/src/blobstore/tracker.rs @@ -0,0 +1,95 @@ +//! Support for maintaining the blob pool. + +use reth_primitives::{BlockNumber, H256}; +use reth_provider::chain::ChainBlocks; +use std::collections::BTreeMap; + +/// The type that is used to track canonical blob transactions. +#[derive(Debug, Default, Eq, PartialEq)] +pub struct BlobStoreCanonTracker { + /// Keeps track of the blob transactions included in blocks. + blob_txs_in_blocks: BTreeMap>, +} + +impl BlobStoreCanonTracker { + /// Adds a block to the blob store maintenance. + pub(crate) fn add_block( + &mut self, + block_number: BlockNumber, + blob_txs: impl IntoIterator, + ) { + self.blob_txs_in_blocks.insert(block_number, blob_txs.into_iter().collect()); + } + + /// Adds all blocks to the tracked list of blocks. + pub(crate) fn add_blocks( + &mut self, + blocks: impl IntoIterator)>, + ) { + for (block_number, blob_txs) in blocks { + self.add_block(block_number, blob_txs); + } + } + + /// Adds all blob transactions from the given chain to the tracker. + pub(crate) fn add_new_chain_blocks(&mut self, blocks: &ChainBlocks<'_>) { + let blob_txs = blocks.iter().map(|(num, blocks)| { + let iter = + blocks.body.iter().filter(|tx| tx.transaction.is_eip4844()).map(|tx| tx.hash); + (*num, iter) + }); + self.add_blocks(blob_txs); + } + + /// Invoked when a block is finalized. + #[allow(unused)] + pub(crate) fn on_finalized_block(&mut self, number: BlockNumber) -> BlobStoreUpdates { + let mut finalized = Vec::new(); + while let Some(entry) = self.blob_txs_in_blocks.first_entry() { + if *entry.key() <= number { + finalized.extend(entry.remove_entry().1); + } else { + break + } + } + + if finalized.is_empty() { + BlobStoreUpdates::None + } else { + BlobStoreUpdates::Finalized(finalized) + } + } +} + +/// Updates that should be applied to the blob store. +#[derive(Debug, Eq, PartialEq)] +pub(crate) enum BlobStoreUpdates { + /// No updates. + None, + /// Delete the given finalized transactions from the blob store. 
+ Finalized(Vec), +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_finalized_tracker() { + let mut tracker = BlobStoreCanonTracker::default(); + + let block1 = vec![H256::random()]; + let block2 = vec![H256::random()]; + let block3 = vec![H256::random()]; + tracker.add_block(1, block1.clone()); + tracker.add_block(2, block2.clone()); + tracker.add_block(3, block3.clone()); + + assert_eq!(tracker.on_finalized_block(0), BlobStoreUpdates::None); + assert_eq!(tracker.on_finalized_block(1), BlobStoreUpdates::Finalized(block1)); + assert_eq!( + tracker.on_finalized_block(3), + BlobStoreUpdates::Finalized(block2.into_iter().chain(block3).collect::>()) + ); + } +} diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index d7fc2f1de83e..60f56cace21f 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -493,6 +493,14 @@ where fn update_accounts(&self, accounts: Vec) { self.pool.update_accounts(accounts); } + + fn delete_blob(&self, tx: TxHash) { + self.pool.delete_blob(tx) + } + + fn delete_blobs(&self, txs: Vec) { + self.pool.delete_blobs(txs) + } } impl Clone for Pool { diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 05bb7824a09d..c7c55a619620 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -1,6 +1,7 @@ //! Support for maintaining the state of the transaction pool use crate::{ + blobstore::BlobStoreCanonTracker, metrics::MaintainPoolMetrics, traits::{CanonicalStateUpdate, ChangedAccount, TransactionPoolExt}, BlockInfo, TransactionPool, @@ -93,6 +94,9 @@ pub async fn maintain_transaction_pool( pool.set_block_info(info); } + // keeps track of mined blob transaction so we can clean finalized transactions + let mut blob_store_tracker = BlobStoreCanonTracker::default(); + // keeps track of any dirty accounts that we know of are out of sync with the pool let mut dirty_addresses = HashSet::new(); @@ -283,6 +287,10 @@ pub async fn maintain_transaction_pool( // Note: we no longer know if the tx was local or external metrics.inc_reinserted_transactions(pruned_old_transactions.len()); let _ = pool.add_external_transactions(pruned_old_transactions).await; + + // keep track of mined blob transactions + // TODO(mattsse): handle reorged transactions + blob_store_tracker.add_new_chain_blocks(&new_blocks); } CanonStateNotification::Commit { new } => { let (blocks, state) = new.inner(); @@ -314,6 +322,10 @@ pub async fn maintain_transaction_pool( pending_basefee: pending_block_base_fee, }; pool.set_block_info(info); + + // keep track of mined blob transactions + blob_store_tracker.add_new_chain_blocks(&blocks); + continue } @@ -344,6 +356,9 @@ pub async fn maintain_transaction_pool( timestamp: tip.timestamp, }; pool.on_canonical_state_change(update); + + // keep track of mined blob transactions + blob_store_tracker.add_new_chain_blocks(&blocks); } } } diff --git a/crates/transaction-pool/src/metrics.rs b/crates/transaction-pool/src/metrics.rs index 85c3d707f60c..966834c63b35 100644 --- a/crates/transaction-pool/src/metrics.rs +++ b/crates/transaction-pool/src/metrics.rs @@ -46,6 +46,10 @@ pub struct BlobStoreMetrics { pub(crate) blobstore_failed_inserts: Counter, /// Number of failed deletes into the blobstore pub(crate) blobstore_failed_deletes: Counter, + /// The number of bytes the blobs in the blobstore take up + pub(crate) blobstore_byte_size: Gauge, + /// How many blobs are currently in the blobstore + pub(crate) 
blobstore_entries: Gauge, } /// Transaction pool maintenance metrics diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 3b633eec2283..718fb4070443 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -659,14 +659,33 @@ where warn!(target: "txpool", ?err, "[{:?}] failed to insert blob", hash); self.blob_store_metrics.blobstore_failed_inserts.increment(1); } + self.update_blob_store_metrics(); } /// Delete a blob from the blob store - fn delete_blob(&self, blob: TxHash) { + pub(crate) fn delete_blob(&self, blob: TxHash) { if let Err(err) = self.blob_store.delete(blob) { warn!(target: "txpool", ?err, "[{:?}] failed to delete blobs", blob); self.blob_store_metrics.blobstore_failed_deletes.increment(1); } + self.update_blob_store_metrics(); + } + + /// Delete all blobs from the blob store + pub(crate) fn delete_blobs(&self, txs: Vec) { + let num = txs.len(); + if let Err(err) = self.blob_store.delete_all(txs) { + warn!(target: "txpool", ?err,?num, "failed to delete blobs"); + self.blob_store_metrics.blobstore_failed_deletes.increment(num as u64); + } + self.update_blob_store_metrics(); + } + + fn update_blob_store_metrics(&self) { + if let Some(data_size) = self.blob_store.data_size_hint() { + self.blob_store_metrics.blobstore_byte_size.set(data_size as f64); + } + self.blob_store_metrics.blobstore_entries.set(self.blob_store.blobs_len() as f64); } } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index eb41edd4b7d9..a9411dd212ab 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -299,6 +299,12 @@ pub trait TransactionPoolExt: TransactionPool { /// Updates the accounts in the pool fn update_accounts(&self, accounts: Vec); + + /// Deletes the blob sidecar for the given transaction from the blob store + fn delete_blob(&self, tx: H256); + + /// Deletes multiple blob sidecars from the blob store + fn delete_blobs(&self, txs: Vec); } /// Determines what kind of new pending transactions should be emitted by a stream of pending From eaca2a4a7fbbdc2f5cd15eab9a8a18ede1891bda Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 21 Aug 2023 14:41:31 +0200 Subject: [PATCH 483/722] feat: provide a way to opt out of pool updates (#4270) --- crates/transaction-pool/src/pool/best.rs | 12 ++++++++++-- crates/transaction-pool/src/pool/pending.rs | 2 +- crates/transaction-pool/src/traits.rs | 9 +++++++++ 3 files changed, 20 insertions(+), 3 deletions(-) diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 7beb11e8ec0a..5fc5ebc93137 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -25,6 +25,10 @@ impl crate::traits::BestTransactions for BestTransaction fn mark_invalid(&mut self, tx: &Self::Item) { BestTransactions::mark_invalid(&mut self.best, tx) } + + fn no_updates(&mut self) { + self.best.no_updates() + } } impl Iterator for BestTransactionsWithBasefee { @@ -67,7 +71,7 @@ pub(crate) struct BestTransactions { /// /// These new pending transactions are inserted into this iterator's pool before yielding the /// next value - pub(crate) new_transaction_reciever: Receiver>, + pub(crate) new_transaction_receiver: Option>>, } impl BestTransactions { @@ -87,7 +91,7 @@ impl BestTransactions { /// Non-blocking read on the new pending transactions subscription channel fn try_recv(&mut self) -> Option> { loop { - match 
self.new_transaction_reciever.try_recv() {
+            match self.new_transaction_receiver.as_mut()?.try_recv() {
                 Ok(tx) => return Some(tx),
                 // note TryRecvError::Lagged can be returned here, which is an error that attempts
                 // to correct itself on consecutive try_recv() attempts
@@ -126,6 +130,10 @@ impl crate::traits::BestTransactions for BestTransaction
     fn mark_invalid(&mut self, tx: &Self::Item) {
         BestTransactions::mark_invalid(self, tx)
     }
+
+    fn no_updates(&mut self) {
+        self.new_transaction_receiver.take();
+    }
 }

 impl Iterator for BestTransactions {

diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs
index eaff315459be..65be2a7c1e85 100644
--- a/crates/transaction-pool/src/pool/pending.rs
+++ b/crates/transaction-pool/src/pool/pending.rs
@@ -88,7 +88,7 @@ impl PendingPool {
             all: self.by_id.clone(),
             independent: self.independent_transactions.clone(),
             invalid: Default::default(),
-            new_transaction_reciever: self.new_transaction_notifier.subscribe(),
+            new_transaction_receiver: Some(self.new_transaction_notifier.subscribe()),
         }
     }

diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs
index a9411dd212ab..9d3c5026dce5 100644
--- a/crates/transaction-pool/src/traits.rs
+++ b/crates/transaction-pool/src/traits.rs
@@ -509,11 +509,20 @@ pub trait BestTransactions: Iterator + Send {
     /// In other words, this must remove the given transaction _and_ drain all transactions that
     /// depend on it.
     fn mark_invalid(&mut self, transaction: &Self::Item);
+
+    /// An iterator may be able to receive additional pending transactions that weren't present in
+    /// the pool when it was created.
+    ///
+    /// Calling this ensures that the iterator will only return the best transactions it currently
+    /// knows about and will not listen for pool updates.
+    fn no_updates(&mut self);
 }
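To show how a consumer uses the new `no_updates()` hook, here is a self-contained toy sketch. `ToyBest` and its `live_feed` field are invented stand-ins for this example; the real iterator above holds a broadcast `Receiver`, not a `Vec`.

```rust
/// Simplified copy of the trait sketched in the hunk above.
trait BestTransactions: Iterator {
    fn mark_invalid(&mut self, tx: &Self::Item);
    fn no_updates(&mut self);
}

/// Toy iterator over (id, gas) pairs. New pending transactions arrive via
/// `live_feed`; `no_updates` drops it, mirroring `new_transaction_receiver.take()`.
struct ToyBest {
    queue: Vec<(u64, u64)>,
    live_feed: Option<Vec<(u64, u64)>>,
}

impl Iterator for ToyBest {
    type Item = (u64, u64);
    fn next(&mut self) -> Option<Self::Item> {
        // Pull in newly arrived transactions first, unless the consumer opted out.
        if let Some(feed) = self.live_feed.as_mut() {
            self.queue.extend(feed.drain(..));
        }
        self.queue.pop()
    }
}

impl BestTransactions for ToyBest {
    fn mark_invalid(&mut self, tx: &Self::Item) {
        // A real pool would also drop all transactions that depend on `tx`.
        let id = tx.0;
        self.queue.retain(|t| t.0 != id);
    }
    fn no_updates(&mut self) {
        self.live_feed.take();
    }
}

fn main() {
    let mut best =
        ToyBest { queue: vec![(1, 21_000), (2, 50_000)], live_feed: Some(vec![(3, 30_000)]) };
    // A payload builder that wants a stable snapshot opts out of live updates:
    best.no_updates();
    assert_eq!(best.by_ref().count(), 2); // the late (3, 30_000) is never observed
}
```

Dropping the feed is exactly what `new_transaction_receiver.take()` does above: the iterator keeps draining what it already knows but never observes transactions that arrive later.

 /// A no-op implementation that yields no transactions.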
impl BestTransactions for std::iter::Empty { fn mark_invalid(&mut self, _tx: &T) {} + + fn no_updates(&mut self) {} } /// Trait for transaction types used inside the pool From 1563506aea09049a85e5cc72c2894f3f7a371581 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 21 Aug 2023 15:35:10 +0200 Subject: [PATCH 484/722] chore: add additional docs about root call gas limit (#4292) --- crates/revm/revm-inspectors/src/tracing/mod.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/revm/revm-inspectors/src/tracing/mod.rs b/crates/revm/revm-inspectors/src/tracing/mod.rs index 48f772ebde81..c01d19c8e94d 100644 --- a/crates/revm/revm-inspectors/src/tracing/mod.rs +++ b/crates/revm/revm-inspectors/src/tracing/mod.rs @@ -175,6 +175,8 @@ impl TracingInspector { if self.trace_stack.is_empty() { // this is the root call which should get the original gas limit of the transaction, // because initialization costs are already subtracted from gas_limit + // For the root call this value should use the transaction's gas limit + // See and gas_limit = data.env.tx.gas_limit; // we set the spec id here because we only need to do this once and this condition is From f5a304286f9c7f2a0fe0d2c75b01ea12551f7b44 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 21 Aug 2023 16:45:08 +0200 Subject: [PATCH 485/722] fix: use gas_used from execution result (#4293) --- .../revm/revm-inspectors/src/tracing/builder/geth.rs | 6 +++++- crates/rpc/rpc/src/debug.rs | 11 +++++++---- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/crates/revm/revm-inspectors/src/tracing/builder/geth.rs b/crates/revm/revm-inspectors/src/tracing/builder/geth.rs index 255cfdba96b4..f2bbdeda7565 100644 --- a/crates/revm/revm-inspectors/src/tracing/builder/geth.rs +++ b/crates/revm/revm-inspectors/src/tracing/builder/geth.rs @@ -110,7 +110,10 @@ impl GethTraceBuilder { /// Generate a geth-style traces for the call tracer. /// /// This decodes all call frames from the recorded traces. - pub fn geth_call_traces(&self, opts: CallConfig) -> CallFrame { + /// + /// This expects the gas used and return value for the + /// [ExecutionResult](revm::primitives::ExecutionResult) of the executed transaction. 
+    pub fn geth_call_traces(&self, opts: CallConfig, gas_used: u64) -> CallFrame {
         if self.nodes.is_empty() {
             return Default::default()
         }
@@ -119,6 +122,7 @@ impl GethTraceBuilder {
         // first fill up the root
         let main_trace_node = &self.nodes[0];
         let mut root_call_frame = main_trace_node.geth_empty_call_frame(include_logs);
+        root_call_frame.gas_used = U256::from(gas_used);

         // selfdestructs are not recorded as individual call traces but are derived from
         // the call trace and are added as additional `CallFrame` objects to the parent call
diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs
index aac4f176efdf..f492599963f1 100644
--- a/crates/rpc/rpc/src/debug.rs
+++ b/crates/rpc/rpc/src/debug.rs
@@ -248,9 +248,10 @@ where
                     .inner
                     .eth_api
                     .spawn_with_call_at(call, at, overrides, move |db, env| {
-                        inspect(db, env, &mut inspector)?;
-                        let frame =
-                            inspector.into_geth_builder().geth_call_traces(call_config);
+                        let (res, _) = inspect(db, env, &mut inspector)?;
+                        let frame = inspector
+                            .into_geth_builder()
+                            .geth_call_traces(call_config, res.result.gas_used());
                         Ok(frame.into())
                     })
                     .await?;
@@ -469,7 +470,9 @@ where
                 let (res, _) = inspect(db, env, &mut inspector)?;

-                let frame = inspector.into_geth_builder().geth_call_traces(call_config);
+                let frame = inspector
+                    .into_geth_builder()
+                    .geth_call_traces(call_config, res.result.gas_used());
                 return Ok((frame.into(), res.state))
             }

From 0d47e4cf4f57a2d237950a6a1bc245954c4d07ef Mon Sep 17 00:00:00 2001
From: Bjerg
Date: Mon, 21 Aug 2023 17:03:23 +0200
Subject: [PATCH 486/722] docs: typos & cleanup (#4296)

---
 .../libmdbx-rs/mdbx-sys/libmdbx/ChangeLog.md  | 1577 -----------------
 .../libmdbx-rs/mdbx-sys/libmdbx/README.md     |  797 ---------
 docs/crates/db.md                             |   24 +-
 docs/crates/discv4.md                         |   42 +-
 docs/crates/eth-wire.md                       |   32 +-
 docs/crates/network.md                        |   70 +-
 docs/crates/stages.md                         |   12 +-
 docs/design/database.md                       |    2 +-
 8 files changed, 104 insertions(+), 2452 deletions(-)
 delete mode 100644 crates/storage/libmdbx-rs/mdbx-sys/libmdbx/ChangeLog.md
 delete mode 100644 crates/storage/libmdbx-rs/mdbx-sys/libmdbx/README.md

diff --git a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/ChangeLog.md b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/ChangeLog.md
deleted file mode 100644
index a1b8321dfacc..000000000000
--- a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/ChangeLog.md
+++ /dev/null
@@ -1,1577 +0,0 @@
-ChangeLog
----------
-
-English version [by Google](https://gitflic-ru.translate.goog/project/erthink/libmdbx/blob?file=ChangeLog.md&_x_tr_sl=ru&_x_tr_tl=en)
-and [by Yandex](https://translated.turbopages.org/proxy_u/ru-en.en/https/gitflic.ru/project/erthink/libmdbx/blob?file=ChangeLog.md).
-
-
-## v0.12.6 "ЦСКА" (2023-04-29)
-
-A stabilizing release that fixes discovered bugs and eliminates shortcomings, on the day
-of the 100th anniversary of the [CSKA](https://ru.wikipedia.org/wiki/Центральный_спортивный_клуб_Армии) sports club.
-
-```
-14 files changed, 117 insertions(+), 83 deletions(-)
-Signed-off-by: Леонид Юрьев (Leonid Yuriev)
-```
-
-Minor changes:
-
- - Updated the patch for old buildroot versions.
- - Switched to clang-format-16.
- - Use of `enum` types instead of `int` to eliminate GCC 13 warnings,
-   which could break the build on Fedora 38.
- - --------------------------------------------------------------------------------- - - -## v0.12.5 "Динамо" от 2023-04-18 - -Стабилизирующий выпуск с исправлением обнаруженных ошибок и устранением -недочетов, в день 100-летнего юбилея спортивного общества [«Динамо»](https://ru.wikipedia.org/wiki/Динамо_(спортивное_общество)). - -``` -16 files changed, 686 insertions(+), 247 deletions(-) -Signed-off-by: Леонид Юрьев (Leonid Yuriev) -``` - -Благодарности: - - - Max за сообщение о проблеме экспорта из DSO/DLL - устаревших функций API. - - [`@calvin3721`](https://t.me/calvin3721) за сообщение о проблеме работы - `MainDB` с флагами не по-умолчанию. - -Исправления: - - - Поправлен экспорт из DSO/DLL устаревших функций, - которые заменены на inline в текущем API. - - Устранено использование неверного компаратора при создании или пересоздании - `MainDB` с флагами/опциями предполагающим использование специфического - компаратора (не по-умолчанию). - -Мелочи: - - - Удалена дублирующая диагностика внутри `node_read_bigdata()`. - - Исправлены ссылки в описании `mdbx_env_set_geometry()`. - - Добавлен отдельный тест `extra/upsert_alldups` для специфического - сценария замены/перезаписи одним значением всех multi-значений - соответствующих ключу, т.е. замена всех «дубликатов» одним значением. - - В C++ API добавлены варианты `buffer::key_from()` с явным именованием по типу данных. - - Добавлен отдельный тест `extra/maindb_ordinal` для специфического - сценария создания `MainDB` с флагами требующими использования - компаратора не по-умолчанию. - - Рефакторинг проверки "когерентности" мета-страниц. - - Корректировка `osal_vasprintf()` для устранения предупреждений статических анализаторов. - - --------------------------------------------------------------------------------- - - -## v0.12.4 "Арта-333" от 2023-03-03 - -Стабилизирующий выпуск с исправлением обнаруженных ошибок, устранением -недочетов и технических долгов. Ветка 0.12 считается готовой к -продуктовому использованию, получает статус стабильной и далее будет -получать только исправление ошибок. Разработка будет продолжена в ветке -0.13, а ветка 0.11 становится архивной. - -``` -63 files changed, 1161 insertions(+), 569 deletions(-) -Signed-off-by: Леонид Юрьев (Leonid Yuriev) -``` - -Благодарности: - - - Max за сообщение о проблеме ERROR_SHARING_VIOLATION - в режиме MDBX_EXCLUSIVE на Windows. - - Alisher Ashyrov за сообщение о проблеме - с assert-проверкой и содействие в отладке. - - Masatoshi Fukunaga за сообщение о проблеме - `put(MDBX_UPSERT+MDBX_ALLDUPS)` для случая замены всех значений в subDb. - -Исправления: - - - Устранен регресс после коммита 474391c83c5f81def6fdf3b0b6f5716a87b78fbf, - приводящий к возврату ERROR_SHARING_VIOLATION в Windows при открытии БД - в режиме MDBX_EXCLUSIVE для чтения-записи. - - - Добавлено ограничение размера отображения при коротком read-only файле, для - предотвращения ошибки ERROR_NOT_ENOUGH_MEMORY в Windows, которая возникает - в этом случае и совсем не информативна для пользователя. - - - Произведен рефакторинг `dxb_resize()`, в том числе, для устранения срабатывания - assert-проверки `size_bytes == env->me_dxb_mmap.current` в специфических - многопоточных сценариях использования. Проверка срабатывала только в - отладочных сборках, при специфическом наложении во времени читающей и - пишущей транзакции в разных потоках, одновременно с изменением размера БД. - Кроме срабатывание проверки, каких-либо других последствий не возникало. 
- - - Устранена проблема в `put(MDBX_UPSERT+MDBX_ALLDUPS)` для случая замены - всех значений единственного ключа в subDb. В ходе этой операции subDb - становится полностью пустой, без каких-либо страниц и именно эта - ситуация не была учтена в коде, что приводило к повреждению БД - при фиксации такой транзакции. - - - Устранена излишняя assert-проверка внутри `override_meta()`. - Что в отладочных сборках могло приводить к ложным срабатываниям - при восстановлении БД, в том числе при автоматическом откате слабых - мета-страниц. - - - Скорректированы макросы `__cold`/`__hot`, в том числе для устранения проблемы - `error: inlining failed in call to ‘always_inline FOO(...)’: target specific option mismatch` - при сборке посредством GCC >10.x для SH4. - -Ликвидация технических долгов и мелочи: - - - Исправлены многочисленные опечатки в документации. - - Доработан тест для полной стохастической проверки `MDBX_EKEYMISMATCH` в режиме `MDBX_APPEND`. - - Расширены сценарии запуска `mdbx_chk` из CMake-тестов для проверки как в обычном, - так и эксклюзивном режимах чтения-записи. - - Уточнены спецификаторы `const` и `noexcept` для нескольких методов в C++ API. - - Устранено использование стека под буферы для `wchar`-преобразования путей. - - Для Windows добавлена функция `mdbx_env_get_path()` для получения пути к БД - в формате многобайтных символов. - - Добавлены doxygen-описания для API с широкими символами. - - Устранены предупреждения статического анализатора MSVC, - все они были несущественные, либо ложные. - - Устранено ложное предупреждение GCC при сборке для SH4. - - Добавлена поддержка ASAN (Address Sanitizer) при сборке посредством MSVC. - - Расширен набор перебираемых режимов в скрипте `test/long_stochastic.sh`, - добавлена опция `--extra`. - - В C++ API добавлена поддержка расширенных опций времени выполнения `mdbx::extra_runtime_option`, - аналогично `enum MDBX_option_t` из C API. - - Вывод всех счетчиков page-operations в `mdbx_stat`. - - --------------------------------------------------------------------------------- - - -## v0.12.3 "Акула" от 2023-01-07 - -Выпуск с существенными доработками и новой функциональностью в память о закрытом open-source -[проекте "Акула"](https://erigon.substack.com/p/winding-down-support-for-akula-project). - -Добавлена prefault-запись, переделан контроль “некогерентности” unified page/buffer cache, изменена тактика слияния страниц и т.д. -Стало ещё быстрее, в некоторых сценариях вдвое. - -``` -20 files changed, 4508 insertions(+), 2928 deletions(-) -Signed-off-by: Леонид Юрьев (Leonid Yuriev) -``` - -Благодарности: - - - [Alex Sharov](https://t.me/AskAlexSharov) и команде [Erigon](https://github.com/ledgerwatch/erigon) за тестирование. - - [Simon Leier](https://t.me/leisim) за сообщение о сбоях и тестирование. - -Новое: - - - Использование адреса [https://libmdbx.dqdkfa.ru/dead-github](https://libmdbx.dqdkfa.ru/dead-github) - для отсылки к сохранённым в web.archive.org копиям ресурсов, уничтоженных администрацией Github. - - - Реализована prefault-запись при выделении страниц для read-write отображений. - Это приводит к кратному снижению системных издержек и существенному увеличению - производительности в соответствующих сценариях использования, когда: - - размер БД и объём данных существенно больше ОЗУ; - - используется режим `MDBX_WRITEMAP`; - - не-мелкие транзакции (по ходу транзакции выделяется многие сотни или тысячи страниц). 
- - В режиме `MDBX_WRITEMAP` выделение/переиспользование страниц приводит - к page-fault и чтению страницы с диска, даже если содержимое страницы - не нужно (будет перезаписано). Это является следствием работы подсистемы - виртуальной памяти, а штатный способ лечения через `MADV_REMOVE` - работает не на всех ФС и обычно дороже получаемой экономии. - - Теперь в libmdbx используется "упреждающая запись" таких страниц, - которая на системах с [unified page cache](https://www.opennet.ru/base/dev/ubc.txt.html) - приводит к "вталкиванию" данных, устраняя необходимость чтения с диска при - обращении к такой странице памяти. - - Новый функционал работает в согласованности с автоматическим управлением read-ahead - и кэшем статуса присутствия страниц в ОЗУ, посредством [mincore()](https://man7.org/linux/man-pages/man2/mincore.2.html). - - - Добавлена опция `MDBX_opt_prefault_write_enable` для возможности принудительного - включения/выключения prefault-записи. - - - Реализован динамический выбор между сквозной записью на диск и обычной записью - с последующим [fdatasync()](https://man7.org/linux/man-pages/man3/fdatasync.3p.html) - управляемый опцией `MDBX_opt_writethrough_threshold`. - - В долговечных (durable) режимах данные на диск могут быть сброшены двумя способами: - - сквозной записью через файловый дескриптор открытый с `O_DSYNC`; - - обычной записью с последующим вызовом `fdatasync()`. - - Первый способ выгоднее при записи малого количества страниц и/или если - канал взаимодействия с диском/носителем имеет близкую к нулю задержку. - Второй способ выгоднее если требуется записать много страниц и/или канал - взаимодействия имеет весомую задержку (датацентры, облака). Добавленная - опция `MDBX_opt_writethrough_threshold` позволяет во время выполнения - задать порог для динамического выбора способа записи в зависимости от - объема и конкретных условия использования. - - - Автоматическая установка `MDBX_opt_rp_augment_limit` в зависимости от размера БД. - - - Запрещение разного режима `MDBX_WRITEMAP` между процессами в режимах - с отложенной/ленивой записью, так как в этом случае невозможно - обеспечить сброс данных на диск во всех случаях на всех поддерживаемых платформах. - - - Добавлена опция сборки `MDBX_MMAP_USE_MS_ASYNC` позволяющая отключить - использование системного вызова `msync(MS_ASYNC)`, в использовании - которого нет необходимости на подавляющем большинстве актуальных ОС. - По-умолчанию `MDBX_MMAP_USE_MS_ASYNC=0` (выключено) на Linux и других - системах с unified page cache. Такое поведение (без использования - `msync(MS_ASYNC)`) соответствует неизменяемой (hardcoded) логике LMDB. В - результате, в простых/наивных бенчмарках, libmdbx опережает LMDB - примерно также как при реальном применении. - - На всякий случай стоит еще раз отметить/напомнить, что на Windows - предположительно libmdbx будет отставать от LMDB в сценариях с - множеством мелких транзакций, так как libmdbx осознанно использует на - Windows файловые блокировки, которые медленные (плохо реализованы в ядре - ОС), но позволяют застраховать пользователей от массы неверных действий - приводящих к повреждению БД. - - - Поддержка не-печатных имен для subDb. - - - Добавлен явный выбор `tls_model("local-dynamic")` для обхода проблемы - `relocation R_X86_64_TPOFF32 against FOO cannot be used with -shared` - из-за ошибки в CLANG приводящей к использованию неверного режима `ls_model`. - - - Изменение тактики слияния страниц при удалении. - Теперь слияние выполняется преимущественно с уже измененной/грязной страницей. 
- Если же справа и слева обе страницы с одинаковым статусом, - то с наименее заполненной, как прежде. В сценариях с массивным удалением - это позволяет увеличить производительность до 50%. - - - Добавлен контроль отсутствия LCK-файлов с альтернативным именованием. - -Исправления (без корректировок новых функций): - - - Изменение размера отображения если это требуется для сброса данных на - диск при вызове `mdbx_env_sync()` из параллельного потока выполнения вне - работающей транзакции. - - - Исправление регресса после коммита db72763de049d6e4546f838277fe83b9081ad1de от 2022-10-08 - в логике возврата грязных страниц в режиме `MDBX_WRITEMAP`, из-за чего - освободившиеся страницы использовались не немедленно, а попадали в - retired-список совершаемой транзакции и происходил необоснованный рост - размера транзакции. - - - Устранение SIGSEGV или ошибочного вызова `free()` в ситуациях - повторного открытия среды посредством `mdbx_env_open()`. - - - Устранение ошибки совершенной в коммите fe20de136c22ed3bc4c6d3f673e79c106e824f60 от 2022-09-18, - в результате чего на Linux в режиме `MDBX_WRITEMAP` никогда не вызывался `msync()`. - Проблема существует только в релизе 0.12.2. - - - Добавление подсчета грязных страниц в `MDBX_WRITEMAP` для предоставления посредством `mdbx_txn_info()` - актуальной информации об объеме изменений в процессе транзакций чтения-записи. - - - Исправление несущественной опечатки в условиях `#if` определения порядка байт. - - - Исправление сборки для случая `MDBX_PNL_ASCENDING=1`. - -Ликвидация технических долгов и мелочи: - - - Доработка поддержки авто-слияния записей GC внутри `page_alloc_slowpath()`. - - Устранение несущественных предупреждений Coverity. - - Использование единого курсора для поиска в GC. - - Переработка внутренних флагов связанных с выделением страниц из GC. - - Доработка подготовки резерва перед обновлением GC при включенном BigFoot. - - Оптимизация `pnl_merge()` для случаев неперекрывающихся объединяемых списков. - - Оптимизация поддержки отсортированного списка страниц в `dpl_append()`. - - Ускорение работы `mdbx_chk` при обработке пользовательских записей в `@MAIN`. - - Переработка LRU-отметок для спиллинга. - - Переработка контроля "некогерентности" Unified page cache для уменьшения накладных расходов. - - Рефакторинг и микрооптимизация. - - --------------------------------------------------------------------------------- - - -## v0.12.2 "Иван Ярыгин" от 2022-11-11 - -Выпуск с существенными доработками и новой функциональностью -в память о российском борце [Иване Сергеевиче Ярыгине](https://ru.wikipedia.org/wiki/Ярыгин,_Иван_Сергеевич). - -На Олимпийских играх в Мюнхене в 1972 году Иван Ярыгин уложил всех соперников на лопатки, -суммарно затратив менее 9 минут. Этот рекорд никем не побит до сих пор. - -``` -64 files changed, 5573 insertions(+), 2510 deletions(-) -Signed-off-by: Леонид Юрьев (Leonid Yuriev) -``` - -Новое: - - - Поддержка всех основных опций при сборке посредством CMake. - - - Требования к CMake понижены до версии 3.0.2 для возможности сборки для устаревших платформ. - - - Добавлена возможность профилирования работы GC в сложных и/или нагруженных - сценариях (например Ethereum/Erigon). По-умолчанию соответствующий код отключен, - а для его активации необходимо указать опцию сборки `MDBX_ENABLE_PROFGC=1`. - - - Добавлена функция `mdbx_env_warmup()` для "прогрева" БД с возможностью - закрепления страниц в памяти. - В утилиты `mdbx_chk`, `mdbx_copy` и `mdbx_dump` добавлены опции `-u` и `-U` - для активации соответствующего функционала. 
 - Disabled accounting of dirty pages in the modes not requiring it
   (`MDBX_WRITEMAP` with `MDBX_AVOID_MSYNC=0`). This change reduces overhead;
   it was planned long ago but postponed as it required other changes.

 - Spilling (ousting to disk) of dirty pages now takes into account the size
   of large/overflow pages. This allows correctly following the policy set by
   the `MDBX_opt_txn_dp_limit`, `MDBX_opt_spill_max_denominator` and
   `MDBX_opt_spill_min_denominator` options; the change was planned long ago
   but postponed as it required other changes.

 - For Windows, UNICODE-dependent definitions of the `MDBX_DATANAME`,
   `MDBX_LOCKNAME` and `MDBX_LOCK_SUFFIX` macros were added to the API.

 - Switched to predominant use of the `size_t` type to reduce overhead on the
   Elbrus platform.

 - Added the `mdbx_limits_valsize4page_max()` and
   `mdbx_env_get_valsize4page_max()` functions to the API, returning the
   maximum size in bytes of a value that can be placed in a single
   large/overflow page rather than in a sequence of two or more such pages.
   For tables with support for duplicates, moving values out to
   large/overflow pages is not supported, so the result matches
   `mdbx_limits_valsize_max()`.

 - Added the `mdbx_limits_pairsize4page_max()` and
   `mdbx_env_get_pairsize4page_max()` functions to the API, returning the
   maximum total size in bytes of a key-value pair that can be placed on a
   single leaf page, without moving the value out to a separate
   large/overflow page (see the sketch after this list). For tables with
   support for duplicates, moving values out to large/overflow pages is not
   supported, so the result defines the maximum/allowed total size of a
   key-value pair.

 - Implemented asynchronous (overlapped) writing on Windows, including the
   use of unbuffered I/O and `WriteGather()`. This reduces overhead and
   partially works around Windows problems with low I/O performance,
   including large `FlushFileBuffers()` latencies. The new code also
   consolidates written regions on all platforms, while on Windows the use of
   events is reduced to a minimum, along with automatic use of
   `WriteGather()`. Therefore a substantial reduction of OS interaction
   overhead is expected, and on Windows this speedup may be multi-fold in
   some scenarios compared to LMDB.

 - Added the `MDBX_AVOID_MSYNC` build option, which defines the behavior of
   libmdbx in the `MDBX_WRITEMAP` mode (when data is changed directly in the
   DB pages mapped into RAM):

   * If `MDBX_AVOID_MSYNC=0` (the default on all systems except Windows),
     then (as before) data is persisted via `msync()`, or `FlushViewOfFile()`
     on Windows. On platforms with a full-fledged virtual memory subsystem
     and adequate file I/O this ensures minimal overhead (a single system
     call) and maximum performance. However, on Windows it leads to
     significant degradation, among other things because after
     `FlushViewOfFile()` a call of `FlushFileBuffers()` is also required,
     with a lot of trouble and fuss inside the OS kernel.

   * If `MDBX_AVOID_MSYNC=1` (the default only on Windows), then data is
     persisted by explicitly writing each changed DB page to the file.
     This requires additional overhead, both for tracking changed pages
     (maintaining lists of "dirty" pages) and for the system calls to write
     them. In addition, from the point of view of the OS kernel's virtual
     memory subsystem, DB pages changed in RAM and explicitly written to the
     file may either remain "dirty" and be written again by the OS kernel
     later, or require extra overhead for tracking PTEs (Page Table Entries),
     modifying them, and additionally copying the data. Nevertheless,
     according to the available information, on Windows this way of writing
     data generally provides higher performance.

 - Improved heuristics for enabling auto-merge of GC records.

 - Changed the LCK format and the semantics of some internal fields. Versions
   of libmdbx using different formats will not be able to work with the same
   DB simultaneously, but only in turn (the LCK file is rewritten at opening
   by the first process to open the DB).

 - Added methods for committing a transaction with gathering latency
   information to the `C++` API.

 - Added the `MDBX_HAVE_BUILTIN_CPU_SUPPORTS` build option to control the use
   of GCC's `__builtin_cpu_supports()` function, which could be unavailable on
   some fake OSes (macos, ios, android, etc).
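A hedged sketch of two of the additions from this list. The assumptions here: `mdbx_env_warmup()` takes `(env, txn-or-NULL, flags, timeout in 16.16 fixed-point seconds)`, and `mdbx_env_get_pairsize4page_max()` returns the size in bytes as an `int`, mirroring `mdbx_limits_valsize_max()`; verify both against `mdbx.h`:

```c
#include "mdbx.h"
#include <stdio.h>

/* Hedged sketch: warm up the DB and query the new page-fit limit. */
static void warmup_and_report(MDBX_env *env) {
  /* Pre-read the DB into RAM; MDBX_warmup_default just touches pages,
   * other flags (e.g. MDBX_warmup_lock) may additionally pin them. */
  int rc = mdbx_env_warmup(env, /*txn*/ NULL, MDBX_warmup_default,
                           /*timeout_seconds_16dot16*/ 0);
  if (rc != MDBX_SUCCESS)
    fprintf(stderr, "warmup: %s\n", mdbx_strerror(rc));

  /* Largest key+value pair that still fits into one leaf page for a
   * plain (non-dupsort) table. */
  const int pair_max = mdbx_env_get_pairsize4page_max(env, MDBX_DB_DEFAULTS);
  printf("max single-leaf-page pair size: %d bytes\n", pair_max);
}
```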
Fixes (excluding changes to the new features listed above):

 - Eliminated a number of warnings when building with MinGW.
 - Eliminated false-positive Valgrind reports about the use of uninitialized
   data caused by alignment gaps in `struct troika`.
 - Fixed returning an unexpected `MDBX_BUSY` error from the
   `mdbx_env_set_option()`, `mdbx_env_set_syncbytes()` and
   `mdbx_env_set_syncperiod()` functions.
 - Small fixes for compatibility with CMake 3.8.
 - More control and caution (paranoia) to insure against `mremap()` defects.
 - A crutch to fix building with old versions of `stdatomic.h` from GNU Lib C,
   where the `ATOMIC_*_LOCK_FREE` macros are erroneously redefined via
   functions.
 - Use of `fcntl64(F_GETLK64/F_SETLK64/F_SETLKW64)` when available. This
   solves the problem of an assertion failure when building for platforms
   where the `off_t` type is wider than the corresponding fields of
   `struct flock` used for file locking.
 - Refined gathering of latency information when committing transactions:
   * eliminated distortion of GC-update duration measurements when the
     internal debug audit is enabled;
   * protection from zero underflow of the total latency only, to rule out
     situations where the sum of the individual stages is greater than the
     total duration.
 - A number of fixes to eliminate assertion failures in debug builds.
 - More careful conversion to the `mdbx_tid_t` type to eliminate warnings.
 - Fixed an extra flush of data to disk in the `MDBX_SAFE_NOSYNC` mode when
   updating GC.
 - Fixed an extra check for `MDBX_APPENDDUP` inside `mdbx_cursor_put()` which
   could result in returning `MDBX_EKEYMISMATCH` for valid cases.
 - Fixed a nasty `clz()` bug (by using `_BitScanReverse()`; only MSVC builds
   affected).

Minors:

 - Historical links related to the project deleted on ~~github~~ are
   redirected to [web.archive.org](https://web.archive.org/web/https://github.com/erthink/libmdbx).
 - Synchronized CMake constructs between projects.
 - Added an admonition about insecurity of RISC-V.
 - Added a description of the `MDBX_debug_func` parameters.
 - Added a workaround to minimize false-positive conflicts when using file
   locks on Windows.
 - Checking of atomicity of C11 operations on 32/64-bit data.
 - The default value of `me_options.dp_limit` is reduced 42 times in debug
   builds.
 - Added the `gcc-riscv64-linux-gnu` platform to the list for the `cross-gcc`
   target.
 - Small fixes of the `long_stochastic.sh` script for operation on Windows.
 - Removed a needless `LockFileEx()` call inside `mdbx_env_copy()`.
 - Added a description of file-descriptor usage in various modes.
 - Added use of `_CrtDbgReport()` in debug builds.
 - Fixed an extra ensure/assertion check of `oldest_reader` inside
   `txn_end()`.
 - Removed the description of the deprecated usage of `MDBX_NODUPDATA`.
 - Fixed a regression in ASAN/Valgrind-enabled builds.
 - Fixed a minor MinGW warning.

--------------------------------------------------------------------------------


## v0.12.1 "Positive Proxima" at 2022-08-24

The planned frontward release with new superior features on the day of the 20th anniversary of [Positive Technologies](https://ptsecurty.com).

```
37 files changed, 7604 insertions(+), 7417 deletions(-)
Signed-off-by: Леонид Юрьев (Leonid Yuriev)
```

New:

 - The `Big Foot` feature, which significantly reduces GC overhead for processing large lists of retired pages from huge transactions.
   Now _libmdbx_ avoids creating large chunks of PNLs (page number lists) which would require long sequences of free pages, aka large/overflow pages.
   Thus searching, allocating and storing such sequences inside GC is avoided.
 - Improved hot/online validation and checking of database pages, both for more robustness and for performance.
 - A new solid and fast method to latch meta-pages called `Troika`.
   The minimum of memory barriers, reads, comparisons and conditional transitions are used.
 - The new `MDBX_VALIDATION` environment option for extra validation of DB structure and page content, for careful/safe handling of damaged or untrusted DBs.
 - Accelerated ×16/×8/×4 AVX512/AVX2/SSE2/Neon implementations of searching page sequences.
 - Added the `gcrtime_seconds16dot16` counter to the "Page Operation Statistics" that accumulates time spent for GC searching and reclaiming.
 - Copy-with-compactification now clears/zeroes unused gaps inside database pages.
 - The `C` and `C++` APIs have been extended and/or refined to simplify using `wchar_t` pathnames.
   On Windows the `mdbx_env_openW()`, `mdbx_env_get_pathW()`, `mdbx_env_copyW()` and `mdbx_env_open_for_recoveryW()` are available for now,
   but `mdbx_env_get_path()` has been replaced in favor of `mdbx_env_get_pathW()` (see the sketch after this list).
 - Added an explicit error message for Buildroot's Microblaze toolchain maintainers.
 - Added the `MDBX_MANAGE_BUILD_FLAGS` build options for CMake.
 - Sped up the internal `bsearch`/`lower_bound` implementation using a branchless tactic, including a workaround for a CLANG x86 optimiser bug.
 - A lot of internal refinements and micro-optimisations.
 - Internally counted volume of dirty pages (unused for now but for coming features).
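A hedged, Windows-only sketch of the wide-character open path listed above. It assumes `mdbx_env_openW()` mirrors `mdbx_env_open()` but takes a `wchar_t` pathname; the pathname and flags here are arbitrary examples, so check `mdbx.h` for the exact prototype:

```c
#include "mdbx.h"

#if defined(_WIN32) || defined(_WIN64)
/* Hedged sketch: create and open an environment via a wide pathname. */
static int open_env_wide(MDBX_env **env) {
  int rc = mdbx_env_create(env);
  if (rc != MDBX_SUCCESS)
    return rc;
  /* The pathname is an arbitrary illustration. */
  rc = mdbx_env_openW(*env, L"C:\\data\\example.mdbx",
                      MDBX_NOSUBDIR | MDBX_LIFORECLAIM, 0644);
  if (rc != MDBX_SUCCESS)
    mdbx_env_close(*env);
  return rc;
}
#endif
```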
Fixes:

 - Never use the modern `__cxa_thread_atexit()` on Apple's OSes.
 - Don't check the owner for finished transactions.
 - Fixed a typo in `MDBX_EINVAL` which breaks MinGW builds with CLANG.


## v0.12.0 at 2022-06-19

Not a release but preparation for changing the feature set and API.


********************************************************************************


## v0.11.14 "Sergey Kapitsa" at 2023-02-14

The stable bugfix release in memory of [Sergey Kapitsa](https://en.wikipedia.org/wiki/Sergey_Kapitsa) on his 95th birthday.

```
22 files changed, 250 insertions(+), 174 deletions(-)
Signed-off-by: Леонид Юрьев (Leonid Yuriev)
```

Fixes:

 - backport: Fixed an insignificant typo of `||` inside the `#if` byte-order condition.
 - backport: Fixed `SIGSEGV` or an erroneous call to `free()` in situations where
   errors occur when reopening a previously used environment
   by `mdbx_env_open()`.
 - backport: Fixed `cursor_put_nochecklen()` internals for the case when a dupsort'ed named subDb
   contains a single key with multiple values (aka duplicates), which are replaced
   with a single value by a put-operation with the `MDBX_UPSERT+MDBX_ALLDUPS` flags.
   In this case, the database becomes completely empty, without any pages.
   However, exactly this condition was not considered and thus wasn't handled correctly.
   See [issue#8](https://gitflic.ru/project/erthink/libmdbx/issue/8) for more information.
 - backport: Fixed an extra assertion inside `override_meta()`, which could
   lead to false-positive failing of the assertion in debug builds during
   DB recovery and auto-rollback.
 - backport: Refined the `__cold`/`__hot` macros to avoid the
   `error: inlining failed in call to ‘always_inline FOO(...)’: target specific option mismatch`
   issue during builds using GCC >10.x for the SH4 arch.

Minors:

 - backport: Using the https://libmdbx.dqdkfa.ru/dead-github
   for resources deleted by the Github administration.
 - backport: Fixed English typos.
 - backport: Fixed the proto of `__asan_default_options()`.
 - backport: Fixed the doxygen-description of the C++ API, especially of C++20 concepts.
 - backport: Refined `const` and `noexcept` for a few C++ API methods.
 - backport: Fixed a copy&paste typo in "Getting started".
 - backport: Updated the MithrilDB status.
 - backport: Resolved a false-positive `used uninitialized` warning from GCC >10.x
   while building for the SH4 arch.

--------------------------------------------------------------------------------


## v0.11.13 "Swashplate" at 2022-11-10

The stable bugfix release in memory of [Boris Yuryev](https://ru.wikipedia.org/wiki/Юрьев,_Борис_Николаевич) on his 133rd birthday.

```
30 files changed, 405 insertions(+), 136 deletions(-)
Signed-off-by: Леонид Юрьев (Leonid Yuriev)
```

Fixes:

 - Fixed builds with older libc versions after using `fcntl64()` (backport).
 - Fixed builds with older `stdatomic.h` versions,
   where the `ATOMIC_*_LOCK_FREE` macros are mistakenly redefined using functions (backport).
 - Added a workaround for a `mremap()` defect to avoid an assertion failure (backport).
 - Workaround for `encryptfs` bug(s) in the `copy_file_range` implementation (backport).
 - Fixed unexpected `MDBX_BUSY` from `mdbx_env_set_option()`, `mdbx_env_set_syncbytes()`
   and `mdbx_env_set_syncperiod()` (backport).
 - CMake requirements lowered to version 3.0.2 (backport).

Minors:

 - Minor clarification of the `--help` output of `mdbx_test` (backport).
 - Added an admonition about insecurity of RISC-V (backport).
 - Stochastic scripts and CMake files synchronized with the `devel` branch.
 - Use `--dont-check-ram-size` for small-tests make-targets (backport).

--------------------------------------------------------------------------------


## v0.11.12 "Эребуни" at 2022-10-12

The stable bugfix release.
```
11 files changed, 96 insertions(+), 49 deletions(-)
Signed-off-by: Леонид Юрьев (Leonid Yuriev)
```

Fixes:

 - Fixed a static assertion failure on platforms where the `off_t` type is wider
   than the corresponding fields of `struct flock` used for file locking (backport).
   Now _libmdbx_ will use `fcntl64(F_GETLK64/F_SETLK64/F_SETLKW64)` if available.
 - Fixed an assertion check inside `page_retire_ex()` (backport).

Minors:

 - Fixed `-Wint-to-pointer-cast` warnings while casting to `mdbx_tid_t` (backport).
 - Removed a needless `LockFileEx()` inside `mdbx_env_copy()` (backport).

--------------------------------------------------------------------------------


## v0.11.11 "Тендра-1790" at 2022-09-11

The stable bugfix release.

```
10 files changed, 38 insertions(+), 21 deletions(-)
Signed-off-by: Леонид Юрьев (Leonid Yuriev)
```

Fixes:

 - Fixed an extra check for `MDBX_APPENDDUP` inside `mdbx_cursor_put()` which could result in returning `MDBX_EKEYMISMATCH` for valid cases.
 - Fixed an extra ensure/assertion check of `oldest_reader` inside `mdbx_txn_end()`.
 - Fixed derived C++ builds by removing `MDBX_INTERNAL_FUNC` for `mdbx_w2mb()` and `mdbx_mb2w()`.

--------------------------------------------------------------------------------


## v0.11.10 "the TriColor" at 2022-08-22

The stable bugfix release.

```
14 files changed, 263 insertions(+), 252 deletions(-)
Signed-off-by: Леонид Юрьев (Leonid Yuriev)
```

New:

 - The C++ API has been refined to simplify support for `wchar_t` in path names.
 - Added an explicit error message for Buildroot's Microblaze toolchain maintainers.

Fixes:

 - Never use the modern `__cxa_thread_atexit()` on Apple's OSes.
 - Use `MultiByteToWideChar(CP_THREAD_ACP)` instead of `mbstowcs()`.
 - Don't check the owner for finished transactions.
 - Fixed a typo in `MDBX_EINVAL` which breaks MinGW builds with CLANG.

Minors:

 - Fixed a variable-name typo.
 - Using `ldd` to check the used DSOs.
 - Added the `MDBX_WEAK_IMPORT_ATTRIBUTE` macro.
 - Use the current transaction geometry for untouched parameters when `env_set_geometry()` is called within a write transaction.
 - Minor clarification of the `iov_page()` failure case.

--------------------------------------------------------------------------------


## v0.11.9 "Чирчик-1992" at 2022-08-02

The stable bugfix release.

```
18 files changed, 318 insertions(+), 178 deletions(-)
Signed-off-by: Леонид Юрьев (Leonid Yuriev)
```

Acknowledgments:

 - [Alex Sharov](https://github.com/AskAlexSharov) and the Erigon team for reporting and testing.
 - [Andrew Ashikhmin](https://gitflic.ru/user/yperbasis) for contributing.

New:

 - Ability to customise `MDBX_LOCK_SUFFIX`, `MDBX_DATANAME`, `MDBX_LOCKNAME` just by predefining them during build.
 - Added to [`mdbx::env_managed`](https://libmdbx.dqdkfa.ru/group__cxx__api.html#classmdbx_1_1env__managed)'s methods a few overloads with a `const char* pathname` parameter (C++ API).

Fixes:

 - Fixed hanging of copy-with-compactification of a corrupted DB,
   or in case the volume of output pages is a multiple of `MDBX_ENVCOPY_WRITEBUF`.
 - Fixed the standalone non-CMake build on MacOS (`#include <AvailabilityMacros.h>`).
 - Fixed an unexpected `MDBX_PAGE_FULL` error in rare cases with large database page sizes.

Minors:

 - Minor fixes of Doxygen references, comments, descriptions, etc.
 - Fixed a copy&paste typo inside `meta_checktxnid()`.
 - Minor fix of `meta_checktxnid()` to avoid an assertion in debug mode.
 - Minor fix of `mdbx_env_set_geometry()` to avoid returning `EINVAL` in particular rare cases.
 - Minor refine/fix of the batch-get testcase for large page sizes.
 - Added the `--pagesize NN` option to the long-stochastic test script.
 - Updated the Valgrind-suppressions file for modern GCC.
 - Fixed the `has no symbols` warning from Apple's ranlib.

--------------------------------------------------------------------------------


## v0.11.8 "Baked Apple" at 2022-06-12

The stable release with important fixes and a workaround for the critical macOS thread-local-storage issue.

Acknowledgments:

 - [Masatoshi Fukunaga](https://github.com/mah0x211) for [Lua bindings](https://github.com/mah0x211/lua-libmdbx).

New:

 - Added most of the transaction flags to the public API.
 - Added the `MDBX_NOSUCCESS_EMPTY_COMMIT` build option to return a non-success result (`MDBX_RESULT_TRUE`) on an empty commit.
 - Reworked validation and import of DBI-handles into a transaction.
   These changes are assumed to be invisible to most users, but will cause fewer surprises in complex DBI cases.
 - Added the ability to open a DB in without-LCK (exclusive read-only) mode in case there are no permissions to create/write the LCK-file.

Fixes:

 - A series of fixes and improvements for the automatically generated documentation (Doxygen).
 - Fixed a copy&paste bug which could lead to `SIGSEGV` (nullptr dereference) in the exclusive/no-lck mode.
 - Fixed minor warnings from modern Apple's CLANG 13.
 - Fixed minor warnings from CLANG 14 and the in-development CLANG 15.
 - Fixed a `SIGSEGV` regression in the without-LCK (exclusive read-only) mode.
 - Fixed `mdbx_check_fs_local()` for the CDROM case on Windows.
 - Fixed a nasty typo of a typename which caused a false `MDBX_CORRUPTED` error in a rare execution path,
   when the size of the thread-ID type is not equal to 8.
 - Fixed Elbrus/E2K LCC 1.26 compiler warnings (memory model for atomic operations, etc).
 - Fixed write-after-free memory corruption on latest `macOS` during finalization/cleanup of thread(s) that executed read transaction(s).
   > The issue was suddenly discovered by a [CI](https://en.wikipedia.org/wiki/Continuous_integration)
   > after adding an iteration with macOS 11 "Big Sur", and then reproduced on a recent release of macOS 12 "Monterey".
   > The issue was never noticed nor reported on macOS 10 "Catalina" nor others.
   > Analysis showed that the problem is caused by a change in the behavior of the system library (internals of dyld and pthread)
   > during thread finalization/cleanup: now the memory allocated for `__thread` variable(s) is released
   > before execution of the registered Thread-Local-Storage destructor(s),
   > thus a TLS-destructor will write-after-free just by a legitimate dereference of any `__thread` variable.
   > This is unexpected crazy-like behavior, since the order of releasing/destroying resources
   > is not the reverse of the acquisition/construction order. Nonetheless, this surprise
   > is now worked around by using atomic compare-and-swap operations on 64-bit signatures/cookies.

Minors:

 - Refined the `release-assets` GNU Make target.
 - Added logging to `mdbx_fetch_sdb()` to help debugging complex DBI-handles use cases.
 - Added an explicit error message from the probe of no-support for `std::filesystem`.
 - Added a contributors "score" table by `git fame` to the generated docs.
 - Added `mdbx_assert_fail()` to the public API (mostly for backtracing).
 - Now C++20 concepts are used/enabled only when `__cpp_lib_concepts >= 202002`.
 - Don't provide nor report package information if used as a CMake subproject.

--------------------------------------------------------------------------------


## v0.11.7 "Resurrected Sarmat" at 2022-04-22

The stable risen release after the Github's intentional malicious disaster.

#### We have migrated to a reliable trusted infrastructure
The origin is now at [GitFlic](https://gitflic.ru/project/erthink/libmdbx),
since on 2022-04-15 the Github administration, without any warning nor
explanation, deleted _libmdbx_ along with a lot of other projects,
simultaneously blocking access for many developers.
For the same reason ~~Github~~ is blacklisted forever.

GitFlic already supports the Russian and English languages and plans to support
more, including Chinese (中文). You are welcome!

New:

 - Added the `tools-static` make target to build statically linked MDBX tools.
 - Support for Microsoft Visual Studio 2022.
 - Support for building by MinGW's make from the command line without CMake.
 - Added the `mdbx::filesystem` C++ API namespace that corresponds to `std::filesystem` or `std::experimental::filesystem`.
 - Created a [website](https://libmdbx.dqdkfa.ru/) for the online auto-generated documentation.
 - Used `https://web.archive.org/web/https://github.com/erthink/libmdbx` for dead (or temporarily lost) resources deleted by ~~Github~~.
 - Added the `--loglevel=` command-line option to the `mdbx_test` tool.
 - Added a few fast smoke-like tests to the CMake builds.

Fixes:

 - Fixed a race between starting a transaction and creating a DBI descriptor that could lead to `SIGSEGV` in the cursor-tracking code.
 - Clarified the description of the `MDBX_EPERM` error returned from `mdbx_env_set_geometry()`.
 - Fixed not promoting the parent transaction to be dirty in case the undo of the geometry update failed during abortion of a nested transaction.
 - Resolved linking issues with `libstdc++fs`/`libc++fs`/`libc++experimental` for C++ `std::filesystem` or `std::experimental::filesystem` for legacy compilers.
 - Added a workaround for GNU Make 3.81 and earlier.
 - Added a workaround for the Elbrus/LCC 1.25 compiler bug with class inline `static constexpr` member fields.
 - [Fixed](https://github.com/ledgerwatch/erigon/issues/3874) a minor assertion regression (only debug builds were affected).
 - Fixed detection of `C++20` concepts accessibility.
 - Fixed detection of Clang's LTO availability for Android.
 - Fixed the extra definition of `_FILE_OFFSET_BITS=64` for Android that is problematic for 32-bit Bionic.
 - Fixed the build for ARM/ARM64 by MSVC.
 - Fixed non-x86 Windows builds with `MDBX_WITHOUT_MSVC_CRT=ON` and `MDBX_BUILD_SHARED_LIBRARY=ON`.

Minors:

 - Resolved minor MSVC warnings: avoid `/INCREMENTAL[:YES]` with `/LTCG`, `/W4` with `/W3`, and the `C5105` warning.
 - Switched to using `MDBX_EPERM` instead of `MDBX_RESULT_TRUE` to indicate that the geometry cannot be updated.
 - Added `NULL` checking during memory allocation inside `mdbx_chk`.
 - Resolved all warnings from MinGW when used without CMake.
 - Added inheritable `target_include_directories()` to `CMakeLists.txt` for easy integration.
 - Added build-time checks and paranoid runtime assertions for the `off_t` arguments of `fcntl()` which are used for locking.
 - Added `-Wno-lto-type-mismatch` to avoid false-positive warnings from old GCC during LTO-enabled builds.
 - Added checking of the TID (system thread id) to avoid hanging on 32-bit Bionic/Android within `pthread_mutex_lock()`.
 - Reworked `MDBX_BUILD_TARGET` for CMake builds.
 - Added `CMAKE_HOST_ARCH` and `CMAKE_HOST_CAN_RUN_EXECUTABLES_BUILT_FOR_TARGET`.

--------------------------------------------------------------------------------


## v0.11.6 at 2022-03-24

The stable release with the complete workaround for an incoherence flaw of the Linux unified page/buffer cache.
Nonetheless, the cause of this trouble may be an issue of the Intel CPU cache/MESI.
See [issue#269](https://libmdbx.dqdkfa.ru/dead-github/issues/269) for more information.

Acknowledgments:

 - [David Bouyssié](https://github.com/david-bouyssie) for [Scala bindings](https://github.com/david-bouyssie/mdbx4s).
 - [Michelangelo Riccobene](https://github.com/mriccobene) for reporting and testing.

Fixes:

 - [Added a complete workaround](https://libmdbx.dqdkfa.ru/dead-github/issues/269) for an incoherence flaw of the Linux unified page/buffer cache.
 - [Fixed](https://libmdbx.dqdkfa.ru/dead-github/issues/272) cursor reuse for read-only transactions.
 - Fixed a copy&paste typo inside `mdbx::cursor::find_multivalue()`.

Minors:

 - Minor refinements of the C++ API for convenience.
 - Minor internal refinements.
 - Added `lib-static` and `lib-shared` targets for make.
 - Added a minor workaround for an AppleClang 13.3 bug.
 - Clarified the error messages of a signature/version mismatch.

--------------------------------------------------------------------------------


## v0.11.5 at 2022-02-23

The release with a temporary hotfix for a flaw of the Linux unified page/buffer cache.
See [issue#269](https://libmdbx.dqdkfa.ru/dead-github/issues/269) for more information.

Acknowledgments:

 - [Simon Leier](https://github.com/leisim) for reporting and testing.
 - [Kai Wetlesen](https://github.com/kaiwetlesen) for [RPMs](http://copr.fedorainfracloud.org/coprs/kwetlesen/libmdbx/).
 - [Tullio Canepa](https://github.com/canepat) for reporting a C++ API issue and contributing.

Fixes:

 - [Added a hotfix](https://libmdbx.dqdkfa.ru/dead-github/issues/269) for a flaw of the Linux unified page/buffer cache.
 - [Fixed/Reworked](https://libmdbx.dqdkfa.ru/dead-github/pull/270) move-assignment operators for the "managed" classes of the C++ API.
 - Fixed a potential `SIGSEGV` while opening a DB with an overridden non-default page size.
 - [Made](https://libmdbx.dqdkfa.ru/dead-github/issues/267) `mdbx_env_open()` idempotent in failure cases.
 - Refined/Fixed page reservation inside `mdbx_update_gc()` to avoid non-reclamation in rare cases.
 - Fixed a typo in the retained-space calculation for the hsr-callback.

Minors:

 - Reworked functions for meta-pages, split off the non-volatile ones.
 - Disentangled C11-atomic fences/barriers and pure-functions (with `__attribute__((__pure__))`) to avoid compiler misoptimization.
 - Fixed hypothetical unaligned access to 64-bit dwords on ARM with `__ARM_FEATURE_UNALIGNED` defined.
 - Reasonable paranoia that makes things clearer for code readers.
 - Minor fixes of Doxygen references, comments, descriptions, etc.

--------------------------------------------------------------------------------


## v0.11.4 at 2022-02-02

The stable release with fixes for large and huge databases sized 4..128 TiB.

Acknowledgments:

 - [Ledgerwatch](https://github.com/ledgerwatch), [Binance](https://github.com/binance-chain) and [Positive Technologies](https://www.ptsecurity.com/) teams for reporting, assistance in investigation and testing.
 - [Alex Sharov](https://github.com/AskAlexSharov) for reporting, testing and providing resources for remote debugging/investigation.
 - [Kris Zyp](https://github.com/kriszyp) for [Deno](https://deno.land/) support.

New features, extensions and improvements:

 - Added treating the `UINT64_MAX` value as the maximum for a given option inside `mdbx_env_set_option()`.
 - Added `to_hex/to_base58/to_base64::output(std::ostream&)` overloads without using temporary string objects as buffers.
 - Added the `--geometry-jitter=YES|no` option to the test framework.
 - Added support for [Deno](https://deno.land/) by [Kris Zyp](https://github.com/kriszyp).

Fixes:

 - Fixed handling of `MDBX_opt_rp_augment_limit` for GC records from huge transactions (Erigon/Akula/Ethereum).
 - [Fixed](https://libmdbx.dqdkfa.ru/dead-github/issues/258) the build on Android (avoid including `sys/sem.h`).
 - [Fixed](https://libmdbx.dqdkfa.ru/dead-github/pull/261) the missing copy assignment operator for `mdbx::move_result`.
 - Fixed a missing `&` for `std::ostream &operator<<()` overloads.
 - Fixed an unexpected `EXDEV` (Cross-device link) error from `mdbx_env_copy()`.
 - Fixed base64 encoding/decoding bugs in the auxiliary C++ API.
 - Fixed an overflow of `pgno_t` during checking PNL on 64-bit platforms.
 - [Fixed](https://libmdbx.dqdkfa.ru/dead-github/issues/260) excessive PNL checking after sort for spilling.
 - Reworked checking of `MAX_PAGENO` and the DB upper-size geometry limit.
 - [Fixed](https://libmdbx.dqdkfa.ru/dead-github/issues/265) the build for some combinations of versions of MSVC and the Windows SDK.

Minors:

 - Added a workaround for the CLANG bug [D79919/PR42445](https://reviews.llvm.org/D79919).
 - Fixed the build test on Android (using a `pthread_barrier_t` stub).
 - Disabled C++20 concepts for CLANG < 14 on Android.
 - Fixed a minor `unused parameter` warning.
 - Added CI for Android.
 - Refined/cleaned up internal logging.
 - Refined line splitting inside hex/base58/base64 encoding to avoid `\n` at the end.
 - Added a workaround for modern libstdc++ with CLANG < 4.x.
 - Relaxed txn-check rules for auxiliary functions.
 - Clarified comments and descriptions, etc.
 - Using the `-fno-semantic-interposition` option to reduce the overhead of calling its own public functions.

--------------------------------------------------------------------------------


## v0.11.3 at 2021-12-31

Acknowledgments:

 - [gcxfd](https://github.com/gcxfd) for reporting, contributing and testing.
 - [장세연 (Чан Се Ен)](https://github.com/sasgas) for reporting and testing.
 - [Alex Sharov](https://github.com/AskAlexSharov) for reporting, testing and providing resources for remote debugging/investigation.

New features, extensions and improvements:

 - [Added](https://libmdbx.dqdkfa.ru/dead-github/issues/236) `mdbx_cursor_get_batch()`.
 - [Added](https://libmdbx.dqdkfa.ru/dead-github/issues/250) `MDBX_SET_UPPERBOUND` (see the sketch after this list).
 - The C++ API is finalized now.
 - The GC update stage has been [significantly sped up](https://libmdbx.dqdkfa.ru/dead-github/issues/254) when fixing huge Erigon transactions (Ethereum ecosystem).
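A hedged sketch of the `MDBX_SET_UPPERBOUND` cursor operation listed above: it positions the cursor at the first key-value pair strictly greater than the given key. Error handling is trimmed, and `txn`/`dbi` are assumed to be already set up:

```c
#include "mdbx.h"
#include <stdio.h>
#include <string.h>

/* Hedged sketch: seek to the first key strictly above `key_str`. */
static void seek_above(MDBX_txn *txn, MDBX_dbi dbi, const char *key_str) {
  MDBX_cursor *cursor;
  if (mdbx_cursor_open(txn, dbi, &cursor) != MDBX_SUCCESS)
    return;

  MDBX_val key = {.iov_base = (void *)key_str, .iov_len = strlen(key_str)};
  MDBX_val data;
  int rc = mdbx_cursor_get(cursor, &key, &data, MDBX_SET_UPPERBOUND);
  if (rc == MDBX_SUCCESS || rc == MDBX_RESULT_TRUE)
    printf("first key above: %.*s\n", (int)key.iov_len, (char *)key.iov_base);
  else if (rc == MDBX_NOTFOUND)
    printf("no keys above the given one\n");

  mdbx_cursor_close(cursor);
}
```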
Fixes:

 - Disabled C++20 concepts for the stupid AppleClang 13.x.
 - Fixed an internal collision of `MDBX_SHRINK_ALLOWED` with `MDBX_ACCEDE`.

Minors:

 - Fixed returning `MDBX_RESULT_TRUE` (unexpected -1) from `mdbx_env_set_option()`.
 - Added `mdbx_env_get_syncbytes()` and `mdbx_env_get_syncperiod()`.
 - [Clarified](https://libmdbx.dqdkfa.ru/dead-github/pull/249) the description of `MDBX_INTEGERKEY`.
 - Reworked/simplified `mdbx_env_sync_internal()`.
 - [Fixed](https://libmdbx.dqdkfa.ru/dead-github/issues/248) an extra assertion inside `mdbx_cursor_put()` for `MDBX_DUPFIXED` cases.
 - Avoiding extra looping inside `mdbx_env_info_ex()`.
 - Explicitly enabled core dumps from the stochastic test scripts on Linux.
 - [Fixed](https://libmdbx.dqdkfa.ru/dead-github/issues/253) `mdbx_override_meta()` to avoid false-positive assertions.
 - For compatibility, reverted returning `MDBX_ENODATA` for some cases.

--------------------------------------------------------------------------------


## v0.11.2 at 2021-12-02

Acknowledgments:

 - [장세연 (Чан Се Ен)](https://github.com/sasgas) for contributing to the C++ API.
 - [Alain Picard](https://github.com/castortech) for [Java bindings](https://github.com/castortech/mdbxjni).
 - [Alex Sharov](https://github.com/AskAlexSharov) for reporting and testing.
 - [Kris Zyp](https://github.com/kriszyp) for reporting and testing.
 - [Artem Vorotnikov](https://github.com/vorot93) for supporting the [Rust wrapper](https://github.com/vorot93/libmdbx-rs).

Fixes:

 - [Fixed compilation](https://libmdbx.dqdkfa.ru/dead-github/pull/239) with `devtoolset-9` on CentOS/RHEL 7.
 - [Fixed an unexpected `MDBX_PROBLEM` error](https://libmdbx.dqdkfa.ru/dead-github/issues/242) because of updating an obsolete meta-page.
 - [Fixed returning the `MDBX_NOTFOUND` error](https://libmdbx.dqdkfa.ru/dead-github/issues/243) in case an inexact value is found for the `MDBX_GET_BOTH` operation.
 - [Fixed compilation](https://libmdbx.dqdkfa.ru/dead-github/issues/245) without kernel/libc-devel headers.

Minors:

 - Fixed `constexpr`-related macros for legacy compilers.
 - Allowed defining `CMAKE_CXX_STANDARD` using an environment variable.
 - Simplified collecting of page-operation statistics.
 - Added the `MDBX_FORCE_BUILD_AS_MAIN_PROJECT` cmake option.
 - Removed the unneeded `#undef P_DIRTY`.

--------------------------------------------------------------------------------


## v0.11.1 at 2021-10-23

### Backward compatibility break:

The database format signature has been changed to prevent
forward-interoperability with previous releases, which may lead to a
[false-positive diagnosis of database corruption](https://libmdbx.dqdkfa.ru/dead-github/issues/238)
due to flaws of old library versions.

This change is mostly invisible:

 - previous versions are unable to read/write new DBs;
 - but the new release is able to handle old DBs and will silently upgrade them.

Acknowledgments:

 - [Alex Sharov](https://github.com/AskAlexSharov) for reporting and testing.


********************************************************************************


## v0.10.5 at 2021-10-13 (obsolete, please use v0.11.1)

Unfortunately, `v0.10.5` accidentally came out not fully compatible with previous releases:

 - `v0.10.5` can read/process DBs created by previous releases, i.e. backward compatibility is provided;
 - however, previous releases may end up in a falsely-corrupted state with a DB that was touched by `v0.10.5`, i.e. forward compatibility is broken for `v0.10.4` and earlier.

This cannot be fixed, since it would require fixing the past versions, and the result of that would just be the current version.
Therefore, it is recommended to use `v0.11.1` instead of `v0.10.5`.

Acknowledgments:

 - [Noel Kuntze](https://github.com/Thermi) for immediate bug reporting.

Fixes:

 - Fixed an unaligned access regression after the `#pragma pack` fix for modern compilers.
 - Added a UBSAN-test to CI to avoid regression(s) similar to the lately fixed one.
 - Fixed the possibility of meta-page clashes after manually switching to a particular meta-page using the `mdbx_chk` utility.

Minors:

 - Refined handling of weak or invalid meta-pages while opening a DB.
 - Refined providing information on the last committed modification transaction's ID for the `@MAIN` and `@GC` sub-databases.

--------------------------------------------------------------------------------


## v0.10.4 at 2021-10-10

Acknowledgments:

 - [Artem Vorotnikov](https://github.com/vorot93) for supporting the [Rust wrapper](https://github.com/vorot93/libmdbx-rs).
 - [Andrew Ashikhmin](https://github.com/yperbasis) for contributing to the C++ API.

Fixes:

 - Fixed the possibility of GC-update looping during transaction commit (no public issue, since the problem was discovered inside [Positive Technologies](https://www.ptsecurity.ru)).
 - Fixed `#pragma pack` to avoid provoking some compilers to generate code with [unaligned access](https://libmdbx.dqdkfa.ru/dead-github/issues/235).
 - Fixed `noexcept` for the potentially throwing `txn::put()` of the C++ API.

Minors:

 - Added a stochastic test script for checking small-transaction cases.
 - Removed an extra transaction commit/restart inside the test framework.
 - Fixed a too-small (single page) default DB shrink threshold in debug builds.

--------------------------------------------------------------------------------


## v0.10.3 at 2021-08-27

Acknowledgments:

 - [Francisco Vallarino](https://github.com/fjvallarino) for [Haskell bindings for libmdbx](https://hackage.haskell.org/package/libmdbx).
 - [Alex Sharov](https://github.com/AskAlexSharov) for reporting and testing.
 - [Andrea Lanfranchi](https://github.com/AndreaLanfranchi) for contributing.

Extensions and improvements:

 - Added `cursor::erase()` overloads for `key` and for `key-value`.
 - Resolved minor Coverity Scan issues (no fixes but some hints/comments were added).
 - Resolved minor UndefinedBehaviorSanitizer issues (no fixes but some workarounds were added).

Fixes:

 - Always set up `madvise` while opening a DB (fixes https://libmdbx.dqdkfa.ru/dead-github/issues/231).
 - Fixed checking the legacy `P_DIRTY` flag (`0x10`) for nested/sub-pages.

Minors:

 - Fixed getting the revision number from the middle of history during amalgamation (GNU Makefile).
 - Fixed the search for GCC tools for LTO (CMake scripts).
 - Fixed/reordered the dirs list for the search for CLANG tools for LTO (CMake scripts).
 - Fixes/workarounds for CLANG < 9.x.
 - Fixed a CMake warning about compatibility with 3.8.2.

--------------------------------------------------------------------------------


## v0.10.2 at 2021-07-26

Acknowledgments:

 - [Alex Sharov](https://github.com/AskAlexSharov) for reporting and testing.
 - [Andrea Lanfranchi](https://github.com/AndreaLanfranchi) for reporting bugs.
 - [Lionel Debroux](https://github.com/debrouxl) for fuzzing tests and reporting bugs.
 - [Sergey Fedotov](https://github.com/SergeyFromHell/) for [`node-mdbx` NodeJS bindings](https://www.npmjs.com/package/node-mdbx).
 - [Kris Zyp](https://github.com/kriszyp) for [`lmdbx-store` NodeJS bindings](https://github.com/kriszyp/lmdbx-store).
 - [Noel Kuntze](https://github.com/Thermi) for [draft Python bindings](https://libmdbx.dqdkfa.ru/dead-github/commits/python-bindings).
New features, extensions and improvements:

 - [Allow predefining/overriding `MDBX_BUILD_TIMESTAMP` for builds reproducibility](https://libmdbx.dqdkfa.ru/dead-github/issues/201).
 - Added options support for the `long-stochastic` script.
 - Avoided the `MDBX_TXN_FULL` error for large transactions when possible.
 - The `MDBX_READERS_LIMIT` increased to `32767`.
 - Raise `MDBX_TOO_LARGE` under Valgrind/ASAN if the DB being opened is 100 times larger than RAM (to avoid hangs and OOM).
 - Minimized the size of poisoned/unpoisoned regions to avoid Valgrind/ASAN getting stuck.
 - Added more workarounds for QEMU for testing builds for 32-bit platforms and the Alpha and Sparc architectures.
 - `mdbx_chk` now skips iteration & checking of DB records if the corresponding page-tree is corrupted (to avoid `SIGSEGV`, ASAN failures, etc).
 - Added more checks for [rare/fuzzing corruption cases](https://libmdbx.dqdkfa.ru/dead-github/issues/217).

Backward compatibility break:

 - Use the file `VERSION.txt` for version information instead of `VERSION`, to avoid collision with `#include <version>`.
 - Renamed `slice::from/to_FOO_bytes()` to `slice::envisage_from/to_FOO_length()`.
 - Renamed the `MDBX_TEST_EXTRA` make variable to `MDBX_SMOKE_EXTRA`.
 - Some details of the C++ API have been changed for subsequent freezing.

Fixes:

 - Fixed excess meta-page checks in case `mdbx_chk` is called to check the DB for a specific meta-page, which could prevent switching to the selected meta-page even if the check passed without errors.
 - Fixed [recursive use of SRW-lock on Windows caused by the `MDBX_NOTLS` option](https://libmdbx.dqdkfa.ru/dead-github/issues/203).
 - Fixed [logging a warning during a new DB creation](https://libmdbx.dqdkfa.ru/dead-github/issues/205).
 - Fixed [a false-negative `mdbx_cursor_eof()` result](https://libmdbx.dqdkfa.ru/dead-github/issues/207).
 - Fixed [`make install` with a non-GNU `install` utility (OSX, BSD)](https://libmdbx.dqdkfa.ru/dead-github/issues/208).
 - Fixed [installation by `CMake` in special cases by completely using `GNUInstallDirs`' variables](https://libmdbx.dqdkfa.ru/dead-github/issues/209).
 - Fixed [a C++ Buffer issue with `std::string` and alignment](https://libmdbx.dqdkfa.ru/dead-github/issues/191).
 - Fixed `safe64_reset()` for platforms without atomic 64-bit compare-and-swap.
 - Fixed hang/shutdown on big-endian platforms without `__cxa_thread_atexit()`.
 - Fixed [using bad meta-pages if the DB was partially/recoverably corrupted](https://libmdbx.dqdkfa.ru/dead-github/issues/217).
 - Fixed an extra `noexcept` for `buffer::assign_reference()`.
 - Fixed `bootid` generation on Windows for the case of a system time change.
 - Fixed [a test framework keygen-related issue](https://libmdbx.dqdkfa.ru/dead-github/issues/127).

--------------------------------------------------------------------------------


## v0.10.1 at 2021-06-01

Acknowledgments:

 - [Alexey Akhunov](https://github.com/AlexeyAkhunov) and [Alex Sharov](https://github.com/AskAlexSharov) for bug reporting and testing.
 - [Andrea Lanfranchi](https://github.com/AndreaLanfranchi) for bug reporting and testing related to WSL2.

New features:

 - Added the `-p` option to the `mdbx_stat` utility for printing page-operation statistics.
 - Added explicit checking for and warning about using unfit github archives.
 - Added fallback from [OFD locking](https://bit.ly/3yFRtYC) to legacy non-OFD POSIX file locks on an `EINVAL` error.
 - Added the [Plan 9](https://en.wikipedia.org/wiki/9P_(protocol)) network file system to the whitelist for the ability to open a DB in exclusive mode.
 - Support for opening, from a WSL2 environment, a DB hosted on a Windows drive and mounted via [DrvFs](https://docs.microsoft.com/it-it/archive/blogs/wsl/wsl-file-system-support#drvfs) (i.e. by Plan 9 noted above).

Fixes:

 - Fixed minor "foo not used" warnings from modern C++ compilers when building the C++ part of the library.
 - Fixed confusing/messy errors when building the library from unfit github archives (https://libmdbx.dqdkfa.ru/dead-github/issues/197).
 - Fixed an `#elsif` typo.
 - Fixed a rare unexpected `MDBX_PROBLEM` error during altering data in huge transactions due to wrong spilling/ousting of dirty pages (https://libmdbx.dqdkfa.ru/dead-github/issues/195).
 - Re-fixed WSL1/WSL2 detection with distinguishing (https://libmdbx.dqdkfa.ru/dead-github/issues/97).

--------------------------------------------------------------------------------


## v0.10.0 at 2021-05-09

Acknowledgments:

 - [Mahlon E. Smith](https://github.com/mahlonsmith) for [Ruby bindings](https://rubygems.org/gems/mdbx/).
 - [Alex Sharov](https://github.com/AskAlexSharov) for [mdbx-go](https://github.com/torquem-ch/mdbx-go), bug reporting and testing.
 - [Artem Vorotnikov](https://github.com/vorot93) for bug reporting and PR.
 - [Paolo Rebuffo](https://www.linkedin.com/in/paolo-rebuffo-8255766/), [Alexey Akhunov](https://github.com/AlexeyAkhunov) and Mark Grosberg for donations.
 - [Noel Kuntze](https://github.com/Thermi) for preliminary [Python bindings](https://github.com/Thermi/libmdbx/tree/python-bindings).

New features:

 - Added `mdbx_env_set_option()` and `mdbx_env_get_option()` for controlling
   various runtime options for an environment (the announcement of this feature was missed in previous news).
 - Added the `MDBX_DISABLE_PAGECHECKS` build option to disable some checks to reduce overhead
   and lower the detection probability of database corruption to values closer to LMDB.
   `MDBX_DISABLE_PAGECHECKS=1` provides a performance boost of about 10% in CRUD scenarios,
   and together with the `MDBX_ENV_CHECKPID=0` and `MDBX_TXN_CHECKOWNER=0` options can yield
   up to 30% more performance compared to LMDB.
 - Using a floating-point (exponentially quantized) representation for the internal 16-bit values
   of the grow step and shrink threshold when they are huge (https://libmdbx.dqdkfa.ru/dead-github/issues/166).
   To minimize the impact on compatibility, only the odd values inside the upper half
   of the range (i.e. 32769..65533) are used for the new representation.
 - Added the `mdbx_drop` command-line tool, similar to LMDB's, to purge or delete (sub)database(s).
 - [Ruby bindings](https://rubygems.org/gems/mdbx/) are available now by [Mahlon E. Smith](https://github.com/mahlonsmith).
 - Added the `MDBX_ENABLE_MADVISE` build option, which controls the use of POSIX `madvise()` hints and friends.
 - The internal node sizes were refined, resulting in a reduction of large/overflow pages in some use cases
   and a slight increase of the limits for key sizes to ≈½ of the page size.
 - Added to the `mdbx_chk` output the number of keys/items on pages.
 - Added explicit `install-strip` and `install-no-strip` targets to the `Makefile` (https://libmdbx.dqdkfa.ru/dead-github/pull/180).
 - Major rework of page splitting (af9b7b560505684249b76730997f9e00614b8113) for:
   - an "auto-appending" feature upon insertion for both ascending and
     descending key sequences;
     as a result, the optimality of page filling
     increases significantly (more densely, less slackness) while
     inserting ordered sequences of keys;
   - "splitting at middle" to make the page tree more balanced on average.
 - Added `mdbx_get_sysraminfo()` to the API (see the sketch after this list).
 - Added guessing of a reasonable maximum DB size for the default upper limit of geometry (https://libmdbx.dqdkfa.ru/dead-github/issues/183).
 - Major rework of the internal labeling of dirty pages (958fd5b9479f52f2124ab7e83c6b18b04b0e7dda) for
   a "transparent spilling" feature, with the gist of making dirty pages
   ready for spilling (writing to disk) without further altering them.
   Thus, in the `MDBX_WRITEMAP` mode the OS kernel is able to oust dirty pages
   to the DB file without further penalty during transaction commit.
   As a result, page swapping and I/O could be significantly reduced during extra-large transactions and/or lack of memory.
 - Minimized reading of leaf-pages during dropping of subDB(s) and nested trees.
 - Major rework of the spilling of dirty pages to support the [LRU](https://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU))
   policy and prioritization for large/overflow pages.
 - Statistics of page operations (split, merge, copy, spill, etc) are now available through `mdbx_env_info_ex()`.
 - Auto-setup of the limit for the length of the dirty-pages list (the `MDBX_opt_txn_dp_limit` option).
 - Support for `make options` to list the available build options.
 - Support for `make help` to list the available make targets.
 - Silent `make` build by default.
 - Preliminary [Python bindings](https://github.com/Thermi/libmdbx/tree/python-bindings) are available now
   by [Noel Kuntze](https://github.com/Thermi) (https://libmdbx.dqdkfa.ru/dead-github/issues/147).
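A hedged sketch of `mdbx_get_sysraminfo()` from the list above. It assumes the `(page_size, total_pages, avail_pages)` out-parameter signature; verify against `mdbx.h`:

```c
#include "mdbx.h"
#include <stdio.h>

/* Hedged sketch: query the system RAM geometry in pages. */
static void print_sysram(void) {
  intptr_t page_size, total_pages, avail_pages;
  int rc = mdbx_get_sysraminfo(&page_size, &total_pages, &avail_pages);
  if (rc != MDBX_SUCCESS) {
    fprintf(stderr, "sysraminfo: %s\n", mdbx_strerror(rc));
    return;
  }
  printf("RAM: page %lld bytes, total %lld pages, available %lld pages\n",
         (long long)page_size, (long long)total_pages,
         (long long)avail_pages);
}
```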
Backward compatibility break:

 - The `MDBX_AVOID_CRT` build option was renamed to `MDBX_WITHOUT_MSVC_CRT`.
   This option is only relevant when building for Windows.
 - `mdbx_env_stat()` always, and `mdbx_env_stat_ex()` when called with a zeroed transaction parameter,
   now internally start a temporary read transaction and thus may return the `MDBX_BAD_RSLOT` error.
   So, just never use the deprecated `mdbx_env_stat()` and call `mdbx_env_stat_ex()` with a transaction parameter.
 - The build option `MDBX_CONFIG_MANUAL_TLS_CALLBACK` was removed; now just a non-zero value of
   the `MDBX_MANUAL_MODULE_HANDLER` macro indicates the requirement to manually call `mdbx_module_handler()`
   when loading libraries and applications use statically linked libmdbx on obsolete Windows versions.

Fixes:

 - Fixed a performance regression due to non-optimal C11 atomics usage (https://libmdbx.dqdkfa.ru/dead-github/issues/160).
 - Fixed "reincarnation" of a subDB after its deletion (https://libmdbx.dqdkfa.ru/dead-github/issues/168).
 - Fixed (disallowed) implicit subDB deletion via operations on `@MAIN`'s DBI-handle.
 - Fixed a crash of `mdbx_env_info_ex()` in case of a call for a non-open environment (https://libmdbx.dqdkfa.ru/dead-github/issues/171).
 - Fixed the selection/adjustment of values inside `mdbx_env_set_geometry()` for implicit out-of-range cases (https://libmdbx.dqdkfa.ru/dead-github/issues/170).
 - Fixed `mdbx_env_set_option()` for setting the initial and limit size of the dirty-page list (https://libmdbx.dqdkfa.ru/dead-github/issues/179).
 - Fixed an unreasonably huge default upper limit for the DB geometry (https://libmdbx.dqdkfa.ru/dead-github/issues/183).
 - Fixed the `constexpr` specifier for `slice::invalid()`.
 - Fixed (no)readahead auto-handling (https://libmdbx.dqdkfa.ru/dead-github/issues/164).
 - Fixed the non-amalgamated build for Windows.
 - Switched to using Heap-functions instead of LocalAlloc/LocalFree on Windows.
 - Fixed `mdbx_env_stat_ex()` to return statistics of the whole environment instead of MainDB only (https://libmdbx.dqdkfa.ru/dead-github/issues/190).
 - Fixed building by GCC 4.8.5 (added a workaround for a preprocessor bug).
 - Fixed building the C++ part for iOS <= 13.0 (unavailability of `std::filesystem::path`).
 - Fixed building for Windows target versions prior to Windows Vista (`WIN32_WINNT < 0x0600`).
 - Fixed building by MinGW for Windows (https://libmdbx.dqdkfa.ru/dead-github/issues/155).


********************************************************************************


## v0.9.3 at 2021-02-02

Acknowledgments:

 - [Mahlon E. Smith](http://www.martini.nu/) for the [FreeBSD port of libmdbx](https://svnweb.freebsd.org/ports/head/databases/mdbx/).
 - [장세연](http://www.castis.com) for bug fixing and PR.
 - [Clément Renault](https://github.com/Kerollmops/heed) for [Heed](https://github.com/Kerollmops/heed), a fully typed Rust wrapper.
 - [Alex Sharov](https://github.com/AskAlexSharov) for bug reporting.
 - [Noel Kuntze](https://github.com/Thermi) for bug reporting.

Removed options and features:

 - Dropped the `MDBX_HUGE_TRANSACTIONS` build-option (no longer required).

New features:

 - A package for FreeBSD is available now by Mahlon E. Smith.
 - New API functions to get/set various options (https://libmdbx.dqdkfa.ru/dead-github/issues/128):
   - the maximum number of named databases for the environment;
   - the maximum number of threads/reader slots;
   - the threshold (since the last unsteady commit) to force flushing the data buffers to disk;
   - the relative period (since the last unsteady commit) to force flushing the data buffers to disk;
   - the limit to grow a list of reclaimed/recycled page numbers for finding a sequence of contiguous pages for large data items;
   - the limit to grow a cache of dirty pages for reuse in the current transaction;
   - the limit of pre-allocated memory items for dirty pages;
   - the limit of dirty pages for a write transaction;
   - the initial allocation size for the dirty-pages list of a write transaction;
   - the maximal part of dirty pages that may be spilled when necessary;
   - the minimal part of dirty pages that should be spilled when necessary;
   - how much of the parent transaction's dirty pages will be spilled when starting each child transaction;
 - Unlimited/Dynamic size of retired and dirty page lists (https://libmdbx.dqdkfa.ru/dead-github/issues/123).
 - Added the `-p` option (purge subDB before loading) to the `mdbx_load` tool.
 - Reworked spilling of large transactions and committing of nested transactions:
   - the page spilling code was reworked to avoid the flaws and bugs inherited from LMDB;
   - the limit for the number of dirty pages is now controllable at runtime;
   - spilled pages, including overflow/large pages, can now be reused and refunded/compactified in nested transactions;
   - more effective refunding/compactification, especially for the loose page cache.
 - Added the `MDBX_ENABLE_REFUND` and `MDBX_PNL_ASCENDING` internal/advanced build options.
 - Added the `mdbx_default_pagesize()` function.
 - Better support for architectures with a weak/relaxed memory consistency model (ARM, AARCH64, PPC, MIPS, RISC-V, etc) by means of [C11 atomics](https://en.cppreference.com/w/c/atomic).
 - Sped up page-number lists and dirty-page lists (https://libmdbx.dqdkfa.ru/dead-github/issues/132).
 - Added the `LIBMDBX_NO_EXPORTS_LEGACY_API` build option.

Fixes:

 - Fixed missing cleanup (null assignment) in the C++ commit/abort (https://libmdbx.dqdkfa.ru/dead-github/pull/143).
 - Fixed `mdbx_realloc()` for the case of nullptr with `MDBX_WITHOUT_MSVC_CRT=ON` for Windows.
 - Fixed the possibility to use invalid and renewed (closed & re-opened, dropped & re-created) DBI-handles (https://libmdbx.dqdkfa.ru/dead-github/issues/146).
 - Fixed 4-byte-aligned access to 64-bit integers, including access to the `bootid` meta-page field (https://libmdbx.dqdkfa.ru/dead-github/issues/153).
 - Fixed a minor/potential memory leak during page flushing and unspilling.
 - Fixed handling of the states of cursors and subDBs for nested transactions.
 - Fixed a page leak in the extra-rare case of the list of retired pages changing during GC update on transaction commit.
 - Fixed assertions to avoid false-positive UB detection by CLANG/LLVM (https://libmdbx.dqdkfa.ru/dead-github/issues/153).
 - Fixed `MDBX_TXN_FULL` and a regressive `MDBX_KEYEXIST` during large transaction commit with `MDBX_LIFORECLAIM` (https://libmdbx.dqdkfa.ru/dead-github/issues/123).
 - Fixed auto-recovery (`weak->steady` with the same boot-id) when the database size at the last weak checkpoint is larger than at the last steady checkpoint.
 - Fixed operation on systems with unusually small/large page sizes, including PowerPC (https://libmdbx.dqdkfa.ru/dead-github/issues/157).

--------------------------------------------------------------------------------


## v0.9.2 at 2020-11-27

Acknowledgments:

 - Jens Alfke (Mobile Architect at [Couchbase](https://www.couchbase.com/)) for [NimDBX](https://github.com/snej/nimdbx).
 - Clément Renault (CTO at [MeiliSearch](https://www.meilisearch.com/)) for [mdbx-rs](https://github.com/Kerollmops/mdbx-rs).
 - Alex Sharov (Go-Lang Tech Lead at [TurboGeth/Ethereum](https://ethereum.org/)) for extreme test cases and bug reporting.
 - George Hazan (CTO at [Miranda NG](https://www.miranda-ng.org/)) for bug reporting.
 - [Positive Technologies](https://www.ptsecurity.com/) for funding and [The Standoff](https://standoff365.com/).

Added features:

 - Provided a package for [buildroot](https://buildroot.org/).
 - A binding for Nim is [available](https://github.com/snej/nimdbx) now by Jens Alfke.
 - Added `mdbx_env_delete()` for deletion of an environment's files in a proper and multiprocess-safe way.
 - Added `mdbx_txn_commit_ex()` with collecting latency information (see the sketch after this list).
 - Fast completion of pure nested transactions.
 - Added the `LIBMDBX_INLINE_API` macro and inline versions of some API functions.
 - Added the `mdbx_cursor_copy()` function.
 - Extended tests for checking cursor tracking.
 - Added the `MDBX_SET_LOWERBOUND` operation for `mdbx_cursor_get()` (see the sketch after this list).
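A hedged sketch of two of the additions from this list: `MDBX_SET_LOWERBOUND` (position at the first key greater than or equal to the given one; `MDBX_RESULT_TRUE` is assumed to signal an inexact match) and `mdbx_txn_commit_ex()` with latency collection. Only the `whole` field of `MDBX_commit_latency` is used here, assuming it holds the total commit time in 16.16 fixed-point seconds:

```c
#include "mdbx.h"
#include <stdio.h>

/* Hedged sketch: lower-bound seek, then commit with latency report. */
static int lowerbound_then_commit(MDBX_txn *txn, MDBX_dbi dbi) {
  MDBX_cursor *cursor;
  int rc = mdbx_cursor_open(txn, dbi, &cursor);
  if (rc != MDBX_SUCCESS)
    return rc;

  MDBX_val key = {.iov_base = "foo", .iov_len = 3}, data;
  rc = mdbx_cursor_get(cursor, &key, &data, MDBX_SET_LOWERBOUND);
  if (rc == MDBX_SUCCESS || rc == MDBX_RESULT_TRUE)
    printf("found key >= \"foo\": %.*s\n", (int)key.iov_len,
           (const char *)key.iov_base);
  mdbx_cursor_close(cursor);

  MDBX_commit_latency latency;
  rc = mdbx_txn_commit_ex(txn, &latency);
  if (rc == MDBX_SUCCESS)
    printf("commit took %.3f seconds\n", latency.whole / 65536.0);
  return rc;
}
```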
Fixes:

 - Fixed the missing installation of `mdbx.h++`.
 - Fixed the use of the obsolete `__noreturn`.
 - Fixed the use of the `yield` instruction on ARM if unsupported.
 - Added a pthread workaround for a buggy toolchain/cmake/buildroot.
 - Fixed the use of `pthread_yield()` for non-GLIBC.
 - Fixed the use of `RegGetValueA()` on Windows 2000/XP.
 - Fixed the use of `GetTickCount64()` on Windows 2000/XP.
 - Fixed opening a DB on network shares (in the exclusive mode).
 - Fixed copy&paste typos.
 - Fixed a minor false-positive GCC warning.
 - Added a workaround for the broken `DEFINE_ENUM_FLAG_OPERATORS` from the Windows SDK.
 - Fixed the cursor state after multimap/dupsort repeated deletes (https://libmdbx.dqdkfa.ru/dead-github/issues/121).
 - Added `SIGPIPE` suppression for the internal thread during `mdbx_env_copy()`.
 - Fixed an extra-rare `MDBX_KEY_EXIST` error during `mdbx_commit()` (https://libmdbx.dqdkfa.ru/dead-github/issues/131).
 - Fixed spilled-pages checking (https://libmdbx.dqdkfa.ru/dead-github/issues/126).
 - Fixed `mdbx_load` for the 'plain text' and without `-s name` cases (https://libmdbx.dqdkfa.ru/dead-github/issues/136).
 - Fixed save/restore/commit of cursors for nested transactions.
 - Fixed the cursor state in rare/special cases (move next beyond end-of-data, after deletion and so on).
 - Added a workaround for MSVC 19.28 (Visual Studio 16.8) (but it may still hang during compilation).
 - Fixed paranoid Clang C++ UB for bitwise operations with flags defined by enums.
 - Fixed large-pages checking (for compatibility and to avoid false-positive errors from `mdbx_chk`).
 - Added a workaround for Wine (https://github.com/miranda-ng/miranda-ng/issues/1209).
 - Fixed the `ERROR_NOT_SUPPORTED` error while opening a DB by UNC pathnames (https://github.com/miranda-ng/miranda-ng/issues/2627).
 - Added handling of the `EXCEPTION_POSSIBLE_DEADLOCK` condition for Windows.

--------------------------------------------------------------------------------


## v0.9.1 2020-09-30

Added features:

 - Preliminary C++ API with support for C++17 polymorphic allocators.
 - [Online C++ API reference](https://libmdbx.dqdkfa.ru/) by Doxygen.
 - Quick reference for Insert/Update/Delete operations.
 - Explicit `MDBX_SYNC_DURABLE` added to the sync modes for API clarity.
 - Explicit `MDBX_ALLDUPS` and `MDBX_UPSERT` for API clarity.
 - Support for read-transaction preparation (the `MDBX_TXN_RDONLY_PREPARE` flag).
 - Support for cursor preparation/(pre)allocation and reuse (the `mdbx_cursor_create()` and `mdbx_cursor_bind()` functions; see the sketch after this list).
 - Support for checking the database using a specified meta-page (see `mdbx_chk -h`).
 - Support for turning to a specific meta-page after checking (see `mdbx_chk -h`).
 - Support for explicit reader-thread (de)registration.
 - The `mdbx_txn_break()` function to explicitly mark a transaction as broken.
 - Improved handling of corrupted databases by the `mdbx_chk` utility and the `mdbx_walk_tree()` function.
 - Improved DB corruption detection by checking the parent-page-txnid.
 - Improved opening of large DBs (> 4Gb) from 32-bit code.
 - Provided `pure-function` and `const-function` attributes to the C API.
 - Support for a user-settable context for transactions & cursors.
 - Revised API and documentation related to the Handle-Slow-Readers callback feature.
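A hedged sketch of the cursor-reuse pattern from the list above: allocate one cursor up front via `mdbx_cursor_create()`, then re-bind it to each new transaction with `mdbx_cursor_bind()` instead of opening/closing a cursor per transaction. The `(context)` and `(txn, cursor, dbi)` signatures are assumptions to verify against `mdbx.h`:

```c
#include "mdbx.h"

/* Hedged sketch: reuse one pre-allocated cursor across transactions. */
static int reuse_cursor(MDBX_env *env, MDBX_dbi dbi) {
  MDBX_cursor *cursor = mdbx_cursor_create(/*context*/ NULL);
  if (!cursor)
    return MDBX_ENOMEM;

  for (int i = 0; i < 3; ++i) {
    MDBX_txn *txn;
    int rc = mdbx_txn_begin(env, NULL, MDBX_TXN_RDONLY, &txn);
    if (rc != MDBX_SUCCESS)
      break;
    /* Attach the pre-allocated cursor to this transaction. */
    rc = mdbx_cursor_bind(txn, cursor, dbi);
    if (rc == MDBX_SUCCESS) {
      MDBX_val key, data;
      rc = mdbx_cursor_get(cursor, &key, &data, MDBX_FIRST);
      /* ... iterate as needed ... */
    }
    mdbx_txn_abort(txn);
  }
  mdbx_cursor_close(cursor);
  return MDBX_SUCCESS;
}
```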
- - Workaround to avoid an infinite loop of the 'nested' testcase on MIPS under QEMU.
- - Fix a lot of typos & spelling (thanks to Josh Soref for the PR).
- - Fix `getopt()` messages for Windows (thanks to Andrey Sporaw for reporting).
- - Fix MSVC compiler version requirements (thanks to Andrey Sporaw for reporting).
- - Workarounds for QEMU's bugs to run tests for a cross-built library under QEMU.
- - The C++ compiler is now optional when building with CMake.
-
---------------------------------------------------------------------------------
-
-
-## v0.9.0 2020-07-31 (not a release, but API changes)
-
-Added features:
-
- - [Online C API reference](https://libmdbx.dqdkfa.ru/) by Doxygen.
- - Separated enums for environment, sub-database, transaction, copying and data-update flags.
-
-Deprecated functions and flags:
-
- - Usage of custom comparators and `mdbx_dbi_open_ex()` is deprecated, since such databases cannot be checked by the `mdbx_chk` utility.
-   Please use the value-to-key functions to provide keys that are compatible with the built-in libmdbx comparators.
-
-
-********************************************************************************
-
-
-## 2020-07-06
-
- - Added support for multi-opening the same DB in a process with SysV locking (BSD).
- - Fixed warnings & minor issues for the LCC compiler (E2K).
- - Enabled simultaneously opening the same database from processes with and without the `MDBX_WRITEMAP` option.
- - Added key-to-value, `mdbx_get_keycmp()` and `mdbx_get_datacmp()` functions (helpful for avoiding custom comparators).
- - Added the `ENABLE_UBSAN` CMake option to enable the UndefinedBehaviorSanitizer from GCC/CLANG.
- - Workaround for a [CLANG bug](https://bugs.llvm.org/show_bug.cgi?id=43275).
- - Return `MDBX_CORRUPTED` in case all meta-pages are weak and there is no other error.
- - Refined mode bits while auto-creating the LCK-file.
- - Avoid unnecessary database file re-mapping in case the geometry was changed by another process(es).
-   From the user's point of view, the `MDBX_UNABLE_EXTEND_MAPSIZE` error will now be returned less frequently and only when using the DB in the current process really requires it to be reopened.
- - On-the-fly remapping of the database file was implemented.
-   Remapping with a change of address is now performed automatically if there are no dependent readers in the current process.
-
-
-## 2020-06-12
-
- - Minor change to versioning. The last number in the version now means the number of commits since the last release/tag.
- - Provide a ChangeLog file.
- - Fix for using libmdbx as a C-only sub-project with CMake.
- - Fix `mdbx_env_set_geometry()` for the case when it is called on an opened environment outside of a write transaction.
- - Add support for huge transactions and the `MDBX_HUGE_TRANSACTIONS` build-option (default `OFF`).
- - Refine LTO (link-time optimization) for clang.
- - Force-enable exception handling for MSVC (the `/EHsc` option).
-
-
-## 2020-06-05
-
- - Support for Android/Bionic.
- - Support for iOS.
- - Auto-handling of `MDBX_NOSUBDIR` while opening any existing database.
- - Engage github-actions to make release assets.
- - Clarify the API description.
- - Extended keygen cases in the stochastic test.
- - Fix fetching of the first/lower key from a LEAF2-page during page merge.
- - Fix a missing comma in the array of error messages.
- - Fix div-by-zero during copy-with-compaction for non-resizable environments.
- - Fixes & enhancements for custom comparators.
- - Fix the `MDBX_WITHOUT_MSVC_CRT` option and the missing `ntdll.def`.
- - Fix `mdbx_env_close()` to work correctly when called concurrently from several threads.
- - Fix a null-deref in ASAN-enabled builds when opening the environment with an error and/or read-only.
- - Fix AddressSanitizer errors after closing the environment.
- - Fix/workaround to avoid GCC 10.x pedantic warnings.
- - Fix using `ENODATA` for FreeBSD.
- - Avoid invalidating DBI-handle(s) when they are merely closed.
- - Avoid using `pwritev()` for single writes (up to 10% speedup for some kernels & scenarios).
- - Avoid `MDBX_UTTERLY_NOSYNC` as the result of a flags merge.
- - Add the `mdbx_dbi_dupsort_depthmask()` function.
- - Add the `MDBX_CP_FORCE_RESIZABLE` option.
- - Add the deprecated `MDBX_MAP_RESIZED` for compatibility.
- - Add the `MDBX_BUILD_TOOLS` option (default `ON`).
- - Refine `mdbx_dbi_open_ex()` to safely open the same handle concurrently from different threads.
- - Truncate the lck-file during environment closing, so a zero-length lck-file indicates that the environment was closed properly.
- - Refine `mdbx_update_gc()` for huge transactions with small database page sizes.
- - Extend dump/load to support all MDBX attributes.
- - Avoid upserting the same key-value data; fix related assertions.
- - Rework min/max length checking for keys & values.
- - Check the order of keys on all pages during checking.
- - Support the `CFLAGS_EXTRA` make-option for convenience.
- - Preserve the last txnid while copying with compactification.
- - Auto-reset a running transaction in `mdbx_txn_renew()`.
- - Automatically abort an errored transaction in `mdbx_txn_commit()`.
- - Auto-choose the page size for large databases.
- - Rearrange source files; rework the build and options support by CMake.
- - Workaround for WSL1 (Windows Subsystem for Linux).
- - Refine install/uninstall targets.
- - Support for Valgrind 3.14 and later.
- - Add `check-analyzer`, `check-ubsan`, `check-asan` and `check-leak` targets to the Makefile.
- - Minor fix/workaround to avoid UBSAN traps for `memcpy(ptr, NULL, 0)`.
- - Avoid some GCC-analyzer false-positive warnings.
-
-
-## 2020-03-18
-
- - Workarounds for Wine (the Windows compatibility layer for Linux).
- - `MDBX_MAP_RESIZED` renamed to `MDBX_UNABLE_EXTEND_MAPSIZE`.
- - Clarify the API description, fix typos.
- - Speed up runtime checks in debug/checked builds.
- - Added checking for overlapping read/write transactions within the same thread; added the `MDBX_TXN_OVERLAPPING` error and the `MDBX_DBG_LEGACY_OVERLAP` option.
- - Added the `mdbx_key_from_jsonInteger()`, `mdbx_key_from_double()`, `mdbx_key_from_float()`, `mdbx_key_from_int64()` and `mdbx_key_from_int32()` functions. See `mdbx.h` for descriptions.
- - Fix compatibility (use zero for an invalid DBI).
- - Refine/clarify error messages.
- - Avoid extra "bad txn" error messages from `mdbx_chk` when the DB is corrupted.
-
-
-## 2020-01-21
-
- - Fix the `mdbx_load` utility for custom comparators.
- - Fix checks related to the `MDBX_APPEND` flag inside `mdbx_cursor_put()`.
- - Refine/fix `dbi_bind()` internals.
- - Refine/fix handling of `STATUS_CONFLICTING_ADDRESSES`.
- - Rework the `MDBX_DBG_DUMP` option to avoid disk I/O performance degradation.
- - Add built-in help to the test tool.
- - Fix `mdbx_env_set_geometry()` for large page sizes.
- - Clarify the API description & comments, fix typos.
-
-
-## 2019-12-31
-
- - Fix returning `MDBX_RESULT_TRUE` from `page_alloc()`.
- - Fix a false-positive ASAN issue.
- - Fix an assertion for the `MDBX_NOTLS` option.
- - Rework the `MADV_DONTNEED` threshold.
- - Fix the `mdbx_chk` utility to skip checking some numbers when walking the B-tree is disabled.
- - Use the page's `mp_txnid` for basic integrity checking.
- - Add the `MDBX_FORCE_ASSERTIONS` build-time option.
- - Rework `MDBX_DBG_DUMP` to avoid performance degradation.
- - Rename `MDBX_NOSYNC` to `MDBX_SAFE_NOSYNC` for clarity.
- - Interpret `ERROR_ACCESS_DENIED` from `OpenProcess()` as "process exists".
- - Avoid using `FILE_FLAG_NO_BUFFERING` for compatibility with small database pages.
- - Added an install section for CMake.
-
-
-## 2019-12-02
-
- - Support for Mac OSX, FreeBSD, NetBSD, OpenBSD, DragonFly BSD, OpenSolaris, OpenIndiana (AIX and HP-UX pending).
- - Use bootid for rollback decisions.
- - Count retired pages and extend transaction info.
- - Add the `MDBX_ACCEDE` flag for database opening.
- - Use OFD-locks and tracking for in-process multi-opening.
- - Hot backup into a pipe.
- - Support for CMake & amalgamated sources.
- - Fastest internal sort implementation.
- - New internal dirty-list implementation with lazy sorting.
- - Support for lazy sync-to-disk with polling.
- - Extended key length.
- - Last-update transaction number for each sub-database.
- - Automatic read-ahead enabling/disabling.
- - More auto-compactification.
- - Use the `-fsanitize=undefined` and `-Wpedantic` options.
- - Rework page merging.
- - Nested transactions.
- - API description.
- - Checking for non-local filesystems to avoid DB corruption.
-
-
-********************************************************************************
-
-
-For early changes see the git commit history.
diff --git a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/README.md b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/README.md
deleted file mode 100644
index 46e1c5492022..000000000000
--- a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/README.md
+++ /dev/null
@@ -1,797 +0,0 @@
-
-
-### The origin has been migrated to [GitFlic](https://gitflic.ru/project/erthink/libmdbx)
-since, on 2022-04-15, the Github administration, without any warning
-nor explanation, deleted _libmdbx_ along with a lot of other projects,
-simultaneously blocking access for many developers.
-For the same reason ~~Github~~ is blacklisted forever.
-
-GitFlic's developers plan to support other languages,
-including English and Chinese, in the near future.
-
--------------------------------------------------------------------------------
-
-*The Future will (be) [Positive](https://www.ptsecurity.com). Everything will be fine.*
-
-> Please refer to the online [documentation](https://libmdbx.dqdkfa.ru)
-> with the [`C` API description](https://libmdbx.dqdkfa.ru/group__c__api.html)
-> and pay attention to the [`C++` API](https://gitflic.ru/project/erthink/libmdbx/blob?file=mdbx.h%2B%2B#line-num-1).
-
-> Questions, feedback and suggestions are welcome in the [Telegram group](https://t.me/libmdbx).
-
-> For NEWS take a look at the [ChangeLog](https://gitflic.ru/project/erthink/libmdbx/blob?file=ChangeLog.md)
-> or the [TODO](https://gitflic.ru/project/erthink/libmdbx/blob?file=TODO.md).
-
-
-libmdbx
-========
-
-_libmdbx_ is an extremely fast, compact, powerful, embedded, transactional
-[key-value database](https://en.wikipedia.org/wiki/Key-value_database),
-with a [permissive license](https://gitflic.ru/project/erthink/libmdbx/blob?file=LICENSE).
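-
-To give a flavour of the C API documented below, here is a minimal illustrative
-sketch of the classic create/open/put/get cycle. Error handling is omitted, and
-the `./testdb` path and hard-coded lengths are assumptions of this example
-rather than anything prescribed by _libmdbx_:
-
-```c
-#include "mdbx.h"
-#include <stdio.h>
-
-int main(void) {
-  MDBX_env *env = NULL;
-  MDBX_txn *txn = NULL;
-  MDBX_dbi dbi = 0;
-
-  mdbx_env_create(&env);
-  mdbx_env_open(env, "./testdb", MDBX_NOSUBDIR, 0644);
-
-  /* a single write transaction: writers are fully serialized */
-  mdbx_txn_begin(env, NULL, 0, &txn);
-  mdbx_dbi_open(txn, NULL, 0, &dbi);          /* the default (unnamed) key-value map */
-  MDBX_val key = { (void *)"hello", 5 }, data = { (void *)"world", 5 };
-  mdbx_put(txn, dbi, &key, &data, 0);
-  mdbx_txn_commit(txn);
-
-  /* read transactions do not block the writer, and vice versa */
-  mdbx_txn_begin(env, NULL, MDBX_TXN_RDONLY, &txn);
-  if (mdbx_get(txn, dbi, &key, &data) == MDBX_SUCCESS)
-    printf("%.*s\n", (int)data.iov_len, (const char *)data.iov_base);
-  mdbx_txn_abort(txn);                        /* read-only txns are simply aborted */
-
-  mdbx_env_close(env);
-  return 0;
-}
-```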
-_libmdbx_ has a specific set of properties and capabilities,
-focused on creating unique lightweight solutions.
-
-1. Allows **a swarm of multi-threaded processes to
-[ACID](https://en.wikipedia.org/wiki/ACID)ly read and update** several
-key-value [maps](https://en.wikipedia.org/wiki/Associative_array) and
-[multimaps](https://en.wikipedia.org/wiki/Multimap) in a locally-shared
-database.
-
-2. Provides **extraordinary performance**, minimal overhead through
-[Memory-Mapping](https://en.wikipedia.org/wiki/Memory-mapped_file) and
-`O(log N)` operation costs by virtue of the [B+
-tree](https://en.wikipedia.org/wiki/B%2B_tree).
-
-3. Requires **no maintenance and no crash recovery** since it doesn't use
-[WAL](https://en.wikipedia.org/wiki/Write-ahead_logging), but that might
-be a caveat for write-intensive workloads with durability requirements.
-
-4. **Compact and friendly for fully embedding**. Only ≈25KLOC of `C11`,
-≈64K of x86 binary code for the core, no internal threads nor server process(es),
-yet it implements a simplified variant of the [Berkeley
-DB](https://en.wikipedia.org/wiki/Berkeley_DB) and
-[dbm](https://en.wikipedia.org/wiki/DBM_(computing)) API.
-
-5. Enforces [serializability](https://en.wikipedia.org/wiki/Serializability) for
-writers by just a single
-[mutex](https://en.wikipedia.org/wiki/Mutual_exclusion) and affords
-[wait-free](https://en.wikipedia.org/wiki/Non-blocking_algorithm#Wait-freedom)
-access for parallel readers without atomic/interlocked operations, while
-**writing and reading transactions do not block each other**.
-
-6. **Guarantees data integrity** after a crash unless this was explicitly
-neglected in favour of write performance.
-
-7. Supports Linux, Windows, MacOS, Android, iOS, FreeBSD, DragonFly, Solaris,
-OpenSolaris, OpenIndiana, NetBSD, OpenBSD and other systems compliant with
-**POSIX.1-2008**.
-
-Historically, _libmdbx_ is a deeply revised and extended descendant of the amazing
-[Lightning Memory-Mapped Database](https://en.wikipedia.org/wiki/Lightning_Memory-Mapped_Database).
-_libmdbx_ inherits all benefits from _LMDB_, but resolves some issues and adds [a set of improvements](#improvements-beyond-lmdb).
-
-### MithrilDB and Future
-
-The next version is under non-public development from scratch and will be
-released as **MithrilDB** and `libmithrildb` for libraries & packages.
-The admittedly mythical [Mithril](https://en.wikipedia.org/wiki/Mithril)
-resembles silver but is stronger and lighter than steel. Therefore
-_MithrilDB_ is a fittingly relevant name.
-
-_MithrilDB_ is radically different from _libmdbx_ in its new database
-format and C++20-based API. The goal of this revolution is to provide
-a clearer and more robust API, to add more features, and to give the database
-new valuable properties. All fundamental architectural problems of libmdbx/LMDB
-have been solved there, but for now the active development has been
-suspended for three main reasons:
-
-1. For now _libmdbx_ is «mostly» enough for all [our products](https://www.ptsecurity.com/ww-en/products/),
-and I'm busy with the development of replication for scalability.
-2. Waiting for a fresh [Elbrus CPU](https://wiki.elbrus.ru/) of the [e2k architecture](https://en.wikipedia.org/wiki/Elbrus_2000),
-especially with hardware acceleration of [Streebog](https://en.wikipedia.org/wiki/Streebog) and
-[Kuznyechik](https://en.wikipedia.org/wiki/Kuznyechik), which are required for the Merkle tree, etc.
-3. The expectation of needs and opportunities due to the wide use of NVDIMM (aka persistent memory),
-modern NVMe and [Angara](https://ru.wikipedia.org/wiki/Ангара_(интерконнект)).
-
-However, _MithrilDB_ will not be available for countries unfriendly to
-Russia (i.e. those that acceded to the sanctions, devil adepts and/or NATO). It is
-not yet known whether such a restriction will be implemented only through
-licensing and support, or whether the source code will not be open at all.
-Basically we are not inclined to allow our work to contribute to the
-profit that goes to weapons that kill our relatives and friends.
-NO OPTIONS.
-
-Nonetheless, I try not to make any promises regarding _MithrilDB_ until release.
-
-Contrary to _MithrilDB_, _libmdbx_ will forever be free and open source.
-Moreover, with high-quality support whenever possible. "You become responsible,
-forever, for what you have tamed." So we will continue
-to comply with the original open license and the principles of
-constructive cooperation, in spite of outright Github sabotage and
-sanctions. I will also try to keep (not drop) Windows support, despite it
-being an unused, obsolete technology for us.
-
-```
-$ objdump -f -h -j .text libmdbx.so
-
-  libmdbx.so:     file format elf64-e2k
-  architecture: elbrus-v6:64, flags 0x00000150:
-  HAS_SYMS, DYNAMIC, D_PAGED
-  start address 0x0000000000021680
-
-  Sections:
-  Idx Name          Size      VMA               LMA               File off  Algn
-  10  .text         000ddd28  0000000000021680  0000000000021680  00021680  2**3
-                    CONTENTS, ALLOC, LOAD, READONLY, CODE
-
-$ cc --version
-  lcc:1.26.12:Jun-05-2022:e2k-v6-linux
-  gcc (GCC) 9.3.0 compatible
-```
-
------
-
-## Table of Contents
-- [Characteristics](#characteristics)
-  - [Features](#features)
-  - [Limitations](#limitations)
-  - [Gotchas](#gotchas)
-  - [Comparison with other databases](#comparison-with-other-databases)
-  - [Improvements beyond LMDB](#improvements-beyond-lmdb)
-  - [History & Acknowledgments](#history)
-- [Usage](#usage)
-  - [Building and Testing](#building-and-testing)
-  - [API description](#api-description)
-  - [Bindings](#bindings)
-- [Performance comparison](#performance-comparison)
-  - [Integral performance](#integral-performance)
-  - [Read scalability](#read-scalability)
-  - [Sync-write mode](#sync-write-mode)
-  - [Lazy-write mode](#lazy-write-mode)
-  - [Async-write mode](#async-write-mode)
-  - [Cost comparison](#cost-comparison)
-
-# Characteristics
-
-## Features
-
-- Key-value data model, keys are always sorted.
-
-- Fully [ACID](https://en.wikipedia.org/wiki/ACID)-compliant, thanks to
-[MVCC](https://en.wikipedia.org/wiki/Multiversion_concurrency_control)
-and [CoW](https://en.wikipedia.org/wiki/Copy-on-write).
-
-- Multiple key-value sub-databases within a single datafile.
-
-- Range lookups, including range query estimation.
-
-- Efficient support for short fixed-length keys, including native 32/64-bit integers.
-
-- Ultra-efficient support for [multimaps](https://en.wikipedia.org/wiki/Multimap). Multi-values are sorted, searchable and iterable. Keys are stored without duplication.
-
-- Data is [memory-mapped](https://en.wikipedia.org/wiki/Memory-mapped_file) and accessible directly/zero-copy. Traversal of database records is extremely fast.
-
-- Transactions for readers and writers, which do not block each other.
-
-- Writes are strongly serialized. No transaction conflicts nor deadlocks.
-
-- Readers are [non-blocking](https://en.wikipedia.org/wiki/Non-blocking_algorithm), notwithstanding [snapshot isolation](https://en.wikipedia.org/wiki/Snapshot_isolation).
-
-- Nested write transactions.
-
-- Reads scale linearly across CPUs.
-
-- Continuous zero-overhead database compactification.
-
-- Automatic on-the-fly database size adjustment.
-
-- Customizable database page size.
-
-- `O(log N)` cost of lookup, insert, update, and delete operations by virtue of [B+ tree characteristics](https://en.wikipedia.org/wiki/B%2B_tree#Characteristics).
-
-- Online hot backup.
-
-- Append operation for efficient bulk insertion of pre-sorted data.
-
-- No [WAL](https://en.wikipedia.org/wiki/Write-ahead_logging) nor any
-transaction journal. No crash recovery needed. No maintenance is required.
-
-- No internal cache and/or memory management; all of that is done by basic OS services.
-
-## Limitations
-
-- **Page size**: a power of 2, minimum `256` (mostly for testing), maximum `65536` bytes, default `4096` bytes.
-- **Key size**: minimum `0`, maximum ≈½ pagesize (`2022` bytes for default 4K pagesize, `32742` bytes for 64K pagesize).
-- **Value size**: minimum `0`, maximum `2146435072` (`0x7FF00000`) bytes for maps, ≈½ pagesize for multimaps (`2022` bytes for default 4K pagesize, `32742` bytes for 64K pagesize).
-- **Write transaction size**: up to `1327217884` pages (`4.944272` TiB for default 4K pagesize, `79.108351` TiB for 64K pagesize).
-- **Database size**: up to `2147483648` pages (≈`8.0` TiB for default 4K pagesize, ≈`128.0` TiB for 64K pagesize).
-- **Maximum sub-databases**: `32765`.
-
-## Gotchas
-
-1. There cannot be more than one writer at a time, i.e. no more than one write transaction at a time.
-
-2. _libmdbx_ is based on a [B+ tree](https://en.wikipedia.org/wiki/B%2B_tree), so access to database pages is mostly random.
-Thus SSDs provide a significant performance boost over spinning disks for large databases.
-
-3. _libmdbx_ uses [shadow paging](https://en.wikipedia.org/wiki/Shadow_paging) instead of a [WAL](https://en.wikipedia.org/wiki/Write-ahead_logging).
-Thus syncing data to disk might be a bottleneck for write-intensive workloads.
-
-4. _libmdbx_ uses [copy-on-write](https://en.wikipedia.org/wiki/Copy-on-write) for [snapshot isolation](https://en.wikipedia.org/wiki/Snapshot_isolation) during updates,
-but read transactions prevent the recycling of old retired/freed pages, since they may still read them. Thus, altering data during a parallel
-long-lived read transaction will increase the process working set, may exhaust the free database space,
-can make the database grow quickly, and will result in performance degradation.
-Try to avoid long-running read transactions.
-
-5. _libmdbx_ is extraordinarily fast and provides minimal overhead for data access,
-so you should reconsider using brute-force techniques and double-check your code.
-On the one hand, in the case of _libmdbx_, a simple linear search may be more profitable than complex indexes.
-On the other hand, if you do something suboptimally, you may notice the detrimental effect only on sufficiently large data.
-
-## Comparison with other databases
-For now please refer to the ["BoltDB comparison with other
-databases" chapter](https://github.com/coreos/bbolt#comparison-with-other-databases),
-which is also (mostly) applicable to _libmdbx_.
-
-Improvements beyond LMDB
-========================
-
-_libmdbx_ is superior to the legendary _[LMDB](https://symas.com/lmdb/)_ in
-terms of features and reliability, and not inferior in performance. In
-comparison to _LMDB_, _libmdbx_ makes things "just work" perfectly and
-out-of-the-box, rather than silently and catastrophically breaking down.
-The list
-below is pruned down to the improvements most notable and obvious from
-the user's point of view.
-
-## Added Features
-
-1. Keys can be more than 2 times longer than in _LMDB_.
- > For a DB with the default page size _libmdbx_ supports keys up to 2022 bytes,
- > and up to 32742 bytes for a 64K page size. _LMDB_ allows key sizes up to
- > 511 bytes and may silently lose data with large values.
-
-2. Up to 30% faster than _LMDB_ in [CRUD](https://en.wikipedia.org/wiki/Create,_read,_update_and_delete) benchmarks.
- > Benchmarks of the in-[tmpfs](https://en.wikipedia.org/wiki/Tmpfs) scenarios,
- > which test the speed of the engine itself, showed that _libmdbx_ is 10-20% faster than _LMDB_,
- > and up to 30% faster when _libmdbx_ is compiled with specific build options
- > which downgrade several runtime checks to match LMDB behaviour.
- >
- > These and other results can be easily reproduced with [ioArena](https://abf.io/erthink/ioarena.git) just by the `make bench-quartet` command,
- > including comparisons with [RocksDB](https://en.wikipedia.org/wiki/RocksDB)
- > and [WiredTiger](https://en.wikipedia.org/wiki/WiredTiger).
-
-3. Automatic on-the-fly database size adjustment, both increment and reduction.
- > _libmdbx_ manages the database size according to parameters specified
- > via the `mdbx_env_set_geometry()` function,
- > which include the growth step and the truncation threshold.
- >
- > Unfortunately, on-the-fly database size adjustment doesn't work under [Wine](https://en.wikipedia.org/wiki/Wine_(software))
- > due to its internal limitations and unimplemented functions, i.e. the `MDBX_UNABLE_EXTEND_MAPSIZE` error will be returned.
-
-4. Automatic continuous zero-overhead database compactification.
- > During each commit _libmdbx_ merges freed pages which are adjacent to the unallocated area
- > at the end of the file, and then truncates the unused space once enough of it has accumulated.
-
-5. The same database format for 32- and 64-bit builds.
- > The _libmdbx_ database format depends only on the [endianness](https://en.wikipedia.org/wiki/Endianness) but not on the [bitness](https://en.wiktionary.org/wiki/bitness).
-
-6. LIFO policy for Garbage Collection recycling. This can significantly increase write performance due to the write-back disk cache, up to several times in a best-case scenario.
- > LIFO means that the pages which became unused most recently are taken for reuse first.
- > Therefore the loop of database page circulation becomes as short as possible.
- > In other words, the set of pages that are (over)written in memory and on disk during a series of write transactions will be as small as possible.
- > This creates ideal conditions for the efficiency of a battery-backed or flash-backed disk cache.
-
-7. Fast estimation of range query result volume, i.e. how many items can
-be found between a `KEY1` and a `KEY2`. This is a prerequisite for building
-and/or optimizing query execution plans.
- > _libmdbx_ performs a rough estimate based on the common B-tree pages of the paths from the root to the corresponding keys.
-
-8. The `mdbx_chk` utility for database integrity checks.
-Since version 0.9.1, the utility supports checking the database using any of the three meta-pages, and switching to a specific one.
-
-9. Support for opening databases in exclusive mode, including on a network share.
-
-10. Zero-length keys and values.
-
-11. Ability to determine whether particular data is on a dirty page
-or not, which allows avoiding copy-out before updates.
-
-12. Extended information about the whole database, sub-databases, transactions and reader enumeration.
- > _libmdbx_ provides a lot of information, including dirty and leftover pages
- > for a write transaction, plus reading lag and holdover space for read transactions.
-
-13. Extended update and delete operations.
- > _libmdbx_ allows performing an update or delete _at once_ while getting the previous value,
- > and addressing a particular item of a multi-value with the same key.
-
-14. Useful runtime options for tuning the engine to the application's requirements and specific use cases.
-
-15. Automated steady sync-to-disk upon several thresholds and/or a timeout, via cheap polling.
-
-16. Sequence generation and three persistent 64-bit markers.
-
-17. A Handle-Slow-Readers callback to resolve database full/overflow issues due to long-lived read transaction(s).
-
-18. Ability to determine whether the cursor is pointed to a key-value
-pair, to the first, to the last, or not set to anything.
-
-## Other fixes and specifics
-
-1. Fixed more than 10 significant errors, in particular: page leaks,
-wrong sub-database statistics, segfaults in several conditions,
-a non-optimal page merge strategy, updating an existing record with
-a change in data size (including for multimaps), etc.
-
-2. All cursors can be reused and should be closed explicitly,
-regardless of whether they were opened within a write or read transaction.
-
-3. Opening database handles is free of race conditions, and
-pre-opening is not needed.
-
-4. The `MDBX_EMULTIVAL` error is returned in case of an ambiguous update or delete.
-
-5. Guarantee of database integrity even in asynchronous unordered write-to-disk mode.
- > _libmdbx_ proposes an additional trade-off via `MDBX_SAFE_NOSYNC` with an append-like manner for updates,
- > which avoids database corruption after a system crash, contrary to LMDB.
- > Nevertheless, the `MDBX_UTTERLY_NOSYNC` mode is available to match LMDB's behaviour for `MDB_NOSYNC`.
-
-6. On **MacOS & iOS** the `fcntl(F_FULLFSYNC)` syscall is used _by
-default_ to synchronize data with the disk, as this is [the only way to
-guarantee data
-durability](https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/fsync.2.html)
-in case of power failure. Unfortunately, in scenarios with high write
-intensity, the use of `F_FULLFSYNC` significantly degrades performance
-compared to LMDB, where the `fsync()` syscall is used. Therefore,
-_libmdbx_ allows you to override this behavior by defining the
-`MDBX_OSX_SPEED_INSTEADOF_DURABILITY=1` option while building the library.
-
-7. On **Windows** the `LockFileEx()` syscall is used for locking, since
-it allows placing the database on network drives, and provides protection
-against incompetent user actions (aka
-[poka-yoke](https://en.wikipedia.org/wiki/Poka-yoke)). Therefore
-_libmdbx_ may lag slightly behind LMDB in performance tests, since LMDB
-uses named mutexes.
-
-# History
-
-Historically, _libmdbx_ is a deeply revised and extended descendant of the
-[Lightning Memory-Mapped Database](https://en.wikipedia.org/wiki/Lightning_Memory-Mapped_Database).
-At first the development was carried out within the
-[ReOpenLDAP](https://web.archive.org/web/https://github.com/erthink/ReOpenLDAP) project. About a
-year later _libmdbx_ was separated into a standalone project, which was
-[presented at the Highload++ 2015
-conference](http://www.highload.ru/2015/abstracts/1831.html).
-
-Since 2017 _libmdbx_ has been used in [Fast Positive Tables](https://gitflic.ru/project/erthink/libfpta),
-and its development is funded by [Positive Technologies](https://www.ptsecurity.com).
-
-On 2022-04-15 the Github administration, without any warning nor
-explanation, deleted _libmdbx_ along with a lot of other projects,
-simultaneously blocking access for many developers. Therefore on
-2022-04-21 we migrated to a reliable trusted infrastructure.
-The origin is now at [GitFlic](https://gitflic.ru/project/erthink/libmdbx)
-with a backup at [ABF by ROSA Lab](https://abf.rosalinux.ru/erthink/libmdbx).
-For the same reason ~~Github~~ is blacklisted forever.
-
-## Acknowledgments
-Howard Chu is the author of LMDB, from which
-_libmdbx_ originated in 2015.
-
-Martin Hedenfalk is the author of the `btree.c` code, which
-was used to begin the development of LMDB.
-
--------------------------------------------------------------------------------
-
-Usage
-=====
-
-Currently, libmdbx is only available in
-[source code](https://en.wikipedia.org/wiki/Source_code) form.
-Package support for common Linux distributions is planned for the future,
-after the release of version 1.0.
-
-## Source code embedding
-
-_libmdbx_ provides two official ways for integration in source code form:
-
-1. Using the amalgamated source code, which is available in the [releases section](https://gitflic.ru/project/erthink/libmdbx/release) on GitFlic.
- > The amalgamated source code includes all files required to build and
- > use _libmdbx_, but not for testing _libmdbx_ itself.
- > Besides the releases, amalgamated sources can be created at any time from an original clone of the git
- > repository on Linux by executing `make dist`. As a result, the desired
- > set of files will be formed in the `dist` subdirectory.
-
-2. Adding the complete source code as a `git submodule` from the [origin git repository](https://gitflic.ru/project/erthink/libmdbx) on GitFlic.
- > This allows you to build both _libmdbx_ and its testing tool.
- > On the other hand, this way requires you to pull git tags, and to use a C++11 compiler for the test tool.
-
-_**Please, avoid using any other techniques.**_ Otherwise, at least
-don't ask for support and don't name such chimeras `libmdbx`.
-
-## Building and Testing
-
-Both the amalgamated and the original source code can be built using
-[CMake](https://cmake.org/) or [GNU
-Make](https://www.gnu.org/software/make/) with
-[bash](https://en.wikipedia.org/wiki/Bash_(Unix_shell)). All build paths
-are completely traditional and have minimal prerequisites like
-`build-essential`, i.e. a non-obsolete C/C++ compiler and an
-[SDK](https://en.wikipedia.org/wiki/Software_development_kit) for the
-target platform. Obviously you also need the build tools themselves, i.e. `git`,
-`cmake` or GNU `make` with `bash`. For your convenience, `make help`
-and `make options` are also available for listing existing targets
-and build options respectively.
-
-The only significant peculiarity is that git tags are required
-to build from the complete (not amalgamated) source code.
-Executing **`git fetch --tags --force --prune`** is enough to get them,
-and `--unshallow` or `--update-shallow` is required for the shallow-clone case.
-
-So just use CMake or GNU Make in your habitual manner, and feel free to
-file an issue or make a pull request in case something is
-unexpected or broken.
-
-### Testing
-The amalgamated source code does not contain any tests, for several reasons.
-Please read [the explanation](https://libmdbx.dqdkfa.ru/dead-github/issues/214#issuecomment-870717981) and don't ask to alter this.
-So for testing _libmdbx_ itself you need the full source code, i.e. a clone of the git repository; there is no other option.
-
-The full source code of _libmdbx_ has a [`test` subdirectory](https://gitflic.ru/project/erthink/libmdbx/tree/master/test) with a minimalistic test "framework".
-It holds the source code of `mdbx_test`, a console utility with a set of command-line options that allow constructing and running reasonably thorough test scenarios.
-This test utility is intended for _libmdbx_'s developers for testing the library itself, not for use by end users.
-Therefore, only basic information is provided:
-
- - There are a few CRUD-based test cases (hill, TTL, nested, append, jitter, etc.),
-   which can be combined to test concurrent operations within a shared database in a multi-process environment.
-   This is the `basic` test scenario.
- - The `Makefile` provides several self-described targets for testing: `smoke`, `test`, `check`, `memcheck`, `test-valgrind`,
-   `test-asan`, `test-leak`, `test-ubsan`, `cross-gcc`, `cross-qemu`, `gcc-analyzer`, `smoke-fault`, `smoke-singleprocess`,
-   `test-singleprocess`, `long-test`. Please run `make --help` if in doubt.
- - In addition to the `mdbx_test` utility, there is the script [`long_stochastic.sh`](https://gitflic.ru/project/erthink/libmdbx/blob/master/test/long_stochastic.sh),
-   which calls `mdbx_test` while going through a set of modes and options, gradually increasing the number of operations and the size of transactions.
-   This script is used for most of the automatic testing, including `Makefile` targets and Continuous Integration.
- - Brief information about the available command-line options is given by `--help`.
-   However, you should dive into the source code to learn everything; there is no other option.
-
-Anyway, no matter how thoroughly _libmdbx_ is tested, you should rely only on your own tests, for a few reasons:
-
-1. Most use cases are unique.
-   So there is no warranty that your use case was properly tested, even though _libmdbx_'s tests engage a stochastic approach.
-2. If there are problems, then your test will, on the one hand, help verify whether you are using _libmdbx_ correctly,
-   and on the other hand allow you to reproduce the problem and insure against regressions in the future.
-3. Actually, you should rely only on what you have checked yourself, or take the risk.
-
-### Common important details
-
-#### Build reproducibility
-By default _libmdbx_ tracks the build time via the `MDBX_BUILD_TIMESTAMP` build option and macro.
-So for [reproducible builds](https://en.wikipedia.org/wiki/Reproducible_builds) you should predefine/override it to a known fixed string value.
-For instance:
-
- - for a reproducible build with make: `make MDBX_BUILD_TIMESTAMP=unknown ` ...
- - or during configuration by CMake: `cmake -DMDBX_BUILD_TIMESTAMP:STRING=unknown ` ...
-
-Of course, in addition to this, your toolchain must ensure the reproducibility of builds.
-For more information please refer to [reproducible-builds.org](https://reproducible-builds.org/).
-
-#### Containers
-There are no special traits nor quirks if you use libmdbx ONLY inside a single container.
-But in cross-container cases, or with a mix of host and container(s), the two major things below MUST be
-guaranteed:
-
-1. Coherence of the memory-mapping content and a unified page cache inside the OS kernel for the host and all container(s) operating on a DB.
-Basically this means there must be only a single physical copy of each memory-mapped DB page in system memory.
-
-2. Uniqueness of [PID](https://en.wikipedia.org/wiki/Process_identifier) values and/or a common space for them:
- - for POSIX systems: PID uniqueness for all processes operating on a DB,
-   i.e. `--pid=host` is required to run DB-aware processes inside Docker,
-   or, without host interaction, `--pid=container:` with the same name/id.
- - for non-POSIX (i.e. Windows) systems: inter-visibility of process handles,
-   i.e. `OpenProcess(SYNCHRONIZE, ..., PID)` must return a reasonable error,
-   including `ERROR_ACCESS_DENIED`,
-   but not `ERROR_INVALID_PARAMETER` as for an invalid/non-existent PID.
-
-#### DSO/DLL unloading and destructors of Thread-Local-Storage objects
-When building _libmdbx_ as a shared library, or using static _libmdbx_ as
-part of another dynamic library, it is advisable to make sure that your
-system correctly calls the destructors of
-Thread-Local-Storage objects when unloading dynamic libraries.
-
-If this is not the case, then unloading a dynamic-link library with
-_libmdbx_ code inside can result in either a resource leak or a crash
-due to destructors being called from an already unloaded DSO/DLL object. The
-problem can only manifest in a multithreaded application that unloads
-shared dynamic libraries with _libmdbx_ code inside
-after using _libmdbx_. It is known that TLS destructors are properly
-maintained in the following cases:
-
-- On all modern versions of Windows (Windows 7 and later).
-
-- On systems with the
-[`__cxa_thread_atexit_impl()`](https://sourceware.org/glibc/wiki/Destructor%20support%20for%20thread_local%20variables)
-function in the standard C library, including systems with GNU libc
-version 2.18 and later.
-
-- On systems with libpthread/NPTL from GNU libc with bug fixes
-[#21031](https://sourceware.org/bugzilla/show_bug.cgi?id=21031) and
-[#21032](https://sourceware.org/bugzilla/show_bug.cgi?id=21032), or
-where there are no similar bugs in the pthreads implementation.
-
-### Linux and other platforms with GNU Make
-To build the library it is enough to execute `make all` in the source code
-directory, and `make check` to execute the basic tests.
-
-If the `make` installed on the system is not GNU Make, there will be a
-lot of errors from make when trying to build. In this case, perhaps you
-should use `gmake` instead of `make`, or even `gnu-make`, etc.
-
-### FreeBSD and related platforms
-As a rule, on BSD and its derivatives the default is Berkeley Make, and
-[Bash](https://en.wikipedia.org/wiki/Bash_(Unix_shell)) is not installed.
-
-So you need to install the required components: GNU Make, Bash, and C and C++
-compilers compatible with GCC or CLANG. After that, to build the
-library, it is enough to execute `gmake all` (or `make all`) in the
-source code directory, and `gmake check` (or `make check`) to run
-the basic tests.
-
-### Windows
-To build _libmdbx_ on Windows, the _original_ CMake and [Microsoft Visual
-Studio 2019](https://en.wikipedia.org/wiki/Microsoft_Visual_Studio) are
-recommended. Please use recent versions of CMake, Visual Studio and the Windows
-SDK to avoid trouble with C11 support and the `alignas()` feature.
-
-For building with MinGW, version 10.2 or more recent coupled with a modern CMake is required.
-So it is recommended to use [chocolatey](https://chocolatey.org/) to install and/or update them.
-
-Other ways to build are potentially possible, but are not supported and will not be.
-The `CMakeLists.txt` or `GNUMakefile` scripts will probably need to be modified accordingly.
-When using other methods, do not forget to add `ntdll.lib` to the linking step.
-
-It should be noted that effort was made in _libmdbx_ to avoid
-runtime dependencies on the CRT and other MSVC libraries.
-For this it is enough to pass the `-DMDBX_WITHOUT_MSVC_CRT:BOOL=ON` option
-when configuring with CMake.
-
-An example of running a basic test script can be found in the
-[CI-script](appveyor.yml) for [AppVeyor](https://www.appveyor.com/). To
-run the [long stochastic test scenario](test/long_stochastic.sh),
-[bash](https://en.wikipedia.org/wiki/Bash_(Unix_shell)) is required, and
-for such testing it is recommended to place the test data on a
-[RAM-disk](https://en.wikipedia.org/wiki/RAM_drive).
-
-### Windows Subsystem for Linux
-_libmdbx_ can be used in a [WSL2](https://en.wikipedia.org/wiki/Windows_Subsystem_for_Linux#WSL_2)
-but NOT in a [WSL1](https://en.wikipedia.org/wiki/Windows_Subsystem_for_Linux#WSL_1) environment.
-This is a consequence of fundamental shortcomings of _WSL1_ and cannot be fixed.
-To avoid data loss, _libmdbx_ returns the `ENOLCK` (37, "No record locks available")
-error when opening the database in a _WSL1_ environment.
-
-### MacOS
-The current [native build tools](https://en.wikipedia.org/wiki/Xcode) for
-MacOS include GNU Make, CLANG and an outdated version of Bash.
-Therefore, to build the library, it is enough to run `make all` in the
-source code directory, and run `make check` to execute the basic
-tests. If something goes wrong, it is recommended to install
-[Homebrew](https://brew.sh/) and try again.
-
-To run the [long stochastic test scenario](test/long_stochastic.sh), you
-will need to install the current (not outdated) version of
-[Bash](https://en.wikipedia.org/wiki/Bash_(Unix_shell)). To do this, we
-recommend that you install [Homebrew](https://brew.sh/) and then execute
-`brew install bash`.
-
-### Android
-We recommend using CMake to build _libmdbx_ for Android.
-Please refer to the [official guide](https://developer.android.com/studio/projects/add-native-code).
-
-### iOS
-To build _libmdbx_ for iOS, we recommend using CMake with the
-["toolchain file"](https://cmake.org/cmake/help/latest/variable/CMAKE_TOOLCHAIN_FILE.html)
-from the [ios-cmake](https://github.com/leetal/ios-cmake) project.
-
-## API description
-
-Please refer to the online [_libmdbx_ API reference](https://libmdbx.dqdkfa.ru/docs)
-and/or see the [mdbx.h++](mdbx.h%2B%2B) and [mdbx.h](mdbx.h) headers.
-
-Bindings
-========
-
-| Runtime | Repo | Author |
-| ------- | ------ | ------ |
-| Scala | [mdbx4s](https://github.com/david-bouyssie/mdbx4s) | [David Bouyssié](https://github.com/david-bouyssie) |
-| Haskell | [libmdbx-hs](https://hackage.haskell.org/package/libmdbx) | [Francisco Vallarino](https://github.com/fjvallarino) |
-| NodeJS, [Deno](https://deno.land/) | [lmdbx-js](https://github.com/kriszyp/lmdbx-js) | [Kris Zyp](https://github.com/kriszyp/) |
-| NodeJS | [node-mdbx](https://www.npmjs.com/package/node-mdbx/) | [Сергей Федотов](mailto:sergey.fedotov@corp.mail.ru) |
-| Ruby | [ruby-mdbx](https://rubygems.org/gems/mdbx/) | [Mahlon E. Smith](https://github.com/mahlonsmith) |
-| Go | [mdbx-go](https://github.com/torquem-ch/mdbx-go) | [Alex Sharov](https://github.com/AskAlexSharov) |
-| [Nim](https://en.wikipedia.org/wiki/Nim_(programming_language)) | [NimDBX](https://github.com/snej/nimdbx) | [Jens Alfke](https://github.com/snej) |
-| Lua | [lua-libmdbx](https://github.com/mah0x211/lua-libmdbx) | [Masatoshi Fukunaga](https://github.com/mah0x211) |
-| Rust | [libmdbx-rs](https://github.com/vorot93/libmdbx-rs) | [Artem Vorotnikov](https://github.com/vorot93) |
-| Rust | [mdbx](https://crates.io/crates/mdbx) | [gcxfd](https://github.com/gcxfd) |
-| Java | [mdbxjni](https://github.com/castortech/mdbxjni) | [Castor Technologies](https://castortech.com/) |
-| Python (draft) | [python-bindings](https://libmdbx.dqdkfa.ru/dead-github/commits/python-bindings) branch | [Noel Kuntze](https://github.com/Thermi) |
-| .NET (obsolete) | [mdbx.NET](https://github.com/wangjia184/mdbx.NET) | [Jerry Wang](https://github.com/wangjia184) |
-
--------------------------------------------------------------------------------
-
-Performance comparison
-======================
-
-All benchmarks were done in 2015 with [IOArena](https://abf.io/erthink/ioarena.git)
-and multiple [scripts](https://github.com/pmwkaa/ioarena/tree/HL%2B%2B2015),
-run on a Lenovo Carbon-2 laptop: i7-4600U 2.1 GHz (2 physical cores, 4 HyperThreading cores), 8 Gb RAM,
-SSD SAMSUNG MZNTD512HAGL-000L1 (DXT23L0Q) 512 Gb.
-
-## Integral performance
-
-Shown here is the sum of the performance metrics from 3 benchmarks:
-
- - Read/Search on a machine with 4 logical CPUs in HyperThreading mode (i.e. actually 2 physical CPU cores);
-
- - Transactions with [CRUD](https://en.wikipedia.org/wiki/CRUD)
- operations in sync-write mode (fdatasync is called after each
- transaction);
-
- - Transactions with [CRUD](https://en.wikipedia.org/wiki/CRUD)
- operations in lazy-write mode (the moment to sync data to persistent storage
- is decided by the OS).
-
-*Reasons why asynchronous mode isn't benchmarked here:*
-
-  1. It doesn't make sense, as it would have to be compared with DB engines oriented
-  toward keeping data in memory (e.g. [Tarantool](https://tarantool.io/),
-  [Redis](https://redis.io/), etc.).
-
-  2. The performance gap is too large to compare in any meaningful way.
-
-![Comparison #1: Integral Performance](https://libmdbx.dqdkfa.ru/img/perf-slide-1.png)
-
--------------------------------------------------------------------------------
-
-## Read Scalability
-
-Summary performance with concurrent read/search queries in 1-2-4-8
-threads on a machine with 4 logical CPUs in HyperThreading mode (i.e. actually 2 physical CPU cores).
-
-![Comparison #2: Read Scalability](https://libmdbx.dqdkfa.ru/img/perf-slide-2.png)
-
--------------------------------------------------------------------------------
-
-## Sync-write mode
-
- - The linear scale on the left and the dark rectangles mean the arithmetic mean of
- transactions per second;
-
- - The logarithmic scale on the right is in seconds, and the yellow intervals mean the
- execution time of transactions. Each interval shows the minimal and maximum
- execution time, and the cross marks the standard deviation.
-
-**10,000 transactions in sync-write mode**. In case of a crash all data
-is consistent and conforms to the last successful transaction. The
-[fdatasync](https://linux.die.net/man/2/fdatasync) syscall is used after
-each write transaction in this mode.
-
-In the benchmark each transaction contains combined CRUD operations (2
-inserts, 1 read, 1 update, 1 delete).
-The benchmark starts on an empty database,
-and after the full run the database contains 10,000 small key-value records.
-
-![Comparison #3: Sync-write mode](https://libmdbx.dqdkfa.ru/img/perf-slide-3.png)
-
--------------------------------------------------------------------------------
-
-## Lazy-write mode
-
- - The linear scale on the left and the dark rectangles mean the arithmetic mean of
- thousands of transactions per second;
-
- - The logarithmic scale on the right is in seconds, and the yellow intervals mean the
- execution time of transactions. Each interval shows the minimal and maximum
- execution time, and the cross marks the standard deviation.
-
-**100,000 transactions in lazy-write mode**. In case of a crash all data
-is consistent and conforms to one of the last successful transactions, but
-transactions after it will be lost. Other DB engines use a
-[WAL](https://en.wikipedia.org/wiki/Write-ahead_logging) or transaction
-journal for that, which in turn depends on the order of operations in the
-journaled filesystem. _libmdbx_ doesn't use a WAL and hands I/O operations
-over to the filesystem and OS kernel (mmap).
-
-In the benchmark each transaction contains combined CRUD operations (2
-inserts, 1 read, 1 update, 1 delete). The benchmark starts on an empty database,
-and after the full run the database contains 100,000 small key-value
-records.
-
-![Comparison #4: Lazy-write mode](https://libmdbx.dqdkfa.ru/img/perf-slide-4.png)
-
--------------------------------------------------------------------------------
-
-## Async-write mode
-
- - The linear scale on the left and the dark rectangles mean the arithmetic mean of
- thousands of transactions per second;
-
- - The logarithmic scale on the right is in seconds, and the yellow intervals mean the
- execution time of transactions. Each interval shows the minimal and maximum
- execution time, and the cross marks the standard deviation.
-
-**1,000,000 transactions in async-write mode**.
-In case of a crash all data is consistent and conforms to one of the last successful transactions,
-but the number of lost transactions is much higher than in
-lazy-write mode. All DB engines in this mode do as few writes to
-persistent storage as possible. _libmdbx_ uses
-[msync(MS_ASYNC)](https://linux.die.net/man/2/msync) in this mode.
-
-In the benchmark each transaction contains combined CRUD operations (2
-inserts, 1 read, 1 update, 1 delete). The benchmark starts on an empty database,
-and after the full run the database contains 10,000 small key-value records.
-
-![Comparison #5: Async-write mode](https://libmdbx.dqdkfa.ru/img/perf-slide-5.png)
-
--------------------------------------------------------------------------------
-
-## Cost comparison
-
-Summary of used resources during the lazy-write mode benchmarks:
-
- - Read and write IOPs;
-
- - Sum of user CPU time and sys CPU time;
-
- - Used space on persistent storage after the test with the DB closed, but without
- waiting for the end of all internal housekeeping operations (LSM
- compactification, etc).
-
-_ForestDB_ is excluded because the benchmark showed that its consumption of
-each resource (CPU, IOPs) was much higher than for the other engines,
-which prevents comparing it with them meaningfully.
-
-All benchmark data is gathered with the
-[getrusage()](http://man7.org/linux/man-pages/man2/getrusage.2.html)
-syscall and by scanning the data directory.
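-
-As an aside, here is a minimal sketch of that measurement idea (not the
-benchmark's actual code): `getrusage()` reports the user and system CPU time
-consumed by the calling process, which can then be summed per engine.
-
-```c
-#include <stdio.h>
-#include <sys/resource.h>
-
-/* Illustrative only: print the user + system CPU time of the current process. */
-int main(void) {
-  struct rusage ru;
-  if (getrusage(RUSAGE_SELF, &ru) == 0) {
-    double user = ru.ru_utime.tv_sec + ru.ru_utime.tv_usec / 1e6;
-    double sys = ru.ru_stime.tv_sec + ru.ru_stime.tv_usec / 1e6;
-    printf("user %.3fs + sys %.3fs = %.3fs CPU\n", user, sys, user + sys);
-  }
-  return 0;
-}
-```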
- -![Comparison #6: Cost comparison](https://libmdbx.dqdkfa.ru/img/perf-slide-6.png) - - diff --git a/docs/crates/db.md b/docs/crates/db.md index 79f93b1efad4..58729fc1434c 100644 --- a/docs/crates/db.md +++ b/docs/crates/db.md @@ -8,7 +8,7 @@ The database is a central component to Reth, enabling persistent storage for dat Within Reth, the database is organized via "tables". A table is any struct that implements the `Table` trait. -[File: crates/storage/db/src/abstraction/table.rs](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db/src/abstraction/table.rs#L56-L65) +[File: crates/storage/db/src/abstraction/table.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/storage/db/src/abstraction/table.rs#L55-L82) ```rust ignore pub trait Table: Send + Sync + Debug + 'static { @@ -23,7 +23,7 @@ pub trait Table: Send + Sync + Debug + 'static { } //--snip-- -pub trait Key: Encode + Decode + Ord {} +pub trait Key: Encode + Decode + Ord + Clone + Serialize + for<'a> Deserialize<'a> {} //--snip-- pub trait Value: Compress + Decompress + Serialize {} @@ -32,38 +32,42 @@ pub trait Value: Compress + Decompress + Serialize {} The `Table` trait has two generic values, `Key` and `Value`, which need to implement the `Key` and `Value` traits, respectively. The `Encode` trait is responsible for transforming data into bytes so it can be stored in the database, while the `Decode` trait transforms the bytes back into its original form. Similarly, the `Compress` and `Decompress` traits transform the data to and from a compressed format when storing or reading data from the database. -There are many tables within the node, all used to store different types of data from `Headers` to `Transactions` and more. Below is a list of all of the tables. You can follow [this link](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db/src/tables/mod.rs#L36) if you would like to see the table definitions for any of the tables below. +There are many tables within the node, all used to store different types of data from `Headers` to `Transactions` and more. Below is a list of all of the tables. You can follow [this link](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/storage/db/src/tables/mod.rs#L161-L188) if you would like to see the table definitions for any of the tables below. - CanonicalHeaders - HeaderTD - HeaderNumbers - Headers -- BlockBodies +- BlockBodyIndices - BlockOmmers +- BlockWithdrawals +- TransactionBlock - Transactions - TxHashNumber - Receipts -- Logs - PlainAccountState - PlainStorageState - Bytecodes -- BlockTransitionIndex -- TxTransitionIndex - AccountHistory - StorageHistory - AccountChangeSet - StorageChangeSet +- HashedAccount +- HashedStorage +- AccountsTrie +- StoragesTrie - TxSenders -- Config - SyncStage +- SyncStageProgress +- PruneCheckpoints
 ## Database
 
-Reth's database design revolves around it's main [Database trait](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/interfaces/src/db/mod.rs#L33), which takes advantage of [generic associated types](https://blog.rust-lang.org/2022/10/28/gats-stabilization.html) and [a few design tricks](https://sabrinajewson.org/blog/the-better-alternative-to-lifetime-gats#the-better-gats) to implement the database's functionality across many types. Let's take a quick look at the `Database` trait and how it works.
+Reth's database design revolves around its main [Database trait](https://github.com/paradigmxyz/reth/blob/eaca2a4a7fbbdc2f5cd15eab9a8a18ede1891bda/crates/storage/db/src/abstraction/database.rs#L21), which takes advantage of [generic associated types](https://blog.rust-lang.org/2022/10/28/gats-stabilization.html) and [a few design tricks](https://sabrinajewson.org/blog/the-better-alternative-to-lifetime-gats#the-better-gats) to implement the database's functionality across many types. Let's take a quick look at the `Database` trait and how it works.
 
-[File: crates/storage/db/src/abstraction/database.rs](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db/src/abstraction/database.rs#L19)
+[File: crates/storage/db/src/abstraction/database.rs](https://github.com/paradigmxyz/reth/blob/eaca2a4a7fbbdc2f5cd15eab9a8a18ede1891bda/crates/storage/db/src/abstraction/database.rs#L21)
 
 ```rust ignore
 /// Main Database trait that spawns transactions to be executed.
diff --git a/docs/crates/discv4.md b/docs/crates/discv4.md
index fe37cf463db2..45ab4a52f84f 100644
--- a/docs/crates/discv4.md
+++ b/docs/crates/discv4.md
@@ -5,16 +5,20 @@ The `discv4` crate plays an important role in Reth, enabling discovery of other
 
 ## Starting the Node Discovery Protocol
 As mentioned in the network and stages chapters, when the node is first started up, the `node::Command::execute()` function is called, which initializes the node and starts to run the Reth pipeline. Throughout the initialization of the node, there are many processes that are started. One of the processes that is initialized is the p2p network which starts the node discovery protocol amongst other tasks.
 
-[File: bin/reth/src/node/mod.rs](https://github.com/paradigmxyz/reth/blob/main/bin/reth/src/node/mod.rs#L95)
+[File: bin/reth/src/node/mod.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/bin/reth/src/node/mod.rs#L314-L322)
 ```rust ignore
 pub async fn execute(&self) -> eyre::Result<()> {
     //--snip--
-    let network = config
-        .network_config(db.clone(), chain_id, genesis_hash, self.network.disable_discovery)
-        .start_network()
-        .await?;
+    let network = self
+        .start_network(
+            network_config,
+            &ctx.task_executor,
+            transaction_pool.clone(),
+            default_peers_path,
+        )
+        .await?;
 
-    info!(peer_id = ?network.peer_id(), local_addr = %network.local_addr(), "Started p2p networking");
+    info!(target: "reth::cli", peer_id = %network.peer_id(), local_addr = %network.local_addr(), "Connected to P2P network");
     //--snip--
 }
@@ -22,7 +26,7 @@ As mentioned in the network and stages chapters, when the node is first started
 
 During this process, a new `NetworkManager` is created through the `NetworkManager::new()` function, which starts the discovery protocol through a handful of newly spawned tasks. Let's take a look at how this actually works under the hood.
-[File: crates/net/network/src/manager.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/manager.rs#L147)
+[File: crates/net/network/src/manager.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/manager.rs#L89)
 ```rust ignore
 impl NetworkManager
 where
@@ -37,6 +41,7 @@ where
     mut discovery_v4_config,
     discovery_addr,
     boot_nodes,
+    dns_discovery_config,
     //--snip--
     ..
 } = config;
@@ -50,16 +55,18 @@ where
     disc_config
 });
 
-    let discovery = Discovery::new(discovery_addr, secret_key, discovery_v4_config).await?;
+    let discovery =
+        Discovery::new(discovery_addr, secret_key, discovery_v4_config, dns_discovery_config)
+            .await?;
     //--snip--
 }
 }
 ```
 
-First, the `NetworkConfig` is deconstructed and the `disc_config` is updated to merge configured [bootstrap nodes](https://github.com/paradigmxyz/reth/blob/main/crates/net/discv4/src/bootnodes.rs#L8) and add the `forkid` to adhere to [EIP 868](https://eips.ethereum.org/EIPS/eip-868). This updated configuration variable is then passed into the `Discovery::new()` function. Note that `Discovery` is a catch all for all discovery services, which include discv4, DNS discovery and others in the future.
+First, the `NetworkConfig` is deconstructed and the `disc_config` is updated to merge configured [bootstrap nodes](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/primitives/src/net.rs#L120-L151) and add the `forkid` to adhere to [EIP 868](https://eips.ethereum.org/EIPS/eip-868). This updated configuration variable is then passed into the `Discovery::new()` function. Note that `Discovery` is a catch-all for all discovery services, which include discv4, DNS discovery and others in the future.
 
-[File: crates/net/network/src/discovery.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/discovery.rs#L51)
+[File: crates/net/network/src/discovery.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/discovery.rs#L53)
 ```rust ignore
 impl Discovery {
     /// Spawns the discovery service.
@@ -70,6 +77,7 @@ impl Discovery {
     discovery_addr: SocketAddr,
     sk: SecretKey,
     discv4_config: Option<Discv4Config>,
+    dns_discovery_config: Option<DnsDiscoveryConfig>,
 ) -> Result<Self, NetworkError> {
     let local_enr = NodeRecord::from_secret_key(discovery_addr, &sk);
@@ -86,6 +94,20 @@ impl Discovery {
         (None, None, None)
     };
 
+    // setup DNS discovery
+    let (_dns_discovery, dns_discovery_updates, _dns_disc_service) =
+        if let Some(dns_config) = dns_discovery_config {
+            let (mut service, dns_disc) = DnsDiscoveryService::new_pair(
+                Arc::new(DnsResolver::from_system_conf()?),
+                dns_config,
+            );
+            let dns_discovery_updates = service.node_record_stream();
+            let dns_disc_service = service.spawn();
+            (Some(dns_disc), Some(dns_discovery_updates), Some(dns_disc_service))
+        } else {
+            (None, None, None)
+        };
+
     Ok(Self {
         local_enr,
         discv4,
diff --git a/docs/crates/eth-wire.md b/docs/crates/eth-wire.md
index 0c216644fce4..0270668d3f95 100644
--- a/docs/crates/eth-wire.md
+++ b/docs/crates/eth-wire.md
@@ -12,7 +12,7 @@ This crate can be thought of as having 2 components:
 
 ## Types
 The most basic Eth-wire type is a `ProtocolMessage`. It describes all messages that reth can send/receive.
-[File: crates/net/eth-wire/src/types/message.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/types/message.rs) +[File: crates/net/eth-wire/src/types/message.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/types/message.rs) ```rust, ignore /// An `eth` protocol message, containing a message ID and payload. #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] @@ -50,7 +50,7 @@ pub enum EthMessageID { Messages can either be broadcast to the network, or can be a request/response message to a single peer. This 2nd type of message is described using a `RequestPair` struct, which is simply a concatenation of the underlying message with a request id. -[File: crates/net/eth-wire/src/types/message.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/types/message.rs) +[File: crates/net/eth-wire/src/types/message.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/types/message.rs) ```rust, ignore #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct RequestPair { @@ -62,7 +62,7 @@ pub struct RequestPair { Every `Ethmessage` has a correspoding rust struct which implements the `Encodable` and `Decodable` traits. These traits are defined as follows: -[Crate: crates/common/rlp](https://github.com/paradigmxyz/reth/blob/main/crates/common/rlp) +[Crate: crates/rlp](https://github.com/paradigmxyz/reth/tree/1563506aea09049a85e5cc72c2894f3f7a371581/crates/rlp) ```rust, ignore pub trait Decodable: Sized { fn decode(buf: &mut &[u8]) -> Result; @@ -93,7 +93,7 @@ The items in the list are transactions in the format described in the main Ether In reth, this is represented as: -[File: crates/net/eth-wire/src/types/broadcast.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/types/broadcast.rs) +[File: crates/net/eth-wire/src/types/broadcast.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/types/broadcast.rs) ```rust,ignore pub struct Transactions( /// New transactions for the peer to include in its mempool. @@ -103,7 +103,7 @@ pub struct Transactions( And the corresponding trait implementations are present in the primitives crate. -[File: crates/primitives/src/transaction/mod.rs](https://github.com/paradigmxyz/reth/blob/main/crates/primitives/src/transaction/mod.rs) +[File: crates/primitives/src/transaction/mod.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/primitives/src/transaction/mod.rs) ```rust, ignore #[main_codec] #[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref, Default)] @@ -131,7 +131,7 @@ impl Decodable for TransactionSigned { // Implementation omitted for brevity //... } - +} ``` Now that we know how the types work, let's take a look at how these are utilized in the network. @@ -146,7 +146,7 @@ The lowest level stream to communicate with other peers is the P2P stream. It ta Decompression/Compression of bytes is done with snappy algorithm ([EIP 706](https://eips.ethereum.org/EIPS/eip-706)) using the external `snap` crate. 
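As a quick aside before the struct definition below, the raw snappy round-trip with the `snap` crate looks like this. This is a self-contained sketch for illustration only — it is not reth's actual framing code, which also deals with message IDs and error handling:

```rust,ignore
use snap::raw::{Decoder, Encoder};

fn main() {
    let payload = b"example eth-wire message body";

    // compress an outgoing message body (what the `Sink` side does)
    let compressed = Encoder::new().compress_vec(payload).expect("compress");

    // decompress an incoming message body (what the `Stream` side does)
    let decompressed = Decoder::new().decompress_vec(&compressed).expect("decompress");

    // the round-trip is lossless
    assert_eq!(decompressed, payload.to_vec());
}
```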
-[File: crates/net/eth-wire/src/p2pstream.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/p2pstream.rs) +[File: crates/net/eth-wire/src/p2pstream.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/p2pstream.rs) ```rust,ignore #[pin_project] pub struct P2PStream { @@ -164,7 +164,7 @@ pub struct P2PStream { To manage pinging, an instance of the `Pinger` struct is used. This is a state machine which keeps track of how many pings we have sent/received and the timeouts associated with them. -[File: crates/net/eth-wire/src/pinger.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/pinger.rs) +[File: crates/net/eth-wire/src/pinger.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/pinger.rs) ```rust,ignore #[derive(Debug)] pub(crate) struct Pinger { @@ -190,7 +190,7 @@ pub(crate) enum PingState { State transitions are then implemented like a future, with the `poll_ping` function advancing the state of the pinger. -[File: crates/net/eth-wire/src/pinger.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/pinger.rs) +[File: crates/net/eth-wire/src/pinger.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/pinger.rs) ```rust, ignore pub(crate) fn poll_ping( &mut self, @@ -218,12 +218,12 @@ pub(crate) fn poll_ping( ``` ### Sending and receiving data -To send and recieve data, the P2PStream itself is a future which implemenents the `Stream` and `Sink` traits from the `futures` crate. +To send and receive data, the P2PStream itself is a future which implements the `Stream` and `Sink` traits from the `futures` crate. For the `Stream` trait, the `inner` stream is polled, decompressed and returned. Most of the code is just error handling and is omitted here for clarity. -[File: crates/net/eth-wire/src/p2pstream.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/p2pstream.rs) +[File: crates/net/eth-wire/src/p2pstream.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/p2pstream.rs) ```rust,ignore impl Stream for P2PStream { @@ -250,7 +250,7 @@ impl Stream for P2PStream { Similarly, for the `Sink` trait, we do the reverse, compressing and sending data out to the `inner` stream. The important functions in this trait are shown below. -[File: crates/net/eth-wire/src/p2pstream.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/p2pstream.rs) +[File: crates/net/eth-wire/src/p2pstream.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/p2pstream.rs) ```rust, ignore impl Sink for P2PStream { fn start_send(self: Pin<&mut Self>, item: Bytes) -> Result<(), Self::Error> { @@ -287,7 +287,7 @@ impl Sink for P2PStream { ## EthStream The EthStream is very simple, it does not keep track of any state, it simply wraps the P2Pstream. 
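Before reth's actual definition below, here is a hedged sketch of the delegation pattern this describes: a stateless wrapper that implements `Stream` purely by forwarding to the stream it wraps. The names here are illustrative, not reth's:

```rust,ignore
use std::{
    pin::Pin,
    task::{Context, Poll},
};

use futures::Stream;
use pin_project::pin_project;

/// Illustrative stand-in for a stateless wrapper such as `EthStream`.
#[pin_project]
struct Wrapper<S> {
    #[pin]
    inner: S,
}

impl<S: Stream> Stream for Wrapper<S> {
    type Item = S::Item;

    /// Simply forward to the wrapped stream; the real wrapper additionally
    /// decodes each item before yielding it.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        self.project().inner.poll_next(cx)
    }
}
```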
-[File: crates/net/eth-wire/src/ethstream.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/ethstream.rs) +[File: crates/net/eth-wire/src/ethstream.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/ethstream.rs) ```rust,ignore #[pin_project] pub struct EthStream { @@ -298,7 +298,7 @@ pub struct EthStream { EthStream's only job is to perform the RLP decoding/encoding, using the `ProtocolMessage::decode()` and `ProtocolMessage::encode()` functions we looked at earlier. -[File: crates/net/eth-wire/src/ethstream.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/ethstream.rs) +[File: crates/net/eth-wire/src/ethstream.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/ethstream.rs) ```rust,ignore impl Stream for EthStream { // ... @@ -341,7 +341,7 @@ To perform these, reth has special `Unauthed` versions of streams described abov The `UnauthedP2Pstream` does the `Hello` handshake and returns a `P2PStream`. -[File: crates/net/eth-wire/src/p2pstream.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/p2pstream.rs) +[File: crates/net/eth-wire/src/p2pstream.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/p2pstream.rs) ```rust, ignore #[pin_project] pub struct UnauthedP2PStream { @@ -370,6 +370,6 @@ impl UnauthedP2PStream { } ``` -Similary, UnauthedEthStream does the `Status` handshake and returns an `EthStream`. The code is [here](https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/ethstream.rs) +Similarly, UnauthedEthStream does the `Status` handshake and returns an `EthStream`. The code is [here](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/ethstream.rs) diff --git a/docs/crates/network.md b/docs/crates/network.md index f78a5f5d33b0..edfa3515b047 100644 --- a/docs/crates/network.md +++ b/docs/crates/network.md @@ -33,7 +33,7 @@ The `"node"` CLI command, used to run the node itself, does the following at a h Steps 5-6 are of interest to us as they consume items from the `network` crate: -[File: bin/reth/src/node/mod.rs](https://github.com/paradigmxyz/reth/blob/main/bin/reth/src/node/mod.rs) +[File: bin/reth/src/node/mod.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/bin/reth/src/node/mod.rs) ```rust,ignore let network = start_network(network_config(db.clone(), chain_id, genesis_hash)).await?; @@ -84,7 +84,7 @@ pipeline.run(db.clone()).await?; Let's begin by taking a look at the line where the network is started, with the call, unsurprisingly, to `start_network`. Sounds important, doesn't it? 
-[File: bin/reth/src/node/mod.rs](https://github.com/paradigmxyz/reth/blob/main/bin/reth/src/node/mod.rs) +[File: bin/reth/src/node/mod.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/bin/reth/src/node/mod.rs) ```rust,ignore async fn start_network(config: NetworkConfig) -> Result where @@ -107,7 +107,7 @@ It gets the handles for the network management, transactions, and ETH requests t The `NetworkManager::builder` constructor requires a `NetworkConfig` struct to be passed in as a parameter, which can be used as the main entrypoint for setting up the entire network layer: -[File: crates/net/network/src/config.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/config.rs) +[File: crates/net/network/src/config.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/config.rs) ```rust,ignore pub struct NetworkConfig { /// The client type that can interact with the chain. @@ -152,7 +152,7 @@ pub struct NetworkConfig { The discovery task progresses as the network management task is polled, handling events regarding peer management through the `Swarm` struct which is stored as a field on the `NetworkManager`: -[File: crates/net/network/src/swarm.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/swarm.rs) +[File: crates/net/network/src/swarm.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/swarm.rs) ```rust,ignore pub(crate) struct Swarm { /// Listens for new incoming connections. @@ -180,7 +180,7 @@ Let's walk through how each is implemented, and then apply that knowledge to und The `NetworkHandle` struct is a client for the network management task that can be shared across threads. It wraps an `Arc` around the `NetworkInner` struct, defined as follows: -[File: crates/net/network/src/network.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/network.rs) +[File: crates/net/network/src/network.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/network.rs) ```rust,ignore struct NetworkInner { /// Number of active peer sessions the node's currently handling. @@ -200,7 +200,7 @@ struct NetworkInner { The field of note here is `to_manager_tx`, which is a handle that can be used to send messages in a channel to an instance of the `NetworkManager` struct. -[File: crates/net/network/src/manager.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/manager.rs) +[File: crates/net/network/src/manager.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/manager.rs) ```rust,ignore pub struct NetworkManager { /// The type that manages the actual network part, which includes connections. @@ -235,7 +235,7 @@ While the `NetworkManager` is meant to be spawned as a standalone [`tokio::task` In the pipeline, the `NetworkHandle` is used to instantiate the `FetchClient` - which we'll get into next - and is used in the `HeaderStage` to update the node's ["status"](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#status-0x00) (record the total difficulty, hash, and height of the last processed block). 
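That `HeaderStage` usage is quoted below. First, though, it is worth seeing the general shape of this handle pattern in isolation. The following is a hedged, self-contained sketch with illustrative names (not reth's actual types): a cheaply cloneable handle owns only a channel sender, while a single manager task owns the real state.

```rust,ignore
use std::sync::Arc;

use tokio::sync::mpsc;

/// Illustrative message type sent from handles to the manager task.
enum Message {
    AddPeer(String),
}

struct Inner {
    /// Sender half of the channel into the manager task.
    to_manager_tx: mpsc::UnboundedSender<Message>,
}

/// A cloneable handle, mirroring the `NetworkHandle`-over-`NetworkInner` shape.
#[derive(Clone)]
struct Handle {
    inner: Arc<Inner>,
}

impl Handle {
    fn add_peer(&self, addr: String) {
        // fire-and-forget: the manager processes this the next time it is polled
        let _ = self.inner.to_manager_tx.send(Message::AddPeer(addr));
    }
}

/// The manager task: the only place that mutates state.
async fn manager(mut rx: mpsc::UnboundedReceiver<Message>) {
    while let Some(msg) = rx.recv().await {
        match msg {
            Message::AddPeer(addr) => println!("adding peer {addr}"),
        }
    }
}
```

Because only the manager task mutates state, the handle can be cloned across threads freely without any locking.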
-[File: crates/stages/src/stages/headers.rs](https://github.com/paradigmxyz/reth/blob/main/crates/stages/src/stages/headers.rs) +[File: crates/stages/src/stages/headers.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/stages/src/stages/headers.rs) ```rust,ignore async fn update_head( &self, @@ -255,7 +255,7 @@ Now that we have some understanding about the internals of the network managemen The `FetchClient` struct, similar to `NetworkHandle`, can be shared across threads, and is a client for fetching data from the network. It's a fairly lightweight struct: -[File: crates/net/network/src/fetch/client.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/fetch/client.rs) +[File: crates/net/network/src/fetch/client.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/fetch/client.rs) ```rust,ignore pub struct FetchClient { /// Sender half of the request channel. @@ -271,7 +271,7 @@ The `request_tx` field is a handle to a channel that can be used to send request The fields `request_tx` and `peers_handle` are cloned off of the `StateFetcher` struct when instantiating the `FetchClient`, which is the lower-level struct responsible for managing data fetching operations over the network: -[File: crates/net/network/src/fetch/mod.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/fetch/mod.rs) +[File: crates/net/network/src/fetch/mod.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/fetch/mod.rs) ```rust,ignore pub struct StateFetcher { /// Currently active [`GetBlockHeaders`] requests @@ -295,7 +295,7 @@ pub struct StateFetcher { This struct itself is nested deeply within the `NetworkManager`: its `Swarm` struct (shown earlier in the chapter) contains a `NetworkState` struct that has the `StateFetcher` as a field: -[File: crates/net/network/src/state.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/state.rs) +[File: crates/net/network/src/state.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/state.rs) ```rust,ignore pub struct NetworkState { /// All active peers and their state. @@ -322,7 +322,7 @@ pub struct NetworkState { The `FetchClient` implements the `HeadersClient` and `BodiesClient` traits, defining the functionality to get headers and block bodies from available peers. -[File: crates/net/network/src/fetch/client.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/fetch/client.rs) +[File: crates/net/network/src/fetch/client.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/fetch/client.rs) ```rust,ignore impl HeadersClient for FetchClient { /// Sends a `GetBlockHeaders` request to an available peer. @@ -346,7 +346,7 @@ This functionality is used in the `HeaderStage` and `BodyStage`, respectively. 
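To make that concrete, here is a hedged sketch of the dependency-injection pattern at play — a consumer that is generic over a headers-client trait. The signatures are heavily simplified relative to reth's actual traits, which return futures with richer error and priority types:

```rust,ignore
use async_trait::async_trait;

struct Header {
    number: u64,
}

/// Simplified stand-in for a `HeadersClient`-style trait.
#[async_trait]
trait HeadersClient {
    async fn get_headers(&self, start: u64, limit: u64) -> Result<Vec<Header>, String>;
}

/// A stage-like consumer only needs the trait, so a `FetchClient`-style
/// handle, a mock, or a test fixture can all be plugged in.
async fn download_range<C: HeadersClient>(
    client: &C,
    start: u64,
    limit: u64,
) -> Result<Vec<Header>, String> {
    let headers = client.get_headers(start, limit).await?;
    // a real stage would validate and persist the headers here
    Ok(headers)
}
```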
In the pipeline used by the main Reth binary, the `HeaderStage` uses a `ReverseHeadersDownloader` to stream headers from the network: -[File: crates/net/downloaders/src/headers/linear.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/downloaders/src/headers/linear.rs) +[File: crates/net/downloaders/src/headers/reverse_headers.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/downloaders/src/headers/reverse_headers.rs) ```rust,ignore pub struct ReverseHeadersDownloader { /// The consensus client @@ -362,7 +362,7 @@ pub struct ReverseHeadersDownloader { A `FetchClient` is passed in to the `client` field, and the `get_headers` method it implements gets used when polling the stream created by the `ReverseHeadersDownloader` in the `execute` method of the `HeaderStage`. -[File: crates/net/downloaders/src/headers/linear.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/downloaders/src/headers/linear.rs) +[File: crates/net/downloaders/src/headers/reverse_headers.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/downloaders/src/headers/reverse_headers.rs) ```rust,ignore fn get_or_init_fut(&mut self) -> HeadersRequestFuture { match self.request.take() { @@ -388,7 +388,7 @@ fn get_or_init_fut(&mut self) -> HeadersRequestFuture { In the `BodyStage` configured by the main binary, a `BodiesDownloader` is used: -[File: crates/net/downloaders/src/bodies/concurrent.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/downloaders/src/bodies/concurrent.rs) +[File: crates/net/downloaders/src/bodies/bodies.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/downloaders/src/bodies/bodies.rs) ```rust,ignore pub struct BodiesDownloader { /// The bodies client @@ -406,7 +406,7 @@ pub struct BodiesDownloader { Here, similarly, a `FetchClient` is passed in to the `client` field, and the `get_block_bodies` method it implements is used when constructing the stream created by the `BodiesDownloader` in the `execute` method of the `BodyStage`. -[File: crates/net/downloaders/src/bodies/concurrent.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/downloaders/src/bodies/concurrent.rs) +[File: crates/net/downloaders/src/bodies/bodies.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/downloaders/src/bodies/bodies.rs) ```rust,ignore async fn fetch_bodies( &self, @@ -425,7 +425,7 @@ When `FetchClient.get_headers` or `FetchClient.get_block_bodies` is called, thos Every time the `StateFetcher` is polled, it finds the next idle peer available to service the current request (for either a block header, or a block body). In this context, "idle" means any peer that is not currently handling a request from the node: -[File: crates/net/network/src/fetch/mod.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/fetch/mod.rs) +[File: crates/net/network/src/fetch/mod.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/fetch/mod.rs) ```rust,ignore /// Returns the next action to return fn poll_action(&mut self) -> PollAction { @@ -455,7 +455,7 @@ The ETH requests task serves _incoming_ requests related to blocks in the [`eth` Similar to the network management task, it's implemented as an endless future, but it is meant to run as a background task (on a standalone `tokio::task`) and not to be interacted with directly from the pipeline. 
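Before looking at the struct itself, here is a minimal sketch of what "an endless future on a standalone `tokio::task`" means in practice. The request type is a stand-in for illustration:

```rust,ignore
use tokio::sync::mpsc;

/// Hypothetical request type; the real task receives eth protocol requests.
enum IncomingRequest {
    Headers,
    Bodies,
}

fn spawn_request_handler(mut rx: mpsc::UnboundedReceiver<IncomingRequest>) {
    // the task runs until the channel closes, i.e. for the life of the node
    tokio::spawn(async move {
        while let Some(request) = rx.recv().await {
            match request {
                IncomingRequest::Headers => { /* look up headers in the database */ }
                IncomingRequest::Bodies => { /* look up bodies in the database */ }
            }
        }
    });
}
```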
It's represented by the following `EthRequestHandler` struct: -[File: crates/net/network/src/eth_requests.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/eth_requests.rs) +[File: crates/net/network/src/eth_requests.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/eth_requests.rs) ```rust,ignore pub struct EthRequestHandler { /// The client type that can interact with the chain. @@ -480,7 +480,7 @@ As the `NetworkManager` is polled and listens for events from peers passed throu Being an endless future, the core of the ETH requests task's functionality is in its `poll` method implementation. As the `EthRequestHandler` is polled, it listens for any ETH requests coming through the channel, and handles them accordingly. At the time of writing, the ETH requests task can handle the [`GetBlockHeaders`](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#getblockheaders-0x03) and [`GetBlockBodies`](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#getblockbodies-0x05) requests. -[File: crates/net/network/src/eth_requests.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/eth_requests.rs) +[File: crates/net/network/src/eth_requests.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/eth_requests.rs) ```rust,ignore fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); @@ -506,7 +506,7 @@ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { The handling of these requests is fairly straightforward. The `GetBlockHeaders` payload is the following: -[File: crates/net/eth-wire/src/types/blocks.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/types/blocks.rs) +[File: crates/net/eth-wire/src/types/blocks.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/types/blocks.rs) ```rust,ignore pub struct GetBlockHeaders { /// The block number or hash that the peer should start returning headers from. @@ -528,7 +528,7 @@ pub struct GetBlockHeaders { In handling this request, the ETH requests task attempts, starting with `start_block`, to fetch the associated header from the database, increment/decrement the block number to fetch by `skip` depending on the `direction` while checking for overflow/underflow, and checks that bounds specifying the maximum numbers of headers or bytes to send have not been breached. -[File: crates/net/network/src/eth_requests.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/eth_requests.rs) +[File: crates/net/network/src/eth_requests.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/eth_requests.rs) ```rust,ignore fn get_headers_response(&self, request: GetBlockHeaders) -> Vec
<Header> { let GetBlockHeaders { start_block, limit, skip, direction } = request; @@ -598,7 +598,7 @@ fn get_headers_response(&self, request: GetBlockHeaders) -> Vec<Header>
{ The `GetBlockBodies` payload is simpler, it just contains a vector of requested block hashes: -[File: crates/net/eth-wire/src/types/blocks.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/types/blocks.rs) +[File: crates/net/eth-wire/src/types/blocks.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/types/blocks.rs) ```rust,ignore pub struct GetBlockBodies( /// The block hashes to request bodies for. @@ -608,7 +608,7 @@ pub struct GetBlockBodies( In handling this request, similarly, the ETH requests task attempts, for each hash in the requested order, to fetch the block body (transactions & ommers), while checking that bounds specifying the maximum numbers of bodies or bytes to send have not been breached. -[File: crates/net/network/src/eth_requests.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/eth_requests.rs) +[File: crates/net/network/src/eth_requests.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/eth_requests.rs) ```rust,ignore fn on_bodies_request( &mut self, @@ -653,7 +653,7 @@ in the [transaction-pool](../../../ethereum/transaction-pool/README.md) chapter. Again, like the network management and ETH requests tasks, the transactions task is implemented as an endless future that runs as a background task on a standalone `tokio::task`. It's represented by the `TransactionsManager` struct: -[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/transactions.rs) +[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/transactions.rs) ```rust,ignore pub struct TransactionsManager { /// Access to the transaction pool. @@ -688,7 +688,7 @@ pub struct TransactionsManager { Unlike the ETH requests task, but like the network management task's `NetworkHandle`, the transactions task can also be accessed via a shareable "handle" struct, the `TransactionsHandle`: -[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/transactions.rs) +[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/transactions.rs) ```rust,ignore pub struct TransactionsHandle { /// Command channel to the [`TransactionsManager`] @@ -711,7 +711,7 @@ Let's get a view into the transactions task's operation by walking through the ` The `poll` method lays out an order of operations for the transactions task. It begins by draining the `TransactionsManager.network_events`, `TransactionsManager.command_rx`, and `TransactionsManager.transaction_events` streams, in this order. Then, it checks on all the current `TransactionsManager.inflight_requests`, which are requests sent by the node to its peers for full transaction objects. After this, it checks on the status of completed `TransactionsManager.pool_imports` events, which are transactions that are being imported into the node's transaction pool. Finally, it drains the new `TransactionsManager.pending_transactions` events from the transaction pool. 
-[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/transactions.rs) +[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/transactions.rs) ```rust,ignore fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); @@ -786,7 +786,7 @@ The `TransactionsManager.network_events` stream is the first to have all of its The events received in this channel are of type `NetworkEvent`: -[File: crates/net/network/src/manager.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/manager.rs) +[File: crates/net/network/src/manager.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/manager.rs) ```rust,ignore pub enum NetworkEvent { /// Closed the peer session. @@ -822,7 +822,7 @@ Removes the peer given by `NetworkEvent::SessionClosed.peer_id` from the `Transa **`NetworkEvent::SessionEstablished`** Begins by inserting a `Peer` into `TransactionsManager.peers` by `peer_id`, which is a struct of the following form: -[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/transactions.rs) +[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/transactions.rs) ```rust,ignore struct Peer { /// Keeps track of transactions that we know the peer has seen. @@ -838,7 +838,7 @@ The `request_tx` field on the `Peer` is used at the sender end of a channel to s After the `Peer` is added to `TransactionsManager.peers`, the hashes of all of the transactions in the node's transaction pool are sent to the peer in a [`NewPooledTransactionHashes` message](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#newpooledtransactionhashes-0x08). -[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/transactions.rs) +[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/transactions.rs) ```rust,ignore fn on_network_event(&mut self, event: NetworkEvent) { match event { @@ -875,7 +875,7 @@ fn on_network_event(&mut self, event: NetworkEvent) { Next in the `poll` method, `TransactionsCommand`s sent through the `TransactionsManager.command_rx` stream are handled. These are the next to be handled as they are those sent manually via the `TransactionsHandle`, giving them precedence over transactions-related requests picked up from the network. The `TransactionsCommand` enum has the following form: -[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/transactions.rs) +[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/transactions.rs) ```rust,ignore enum TransactionsCommand { PropagateHash(H256), @@ -886,7 +886,7 @@ enum TransactionsCommand { `on_new_transactions` propagates the full transaction object, with the signer attached, to a small random sample of peers using the `propagate_transactions` method. Then, it notifies all other peers of the hash of the new transaction, so that they can request the full transaction object if they don't already have it. 
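The actual implementation follows below; first, a hedged sketch of that propagation split. The square-root sample size is an assumption for illustration, and the peer and message types are stand-ins:

```rust,ignore
struct PeerHandle; // stand-in for a per-peer message sender

impl PeerHandle {
    fn send_full_transactions(&self, _txs: &[Vec<u8>]) {}
    fn send_hashes(&self, _hashes: &[[u8; 32]]) {}
}

/// Send full transaction objects to a small sample of peers and only the
/// hashes to everyone else, so the network is not flooded with full payloads.
fn propagate(peers: &[PeerHandle], txs: &[Vec<u8>], hashes: &[[u8; 32]]) {
    // square-root sampling is an illustrative assumption here
    let full_sample = (peers.len() as f64).sqrt() as usize;
    for (i, peer) in peers.iter().enumerate() {
        if i < full_sample {
            peer.send_full_transactions(txs);
        } else {
            peer.send_hashes(hashes);
        }
    }
}
```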
-[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/transactions.rs) +[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/transactions.rs) ```rust,ignore fn on_new_transactions(&mut self, hashes: impl IntoIterator) { trace!(target: "net::tx", "Start propagating transactions"); @@ -946,7 +946,7 @@ fn propagate_transactions( After `TransactionsCommand`s, it's time to take care of transactions-related requests sent by peers in the network, so the `poll` method handles `NetworkTransactionEvent`s received through the `TransactionsManager.transaction_events` stream. `NetworkTransactionEvent` has the following form: -[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/transactions.rs) +[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/transactions.rs) ```rust,ignore pub enum NetworkTransactionEvent { /// Received list of transactions from the given peer. @@ -976,7 +976,7 @@ To understand this a bit better, let's double back and examine what `Transaction `TransactionsManager.pool_imports` is a set of futures representing the transactions which are currently in the process of being imported to the node's transaction pool. This process is asynchronous due to the validation of the transaction that must occur, thus we need to keep a handle on the generated future. -[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/transactions.rs) +[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/transactions.rs) ```rust,ignore fn import_transactions(&mut self, peer_id: PeerId, transactions: Vec) { let mut has_bad_transactions = false; @@ -1026,7 +1026,7 @@ This event is generated from the [`NewPooledTransactionHashes` protocol message] Here, it begins by adding the transaction hashes included in the `NewPooledTransactionHashes` payload to the LRU cache for the `Peer` identified by `peer_id` in `TransactionsManager.peers`. Next, it filters the list of hashes to those that are not already present in the transaction pool, and for each such hash, requests its full transaction object from the peer by sending it a [`GetPooledTransactions` protocol message](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#getpooledtransactions-0x09) through the `Peer.request_tx` channel. If the request was successfully sent, a `GetPooledTxRequest` gets added to `TransactionsManager.inflight_requests` vector: -[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/transactions.rs) +[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/transactions.rs) ```rust,ignore struct GetPooledTxRequest { peer_id: PeerId, @@ -1036,7 +1036,7 @@ struct GetPooledTxRequest { As you can see, this struct also contains a `response` channel from which the peer's response can later be polled. 
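This request/response plumbing is a common tokio pattern. The following hedged, self-contained sketch (with illustrative types) shows how a oneshot channel rides along with each in-flight request:

```rust,ignore
use tokio::sync::{mpsc, oneshot};

/// Illustrative in-flight request: the hashes we want, plus a oneshot sender
/// on which the peer's session task will later push the response.
struct GetPooledTxRequest {
    hashes: Vec<[u8; 32]>,
    response: oneshot::Sender<Vec<Vec<u8>>>,
}

async fn request_pooled_transactions(
    peer_request_tx: &mpsc::Sender<GetPooledTxRequest>,
    hashes: Vec<[u8; 32]>,
) -> Option<Vec<Vec<u8>>> {
    let (tx, rx) = oneshot::channel();
    // hand the request (and the response sender) to the peer's session task
    peer_request_tx.send(GetPooledTxRequest { hashes, response: tx }).await.ok()?;
    // await the response end of the channel
    rx.await.ok()
}
```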
-[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/transactions.rs) +[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/transactions.rs) ```rust,ignore fn on_new_pooled_transactions(&mut self, peer_id: PeerId, msg: NewPooledTransactionHashes) { if let Some(peer) = self.peers.get_mut(&peer_id) { @@ -1072,7 +1072,7 @@ This event is generated from the [`GetPooledTransactions` protocol message](http Here, it collects _all_ the transactions in the node's transaction pool, recovers their signers, adds their hashes to the LRU cache of the requesting peer, and sends them to the peer in a [`PooledTransactions` protocol message](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#pooledtransactions-0x0a). This is sent through the `response` channel that's stored as a field of the `NetworkTransaction::GetPooledTransactions` variant itself. -[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/transactions.rs) +[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/transactions.rs) ```rust,ignore fn on_get_pooled_transactions( &mut self, diff --git a/docs/crates/stages.md b/docs/crates/stages.md index 430579d197bb..f9fcc33cd6a9 100644 --- a/docs/crates/stages.md +++ b/docs/crates/stages.md @@ -2,7 +2,7 @@ The `stages` lib plays a central role in syncing the node, maintaining state, updating the database and more. The stages involved in the Reth pipeline are the `HeaderStage`, `BodyStage`, `SenderRecoveryStage`, and `ExecutionStage` (note that this list is non-exhaustive, and more pipeline stages will be added in the near future). Each of these stages are queued up and stored within the Reth pipeline. -[File: crates/stages/src/pipeline.rs](https://github.com/paradigmxyz/reth/blob/main/crates/stages/src/pipeline.rs) +[File: crates/stages/src/pipeline/mod.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/stages/src/pipeline/mod.rs) ```rust,ignore pub struct Pipeline { stages: Vec>, @@ -19,7 +19,7 @@ When the node is first started, a new `Pipeline` is initialized and all of the s Each stage within the pipeline implements the `Stage` trait which provides function interfaces to get the stage id, execute the stage and unwind the changes to the database if there was an issue during the stage execution. -[File: crates/stages/src/stage.rs](https://github.com/paradigmxyz/reth/blob/main/crates/stages/src/stage.rs) +[File: crates/stages/src/stage.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/stages/src/stage.rs) ```rust,ignore pub trait Stage: Send + Sync { /// Get the ID of the stage. @@ -53,7 +53,7 @@ To get a better idea of what is happening at each part of the pipeline, lets wal The `HeaderStage` is responsible for syncing the block headers, validating the header integrity and writing the headers to the database. When the `execute()` function is called, the local head of the chain is updated to the most recent block height previously executed by the stage. At this point, the node status is also updated with that block's height, hash and total difficulty. These values are used during any new eth/65 handshakes. 
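Stepping back briefly to the `Stage` interface shown above: it is small enough that a toy implementor fits in a few lines. The sketch below is heavily simplified relative to reth's actual trait, which is async and generic over the database transaction:

```rust,ignore
struct ExecInput { target: u64 }
struct ExecOutput { done: bool }
struct UnwindInput { unwind_to: u64 }

/// Heavily simplified stand-in for reth's `Stage` trait.
trait Stage {
    fn id(&self) -> &'static str;
    fn execute(&mut self, input: ExecInput) -> Result<ExecOutput, String>;
    fn unwind(&mut self, input: UnwindInput) -> Result<(), String>;
}

struct NoopStage;

impl Stage for NoopStage {
    fn id(&self) -> &'static str {
        "Noop"
    }

    /// Move the stage forward to the target block; a real stage would read
    /// from and write to the database here.
    fn execute(&mut self, _input: ExecInput) -> Result<ExecOutput, String> {
        Ok(ExecOutput { done: true })
    }

    /// Roll the stage's changes back, e.g. after a reorg or a failed stage.
    fn unwind(&mut self, _input: UnwindInput) -> Result<(), String> {
        Ok(())
    }
}
```

With that `execute`/`unwind` shape in mind, back to the `HeaderStage`.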
After updating the head, a stream is established with other peers in the network to sync the missing chain headers between the most recent state stored in the database and the chain tip. The `HeaderStage` contains a `downloader` attribute, which is a type that implements the `HeaderDownloader` trait. A `HeaderDownloader` is a `Stream` that returns batches of headers. -[File: crates/interfaces/src/p2p/headers/downloader.rs](https://github.com/paradigmxyz/reth/blob/main/crates/interfaces/src/p2p/headers/downloader.rs) +[File: crates/interfaces/src/p2p/headers/downloader.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/interfaces/src/p2p/headers/downloader.rs) ```rust,ignore pub trait HeaderDownloader: Send + Sync + Stream> + Unpin { /// Updates the gap to sync which ranges from local head to the sync target @@ -77,7 +77,7 @@ pub trait HeaderDownloader: Send + Sync + Stream> + Unp The `HeaderStage` relies on the downloader stream to return the headers in descending order starting from the chain tip down to the latest block in the database. While other stages in the `Pipeline` start from the most recent block in the database up to the chain tip, the `HeaderStage` works in reverse to avoid [long-range attacks](https://messari.io/report/long-range-attack). When a node downloads headers in ascending order, it will not know if it is being subjected to a long-range attack until it reaches the most recent blocks. To combat this, the `HeaderStage` starts by getting the chain tip from the Consensus Layer, verifies the tip, and then walks backwards by the parent hash. Each value yielded from the stream is a `SealedHeader`. -[File: crates/primitives/src/header.rs](https://github.com/paradigmxyz/reth/blob/main/crates/primitives/src/header.rs) +[File: crates/primitives/src/header.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/primitives/src/header.rs) ```rust,ignore pub struct SealedHeader { /// Locked Header fields. @@ -130,7 +130,7 @@ The new block is then pre-validated, checking that the ommers hash and transacti Following a successful `BodyStage`, the `SenderRecoveryStage` starts to execute. The `SenderRecoveryStage` is responsible for recovering the transaction sender for each of the newly added transactions to the database. At the beginning of the execution function, all of the transactions are first retrieved from the database. Then the `SenderRecoveryStage` goes through each transaction and recovers the signer from the transaction signature and hash. The transaction hash is derived by taking the Keccak 256-bit hash of the RLP encoded transaction bytes. This hash is then passed into the `recover_signer` function. -[File: crates/primitives/src/transaction/signature.rs](https://github.com/paradigmxyz/reth/blob/main/crates/primitives/src/transaction/signature.rs) +[File: crates/primitives/src/transaction/signature.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/primitives/src/transaction/signature.rs) ```rust,ignore pub(crate) fn recover_signer(&self, hash: H256) -> Option
<Address>
{ let mut sig: [u8; 65] = [0; 65]; @@ -157,7 +157,7 @@ Once the transaction signer has been recovered, the signer is then added to the Finally, after all headers, bodies and senders are added to the database, the `ExecutionStage` starts to execute. This stage is responsible for executing all of the transactions and updating the state stored in the database. For every new block header added to the database, the corresponding transactions have their signers attached to them and `reth_blockchain_tree::executor::execute_and_verify_receipt()` is called, pushing the state changes resulting from the execution to a `Vec`. -[File: crates/stages/src/stages/execution.rs](https://github.com/paradigmxyz/reth/blob/main/crates/stages/src/stages/execution.rs) +[File: crates/stages/src/stages/execution.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/stages/src/stages/execution.rs) ```rust,ignore pub fn execute_and_verify_receipt( block: &Block, diff --git a/docs/design/database.md b/docs/design/database.md index eae89bf9d011..45f9d2a139cc 100644 --- a/docs/design/database.md +++ b/docs/design/database.md @@ -14,7 +14,7 @@ * We implemented that trait for the following encoding formats: * [Ethereum-specific Compact Encoding](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/codecs/derive/src/compact/mod.rs): A lot of Ethereum datatypes have unnecessary zeros when serialized, or optional (e.g. on empty hashes) which would be nice not to pay in storage costs. * [Erigon](https://github.com/ledgerwatch/erigon/blob/12ee33a492f5d240458822d052820d9998653a63/docs/programmers_guide/db_walkthrough.MD) achieves that by having a `bitfield` set on Table "PlainState which adds a bitfield to Accounts. - * Akula expanded it for other tables and datatypes manually. It also saved some more space by storing the length of certain types (U256, u64) using the modular_bitfield crate, which compacts this information. + * Akula expanded it for other tables and datatypes manually. It also saved some more space by storing the length of certain types (U256, u64) using the [`modular_bitfield`](https://docs.rs/modular-bitfield/latest/modular_bitfield/) crate, which compacts this information. * We generalized it for all types, by writing a derive macro that autogenerates code for implementing the trait. It, also generates the interfaces required for fuzzing using ToB/test-fuzz: * [Scale Encoding](https://github.com/paritytech/parity-scale-codec) * [Postcard Encoding](https://github.com/jamesmunns/postcard) From 3b404acc7dbb7499895876e3391511e67446bb45 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 21 Aug 2023 17:05:11 +0200 Subject: [PATCH 487/722] feat: support blob transactions in manager (#4294) --- crates/net/eth-wire/src/types/broadcast.rs | 7 ++++ crates/net/network/src/transactions.rs | 40 +++++++++++++-------- crates/primitives/src/transaction/pooled.rs | 21 +++++++++++ 3 files changed, 54 insertions(+), 14 deletions(-) diff --git a/crates/net/eth-wire/src/types/broadcast.rs b/crates/net/eth-wire/src/types/broadcast.rs index c31ce72445fb..18da1bd75684 100644 --- a/crates/net/eth-wire/src/types/broadcast.rs +++ b/crates/net/eth-wire/src/types/broadcast.rs @@ -80,6 +80,13 @@ pub struct Transactions( pub Vec, ); +impl Transactions { + /// Returns `true` if the list of transactions contains any blob transactions. 
+ pub fn has_eip4844(&self) -> bool { + self.0.iter().any(|tx| tx.is_eip4844()) + } +} + impl From> for Transactions { fn from(txs: Vec) -> Self { Transactions(txs) diff --git a/crates/net/network/src/transactions.rs b/crates/net/network/src/transactions.rs index 56d43dbb5e28..e03d466676a1 100644 --- a/crates/net/network/src/transactions.rs +++ b/crates/net/network/src/transactions.rs @@ -19,8 +19,8 @@ use reth_interfaces::{ use reth_metrics::common::mpsc::UnboundedMeteredReceiver; use reth_network_api::{Peers, ReputationChangeKind}; use reth_primitives::{ - FromRecoveredTransaction, IntoRecoveredTransaction, PeerId, TransactionSigned, TxHash, TxType, - H256, + FromRecoveredPooledTransaction, IntoRecoveredTransaction, PeerId, PooledTransactionsElement, + TransactionSigned, TxHash, TxType, H256, }; use reth_rlp::Encodable; use reth_transaction_pool::{ @@ -164,7 +164,6 @@ impl TransactionsManager { impl TransactionsManager where Pool: TransactionPool + 'static, - ::Transaction: IntoRecoveredTransaction, { /// Returns a new handle that can send commands to this type. pub fn handle(&self) -> TransactionsHandle { @@ -375,7 +374,22 @@ where fn on_network_tx_event(&mut self, event: NetworkTransactionEvent) { match event { NetworkTransactionEvent::IncomingTransactions { peer_id, msg } => { - self.import_transactions(peer_id, msg.0, TransactionSource::Broadcast); + // ensure we didn't receive any blob transactions as these are disallowed to be + // broadcasted in full + + let has_blob_txs = msg.has_eip4844(); + + let non_blob_txs = msg + .0 + .into_iter() + .map(PooledTransactionsElement::try_from_broadcast) + .filter_map(Result::ok); + + self.import_transactions(peer_id, non_blob_txs, TransactionSource::Broadcast); + + if has_blob_txs { + self.report_peer(peer_id, ReputationChangeKind::BadTransactions); + } } NetworkTransactionEvent::IncomingPooledTransactionHashes { peer_id, msg } => { self.on_new_pooled_transaction_hashes(peer_id, msg) @@ -448,7 +462,7 @@ where fn import_transactions( &mut self, peer_id: PeerId, - transactions: Vec, + transactions: impl IntoIterator, source: TransactionSource, ) { // If the node is pipeline syncing, ignore transactions @@ -463,7 +477,7 @@ where if let Some(peer) = self.peers.get_mut(&peer_id) { for tx in transactions { // recover transaction - let tx = if let Some(tx) = tx.into_ecrecovered() { + let tx = if let Ok(tx) = tx.try_into_ecrecovered() { tx } else { has_bad_transactions = true; @@ -474,18 +488,18 @@ where // If we received the transactions as the response to our GetPooledTransactions // requests (based on received `NewPooledTransactionHashes`) then we already // recorded the hashes in [`Self::on_new_pooled_transaction_hashes`] - if source.is_broadcast() && !peer.transactions.insert(tx.hash()) { + if source.is_broadcast() && !peer.transactions.insert(*tx.hash()) { num_already_seen += 1; } - match self.transactions_by_peers.entry(tx.hash()) { + match self.transactions_by_peers.entry(*tx.hash()) { Entry::Occupied(mut entry) => { // transaction was already inserted entry.get_mut().push(peer_id); } Entry::Vacant(entry) => { // this is a new transaction that should be imported into the pool - let pool_transaction = ::from_recovered_transaction(tx); + let pool_transaction = ::from_recovered_transaction(tx); let pool = self.pool.clone(); @@ -583,11 +597,7 @@ where { match result { Ok(Ok(txs)) => { - // convert all transactions to the inner transaction type, ignoring any - // sidecars - // TODO: remove this! 
this will be different when we introduce the blobpool - let transactions = txs.0.into_iter().map(|tx| tx.into_transaction()).collect(); - this.import_transactions(peer_id, transactions, TransactionSource::Response) + this.import_transactions(peer_id, txs.0, TransactionSource::Response) } Ok(Err(req_err)) => { this.on_request_error(peer_id, req_err); @@ -825,6 +835,8 @@ enum TransactionsCommand { #[allow(missing_docs)] pub enum NetworkTransactionEvent { /// Received list of transactions from the given peer. + /// + /// This represents transactions that were broadcasted to use from the peer. IncomingTransactions { peer_id: PeerId, msg: Transactions }, /// Received list of transactions hashes to the given peer. IncomingPooledTransactionHashes { peer_id: PeerId, msg: NewPooledTransactionHashes }, diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index a293b0b67aef..e34d05725338 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -46,6 +46,17 @@ pub enum PooledTransactionsElement { } impl PooledTransactionsElement { + /// Tries to convert a [TransactionSigned] into a [PooledTransactionsElement]. + /// + /// [BlobTransaction] are disallowed from being propagated, hence this returns an error if the + /// `tx` is [Transaction::Eip4844] + pub fn try_from_broadcast(tx: TransactionSigned) -> Result { + if tx.is_eip4844() { + return Err(tx) + } + Ok(tx.into()) + } + /// Heavy operation that return signature hash over rlp encoded transaction. /// It is only for signature signing or signer recovery. pub fn signature_hash(&self) -> H256 { @@ -57,6 +68,16 @@ impl PooledTransactionsElement { } } + /// Reference to transaction hash. Used to identify transaction. + pub fn hash(&self) -> &TxHash { + match self { + PooledTransactionsElement::Legacy { hash, .. } => hash, + PooledTransactionsElement::Eip2930 { hash, .. } => hash, + PooledTransactionsElement::Eip1559 { hash, .. } => hash, + PooledTransactionsElement::BlobTransaction(tx) => &tx.hash, + } + } + /// Returns the signature of the transaction. pub fn signature(&self) -> &Signature { match self { From c4626f7039436d197e3c6e6d071928ade6993a54 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 21 Aug 2023 18:00:47 +0200 Subject: [PATCH 488/722] fix: state overrides and call many args (#4298) --- crates/rpc/rpc-api/src/debug.rs | 4 +--- crates/rpc/rpc-types/src/eth/state.rs | 23 +++++++++++++++++++++-- crates/rpc/rpc/src/debug.rs | 15 ++++++--------- crates/rpc/rpc/src/eth/revm_utils.rs | 10 ++-------- 4 files changed, 30 insertions(+), 22 deletions(-) diff --git a/crates/rpc/rpc-api/src/debug.rs b/crates/rpc/rpc-api/src/debug.rs index 91377017425c..880fa82c10e2 100644 --- a/crates/rpc/rpc-api/src/debug.rs +++ b/crates/rpc/rpc-api/src/debug.rs @@ -1,7 +1,6 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use reth_primitives::{BlockId, BlockNumberOrTag, Bytes, H256}; use reth_rpc_types::{ - state::StateOverride, trace::geth::{ BlockTraceResult, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, TraceResult, @@ -120,8 +119,7 @@ pub trait DebugApi { &self, bundles: Vec, state_context: Option, - opts: Option, - state_override: Option, + opts: Option, ) -> RpcResult>; /// Sets the logging backtrace location. 
When a backtrace location is set and a log message is diff --git a/crates/rpc/rpc-types/src/eth/state.rs b/crates/rpc/rpc-types/src/eth/state.rs index 38e971083a5e..b9e1221850d0 100644 --- a/crates/rpc/rpc-types/src/eth/state.rs +++ b/crates/rpc/rpc-types/src/eth/state.rs @@ -24,11 +24,11 @@ pub struct AccountOverride { /// Fake key-value mapping to override all slots in the account storage before executing the /// call. #[serde(default, skip_serializing_if = "Option::is_none")] - pub state: Option>, + pub state: Option>, /// Fake key-value mapping to override individual slots in the account storage before executing /// the call. #[serde(default, skip_serializing_if = "Option::is_none")] - pub state_diff: Option>, + pub state_diff: Option>, } #[cfg(test)] @@ -48,4 +48,23 @@ mod tests { .unwrap(); assert!(acc.code.is_some()); } + #[test] + fn test_state_override_state_diff() { + let s = r#"{ + "0x1b5212AF6b76113afD94cD2B5a78a73B7d7A8222": { + "balance": "0x39726378b58c400000", + "stateDiff": {} + }, + "0xdAC17F958D2ee523a2206206994597C13D831ec7": { + "stateDiff": { + "0xede27e4e7f3676edbf125879f17a896d6507958df3d57bda6219f1880cae8a41": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + } + }"#; + let state_override: StateOverride = serde_json::from_str(s).unwrap(); + let acc = state_override + .get(&"0x1b5212AF6b76113afD94cD2B5a78a73B7d7A8222".parse().unwrap()) + .unwrap(); + assert!(acc.state_diff.is_some()); + } } diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index f492599963f1..fcb592516306 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -25,7 +25,6 @@ use reth_revm::{ use reth_rlp::{Decodable, Encodable}; use reth_rpc_api::DebugApiServer; use reth_rpc_types::{ - state::StateOverride, trace::geth::{ BlockTraceResult, FourByteFrame, GethDebugBuiltInTracerType, GethDebugTracerType, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, NoopFrame, TraceResult, @@ -341,8 +340,7 @@ where &self, bundles: Vec, state_context: Option, - opts: Option, - state_override: Option, + opts: Option, ) -> EthResult> { if bundles.is_empty() { return Err(EthApiError::InvalidParams(String::from("bundles are empty."))) @@ -357,8 +355,9 @@ where self.inner.eth_api.block_by_id(target_block), )?; + let opts = opts.unwrap_or_default(); let block = block.ok_or_else(|| EthApiError::UnknownBlockNumber)?; - let tracing_options = opts.unwrap_or_default(); + let GethDebugTracingCallOptions { tracing_options, state_overrides, .. } = opts; let gas_limit = self.inner.eth_api.call_gas_limit(); // we're essentially replaying the transactions in the block here, hence we need the state @@ -401,7 +400,7 @@ where //let mut result = Vec::with_capacity(bundle.len()); let Bundle { transactions, block_override } = bundle; let overrides = - EvmOverrides::new(state_override.clone(), block_override.map(Box::new)); + EvmOverrides::new(state_overrides.clone(), block_override.map(Box::new)); let mut transactions = transactions.into_iter().peekable(); while let Some(tx) = transactions.next() { @@ -765,12 +764,10 @@ where &self, bundles: Vec, state_context: Option, - opts: Option, - state_override: Option, + opts: Option, ) -> RpcResult> { let _permit = self.acquire_trace_permit().await; - Ok(DebugApi::debug_trace_call_many(self, bundles, state_context, opts, state_override) - .await?) + Ok(DebugApi::debug_trace_call_many(self, bundles, state_context, opts).await?) 
} } diff --git a/crates/rpc/rpc/src/eth/revm_utils.rs b/crates/rpc/rpc/src/eth/revm_utils.rs index d83b3d53e100..1dc65db8e2ba 100644 --- a/crates/rpc/rpc/src/eth/revm_utils.rs +++ b/crates/rpc/rpc/src/eth/revm_utils.rs @@ -488,19 +488,13 @@ where account, new_account_state .into_iter() - .map(|(slot, value)| { - (U256::from_be_bytes(slot.0), U256::from_be_bytes(value.0)) - }) + .map(|(slot, value)| (U256::from_be_bytes(slot.0), value)) .collect(), )?; } (None, Some(account_state_diff)) => { for (slot, value) in account_state_diff { - db.insert_account_storage( - account, - U256::from_be_bytes(slot.0), - U256::from_be_bytes(value.0), - )?; + db.insert_account_storage(account, U256::from_be_bytes(slot.0), value)?; } } }; From 34b68deedf401624de09fd24d63361be8f4f9987 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Mon, 21 Aug 2023 19:13:58 +0300 Subject: [PATCH 489/722] release: v0.1.0-alpha.7 (#4297) --- Cargo.lock | 94 +++++++++++++++++++++++++++--------------------------- Cargo.toml | 2 +- 2 files changed, 48 insertions(+), 48 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 299c165541d4..d1a88fa5dc6e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1130,7 +1130,7 @@ checksum = "67ba02a97a2bd10f4b59b25c7973101c79642302776489e030cd13cdab09ed15" [[package]] name = "codecs-derive" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "convert_case 0.6.0", "parity-scale-codec", @@ -1935,7 +1935,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "reth-db", "reth-interfaces", @@ -5192,7 +5192,7 @@ dependencies = [ [[package]] name = "reth" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "backon", "boyer-moore-magiclen", @@ -5264,7 +5264,7 @@ dependencies = [ [[package]] name = "reth-auto-seal-consensus" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "futures-util", "reth-beacon-consensus", @@ -5281,7 +5281,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "futures-core", "futures-util", @@ -5301,7 +5301,7 @@ dependencies = [ [[package]] name = "reth-beacon-consensus" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "assert_matches", "futures", @@ -5330,7 +5330,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "aquamarine", "assert_matches", @@ -5350,7 +5350,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "arbitrary", "bytes", @@ -5365,7 +5365,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "confy", "reth-discv4", @@ -5382,7 +5382,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "assert_matches", "mockall", @@ -5393,7 +5393,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "arbitrary", "assert_matches", @@ -5435,7 +5435,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "discv5", "enr 0.8.1", @@ -5459,7 +5459,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "async-trait", "data-encoding", @@ 
-5483,7 +5483,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "assert_matches", "futures", @@ -5509,7 +5509,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "aes 0.8.3", "block-padding", @@ -5540,7 +5540,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "arbitrary", "async-trait", @@ -5574,7 +5574,7 @@ dependencies = [ [[package]] name = "reth-interfaces" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "arbitrary", "async-trait", @@ -5602,7 +5602,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "async-trait", "bytes", @@ -5621,7 +5621,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "bitflags 2.4.0", "byteorder", @@ -5641,7 +5641,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "bindgen 0.65.1", "cc", @@ -5650,7 +5650,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "futures", "metrics", @@ -5660,7 +5660,7 @@ dependencies = [ [[package]] name = "reth-metrics-derive" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "metrics", "once_cell", @@ -5674,7 +5674,7 @@ dependencies = [ [[package]] name = "reth-net-common" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "pin-project", "reth-primitives", @@ -5683,7 +5683,7 @@ dependencies = [ [[package]] name = "reth-net-nat" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "igd", "pin-project-lite", @@ -5697,7 +5697,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "aquamarine", "async-trait", @@ -5748,7 +5748,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "async-trait", "reth-eth-wire", @@ -5761,7 +5761,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "futures-util", "metrics", @@ -5781,7 +5781,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "arbitrary", "assert_matches", @@ -5833,7 +5833,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "auto_impl", "derive_more", @@ -5854,7 +5854,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "assert_matches", "itertools 0.11.0", @@ -5872,7 +5872,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "once_cell", "reth-consensus-common", @@ -5888,7 +5888,7 @@ dependencies = [ [[package]] name = "reth-revm-inspectors" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "boa_engine", "boa_gc", @@ -5904,7 +5904,7 @@ dependencies = [ [[package]] name = "reth-revm-primitives" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "reth-primitives", "revm", @@ -5912,7 +5912,7 @@ dependencies = [ [[package]] name = 
"reth-rlp" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "arrayvec", "auto_impl", @@ -5931,7 +5931,7 @@ dependencies = [ [[package]] name = "reth-rlp-derive" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", @@ -5940,7 +5940,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "assert_matches", "async-trait", @@ -5990,7 +5990,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "jsonrpsee", "reth-primitives", @@ -6000,7 +6000,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "async-trait", "futures", @@ -6014,7 +6014,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "hyper", "jsonrpsee", @@ -6046,7 +6046,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "assert_matches", "async-trait", @@ -6068,7 +6068,7 @@ dependencies = [ [[package]] name = "reth-rpc-types" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "itertools 0.11.0", "jsonrpsee-types", @@ -6083,7 +6083,7 @@ dependencies = [ [[package]] name = "reth-rpc-types-compat" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "reth-primitives", "reth-rlp", @@ -6092,7 +6092,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "aquamarine", "assert_matches", @@ -6129,7 +6129,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "dyn-clone", "futures-util", @@ -6143,7 +6143,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "tracing", "tracing-appender", @@ -6153,7 +6153,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "aquamarine", "assert_matches", @@ -6183,7 +6183,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "criterion", "derive_more", diff --git a/Cargo.toml b/Cargo.toml index 2bc3bf236cfe..1c1c7d9862b0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -56,7 +56,7 @@ default-members = ["bin/reth"] resolver = "2" [workspace.package] -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" edition = "2021" rust-version = "1.70" # Remember to update .clippy.toml and README.md license = "MIT OR Apache-2.0" From 07a1af85ea797df30ef3a82ab51453ddf6411563 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 22 Aug 2023 01:12:03 +0200 Subject: [PATCH 490/722] feat: add Validator::on_new_head_block (#4303) --- crates/transaction-pool/src/lib.rs | 2 +- crates/transaction-pool/src/maintain.rs | 8 ++--- crates/transaction-pool/src/pool/mod.rs | 19 +++------- crates/transaction-pool/src/traits.rs | 40 ++++++++++++++++----- crates/transaction-pool/src/validate/mod.rs | 7 +++- 5 files changed, 45 insertions(+), 31 deletions(-) diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 60f56cace21f..d9669943db4c 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -486,7 +486,7 @@ where 
self.pool.set_block_info(info) } - fn on_canonical_state_change(&self, update: CanonicalStateUpdate) { + fn on_canonical_state_change(&self, update: CanonicalStateUpdate<'_>) { self.pool.on_canonical_state_change(update); } diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index c7c55a619620..2e9936fe95b2 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -271,13 +271,11 @@ pub async fn maintain_transaction_pool( // update the pool first let update = CanonicalStateUpdate { - hash: new_tip.hash, - number: new_tip.number, + new_tip: &new_tip.block, pending_block_base_fee, changed_accounts, // all transactions mined in the new chain need to be removed from the pool mined_transactions: new_mined_transactions.into_iter().collect(), - timestamp: new_tip.timestamp, }; pool.on_canonical_state_change(update); @@ -348,12 +346,10 @@ pub async fn maintain_transaction_pool( // Canonical update let update = CanonicalStateUpdate { - hash: tip.hash, - number: tip.number, + new_tip: &tip.block, pending_block_base_fee, changed_accounts, mined_transactions, - timestamp: tip.timestamp, }; pool.on_canonical_state_change(update); diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 718fb4070443..211f488ae517 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -270,23 +270,14 @@ where } /// Updates the entire pool after a new block was executed. - pub(crate) fn on_canonical_state_change(&self, update: CanonicalStateUpdate) { + pub(crate) fn on_canonical_state_change(&self, update: CanonicalStateUpdate<'_>) { trace!(target: "txpool", %update, "updating pool on canonical state change"); - let CanonicalStateUpdate { - hash, - number, - pending_block_base_fee, - changed_accounts, - mined_transactions, - timestamp: _, - } = update; + let block_info = update.block_info(); + let CanonicalStateUpdate { new_tip, changed_accounts, mined_transactions, .. } = update; + self.validator.on_new_head_block(new_tip); + let changed_senders = self.changed_senders(changed_accounts.into_iter()); - let block_info = BlockInfo { - last_seen_block_hash: hash, - last_seen_block_number: number, - pending_basefee: pending_block_base_fee, - }; // update the pool let outcome = self.pool.write().on_canonical_state_change( diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 9d3c5026dce5..eef461587c42 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -8,7 +8,7 @@ use futures_util::{ready, Stream}; use reth_primitives::{ Address, BlobTransactionSidecar, FromRecoveredPooledTransaction, FromRecoveredTransaction, IntoRecoveredTransaction, PeerId, PooledTransactionsElement, - PooledTransactionsElementEcRecovered, Transaction, TransactionKind, + PooledTransactionsElementEcRecovered, SealedBlock, Transaction, TransactionKind, TransactionSignedEcRecovered, TxHash, EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, H256, U256, }; use reth_rlp::Encodable; @@ -295,7 +295,7 @@ pub trait TransactionPoolExt: TransactionPool { /// Implementers need to update the pool accordingly. /// For example the base fee of the pending block is determined after a block is mined which /// affects the dynamic fee requirement of pending transactions in the pool. 
- fn on_canonical_state_change(&self, update: CanonicalStateUpdate); + fn on_canonical_state_change(&self, update: CanonicalStateUpdate<'_>); /// Updates the accounts in the pool fn update_accounts(&self, accounts: Vec); @@ -450,11 +450,9 @@ impl TransactionOrigin { /// /// This is used to update the pool state accordingly. #[derive(Debug, Clone)] -pub struct CanonicalStateUpdate { +pub struct CanonicalStateUpdate<'a> { /// Hash of the tip block. - pub hash: H256, - /// Number of the tip block. - pub number: u64, + pub new_tip: &'a SealedBlock, /// EIP-1559 Base fee of the _next_ (pending) block /// /// The base fee of a block depends on the utilization of the last block and its base fee. @@ -463,14 +461,38 @@ pub struct CanonicalStateUpdate { pub changed_accounts: Vec, /// All mined transactions in the block range. pub mined_transactions: Vec, +} + +impl<'a> CanonicalStateUpdate<'a> { + /// Returns the number of the tip block. + pub fn number(&self) -> u64 { + self.new_tip.number + } + + /// Returns the hash of the tip block. + pub fn hash(&self) -> H256 { + self.new_tip.hash + } + /// Timestamp of the latest chain update - pub timestamp: u64, + pub fn timestamp(&self) -> u64 { + self.new_tip.timestamp + } + + /// Returns the block info for the tip block. + pub fn block_info(&self) -> BlockInfo { + BlockInfo { + last_seen_block_hash: self.hash(), + last_seen_block_number: self.number(), + pending_basefee: self.pending_block_base_fee, + } + } } -impl fmt::Display for CanonicalStateUpdate { +impl<'a> fmt::Display for CanonicalStateUpdate<'a> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{{ hash: {}, number: {}, pending_block_base_fee: {}, changed_accounts: {}, mined_transactions: {} }}", - self.hash, self.number, self.pending_block_base_fee, self.changed_accounts.len(), self.mined_transactions.len()) + self.hash(), self.number(), self.pending_block_base_fee, self.changed_accounts.len(), self.mined_transactions.len()) } } diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 1ec2bd07f4eb..1e5cc78f89ad 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -6,7 +6,7 @@ use crate::{ traits::{PoolTransaction, TransactionOrigin}, }; use reth_primitives::{ - Address, BlobTransactionSidecar, IntoRecoveredTransaction, TransactionKind, + Address, BlobTransactionSidecar, IntoRecoveredTransaction, SealedBlock, TransactionKind, TransactionSignedEcRecovered, TxHash, H256, U256, }; use std::{fmt, time::Instant}; @@ -157,6 +157,11 @@ pub trait TransactionValidator: Send + Sync { transaction: Self::Transaction, ) -> TransactionValidationOutcome; + /// Invoked when the head block changes. + /// + /// This can be used to update fork specific values (timestamp). + fn on_new_head_block(&self, _new_tip_block: &SealedBlock) {} + /// Ensure that the code size is not greater than `max_init_code_size`. /// `max_init_code_size` should be configurable so this will take it as an argument. 
fn ensure_max_init_code_size( From fe685143f0c92383278b7dfc99860893f3d70a6d Mon Sep 17 00:00:00 2001 From: Derek <103802618+leeederek@users.noreply.github.com> Date: Tue, 22 Aug 2023 03:57:20 -0400 Subject: [PATCH 491/722] Docs: Add a guide on how to spin up a private Reth testnet using Kurtosis (#4304) Co-authored-by: Roman Krasiuk --- book/SUMMARY.md | 1 + book/run/private-testnet.md | 93 +++++++++++++++++++++++++++++++++++++ 2 files changed, 94 insertions(+) create mode 100644 book/run/private-testnet.md diff --git a/book/SUMMARY.md b/book/SUMMARY.md index ffdb45c0cfdc..6c061cebd7ab 100644 --- a/book/SUMMARY.md +++ b/book/SUMMARY.md @@ -9,6 +9,7 @@ 1. [Update Priorities](./installation/priorities.md) 1. [Run a Node](./run/run-a-node.md) 1. [Mainnet or official testnets](./run/mainnet.md) + 1. [Private testnet](./run/private-testnet.md) 1. [Metrics](./run/observability.md) 1. [Configuring Reth](./run/config.md) 1. [Transaction types](./run/transactions.md) diff --git a/book/run/private-testnet.md b/book/run/private-testnet.md new file mode 100644 index 000000000000..901a4de68a68 --- /dev/null +++ b/book/run/private-testnet.md @@ -0,0 +1,93 @@ +# Run Reth in a private testnet using Kurtosis +For those who need a private testnet to validate functionality or scale with Reth. + +## Using Docker locally +This guide uses [Kurtosis' eth2-package](https://github.com/kurtosis-tech/eth2-package) and assumes you have Kurtosis and Docker installed and have Docker already running on your machine. +* Go [here](https://docs.kurtosis.com/install/) to install Kurtosis +* Go [here](https://docs.docker.com/get-docker/) to install Docker + +The `eth2-package` is a general-purpose testnet definition for instantiating private testnets at any scale over Docker or Kubernetes. This guide will go through how to spin up a local private testnet with Reth and various CL clients. Specifically, you will instantiate a 2-node network over Docker with Reth/Lighthouse and Reth/Teku client combinations. + +To see all possible configurations and flags you can use, including metrics and observability tools (e.g. Grafana, Prometheus, etc), go [here](https://github.com/kurtosis-tech/eth2-package#configuration). + +Genesis data will be generated using this [genesis-generator](https://github.com/ethpandaops/ethereum-genesis-generator) and used to bootstrap the EL and CL clients for each node. The end result will be a private testnet with nodes deployed as Docker containers in an ephemeral, isolated environment on your machine called an [enclave](https://docs.kurtosis.com/concepts-reference/enclaves/). Read more about how the `eth2-package` works by going [here](https://github.com/kurtosis-tech/eth2-package/).
+ +First, in your home directory, create a file with the name `network_params.json` with the following contents: +```json +{ + "participants": [ + { + "el_client_type": "reth", + "el_client_image": "ghcr.io/paradigmxyz/reth", + "cl_client_type": "lighthouse", + "cl_client_image": "sigp/lighthouse:latest", + "count": 1 + }, + { + "el_client_type": "reth", + "el_client_image": "ghcr.io/paradigmxyz/reth", + "cl_client_type": "teku", + "cl_client_image": "consensys/teku:latest", + "count": 1 + } + ], + "launch_additional_services": false +} +``` + +Next, run the following command from your command line: +```bash +kurtosis run github.com/kurtosis-tech/eth2-package "$(cat ~/network_params.json)" +``` + +In the end, Kurtosis will print the services running in your enclave that form your private testnet alongside all the container ports and files that were generated & used to start up the private testnet. Here is a sample output: +```console +INFO[2023-08-21T18:22:18-04:00] ==================================================== +INFO[2023-08-21T18:22:18-04:00] || Created enclave: silky-swamp || +INFO[2023-08-21T18:22:18-04:00] ==================================================== +Name: silky-swamp +UUID: 3df730c66123 +Status: RUNNING +Creation Time: Mon, 21 Aug 2023 18:21:32 EDT + +========================================= Files Artifacts ========================================= +UUID Name +c168ec4468f6 1-lighthouse-reth-0-63 +61f821e2cfd5 2-teku-reth-64-127 +e6f94fdac1b8 cl-genesis-data +e6b57828d099 el-genesis-data +1fb632573a2e genesis-generation-config-cl +b8917e497980 genesis-generation-config-el +6fd8c5be336a geth-prefunded-keys +6ab83723b4bd prysm-password + +========================================== User Services ========================================== +UUID Name Ports Status +95386198d3f9 cl-1-lighthouse-reth http: 4000/tcp -> http://127.0.0.1:64947 RUNNING + metrics: 5054/tcp -> http://127.0.0.1:64948 + tcp-discovery: 9000/tcp -> 127.0.0.1:64949 + udp-discovery: 9000/udp -> 127.0.0.1:60303 +5f5cc4cf639a cl-1-lighthouse-reth-validator http: 5042/tcp -> 127.0.0.1:64950 RUNNING + metrics: 5064/tcp -> http://127.0.0.1:64951 +27e1cfaddc72 cl-2-teku-reth http: 4000/tcp -> 127.0.0.1:64954 RUNNING + metrics: 8008/tcp -> 127.0.0.1:64952 + tcp-discovery: 9000/tcp -> 127.0.0.1:64953 + udp-discovery: 9000/udp -> 127.0.0.1:53749 +b454497fbec8 el-1-reth-lighthouse engine-rpc: 8551/tcp -> 127.0.0.1:64941 RUNNING + metrics: 9001/tcp -> 127.0.0.1:64937 + rpc: 8545/tcp -> 127.0.0.1:64939 + tcp-discovery: 30303/tcp -> 127.0.0.1:64938 + udp-discovery: 30303/udp -> 127.0.0.1:55861 + ws: 8546/tcp -> 127.0.0.1:64940 +03a2ef13c99b el-2-reth-teku engine-rpc: 8551/tcp -> 127.0.0.1:64945 RUNNING + metrics: 9001/tcp -> 127.0.0.1:64946 + rpc: 8545/tcp -> 127.0.0.1:64943 + tcp-discovery: 30303/tcp -> 127.0.0.1:64942 + udp-discovery: 30303/udp -> 127.0.0.1:64186 + ws: 8546/tcp -> 127.0.0.1:64944 +5c199b334236 prelaunch-data-generator-cl-genesis-data RUNNING +46829c4bd8b0 prelaunch-data-generator-el-genesis-data RUNNING +``` + +## Using Kubernetes on remote infrastructure +Kurtosis packages are portable and reproducible, meaning they will work the same way over Docker locally as in the cloud on Kubernetes. Check out these docs [here](https://docs.kurtosis.com/k8s/) to learn how to deploy your private testnet to a Kubernetes cluster.
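+
+## Enabling additional services
+The configuration above keeps `launch_additional_services` disabled. The snippet below is a minimal sketch of how you might flip that flag to also launch the optional metrics and observability tools (e.g. Grafana, Prometheus) mentioned earlier; it only reuses keys already shown in this guide, and the authoritative list of options is the `eth2-package` configuration reference linked above:
+```json
+{
+  "participants": [
+    {
+      "el_client_type": "reth",
+      "el_client_image": "ghcr.io/paradigmxyz/reth",
+      "cl_client_type": "lighthouse",
+      "cl_client_image": "sigp/lighthouse:latest",
+      "count": 1
+    }
+  ],
+  "launch_additional_services": true
+}
+```
+Re-run the same `kurtosis run` command as before with this file; the additional services should then appear in the enclave output alongside the node services shown in the sample above.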
From b296c6bb926ab3ba7e43df8436c0bb5cb41c704c Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 22 Aug 2023 11:00:22 +0100 Subject: [PATCH 492/722] feat(bin): show db path when confirming the drop (#4309) --- bin/reth/src/db/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/reth/src/db/mod.rs b/bin/reth/src/db/mod.rs index 9671f8d795ba..53a24b718ada 100644 --- a/bin/reth/src/db/mod.rs +++ b/bin/reth/src/db/mod.rs @@ -188,7 +188,7 @@ impl Command { Subcommands::Drop { force } => { if !force { // Ask for confirmation - print!("Are you sure you want to drop the database? This cannot be undone. (y/N): "); + print!("Are you sure you want to drop the database at {db_path:?}? This cannot be undone. (y/N): "); // Flush the buffer to ensure the message is printed immediately io::stdout().flush().unwrap(); From b78e10fb2fff898d94484c1e2d1356525efe59ef Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 22 Aug 2023 12:53:25 +0100 Subject: [PATCH 493/722] fix(pruner): history indices `cursor.prev()` emptiness check (#4301) --- crates/prune/src/pruner.rs | 76 ++++++++++++++++++++------------------ 1 file changed, 41 insertions(+), 35 deletions(-) diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs index 5ae773c12a39..9b082d4a08fb 100644 --- a/crates/prune/src/pruner.rs +++ b/crates/prune/src/pruner.rs @@ -661,55 +661,59 @@ impl Pruner { while let Some(result) = cursor.next()? { let (key, blocks): (T::Key, BlockNumberList) = result; + // If shard consists only of block numbers less than the target one, delete shard + // completely. if key.as_ref().highest_block_number <= to_block { - // If shard consists only of block numbers less than the target one, delete shard - // completely. cursor.delete_current()?; if key.as_ref().highest_block_number == to_block { - // Shard contains only block numbers up to the target one, so we can skip to the - // next sharded key. It is guaranteed that further shards for this sharded key - // will not contain the target block number, as it's in this shard. + // Shard contains only block numbers up to the target one, so we can skip to + // the last shard for this key. It is guaranteed that further shards for this + // sharded key will not contain the target block number, as it's in this shard. cursor.seek_exact(last_key(&key))?; } - } else { - // Shard contains block numbers that are higher than the target one, so we need to - // filter it. It is guaranteed that further shards for this sharded key will not - // contain the target block number, as it's in this shard. + } + // Shard contains block numbers that are higher than the target one, so we need to + // filter it. It is guaranteed that further shards for this sharded key will not + // contain the target block number, as it's in this shard. + else { let new_blocks = blocks .iter(0) .skip_while(|block| *block <= to_block as usize) .collect::>(); + // If there were blocks less than or equal to the target one + // (so the shard has changed), update the shard. if blocks.len() != new_blocks.len() { - // If there were blocks less than or equal to the target one - // (so the shard has changed), update the shard. + // If there are no more blocks in this shard, we need to remove it, as empty + // shards are not allowed. if new_blocks.is_empty() { - // If there are no more blocks in this shard, we need to remove it, as empty - // shards are not allowed. if key.as_ref().highest_block_number == u64::MAX { - if let Some(prev_value) = cursor - .prev()? 
- .filter(|(prev_key, _)| key_matches(prev_key, &key)) - .map(|(_, prev_value)| prev_value) - { - // If current shard is the last shard for the sharded key that has - // previous shards, replace it with the previous shard. - cursor.delete_current()?; - // Upsert will replace the last shard for this sharded key with the - // previous value. - cursor.upsert(key.clone(), prev_value)?; - } else { + let prev_row = cursor.prev()?; + match prev_row { + // If current shard is the last shard for the sharded key that + // has previous shards, replace it with the previous shard. + Some((prev_key, prev_value)) if key_matches(&prev_key, &key) => { + cursor.delete_current()?; + // Upsert will replace the last shard for this sharded key with + // the previous value. + cursor.upsert(key.clone(), prev_value)?; + } // If there's no previous shard for this sharded key, // just delete last shard completely. - - // Jump back to the original last shard. - cursor.next()?; - // Delete shard. - cursor.delete_current()?; + _ => { + // If we successfully moved the cursor to a previous row, + // jump to the original last shard. + if prev_row.is_some() { + cursor.next()?; + } + // Delete shard. + cursor.delete_current()?; + } } - } else { - // If current shard is not the last shard for this sharded key, - // just delete it. + } + // If current shard is not the last shard for this sharded key, + // just delete it. + else { cursor.delete_current()?; } } else { @@ -717,8 +721,10 @@ impl Pruner { } } - // Jump to the next address. - cursor.seek_exact(last_key(&key))?; + // Jump to the last shard for this key, if current key isn't already the last shard. + if key.as_ref().highest_block_number != u64::MAX { + cursor.seek_exact(last_key(&key))?; + } } processed += 1; From 6fee16e1445c75c214e4b4a826e49d68c3e24d69 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 22 Aug 2023 14:15:56 +0100 Subject: [PATCH 494/722] chore: bump rustls-webpki (#4311) --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d1a88fa5dc6e..b3f73c167135 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6471,9 +6471,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.101.3" +version = "0.101.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "261e9e0888cba427c3316e6322805653c9425240b6fd96cee7cb671ab70ab8d0" +checksum = "7d93931baf2d282fff8d3a532bbfd7653f734643161b87e3e01e59a04439bf0d" dependencies = [ "ring", "untrusted", From 8d25aa314c16c722ebe9ff560f7ecb2f2e55afd8 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 22 Aug 2023 09:33:07 -0400 Subject: [PATCH 495/722] fix: add docs and serde attrs to ExecutionPayload v3 fields (#4302) --- crates/rpc/rpc-types/src/eth/engine/payload.rs | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/crates/rpc/rpc-types/src/eth/engine/payload.rs b/crates/rpc/rpc-types/src/eth/engine/payload.rs index d63e0817e183..856e8728470c 100644 --- a/crates/rpc/rpc-types/src/eth/engine/payload.rs +++ b/crates/rpc/rpc-types/src/eth/engine/payload.rs @@ -79,14 +79,20 @@ pub struct ExecutionPayload { pub timestamp: U64, pub extra_data: Bytes, pub base_fee_per_gas: U256, - pub blob_gas_used: Option, - pub excess_blob_gas: Option, pub block_hash: H256, pub transactions: Vec, /// Array of [`Withdrawal`] enabled with V2 /// See #[serde(default, skip_serializing_if = "Option::is_none")] pub withdrawals: Option>, + /// Array of [`U64`] representing 
blob gas used, enabled with V3 + /// See + #[serde(default, skip_serializing_if = "Option::is_none")] + pub blob_gas_used: Option, + /// Array of [`U64`] representing excess blob gas, enabled with V3 + /// See + #[serde(default, skip_serializing_if = "Option::is_none")] + pub excess_blob_gas: Option, } impl From for ExecutionPayload { @@ -529,16 +535,16 @@ mod tests { #[test] fn serde_roundtrip_legacy_txs_payload() { - // pulled from hive tests - let s = r#"{"parentHash":"0x67ead97eb79b47a1638659942384143f36ed44275d4182799875ab5a87324055","feeRecipient":"0x0000000000000000000000000000000000000000","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x4e3c608a9f2e129fccb91a1dae7472e78013b8e654bccc8d224ce3d63ae17006","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prevRandao":"0x44bb4b98c59dbb726f96ffceb5ee028dcbe35b9bba4f9ffd56aeebf8d1e4db62","blockNumber":"0x1","gasLimit":"0x2fefd8","gasUsed":"0xa860","timestamp":"0x1235","extraData":"0x8b726574682f76302e312e30","baseFeePerGas":"0x342770c0","blobGasUsed":null,"excessBlobGas":null,"blockHash":"0x5655011482546f16b2312ef18e9fad03d6a52b1be95401aea884b222477f9e64","transactions":["0xf865808506fc23ac00830124f8940000000000000000000000000000000000000316018032a044b25a8b9b247d01586b3d59c71728ff49c9b84928d9e7fa3377ead3b5570b5da03ceac696601ff7ee6f5fe8864e2998db9babdf5eeba1a0cd5b4d44b3fcbd181b"]}"#; + // pulled from hive tests - modified with 4844 fields + let s = r#"{"parentHash":"0x67ead97eb79b47a1638659942384143f36ed44275d4182799875ab5a87324055","feeRecipient":"0x0000000000000000000000000000000000000000","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x4e3c608a9f2e129fccb91a1dae7472e78013b8e654bccc8d224ce3d63ae17006","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prevRandao":"0x44bb4b98c59dbb726f96ffceb5ee028dcbe35b9bba4f9ffd56aeebf8d1e4db62","blockNumber":"0x1","gasLimit":"0x2fefd8","gasUsed":"0xa860","timestamp":"0x1235","extraData":"0x8b726574682f76302e312e30","baseFeePerGas":"0x342770c0","blockHash":"0x5655011482546f16b2312ef18e9fad03d6a52b1be95401aea884b222477f9e64","transactions":["0xf865808506fc23ac00830124f8940000000000000000000000000000000000000316018032a044b25a8b9b247d01586b3d59c71728ff49c9b84928d9e7fa3377ead3b5570b5da03ceac696601ff7ee6f5fe8864e2998db9babdf5eeba1a0cd5b4d44b3fcbd181b"],"blobGasUsed":"0xb10b","excessBlobGas":"0xb10b"}"#; let payload: ExecutionPayload = serde_json::from_str(s).unwrap(); assert_eq!(serde_json::to_string(&payload).unwrap(), s); } #[test] fn 
serde_roundtrip_enveloped_txs_payload() { - // pulled from hive tests - let s = r#"{"parentHash":"0x67ead97eb79b47a1638659942384143f36ed44275d4182799875ab5a87324055","feeRecipient":"0x0000000000000000000000000000000000000000","stateRoot":"0x76a03cbcb7adce07fd284c61e4fa31e5e786175cefac54a29e46ec8efa28ea41","receiptsRoot":"0x4e3c608a9f2e129fccb91a1dae7472e78013b8e654bccc8d224ce3d63ae17006","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prevRandao":"0x028111cb7d25918386a69656b3d17b2febe95fd0f11572c1a55c14f99fdfe3df","blockNumber":"0x1","gasLimit":"0x2fefd8","gasUsed":"0xa860","timestamp":"0x1235","extraData":"0x8b726574682f76302e312e30","baseFeePerGas":"0x342770c0","blobGasUsed":null,"excessBlobGas":null,"blockHash":"0xa6f40ed042e61e88e76125dede8fff8026751ea14454b68fb534cea99f2b2a77","transactions":["0xf865808506fc23ac00830124f8940000000000000000000000000000000000000316018032a044b25a8b9b247d01586b3d59c71728ff49c9b84928d9e7fa3377ead3b5570b5da03ceac696601ff7ee6f5fe8864e2998db9babdf5eeba1a0cd5b4d44b3fcbd181b"]}"#; + // pulled from hive tests - modified with 4844 fields + let s = r#"{"parentHash":"0x67ead97eb79b47a1638659942384143f36ed44275d4182799875ab5a87324055","feeRecipient":"0x0000000000000000000000000000000000000000","stateRoot":"0x76a03cbcb7adce07fd284c61e4fa31e5e786175cefac54a29e46ec8efa28ea41","receiptsRoot":"0x4e3c608a9f2e129fccb91a1dae7472e78013b8e654bccc8d224ce3d63ae17006","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prevRandao":"0x028111cb7d25918386a69656b3d17b2febe95fd0f11572c1a55c14f99fdfe3df","blockNumber":"0x1","gasLimit":"0x2fefd8","gasUsed":"0xa860","timestamp":"0x1235","extraData":"0x8b726574682f76302e312e30","baseFeePerGas":"0x342770c0","blockHash":"0xa6f40ed042e61e88e76125dede8fff8026751ea14454b68fb534cea99f2b2a77","transactions":["0xf865808506fc23ac00830124f8940000000000000000000000000000000000000316018032a044b25a8b9b247d01586b3d59c71728ff49c9b84928d9e7fa3377ead3b5570b5da03ceac696601ff7ee6f5fe8864e2998db9babdf5eeba1a0cd5b4d44b3fcbd181b"],"blobGasUsed":"0xb10b","excessBlobGas":"0xb10b"}"#; let payload: ExecutionPayload = serde_json::from_str(s).unwrap(); assert_eq!(serde_json::to_string(&payload).unwrap(), s); } From 3f887ab82c079a5ae0af0d94d539e8482d6a1de5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 22 Aug 2023 16:07:19 +0200 Subject: [PATCH 496/722] chore: kebab case --dev fields (#4314) --- bin/reth/src/args/dev_args.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/bin/reth/src/args/dev_args.rs b/bin/reth/src/args/dev_args.rs index 5cc02522d1fa..ec951e795912 100644 --- 
a/bin/reth/src/args/dev_args.rs +++ b/bin/reth/src/args/dev_args.rs @@ -20,7 +20,7 @@ pub struct DevArgs { /// How many transactions to mine per block. #[arg( - long = "dev.block_max_transactions", + long = "dev.block-max-transactions", help_heading = "Dev testnet", conflicts_with = "block_time" )] @@ -31,7 +31,7 @@ pub struct DevArgs { /// Parses strings using [humantime::parse_duration] /// --dev.block_time 12s #[arg( - long = "dev.block_time", + long = "dev.block-time", help_heading = "Dev testnet", conflicts_with = "block_max_transactions", value_parser = parse_duration, @@ -66,14 +66,14 @@ mod tests { let args = CommandParser::::parse_from([ "reth", "--dev", - "--dev.block_max_transactions", + "--dev.block-max-transactions", "2", ]) .args; assert_eq!(args, DevArgs { dev: true, block_max_transactions: Some(2), block_time: None }); let args = - CommandParser::::parse_from(["reth", "--dev", "--dev.block_time", "1s"]).args; + CommandParser::::parse_from(["reth", "--dev", "--dev.block-time", "1s"]).args; assert_eq!( args, DevArgs { @@ -89,9 +89,9 @@ mod tests { let args = CommandParser::::try_parse_from([ "reth", "--dev", - "--dev.block_max_transactions", + "--dev.block-max-transactions", "2", - "--dev.block_time", + "--dev.block-time", "1s", ]); assert!(args.is_err()); From 35c01fe25d923ca8773d7d01e9a103d5078989e7 Mon Sep 17 00:00:00 2001 From: Sabnock <24715302+Sabnock01@users.noreply.github.com> Date: Tue, 22 Aug 2023 09:17:12 -0500 Subject: [PATCH 497/722] docs: add `--dev` flag to book (#4307) Co-authored-by: Matthias Seitz --- book/cli/node.md | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/book/cli/node.md b/book/cli/node.md index 78d9b40dba2f..d0826f9b214e 100644 --- a/book/cli/node.md +++ b/book/cli/node.md @@ -314,6 +314,25 @@ Database: --auto-mine Automatically mine blocks for new transactions +Dev testnet: + --dev + Start the node in dev mode + + This mode uses a local proof-of-authority consensus engine with either fixed block times + or automatically mined blocks. + Disables network discovery and enables local http server. + Prefunds 20 accounts derived by mnemonic "test test test test test test test test test test + test junk" with 10 000 ETH each. + + --dev.block-max-transactions + How many transactions to mine per block + + --dev.block-time + Interval between blocks. 
+ + Parses strings using [humantime::parse_duration] + --dev.block-time 12s + Logging: --log.persistent The flag to enable persistent logs From 0e61fbe9c63a3dc1ea685d395100efef8eeee3a3 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 22 Aug 2023 16:04:54 +0100 Subject: [PATCH 498/722] feat(bin): expose `db.table_entries` metric (#4316) --- bin/reth/src/prometheus_exporter.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/bin/reth/src/prometheus_exporter.rs b/bin/reth/src/prometheus_exporter.rs index cea3c0633bff..ba7f24fe5da5 100644 --- a/bin/reth/src/prometheus_exporter.rs +++ b/bin/reth/src/prometheus_exporter.rs @@ -91,11 +91,13 @@ pub(crate) async fn initialize( let overflow_pages = stats.overflow_pages(); let num_pages = leaf_pages + branch_pages + overflow_pages; let table_size = page_size * num_pages; + let entries = stats.entries(); absolute_counter!("db.table_size", table_size as u64, "table" => table); absolute_counter!("db.table_pages", leaf_pages as u64, "table" => table, "type" => "leaf"); absolute_counter!("db.table_pages", branch_pages as u64, "table" => table, "type" => "branch"); absolute_counter!("db.table_pages", overflow_pages as u64, "table" => table, "type" => "overflow"); + absolute_counter!("db.table_entries", entries as u64, "table" => table); } Ok::<(), eyre::Report>(()) From 928c60cad46e00266a5ec8f90b23cf54653106b0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 22 Aug 2023 17:17:24 +0200 Subject: [PATCH 499/722] feat: add TransactionPool::get_pooled_transaction_elements (#4317) --- crates/primitives/src/transaction/eip4844.rs | 18 +++++ crates/transaction-pool/src/lib.rs | 15 +++++- crates/transaction-pool/src/noop.rs | 23 +++++--- crates/transaction-pool/src/pool/mod.rs | 57 ++++++++++++++++++-- crates/transaction-pool/src/traits.rs | 45 ++++++++++++++-- 5 files changed, 143 insertions(+), 15 deletions(-) diff --git a/crates/primitives/src/transaction/eip4844.rs b/crates/primitives/src/transaction/eip4844.rs index 3092f284f171..3db0bd36a1c6 100644 --- a/crates/primitives/src/transaction/eip4844.rs +++ b/crates/primitives/src/transaction/eip4844.rs @@ -288,6 +288,24 @@ pub struct BlobTransaction { } impl BlobTransaction { + /// Constructs a new [BlobTransaction] from a [TransactionSigned] and a + /// [BlobTransactionSidecar]. + /// + /// Returns an error if the signed transaction is not [TxEip4844] + pub fn try_from_signed( + tx: TransactionSigned, + sidecar: BlobTransactionSidecar, + ) -> Result { + let TransactionSigned { transaction, signature, hash } = tx; + match transaction { + Transaction::Eip4844(transaction) => Ok(Self { hash, transaction, signature, sidecar }), + transaction => { + let tx = TransactionSigned { transaction, signature, hash }; + Err((tx, sidecar)) + } + } + } + /// Verifies that the transaction's blob data, commitments, and proofs are all valid. /// /// Takes as input the [KzgSettings], which should contain the parameters derived from the diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 60f56cace21f..d2b705607ec3 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -156,7 +156,7 @@
- `test-utils`: Export utilities for testing use crate::pool::PoolInner; use aquamarine as _; -use reth_primitives::{Address, BlobTransactionSidecar, TxHash, U256}; +use reth_primitives::{Address, BlobTransactionSidecar, PooledTransactionsElement, TxHash, U256}; use reth_provider::StateProviderFactory; use std::{ collections::{HashMap, HashSet}, @@ -165,7 +165,10 @@ use std::{ use tokio::sync::mpsc::Receiver; use tracing::{instrument, trace}; -use crate::blobstore::{BlobStore, BlobStoreError}; +use crate::{ + blobstore::{BlobStore, BlobStoreError}, + traits::GetPooledTransactionLimit, +}; pub use crate::{ config::{ PoolConfig, PriceBumpConfig, SubPoolLimit, DEFAULT_PRICE_BUMP, REPLACE_BLOB_PRICE_BUMP, @@ -403,6 +406,14 @@ where self.pooled_transactions().into_iter().take(max).collect() } + fn get_pooled_transaction_elements( + &self, + tx_hashes: Vec, + limit: GetPooledTransactionLimit, + ) -> Vec { + self.pool.get_pooled_transaction_elements(tx_hashes, limit) + } + fn best_transactions( &self, ) -> Box>>> { diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index 027c106e9994..fe59bd93ac70 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -4,13 +4,16 @@ //! to be generic over it. use crate::{ - blobstore::BlobStoreError, error::PoolError, traits::PendingTransactionListenerKind, - validate::ValidTransaction, AllPoolTransactions, AllTransactionsEvents, BestTransactions, - BlockInfo, EthPooledTransaction, NewTransactionEvent, PoolResult, PoolSize, PoolTransaction, - PropagatedTransactions, TransactionEvents, TransactionOrigin, TransactionPool, - TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, + blobstore::BlobStoreError, + error::PoolError, + traits::{GetPooledTransactionLimit, PendingTransactionListenerKind}, + validate::ValidTransaction, + AllPoolTransactions, AllTransactionsEvents, BestTransactions, BlockInfo, EthPooledTransaction, + NewTransactionEvent, PoolResult, PoolSize, PoolTransaction, PropagatedTransactions, + TransactionEvents, TransactionOrigin, TransactionPool, TransactionValidationOutcome, + TransactionValidator, ValidPoolTransaction, }; -use reth_primitives::{Address, BlobTransactionSidecar, TxHash}; +use reth_primitives::{Address, BlobTransactionSidecar, PooledTransactionsElement, TxHash}; use std::{collections::HashSet, marker::PhantomData, sync::Arc}; use tokio::sync::{mpsc, mpsc::Receiver}; @@ -108,6 +111,14 @@ impl TransactionPool for NoopTransactionPool { vec![] } + fn get_pooled_transaction_elements( + &self, + _tx_hashes: Vec, + _limit: GetPooledTransactionLimit, + ) -> Vec { + vec![] + } + fn best_transactions( &self, ) -> Box>>> { diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 211f488ae517..e2bbd4ffe255 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -82,7 +82,10 @@ use crate::{ }; use best::BestTransactions; use parking_lot::{Mutex, RwLock}; -use reth_primitives::{Address, BlobTransactionSidecar, TxHash, H256}; +use reth_primitives::{ + Address, BlobTransaction, BlobTransactionSidecar, IntoRecoveredTransaction, + PooledTransactionsElement, TransactionSigned, TxHash, H256, +}; use std::{ collections::{HashMap, HashSet}, fmt, @@ -97,10 +100,14 @@ pub use events::{FullTransactionEvent, TransactionEvent}; mod listener; use crate::{ - blobstore::BlobStore, metrics::BlobStoreMetrics, pool::txpool::UpdateOutcome, - traits::PendingTransactionListenerKind, 
validate::ValidTransaction, + blobstore::BlobStore, + metrics::BlobStoreMetrics, + pool::txpool::UpdateOutcome, + traits::{GetPooledTransactionLimit, PendingTransactionListenerKind}, + validate::ValidTransaction, }; pub use listener::{AllTransactionsEvents, TransactionEvents}; +use reth_rlp::Encodable; mod best; mod parked; @@ -269,6 +276,50 @@ where pool.all().transactions_iter().filter(|tx| tx.propagate).collect() } + /// Returns the [BlobTransaction] for the given transaction if the sidecar exists. + /// + /// Caution: this assumes the given transaction is eip-4844 + fn get_blob_transaction(&self, transaction: TransactionSigned) -> Option { + if let Ok(Some(sidecar)) = self.blob_store.get(transaction.hash()) { + if let Ok(blob) = BlobTransaction::try_from_signed(transaction, sidecar) { + return Some(blob) + } + } + None + } + + /// Returns converted [PooledTransactionsElement] for the given transaction hashes. + pub(crate) fn get_pooled_transaction_elements( + &self, + tx_hashes: Vec, + limit: GetPooledTransactionLimit, + ) -> Vec { + let transactions = self.get_all(tx_hashes); + let mut elements = Vec::with_capacity(transactions.len()); + let mut size = 0; + for transaction in transactions { + let tx = transaction.to_recovered_transaction().into_signed(); + let pooled = if tx.is_eip4844() { + if let Some(blob) = self.get_blob_transaction(tx) { + PooledTransactionsElement::BlobTransaction(blob) + } else { + continue + } + } else { + PooledTransactionsElement::from(tx) + }; + + size += pooled.length(); + elements.push(pooled); + + if limit.exceeds(size) { + break + } + } + + elements + } + /// Updates the entire pool after a new block was executed. pub(crate) fn on_canonical_state_change(&self, update: CanonicalStateUpdate<'_>) { trace!(target: "txpool", %update, "updating pool on canonical state change"); diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index eef461587c42..2da408b77c96 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -171,8 +171,13 @@ pub trait TransactionPool: Send + Sync + Clone { /// Returns the _full_ transaction objects all transactions in the pool. /// + /// This is intended to be used by the network for the initial exchange of pooled transaction + /// _hashes_ + /// /// Note: This returns a `Vec` but should guarantee that all transactions are unique. /// + /// Caution: In case of blob transactions, this does not include the sidecar. + /// /// Consumer: P2P fn pooled_transactions(&self) -> Vec>>; @@ -184,6 +189,21 @@ pub trait TransactionPool: Send + Sync + Clone { max: usize, ) -> Vec>>; + /// Returns converted [PooledTransactionsElement] for the given transaction hashes. + /// + /// This adheres to the expected behavior of [`GetPooledTransactions`](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#getpooledtransactions-0x09): + /// The transactions must be in same order as in the request, but it is OK to skip transactions + /// which are not available. + /// + /// If the transaction is a blob transaction, the sidecar will be included. + /// + /// Consumer: P2P + fn get_pooled_transaction_elements( + &self, + tx_hashes: Vec, + limit: GetPooledTransactionLimit, + ) -> Vec; + /// Returns an iterator that yields transactions that are ready for block production. /// /// Consumer: Block production @@ -249,10 +269,7 @@ pub trait TransactionPool: Send + Sync + Clone { /// Returns all transactions objects for the given hashes. 
/// - /// TODO(mattsse): this will no longer be accurate and we need a new function specifically for - /// pooled txs This adheres to the expected behavior of [`GetPooledTransactions`](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#getpooledtransactions-0x09): - /// The transactions must be in same order as in the request, but it is OK to skip transactions - /// which are not available. + /// Caution: In case of blob transactions, this does not include the sidecar. fn get_all(&self, txs: Vec) -> Vec>>; /// Notify the pool about transactions that are propagated to peers. @@ -848,6 +865,26 @@ pub struct BlockInfo { pub pending_basefee: u64, } +/// The limit to enforce for [TransactionPool::get_pooled_transaction_elements]. +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub enum GetPooledTransactionLimit { + /// No limit, return all transactions. + None, + /// Enforce a size limit on the returned transactions, for example 2MB + SizeSoftLimit(usize), +} + +impl GetPooledTransactionLimit { + /// Returns true if the given size exceeds the limit. + #[inline] + pub fn exceeds(&self, size: usize) -> bool { + match self { + GetPooledTransactionLimit::None => false, + GetPooledTransactionLimit::SizeSoftLimit(limit) => size > *limit, + } + } +} + /// A Stream that yields full transactions of the subpool #[must_use = "streams do nothing unless polled"] #[derive(Debug)] From 12bec7153983b12a6ade31de173429e135269c81 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 22 Aug 2023 17:18:06 +0200 Subject: [PATCH 500/722] fix: apply state overrides once (#4313) --- crates/rpc/rpc/src/debug.rs | 12 ++++++++---- crates/rpc/rpc/src/eth/api/call.rs | 10 +++++++--- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index fcb592516306..7e987ca9c70f 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -357,7 +357,7 @@ where let opts = opts.unwrap_or_default(); let block = block.ok_or_else(|| EthApiError::UnknownBlockNumber)?; - let GethDebugTracingCallOptions { tracing_options, state_overrides, .. } = opts; + let GethDebugTracingCallOptions { tracing_options, mut state_overrides, ..
} = opts; let gas_limit = self.inner.eth_api.call_gas_limit(); // we're essentially replaying the transactions in the block here, hence we need the state @@ -399,18 +399,22 @@ where while let Some(bundle) = bundles.next() { //let mut result = Vec::with_capacity(bundle.len()); let Bundle { transactions, block_override } = bundle; - let overrides = - EvmOverrides::new(state_overrides.clone(), block_override.map(Box::new)); + + let block_overrides = block_override.map(Box::new); let mut transactions = transactions.into_iter().peekable(); while let Some(tx) = transactions.next() { + // apply state overrides only once, before the first transaction + let state_overrides = state_overrides.take(); + let overrides = EvmOverrides::new(state_overrides, block_overrides.clone()); + let env = prepare_call_env( cfg.clone(), block_env.clone(), tx, gas_limit, &mut db, - overrides.clone(), + overrides, )?; let (trace, state) = this.trace_transaction( diff --git a/crates/rpc/rpc/src/eth/api/call.rs b/crates/rpc/rpc/src/eth/api/call.rs index e47555429ca8..6e40b76b6ad0 100644 --- a/crates/rpc/rpc/src/eth/api/call.rs +++ b/crates/rpc/rpc/src/eth/api/call.rs @@ -75,7 +75,7 @@ where &self, bundle: Bundle, state_context: Option, - state_override: Option, + mut state_override: Option, ) -> EthResult> { let Bundle { transactions, block_override } = bundle; if transactions.is_empty() { @@ -123,17 +123,21 @@ where } } - let overrides = EvmOverrides::new(state_override.clone(), block_override.map(Box::new)); + let block_overrides = block_override.map(Box::new); let mut transactions = transactions.into_iter().peekable(); while let Some(tx) = transactions.next() { + // apply state overrides only once, before the first transaction + let state_overrides = state_override.take(); + let overrides = EvmOverrides::new(state_overrides, block_overrides.clone()); + let env = prepare_call_env( cfg.clone(), block_env.clone(), tx, gas_limit, &mut db, - overrides.clone(), + overrides, )?; let (res, _) = transact(&mut db, env)?; From 404f6baaaaa353c237db14004f38c91e7c1ca804 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 22 Aug 2023 17:36:55 +0200 Subject: [PATCH 501/722] feat: track active forks (#4315) --- crates/transaction-pool/src/validate/eth.rs | 119 +++++++++++++++---- crates/transaction-pool/src/validate/task.rs | 13 +- 2 files changed, 99 insertions(+), 33 deletions(-) diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index de33fa16d806..f5899b4f8b02 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -10,24 +10,48 @@ use crate::{ use reth_primitives::{ constants::{eip4844::KZG_TRUSTED_SETUP, ETHEREUM_BLOCK_GAS_LIMIT}, kzg::KzgSettings, - ChainSpec, InvalidTransactionError, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, - LEGACY_TX_TYPE_ID, + ChainSpec, InvalidTransactionError, SealedBlock, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, + EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, }; use reth_provider::{AccountReader, StateProviderFactory}; use reth_tasks::TaskSpawner; -use std::{marker::PhantomData, sync::Arc}; +use std::{ + marker::PhantomData, + sync::{atomic::AtomicBool, Arc}, +}; use tokio::sync::Mutex; /// Validator for Ethereum transactions. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct EthTransactionValidator { /// The type that performs the actual validation. 
- pub inner: Arc>, + inner: Arc>, +} + +#[async_trait::async_trait] +impl TransactionValidator for EthTransactionValidator +where + Client: StateProviderFactory, + Tx: PoolTransaction, +{ + type Transaction = Tx; + + async fn validate_transaction( + &self, + origin: TransactionOrigin, + transaction: Self::Transaction, + ) -> TransactionValidationOutcome { + self.inner.validate_transaction(origin, transaction).await + } + + fn on_new_head_block(&self, new_tip_block: &SealedBlock) { + self.inner.on_new_head_block(new_tip_block) + } } /// A [TransactionValidator] implementation that validates ethereum transaction. #[derive(Debug)] -pub struct EthTransactionValidatorInner { +pub(crate) struct EthTransactionValidatorInner { /// Spec of the chain chain_spec: Arc, /// This type fetches account info from the db @@ -35,10 +59,8 @@ pub struct EthTransactionValidatorInner { /// Blobstore used for fetching re-injected blob transactions. #[allow(unused)] blob_store: Box, - /// Fork indicator whether we are in the Shanghai stage. - shanghai: bool, - /// Fork indicator whether we are in the Cancun hardfork. - cancun: bool, + /// tracks activated forks relevant for transaction validation + fork_tracker: ForkTracker, /// Fork indicator whether we are using EIP-2718 type transactions. eip2718: bool, /// Fork indicator whether we are using EIP-1559 type transactions. @@ -62,7 +84,7 @@ pub struct EthTransactionValidatorInner { impl EthTransactionValidatorInner { /// Returns the configured chain id - pub fn chain_id(&self) -> u64 { + pub(crate) fn chain_id(&self) -> u64 { self.chain_spec.chain().id() } } @@ -131,7 +153,7 @@ where } // Check whether the init code size has been exceeded. - if self.shanghai { + if self.fork_tracker.is_shanghai_activated() { if let Err(err) = self.ensure_max_init_code_size(&transaction, MAX_INIT_CODE_SIZE) { return TransactionValidationOutcome::Invalid(transaction, err) } @@ -177,8 +199,15 @@ where } // blob tx checks - if self.cancun { - // TODO: validate blob txs, if missing try load from blob store + if transaction.is_eip4844() { + // Cancun fork is required for blob txs + if !self.fork_tracker.is_cancun_activated() { + return TransactionValidationOutcome::Invalid( + transaction, + InvalidTransactionError::TxTypeNotSupported.into(), + ) + } + // TODO add checks for blob tx } let account = match self @@ -235,6 +264,17 @@ where }, } } + + fn on_new_head_block(&self, new_tip_block: &SealedBlock) { + // update all forks + if self.chain_spec.is_cancun_activated_at_timestamp(new_tip_block.timestamp) { + self.fork_tracker.cancun.store(true, std::sync::atomic::Ordering::Relaxed); + } + + if self.chain_spec.is_shanghai_activated_at_timestamp(new_tip_block.timestamp) { + self.fork_tracker.shanghai.store(true, std::sync::atomic::Ordering::Relaxed); + } + } } /// A builder for [TransactionValidationTaskExecutor] @@ -245,11 +285,11 @@ pub struct EthTransactionValidatorBuilder { shanghai: bool, /// Fork indicator whether we are in the Cancun hardfork. cancun: bool, - /// Fork indicator whether we are using EIP-2718 type transactions. + /// Whether using EIP-2718 type transactions is allowed eip2718: bool, - /// Fork indicator whether we are using EIP-1559 type transactions. + /// Whether using EIP-1559 type transactions is allowed eip1559: bool, - /// Fork indicator whether we are using EIP-4844 blob transactions. 
+ /// Whether using EIP-4844 type transactions is allowed eip4844: bool, /// The current max gas limit block_gas_limit: u64, @@ -271,9 +311,6 @@ impl EthTransactionValidatorBuilder { pub fn new(chain_spec: Arc) -> Self { Self { chain_spec, - shanghai: true, - eip2718: true, - eip1559: true, block_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT, minimum_priority_fee: None, additional_tasks: 1, @@ -281,9 +318,16 @@ impl EthTransactionValidatorBuilder { propagate_local_transactions: true, kzg_settings: Arc::clone(&KZG_TRUSTED_SETUP), - // TODO: can hard enable by default once transitioned + // by default all transaction types are allowed + eip2718: true, + eip1559: true, + eip4844: true, + + // shanghai is activated by default + shanghai: true, + + // TODO: can hard enable by default once mainnet transitioned cancun: false, - eip4844: false, } } @@ -338,7 +382,7 @@ impl EthTransactionValidatorBuilder { } /// Sets toggle to propagate transactions received locally by this client (e.g - /// transactions from eth_Sendtransaction to this nodes' RPC server) + /// transactions from eth_sendTransaction to this node's RPC server) /// /// If set to false, only transactions received by network peers (via /// p2p) will be marked as propagated in the local transaction pool and returned on a @@ -347,7 +391,7 @@ impl EthTransactionValidatorBuilder { self.propagate_local_transactions = propagate_local_txs; self } - /// Disables propagating transactions recieved locally by this client + /// Disables propagating transactions received locally by this client /// /// For more information, check docs for set_propagate_local_transactions pub fn no_local_transaction_propagation(mut self) -> Self { @@ -397,13 +441,15 @@ impl EthTransactionValidatorBuilder { kzg_settings, } = self; + let fork_tracker = + ForkTracker { shanghai: AtomicBool::new(shanghai), cancun: AtomicBool::new(cancun) }; + let inner = EthTransactionValidatorInner { chain_spec, client, - shanghai, eip2718, eip1559, - cancun, + fork_tracker, eip4844, block_gas_limit, minimum_priority_fee, @@ -438,3 +484,24 @@ impl EthTransactionValidatorBuilder { } } } + +/// Keeps track of whether certain forks are activated +#[derive(Debug)] +pub(crate) struct ForkTracker { + /// Tracks if shanghai is activated at the block's timestamp. + pub(crate) shanghai: AtomicBool, + /// Tracks if cancun is activated at the block's timestamp. + pub(crate) cancun: AtomicBool, +} + +impl ForkTracker { + /// Returns true if the Shanghai fork is activated. + pub(crate) fn is_shanghai_activated(&self) -> bool { + self.shanghai.load(std::sync::atomic::Ordering::Relaxed) + } + + /// Returns true if the Cancun fork is activated.
+ pub(crate) fn is_cancun_activated(&self) -> bool { + self.cancun.load(std::sync::atomic::Ordering::Relaxed) + } +} diff --git a/crates/transaction-pool/src/validate/task.rs b/crates/transaction-pool/src/validate/task.rs index 007aa2568a90..a3ea15e55ba8 100644 --- a/crates/transaction-pool/src/validate/task.rs +++ b/crates/transaction-pool/src/validate/task.rs @@ -7,7 +7,7 @@ use crate::{ TransactionValidator, }; use futures_util::{lock::Mutex, StreamExt}; -use reth_primitives::ChainSpec; +use reth_primitives::{ChainSpec, SealedBlock}; use reth_provider::StateProviderFactory; use reth_tasks::TaskSpawner; use std::{future::Future, pin::Pin, sync::Arc}; @@ -132,11 +132,6 @@ impl TransactionValidationTaskExecutor(client, tasks, blob_store) } - - /// Returns the configured chain id - pub fn chain_id(&self) -> u64 { - self.validator.inner.chain_id() - } } impl TransactionValidationTaskExecutor { @@ -169,7 +164,7 @@ where { let to_validation_task = self.to_validation_task.clone(); let to_validation_task = to_validation_task.lock().await; - let validator = Arc::clone(&self.validator.inner); + let validator = self.validator.clone(); let res = to_validation_task .send(Box::pin(async move { let res = validator.validate_transaction(origin, transaction).await; @@ -192,4 +187,8 @@ where ), } } + + fn on_new_head_block(&self, new_tip_block: &SealedBlock) { + self.validator.on_new_head_block(new_tip_block) + } } From 1859321c48a58986cab57869b04f540efb9782df Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 22 Aug 2023 18:46:17 +0100 Subject: [PATCH 502/722] chore: add `crates/prune` to CODEOWNERS (#4320) --- CODEOWNERS | 1 + 1 file changed, 1 insertion(+) diff --git a/CODEOWNERS b/CODEOWNERS index 83a3203467d9..7216063442e7 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -17,4 +17,5 @@ crates/blockchain-tree @rakita @rkrasiuk crates/metrics @onbjerg crates/tracing @onbjerg crates/tasks @mattsse +crates/prune @shekhirin @joshieDo .github/ @onbjerg @gakonst \ No newline at end of file From bfc4abf7cad9a38fa8131e2a3e28842328a96322 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 22 Aug 2023 16:42:29 -0400 Subject: [PATCH 503/722] feat: add shouldOverrideBuilder to ExecutionPayloadEnvelope (#4322) --- crates/payload/builder/src/payload.rs | 6 +++++- crates/rpc/rpc-types/src/eth/engine/payload.rs | 11 +++++++++-- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/crates/payload/builder/src/payload.rs b/crates/payload/builder/src/payload.rs index c930585a457d..0deb6980875b 100644 --- a/crates/payload/builder/src/payload.rs +++ b/crates/payload/builder/src/payload.rs @@ -69,7 +69,11 @@ impl From for ExecutionPayloadEnvelope { fn from(value: BuiltPayload) -> Self { let BuiltPayload { block, fees, .. } = value; - ExecutionPayloadEnvelope { block_value: fees, payload: block.into() } + ExecutionPayloadEnvelope { + block_value: fees, + payload: block.into(), + should_override_builder: None, + } } } diff --git a/crates/rpc/rpc-types/src/eth/engine/payload.rs b/crates/rpc/rpc-types/src/eth/engine/payload.rs index 856e8728470c..84751e7f7aa2 100644 --- a/crates/rpc/rpc-types/src/eth/engine/payload.rs +++ b/crates/rpc/rpc-types/src/eth/engine/payload.rs @@ -29,9 +29,12 @@ impl std::fmt::Display for PayloadId { } } -/// This structure maps for the return value of `engine_getPayloadV2` of the beacon chain spec. +/// This structure maps for the return value of `engine_getPayload` of the beacon chain spec, for +/// both V2 and V3. 
/// -/// See also: +/// See also: +/// +/// #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct ExecutionPayloadEnvelope { /// Execution payload, which could be either V1 or V2 @@ -50,6 +53,10 @@ pub struct ExecutionPayloadEnvelope { // // TODO(mattsse): for V3 // #[serde(rename = "blobsBundle", skip_serializing_if = "Option::is_none")] // pub blobs_bundle: Option, + /// Introduced in V3, this represents a suggestion from the execution layer if the payload + /// should be used instead of an externally provided one. + #[serde(rename = "shouldOverrideBuilder", skip_serializing_if = "Option::is_none")] + pub should_override_builder: Option, } impl ExecutionPayloadEnvelope { From 77621fac2bd0e9215e1f4b5ce2228f5b958a9eb4 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Wed, 23 Aug 2023 10:02:17 +0300 Subject: [PATCH 504/722] chore(trie): proofs should be generic over hashed cursor factory (#4310) --- crates/trie/src/proof.rs | 42 +++++++++++++++++++++++++++++++++------- 1 file changed, 35 insertions(+), 7 deletions(-) diff --git a/crates/trie/src/proof.rs b/crates/trie/src/proof.rs index 1450843d8c43..1ccd114ad3b0 100644 --- a/crates/trie/src/proof.rs +++ b/crates/trie/src/proof.rs @@ -41,21 +41,25 @@ pub struct Proof<'a, 'b, TX, H> { hashed_cursor_factory: &'b H, } -impl<'a, 'tx, TX> Proof<'a, 'a, TX, TX> -where - TX: DbTx<'tx> + HashedCursorFactory<'a>, -{ +impl<'a, TX> Proof<'a, 'a, TX, TX> { /// Create a new [Proof] instance. pub fn new(tx: &'a TX) -> Self { Self { tx, hashed_cursor_factory: tx } } +} +impl<'a, 'b, 'tx, TX, H> Proof<'a, 'b, TX, H> +where + TX: DbTx<'tx>, + H: HashedCursorFactory<'b>, +{ /// Generate an account proof from intermediate nodes. pub fn account_proof(&self, address: Address) -> Result, ProofError> { let hashed_address = keccak256(address); let target_nibbles = Nibbles::unpack(hashed_address); - let mut proof_restorer = ProofRestorer::new(self.hashed_cursor_factory)?; + let mut proof_restorer = + ProofRestorer::new(self.tx)?.with_hashed_cursor_factory(self.hashed_cursor_factory)?; let mut trie_cursor = AccountTrieCursor::new(self.tx.cursor_read::()?); @@ -96,7 +100,7 @@ where fn traverse_path>( &self, trie_cursor: &mut AccountTrieCursor, - proof_restorer: &mut ProofRestorer<'a, 'a, TX, TX>, + proof_restorer: &mut ProofRestorer<'a, 'b, TX, H>, hashed_address: H256, ) -> Result, ProofError> { let mut intermediate_proofs = Vec::new(); @@ -142,7 +146,7 @@ where impl<'a, 'tx, TX> ProofRestorer<'a, 'a, TX, TX> where - TX: DbTx<'tx> + HashedCursorFactory<'a>, + TX: DbTx<'tx>, { fn new(tx: &'a TX) -> Result { let hashed_account_cursor = tx.hashed_account_cursor()?; @@ -154,6 +158,30 @@ where node_rlp_buf: Vec::with_capacity(128), }) } +} + +impl<'a, 'b, 'tx, TX, H> ProofRestorer<'a, 'b, TX, H> +where + TX: DbTx<'tx> + HashedCursorFactory<'a>, + H: HashedCursorFactory<'b>, +{ + /// Set the hashed cursor factory. 
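+    ///
+    /// Note (editorial): this consumes the restorer and rebuilds its hashed account cursor
+    /// from the new factory, so any cursor obtained from the previous factory is dropped.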
+ fn with_hashed_cursor_factory<'c, HF>( + self, + hashed_cursor_factory: &'c HF, + ) -> Result, ProofError> + where + HF: HashedCursorFactory<'c>, + { + let hashed_account_cursor = hashed_cursor_factory.hashed_account_cursor()?; + Ok(ProofRestorer { + tx: self.tx, + hashed_cursor_factory, + hashed_account_cursor, + account_rlp_buf: self.account_rlp_buf, + node_rlp_buf: self.node_rlp_buf, + }) + } fn restore_branch_node( &mut self, From 5a7a57d86bb488761ce42b85b18b53031285b547 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 23 Aug 2023 12:25:34 +0100 Subject: [PATCH 505/722] fix(bin): make db metrics a gauge instead of counter (#4324) --- bin/reth/src/prometheus_exporter.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/bin/reth/src/prometheus_exporter.rs b/bin/reth/src/prometheus_exporter.rs index ba7f24fe5da5..650c433522f3 100644 --- a/bin/reth/src/prometheus_exporter.rs +++ b/bin/reth/src/prometheus_exporter.rs @@ -4,10 +4,11 @@ use hyper::{ service::{make_service_fn, service_fn}, Body, Request, Response, Server, }; +use metrics::gauge; use metrics_exporter_prometheus::{PrometheusBuilder, PrometheusHandle}; use metrics_util::layers::{PrefixLayer, Stack}; use reth_db::{database::Database, tables, DatabaseEnv}; -use reth_metrics::metrics::{absolute_counter, describe_counter, Unit}; +use reth_metrics::metrics::{describe_counter, Unit}; use std::{convert::Infallible, net::SocketAddr, sync::Arc}; pub(crate) trait Hook: Fn() + Send + Sync {} @@ -93,11 +94,11 @@ pub(crate) async fn initialize( let table_size = page_size * num_pages; let entries = stats.entries(); - absolute_counter!("db.table_size", table_size as u64, "table" => table); - absolute_counter!("db.table_pages", leaf_pages as u64, "table" => table, "type" => "leaf"); - absolute_counter!("db.table_pages", branch_pages as u64, "table" => table, "type" => "branch"); - absolute_counter!("db.table_pages", overflow_pages as u64, "table" => table, "type" => "overflow"); - absolute_counter!("db.table_entries", entries as u64, "table" => table); + gauge!("db.table_size", table_size as f64, "table" => table); + gauge!("db.table_pages", leaf_pages as f64, "table" => table, "type" => "leaf"); + gauge!("db.table_pages", branch_pages as f64, "table" => table, "type" => "branch"); + gauge!("db.table_pages", overflow_pages as f64, "table" => table, "type" => "overflow"); + gauge!("db.table_entries", entries as f64, "table" => table); } Ok::<(), eyre::Report>(()) @@ -126,7 +127,6 @@ pub(crate) async fn initialize( #[cfg(all(feature = "jemalloc", unix))] fn collect_memory_stats() { use jemalloc_ctl::{epoch, stats}; - use reth_metrics::metrics::gauge; use tracing::error; if epoch::advance().map_err(|error| error!(?error, "Failed to advance jemalloc epoch")).is_err() From 9a97640f1904ea01de95be9f66777bf9849cfb30 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 23 Aug 2023 16:44:29 +0100 Subject: [PATCH 506/722] feat(book): `[prune]` config section (#4328) --- bin/reth/src/args/pruning_args.rs | 4 +- book/run/config.md | 53 +++++++++++++++++++ crates/primitives/src/lib.rs | 2 +- crates/primitives/src/prune/mod.rs | 4 +- crates/primitives/src/prune/target.rs | 14 ++--- crates/prune/src/pruner.rs | 6 +-- crates/storage/provider/src/post_state/mod.rs | 2 +- 7 files changed, 69 insertions(+), 16 deletions(-) diff --git a/bin/reth/src/args/pruning_args.rs b/bin/reth/src/args/pruning_args.rs index b9a783798901..4b288a4e45e1 100644 --- a/bin/reth/src/args/pruning_args.rs +++ 
b/bin/reth/src/args/pruning_args.rs @@ -3,7 +3,7 @@ use clap::Args; use reth_config::config::PruneConfig; use reth_primitives::{ - ChainSpec, ContractLogsPruneConfig, PruneMode, PruneModes, MINIMUM_PRUNING_DISTANCE, + ChainSpec, PruneMode, PruneModes, ReceiptsLogPruneConfig, MINIMUM_PRUNING_DISTANCE, }; use std::sync::Arc; @@ -35,7 +35,7 @@ impl PruningArgs { .map(|contract| PruneMode::Before(contract.block)), account_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), storage_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), - contract_logs_filter: ContractLogsPruneConfig( + receipts_log_filter: ReceiptsLogPruneConfig( _chain_spec .deposit_contract .as_ref() diff --git a/book/run/config.md b/book/run/config.md index 3d5c6ce5d6b9..e432055f9a42 100644 --- a/book/run/config.md +++ b/book/run/config.md @@ -27,6 +27,7 @@ The configuration file contains the following sections: - [`reputation_weights`](#reputation_weights) - [`backoff_durations`](#backoff_durations) - [`[sessions]`](#the-sessions-section) +- [`[prune]`](#the-prune-section) ## The `[stages]` section @@ -330,4 +331,56 @@ secs = 120 nanos = 0 ``` +## The `[prune]` section + +The prune section configures the pruner. + +You can configure the pruning of different parts of the data independently of others. +For any unspecified parts, the default setting is no pruning. + +### Default config + +No pruning, run as archive node. + +### Example of the custom pruning configuration + +This configuration will: +- Run pruning every 5 blocks +- Continuously prune all transaction senders, account history and storage history before the block `head-128`, i.e. keep the data for the last 129 blocks +- Prune all receipts before the block 1920000, i.e. keep receipts from the block 1920000 + +```toml +[prune] +# Minimum pruning interval measured in blocks +block_interval = 5 + +[prune.parts] +# Sender Recovery pruning configuration +sender_recovery = { distance = 128 } # Prune all transaction senders before the block `head-128`, i.e. keep transaction senders for the last 129 blocks + +# Transaction Lookup pruning configuration +transaction_lookup = "full" # Prune all TxNumber => TxHash mappings + +# Receipts pruning configuration. This setting overrides `receipts_log_filter`. +receipts = { before = 1920000 } # Prune all receipts from transactions before the block 1920000, i.e. keep receipts from the block 1920000 + +# Account History pruning configuration +account_history = { distance = 128 } # Prune all historical account states before the block `head-128` + +# Storage History pruning configuration +storage_history = { distance = 128 } # Prune all historical storage states before the block `head-128` +``` + +We can also prune receipts more granularly, using log filtering: +```toml +# Receipts pruning configuration by retaining only those receipts that contain logs emitted +# by the specified addresses, discarding all others. This setting is overridden by `receipts`.
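+# Keys are contract addresses; values use the same prune mode syntax as the parts above,
+# i.e. `{ before = N }` or `{ distance = N }`, evaluated per address.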
+[prune.parts.receipts_log_filter] +# Prune all receipts, leaving only those which: +# - Contain logs from address `0x7ea2be2df7ba6e54b1a9c70676f668455e329d29`, starting from the block 17000000 +# - Contain logs from address `0xdac17f958d2ee523a2206206994597c13d831ec7` in the last 1001 blocks +"0x7ea2be2df7ba6e54b1a9c70676f668455e329d29" = { before = 17000000 } +"0xdac17f958d2ee523a2206206994597c13d831ec7" = { distance = 1000 } +``` + [TOML]: https://toml.io/ diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index b2ff09300f43..898ccc40af8b 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -81,7 +81,7 @@ pub use net::{ }; pub use peer::{PeerId, WithPeerId}; pub use prune::{ - ContractLogsPruneConfig, PruneCheckpoint, PruneMode, PruneModes, PrunePart, PrunePartError, + PruneCheckpoint, PruneMode, PruneModes, PrunePart, PrunePartError, ReceiptsLogPruneConfig, MINIMUM_PRUNING_DISTANCE, }; pub use receipt::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef}; diff --git a/crates/primitives/src/prune/mod.rs b/crates/primitives/src/prune/mod.rs index 5359c3d9c72f..9f8cab504fc4 100644 --- a/crates/primitives/src/prune/mod.rs +++ b/crates/primitives/src/prune/mod.rs @@ -13,9 +13,9 @@ pub use target::{PruneModes, MINIMUM_PRUNING_DISTANCE}; /// Configuration for pruning receipts not associated with logs emitted by the specified contracts. #[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] -pub struct ContractLogsPruneConfig(pub BTreeMap); +pub struct ReceiptsLogPruneConfig(pub BTreeMap); -impl ContractLogsPruneConfig { +impl ReceiptsLogPruneConfig { /// Checks if the configuration is empty pub fn is_empty(&self) -> bool { self.0.is_empty() diff --git a/crates/primitives/src/prune/target.rs b/crates/primitives/src/prune/target.rs index 9620569760e2..8789b0cf8f55 100644 --- a/crates/primitives/src/prune/target.rs +++ b/crates/primitives/src/prune/target.rs @@ -1,6 +1,6 @@ use crate::{ prune::PrunePartError, serde_helper::deserialize_opt_prune_mode_with_min_blocks, BlockNumber, - ContractLogsPruneConfig, PruneMode, PrunePart, + PruneMode, PrunePart, ReceiptsLogPruneConfig, }; use paste::paste; use serde::{Deserialize, Serialize}; @@ -23,8 +23,8 @@ pub struct PruneModes { /// Transaction Lookup pruning configuration. #[serde(skip_serializing_if = "Option::is_none")] pub transaction_lookup: Option, - /// Configuration for pruning of receipts. This setting overrides - /// `PruneModes::contract_logs_filter` and offers improved performance. + /// Receipts pruning configuration. This setting overrides `receipts_log_filter` + /// and offers improved performance. #[serde( skip_serializing_if = "Option::is_none", deserialize_with = "deserialize_opt_prune_mode_with_min_blocks::<64, _>" @@ -42,12 +42,12 @@ pub struct PruneModes { deserialize_with = "deserialize_opt_prune_mode_with_min_blocks::<64, _>" )] pub storage_history: Option, - /// Retains only those receipts that contain logs emitted by the specified addresses, - /// discarding all others. Note that this setting is overridden by `PruneModes::receipts`. + /// Receipts pruning configuration by retaining only those receipts that contain logs emitted + /// by the specified addresses, discarding others. This setting is overridden by `receipts`. /// /// The [`BlockNumber`] represents the starting block from which point onwards the receipts are /// preserved. - pub contract_logs_filter: ContractLogsPruneConfig, + pub receipts_log_filter: ReceiptsLogPruneConfig, } macro_rules! 
impl_prune_parts { @@ -90,7 +90,7 @@ macro_rules! impl_prune_parts { $( $part: Some(PruneMode::Full), )+ - contract_logs_filter: Default::default() + receipts_log_filter: Default::default() } } diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs index 9b082d4a08fb..eebd6bab0bd8 100644 --- a/crates/prune/src/pruner.rs +++ b/crates/prune/src/pruner.rs @@ -103,7 +103,7 @@ impl Pruner { .record(part_start.elapsed()) } - if !self.modes.contract_logs_filter.is_empty() { + if !self.modes.receipts_log_filter.is_empty() { let part_start = Instant::now(); self.prune_receipts_by_logs(&provider, tip_block_number)?; self.metrics @@ -305,7 +305,7 @@ impl Pruner { .map(|checkpoint| checkpoint.block_number); let address_filter = - self.modes.contract_logs_filter.group_by_block(tip_block_number, pruned)?; + self.modes.receipts_log_filter.group_by_block(tip_block_number, pruned)?; // Splits all transactions in different block ranges. Each block range will have its own // filter address list and will check it while going through the table @@ -411,7 +411,7 @@ impl Pruner { // one using `get_next_tx_num_range_from_checkpoint`. let checkpoint_block = self .modes - .contract_logs_filter + .receipts_log_filter .lowest_block_with_distance(tip_block_number, pruned)? .unwrap_or(to_block); diff --git a/crates/storage/provider/src/post_state/mod.rs b/crates/storage/provider/src/post_state/mod.rs index aa1cb7489d37..fc1d74af8f80 100644 --- a/crates/storage/provider/src/post_state/mod.rs +++ b/crates/storage/provider/src/post_state/mod.rs @@ -661,7 +661,7 @@ impl PostState { let contract_log_pruner = self .prune_modes - .contract_logs_filter + .receipts_log_filter .group_by_block(tip, None) .map_err(|e| Error::Custom(e.to_string()))?; From 96f58d729373716c2a87ddeea0361a23f4191bee Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 23 Aug 2023 18:05:31 +0200 Subject: [PATCH 507/722] feat: make TransactionValidationTaskExecutor generic over V (#4331) --- crates/transaction-pool/src/validate/task.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/crates/transaction-pool/src/validate/task.rs b/crates/transaction-pool/src/validate/task.rs index a3ea15e55ba8..4cd203d0432d 100644 --- a/crates/transaction-pool/src/validate/task.rs +++ b/crates/transaction-pool/src/validate/task.rs @@ -8,7 +8,6 @@ use crate::{ }; use futures_util::{lock::Mutex, StreamExt}; use reth_primitives::{ChainSpec, SealedBlock}; -use reth_provider::StateProviderFactory; use reth_tasks::TaskSpawner; use std::{future::Future, pin::Pin, sync::Arc}; use tokio::{ @@ -134,7 +133,7 @@ impl TransactionValidationTaskExecutor TransactionValidationTaskExecutor { +impl TransactionValidationTaskExecutor { /// Creates a new executor instance with the given validator for transaction validation. 
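 ///
 /// The executor is generic over the validator type `V`, so this task-based wrapper is not
 /// tied to the Ethereum transaction validator implementation.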
/// /// Initializes the executor with the provided validator and sets up communication for @@ -146,13 +145,11 @@ impl TransactionValidationTaskExecutor { } #[async_trait::async_trait] -impl TransactionValidator - for TransactionValidationTaskExecutor> +impl TransactionValidator for TransactionValidationTaskExecutor where - Client: StateProviderFactory + Clone + 'static, - Tx: PoolTransaction + Clone + 'static, + V: TransactionValidator + Clone + 'static, { - type Transaction = Tx; + type Transaction = ::Transaction; async fn validate_transaction( &self, From c99945ece4d607d7e4b268f664606ebbf0bc0958 Mon Sep 17 00:00:00 2001 From: Aniket Prajapati <46114123+aniketpr01@users.noreply.github.com> Date: Wed, 23 Aug 2023 21:56:46 +0530 Subject: [PATCH 508/722] fix: Update Return Type for eth_getFilterLogs function as per standard schema (#4323) --- crates/rpc/rpc-api/src/eth_filter.rs | 2 +- crates/rpc/rpc/src/eth/filter.rs | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/crates/rpc/rpc-api/src/eth_filter.rs b/crates/rpc/rpc-api/src/eth_filter.rs index 9e313c75f70b..484157898e1d 100644 --- a/crates/rpc/rpc-api/src/eth_filter.rs +++ b/crates/rpc/rpc-api/src/eth_filter.rs @@ -23,7 +23,7 @@ pub trait EthFilterApi { /// Returns all logs matching given filter (in a range 'from' - 'to'). #[method(name = "getFilterLogs")] - async fn filter_logs(&self, id: FilterId) -> RpcResult>; + async fn filter_logs(&self, id: FilterId) -> RpcResult; /// Uninstalls filter. #[method(name = "uninstallFilter")] diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 5332de726b0b..d7ab17c3adc4 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -135,7 +135,7 @@ where /// Returns an error if no matching log filter exists. /// /// Handler for `eth_getFilterLogs` - pub async fn filter_logs(&self, id: FilterId) -> Result, FilterError> { + pub async fn filter_logs(&self, id: FilterId) -> Result { let filter = { let filters = self.inner.active_filters.inner.lock().await; if let FilterKind::Log(ref filter) = @@ -148,7 +148,8 @@ where } }; - self.inner.logs_for_filter(filter).await + let logs = self.inner.logs_for_filter(filter).await?; + Ok(FilterChanges::Logs(logs)) } } @@ -187,7 +188,7 @@ where /// Returns an error if no matching log filter exists. /// /// Handler for `eth_getFilterLogs` - async fn filter_logs(&self, id: FilterId) -> RpcResult> { + async fn filter_logs(&self, id: FilterId) -> RpcResult { trace!(target: "rpc::eth", "Serving eth_getFilterLogs"); Ok(EthFilter::filter_logs(self, id).await?) 
} From 134364495525c7af55ccf11f1d7301259990bf96 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 23 Aug 2023 18:26:57 +0200 Subject: [PATCH 509/722] feat: use get_pooled_transaction_elements in network manager (#4329) --- crates/net/network/src/transactions.rs | 22 +++++++++++----------- crates/transaction-pool/src/lib.rs | 11 ++++------- 2 files changed, 15 insertions(+), 18 deletions(-) diff --git a/crates/net/network/src/transactions.rs b/crates/net/network/src/transactions.rs index e03d466676a1..17318aa67d62 100644 --- a/crates/net/network/src/transactions.rs +++ b/crates/net/network/src/transactions.rs @@ -24,8 +24,8 @@ use reth_primitives::{ }; use reth_rlp::Encodable; use reth_transaction_pool::{ - error::PoolResult, PoolTransaction, PropagateKind, PropagatedTransactions, TransactionPool, - ValidPoolTransaction, + error::PoolResult, GetPooledTransactionLimit, PoolTransaction, PropagateKind, + PropagatedTransactions, TransactionPool, ValidPoolTransaction, }; use std::{ collections::{hash_map::Entry, HashMap}, @@ -52,6 +52,10 @@ const MAX_FULL_TRANSACTIONS_PACKET_SIZE: usize = 100 * 1024; /// const GET_POOLED_TRANSACTION_SOFT_LIMIT_NUM_HASHES: usize = 256; +/// Soft limit for the response size of a GetPooledTransactions message (2MB) +const GET_POOLED_TRANSACTION_SOFT_LIMIT_SIZE: GetPooledTransactionLimit = + GetPooledTransactionLimit::SizeSoftLimit(2 * 1024 * 1024); + /// The future for inserting a transaction into the pool pub type PoolImportFuture = Pin> + Send + 'static>>; @@ -182,19 +186,15 @@ where response: oneshot::Sender>, ) { if let Some(peer) = self.peers.get_mut(&peer_id) { - // TODO softResponseLimit 2 * 1024 * 1024 let transactions = self .pool - .get_all(request.0) - .into_iter() - .map(|tx| tx.transaction.to_recovered_transaction().into_signed()) - .collect::>(); + .get_pooled_transaction_elements(request.0, GET_POOLED_TRANSACTION_SOFT_LIMIT_SIZE); - // we sent a response at which point we assume that the peer is aware of the transaction - peer.transactions.extend(transactions.iter().map(|tx| tx.hash())); + // we sent a response at which point we assume that the peer is aware of the + // transactions + peer.transactions.extend(transactions.iter().map(|tx| *tx.hash())); - // TODO: remove this!
this will be different when we introduce the blobpool - let resp = PooledTransactions(transactions.into_iter().map(Into::into).collect()); + let resp = PooledTransactions(transactions); let _ = response.send(Ok(resp)); } } diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index d2b705607ec3..570277656380 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -165,10 +165,7 @@ use std::{ use tokio::sync::mpsc::Receiver; use tracing::{instrument, trace}; -use crate::{ - blobstore::{BlobStore, BlobStoreError}, - traits::GetPooledTransactionLimit, -}; +use crate::blobstore::{BlobStore, BlobStoreError}; pub use crate::{ config::{ PoolConfig, PriceBumpConfig, SubPoolLimit, DEFAULT_PRICE_BUMP, REPLACE_BLOB_PRICE_BUMP, @@ -183,9 +180,9 @@ pub use crate::{ }, traits::{ AllPoolTransactions, BestTransactions, BlockInfo, CanonicalStateUpdate, ChangedAccount, - EthPooledTransaction, NewTransactionEvent, PendingTransactionListenerKind, PoolSize, - PoolTransaction, PropagateKind, PropagatedTransactions, TransactionOrigin, TransactionPool, - TransactionPoolExt, + EthPooledTransaction, GetPooledTransactionLimit, NewTransactionEvent, + PendingTransactionListenerKind, PoolSize, PoolTransaction, PropagateKind, + PropagatedTransactions, TransactionOrigin, TransactionPool, TransactionPoolExt, }, validate::{ EthTransactionValidator, TransactionValidationOutcome, TransactionValidationTaskExecutor, From 312cf724bc9347c314be9ef27beb1e40d1d6e2ad Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 23 Aug 2023 18:23:25 +0100 Subject: [PATCH 510/722] feat(pruner): respect batch size per run (#4246) Co-authored-by: joshieDo --- Cargo.lock | 55 +- crates/consensus/beacon/src/engine/prune.rs | 4 +- .../interfaces/src/test_utils/generators.rs | 5 +- crates/primitives/src/prune/checkpoint.rs | 7 +- crates/primitives/src/prune/mod.rs | 6 +- crates/prune/src/pruner.rs | 1079 ++++++++++++----- crates/revm/src/executor.rs | 7 + crates/stages/src/stages/sender_recovery.rs | 14 +- crates/stages/src/stages/tx_lookup.rs | 16 +- .../provider/src/providers/database/mod.rs | 12 +- .../src/providers/database/provider.rs | 92 +- 11 files changed, 870 insertions(+), 427 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b3f73c167135..c12a766d326f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -119,9 +119,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.0.4" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6748e8def348ed4d14996fa801f4122cd763fff530258cdc03f64b25f89d3a5a" +checksum = "86b8f9420f797f2d9e935edf629310eb938a0d839f984e25327f3c7eed22300c" dependencies = [ "memchr", ] @@ -1899,9 +1899,9 @@ dependencies = [ [[package]] name = "ed25519" -version = "2.2.2" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60f6d271ca33075c88028be6f04d502853d63a5ece419d269c15315d4fc1cf1d" +checksum = "5fb04eee5d9d907f29e80ee6b0e78f7e2c82342c63e3580d8c4f69d9d5aad963" dependencies = [ "pkcs8", "signature", @@ -2505,9 +2505,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.27" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010" +checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" dependencies = [ "crc32fast", "miniz_oxide", @@ -3008,9 +3008,9 @@ checksum = 
"d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" -version = "1.0.3" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" +checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "human_bytes" @@ -3105,7 +3105,7 @@ dependencies = [ "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows 0.48.0", + "windows", ] [[package]] @@ -3925,9 +3925,9 @@ dependencies = [ [[package]] name = "metrics-process" -version = "1.0.12" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c93f6ad342d3f7bc14724147e2dbc6eb6fdbe5a832ace16ea23b73618e8cc17" +checksum = "006271a8019ad7a9a28cfac2cc40e3ee104d54be763c4a0901e228a63f49d706" dependencies = [ "libproc", "mach2", @@ -3935,7 +3935,7 @@ dependencies = [ "once_cell", "procfs", "rlimit", - "windows 0.51.1", + "windows", ] [[package]] @@ -4135,9 +4135,9 @@ dependencies = [ [[package]] name = "num-complex" -version = "0.4.4" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ba157ca0885411de85d6ca030ba7e2a83a28636056c7c699b07c8b6f7383214" +checksum = "02e0d21255c828d6f128a1e41534206671e8c3ea0c62f32291e808dc82cff17d" dependencies = [ "num-traits", ] @@ -5098,7 +5098,7 @@ version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" dependencies = [ - "aho-corasick 1.0.4", + "aho-corasick 1.0.3", "memchr", "regex-automata 0.3.6", "regex-syntax 0.7.4", @@ -5119,7 +5119,7 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" dependencies = [ - "aho-corasick 1.0.4", + "aho-corasick 1.0.3", "memchr", "regex-syntax 0.7.4", ] @@ -6308,9 +6308,9 @@ dependencies = [ [[package]] name = "rlimit" -version = "0.10.1" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3560f70f30a0f16d11d01ed078a07740fe6b489667abc7c7b029155d9f21c3d8" +checksum = "f8a29d87a652dc4d43c586328706bb5cdff211f3f39a530f240b53f7221dab8e" dependencies = [ "libc", ] @@ -8320,25 +8320,6 @@ dependencies = [ "windows-targets 0.48.5", ] -[[package]] -name = "windows" -version = "0.51.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca229916c5ee38c2f2bc1e9d8f04df975b4bd93f9955dc69fabb5d91270045c9" -dependencies = [ - "windows-core", - "windows-targets 0.48.5", -] - -[[package]] -name = "windows-core" -version = "0.51.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" -dependencies = [ - "windows-targets 0.48.5", -] - [[package]] name = "windows-sys" version = "0.45.0" diff --git a/crates/consensus/beacon/src/engine/prune.rs b/crates/consensus/beacon/src/engine/prune.rs index 257170a376fe..4b2b4852dccf 100644 --- a/crates/consensus/beacon/src/engine/prune.rs +++ b/crates/consensus/beacon/src/engine/prune.rs @@ -3,7 +3,7 @@ use futures::FutureExt; use reth_db::database::Database; use reth_primitives::BlockNumber; -use reth_prune::{Pruner, PrunerError, PrunerWithResult}; +use reth_prune::{Pruner, PrunerResult, PrunerWithResult}; use reth_tasks::TaskSpawner; use std::task::{ready, Context, Poll}; use tokio::sync::oneshot; @@ -116,7 +116,7 @@ 
pub(crate) enum EnginePruneEvent { /// If this is returned, the pruner is idle. Finished { /// Final result of the pruner run. - result: Result<(), PrunerError>, + result: PrunerResult, }, /// Pruner task was dropped after it was started, unable to receive it because channel /// closed. This would indicate a panicked pruner task diff --git a/crates/interfaces/src/test_utils/generators.rs b/crates/interfaces/src/test_utils/generators.rs index 6b33d9973de8..ae0c0bb0d474 100644 --- a/crates/interfaces/src/test_utils/generators.rs +++ b/crates/interfaces/src/test_utils/generators.rs @@ -239,7 +239,7 @@ where // deposit in receiving account and update storage let (prev_to, storage): &mut (Account, BTreeMap) = state.get_mut(&to).unwrap(); - let old_entries = new_entries + let mut old_entries: Vec<_> = new_entries .into_iter() .filter_map(|entry| { let old = if entry.value != U256::ZERO { @@ -254,9 +254,12 @@ where Some(StorageEntry { value: old.unwrap_or(U256::from(0)), ..entry }) }) .collect(); + old_entries.sort_by_key(|entry| entry.key); changeset.push((to, *prev_to, old_entries)); + changeset.sort_by_key(|(address, _, _)| *address); + prev_to.balance = prev_to.balance.wrapping_add(transfer); changesets.push(changeset); diff --git a/crates/primitives/src/prune/checkpoint.rs b/crates/primitives/src/prune/checkpoint.rs index 52e1cabd76cb..8096d2067af0 100644 --- a/crates/primitives/src/prune/checkpoint.rs +++ b/crates/primitives/src/prune/checkpoint.rs @@ -1,4 +1,4 @@ -use crate::{prune::PruneMode, BlockNumber}; +use crate::{prune::PruneMode, BlockNumber, TxNumber}; use reth_codecs::{main_codec, Compact}; /// Saves the pruning progress of a stage. @@ -7,7 +7,10 @@ use reth_codecs::{main_codec, Compact}; #[cfg_attr(test, derive(Default))] pub struct PruneCheckpoint { /// Highest pruned block number. - pub block_number: BlockNumber, + /// If it's [None], the pruning for block `0` is not finished yet. + pub block_number: Option, + /// Highest pruned transaction number, if applicable. + pub tx_number: Option, /// Prune mode. pub prune_mode: PruneMode, } diff --git a/crates/primitives/src/prune/mod.rs b/crates/primitives/src/prune/mod.rs index 9f8cab504fc4..48bdacdb9e89 100644 --- a/crates/primitives/src/prune/mod.rs +++ b/crates/primitives/src/prune/mod.rs @@ -49,10 +49,14 @@ impl ReceiptsLogPruneConfig { // the BTreeMap (block = 0), otherwise it will be excluded. // Reminder that this BTreeMap works as an inclusion list that excludes (prunes) all // other receipts. + // + // Reminder, that we increment because the [`BlockNumber`] key of the new map should be + // viewed as `PruneMode::Before(block)` let block = (pruned_block + 1).max( mode.prune_target_block(tip, MINIMUM_PRUNING_DISTANCE, PrunePart::ContractLogs)? .map(|(block, _)| block) - .unwrap_or_default(), + .unwrap_or_default() + + 1, ); map.entry(block).or_insert_with(Vec::new).push(address) diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs index eebd6bab0bd8..4332534d39f8 100644 --- a/crates/prune/src/pruner.rs +++ b/crates/prune/src/pruner.rs @@ -20,30 +20,40 @@ use reth_provider::{ TransactionsProvider, }; use std::{ops::RangeInclusive, sync::Arc, time::Instant}; -use tracing::{debug, instrument, trace}; +use tracing::{debug, error, instrument, trace}; -/// Result of [Pruner::run] execution -pub type PrunerResult = Result<(), PrunerError>; +/// Result of [Pruner::run] execution. 
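+///
+/// A run that stops early because a per-part batch size limit was reached is not an error;
+/// the boolean payload distinguishes the two outcomes: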
+/// +/// Returns `true` if pruning has been completed up to the target block, +/// and `false` if there's more data to prune in further runs. +pub type PrunerResult = Result; -/// The pipeline type itself with the result of [Pruner::run] +/// The pruner type itself with the result of [Pruner::run] pub type PrunerWithResult = (Pruner, PrunerResult); pub struct BatchSizes { + /// Maximum number of receipts to prune in one run. receipts: usize, + /// Maximum number of transaction lookup entries to prune in one run. transaction_lookup: usize, + /// Maximum number of transaction senders to prune in one run. transaction_senders: usize, + /// Maximum number of account history entries to prune in one run. + /// Measured in the number of [tables::AccountChangeSet] rows. account_history: usize, + /// Maximum number of storage history entries to prune in one run. + /// Measured in the number of [tables::StorageChangeSet] rows. storage_history: usize, } impl Default for BatchSizes { fn default() -> Self { Self { - receipts: 10000, - transaction_lookup: 10000, - transaction_senders: 10000, - account_history: 10000, - storage_history: 10000, + receipts: 1000, + transaction_lookup: 1000, + transaction_senders: 1000, + account_history: 1000, + storage_history: 1000, } } } @@ -59,6 +69,7 @@ pub struct Pruner { /// when the pruning needs to be initiated. last_pruned_block_number: Option, modes: PruneModes, + /// Maximum entries to prune per one run, per prune part. batch_sizes: BatchSizes, } @@ -83,77 +94,156 @@ impl Pruner { /// Run the pruner pub fn run(&mut self, tip_block_number: BlockNumber) -> PrunerResult { - trace!( - target: "pruner", - %tip_block_number, - "Pruner started" - ); + if tip_block_number == 0 { + self.last_pruned_block_number = Some(tip_block_number); + + trace!(target: "pruner", %tip_block_number, "Nothing to prune yet"); + return Ok(true) + } + + trace!(target: "pruner", %tip_block_number, "Pruner started"); let start = Instant::now(); let provider = self.provider_factory.provider_rw()?; + let mut done = true; + if let Some((to_block, prune_mode)) = self.modes.prune_target_block_receipts(tip_block_number)? { + trace!( + target: "pruner", + prune_part = ?PrunePart::Receipts, + %to_block, + ?prune_mode, + "Got target block to prune" + ); + let part_start = Instant::now(); - self.prune_receipts(&provider, to_block, prune_mode)?; + let part_done = self.prune_receipts(&provider, to_block, prune_mode)?; + done = done && part_done; self.metrics .get_prune_part_metrics(PrunePart::Receipts) .duration_seconds .record(part_start.elapsed()) + } else { + trace!(target: "pruner", prune_part = ?PrunePart::Receipts, "No target block to prune"); } if !self.modes.receipts_log_filter.is_empty() { let part_start = Instant::now(); - self.prune_receipts_by_logs(&provider, tip_block_number)?; + let part_done = self.prune_receipts_by_logs(&provider, tip_block_number)?; + done = done && part_done; self.metrics .get_prune_part_metrics(PrunePart::ContractLogs) .duration_seconds .record(part_start.elapsed()) + } else { + trace!(target: "pruner", prune_part = ?PrunePart::ContractLogs, "No filter to prune"); } if let Some((to_block, prune_mode)) = self.modes.prune_target_block_transaction_lookup(tip_block_number)? 
{ + trace!( + target: "pruner", + prune_part = ?PrunePart::TransactionLookup, + %to_block, + ?prune_mode, + "Got target block to prune" + ); + let part_start = Instant::now(); - self.prune_transaction_lookup(&provider, to_block, prune_mode)?; + let part_done = self.prune_transaction_lookup(&provider, to_block, prune_mode)?; + done = done && part_done; self.metrics .get_prune_part_metrics(PrunePart::TransactionLookup) .duration_seconds .record(part_start.elapsed()) + } else { + trace!( + target: "pruner", + prune_part = ?PrunePart::TransactionLookup, + "No target block to prune" + ); } if let Some((to_block, prune_mode)) = self.modes.prune_target_block_sender_recovery(tip_block_number)? { + trace!( + target: "pruner", + prune_part = ?PrunePart::SenderRecovery, + %to_block, + ?prune_mode, + "Got target block to prune" + ); + let part_start = Instant::now(); - self.prune_transaction_senders(&provider, to_block, prune_mode)?; + let part_done = self.prune_transaction_senders(&provider, to_block, prune_mode)?; + done = done && part_done; self.metrics .get_prune_part_metrics(PrunePart::SenderRecovery) .duration_seconds .record(part_start.elapsed()) + } else { + trace!( + target: "pruner", + prune_part = ?PrunePart::SenderRecovery, + "No target block to prune" + ); } if let Some((to_block, prune_mode)) = self.modes.prune_target_block_account_history(tip_block_number)? { + trace!( + target: "pruner", + prune_part = ?PrunePart::AccountHistory, + %to_block, + ?prune_mode, + "Got target block to prune" + ); + let part_start = Instant::now(); - self.prune_account_history(&provider, to_block, prune_mode)?; + let part_done = self.prune_account_history(&provider, to_block, prune_mode)?; + done = done && part_done; self.metrics .get_prune_part_metrics(PrunePart::AccountHistory) .duration_seconds .record(part_start.elapsed()) + } else { + trace!( + target: "pruner", + prune_part = ?PrunePart::AccountHistory, + "No target block to prune" + ); } if let Some((to_block, prune_mode)) = self.modes.prune_target_block_storage_history(tip_block_number)? { + trace!( + target: "pruner", + prune_part = ?PrunePart::StorageHistory, + %to_block, + ?prune_mode, + "Got target block to prune" + ); + let part_start = Instant::now(); - self.prune_storage_history(&provider, to_block, prune_mode)?; + let part_done = self.prune_storage_history(&provider, to_block, prune_mode)?; + done = done && part_done; self.metrics .get_prune_part_metrics(PrunePart::StorageHistory) .duration_seconds .record(part_start.elapsed()) + } else { + trace!( + target: "pruner", + prune_part = ?PrunePart::StorageHistory, + "No target block to prune" + ); } provider.commit()?; @@ -162,13 +252,8 @@ impl Pruner { let elapsed = start.elapsed(); self.metrics.duration_seconds.record(elapsed); - trace!( - target: "pruner", - %tip_block_number, - ?elapsed, - "Pruner finished" - ); - Ok(()) + trace!(target: "pruner", %tip_block_number, ?elapsed, "Pruner finished"); + Ok(done) } /// Returns `true` if the pruning is needed at the provided tip block number. @@ -192,6 +277,36 @@ impl Pruner { } } + /// Get next inclusive block range to prune according to the checkpoint, `to_block` block + /// number and `limit`. + /// + /// To get the range start (`from_block`): + /// 1. If checkpoint exists, use next block. + /// 2. If checkpoint doesn't exist, use block 0. + /// + /// To get the range end: use block `to_block`. 
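+    ///
+    /// For example (hypothetical numbers): with a checkpoint at block 100 and `to_block = 105`
+    /// the returned range is `101..=105`, while a checkpoint already at block 105 yields `None`.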
+ fn get_next_block_range_from_checkpoint( + &self, + provider: &DatabaseProviderRW<'_, DB>, + prune_part: PrunePart, + to_block: BlockNumber, + ) -> reth_interfaces::Result>> { + let from_block = provider + .get_prune_checkpoint(prune_part)? + .and_then(|checkpoint| checkpoint.block_number) + // Checkpoint exists, prune from the next block after the highest pruned one + .map(|block_number| block_number + 1) + // No checkpoint exists, prune from genesis + .unwrap_or(0); + + let range = from_block..=to_block; + if range.is_empty() { + return Ok(None) + } + + Ok(Some(range)) + } + /// Get next inclusive tx number range to prune according to the checkpoint and `to_block` block /// number. /// @@ -206,30 +321,34 @@ impl Pruner { prune_part: PrunePart, to_block: BlockNumber, ) -> reth_interfaces::Result>> { - let from_block_number = provider + let from_tx_number = provider .get_prune_checkpoint(prune_part)? - // Checkpoint exists, prune from the next block after the highest pruned one - .map(|checkpoint| checkpoint.block_number + 1) + // Checkpoint exists, prune from the next transaction after the highest pruned one + .and_then(|checkpoint| match checkpoint.tx_number { + Some(tx_number) => Some(tx_number + 1), + _ => { + error!(target: "pruner", %prune_part, ?checkpoint, "Expected transaction number in prune checkpoint, found None"); + None + }, + }) // No checkpoint exists, prune from genesis .unwrap_or(0); - // Get first transaction - let from_tx_num = - provider.block_body_indices(from_block_number)?.map(|body| body.first_tx_num); - // If no block body index is found, the DB is either corrupted or we've already pruned up to - // the latest block, so there's no thing to prune now. - let Some(from_tx_num) = from_tx_num else { return Ok(None) }; - - let to_tx_num = match provider.block_body_indices(to_block)? { + let to_tx_number = match provider.block_body_indices(to_block)? { Some(body) => body, None => return Ok(None), } .last_tx_num(); - Ok(Some(from_tx_num..=to_tx_num)) + let range = from_tx_number..=to_tx_number; + if range.is_empty() { + return Ok(None) + } + + Ok(Some(range)) } - /// Prune receipts up to the provided block, inclusive. + /// Prune receipts up to the provided block, inclusive, respecting the batch size. #[instrument(level = "trace", skip(self, provider), target = "pruner")] fn prune_receipts( &self, @@ -237,7 +356,7 @@ impl Pruner { to_block: BlockNumber, prune_mode: PruneMode, ) -> PrunerResult { - let range = match self.get_next_tx_num_range_from_checkpoint( + let tx_range = match self.get_next_tx_num_range_from_checkpoint( provider, PrunePart::Receipts, to_block, @@ -245,42 +364,44 @@ impl Pruner { Some(range) => range, None => { trace!(target: "pruner", "No receipts to prune"); - return Ok(()) + return Ok(true) } }; - let total = range.clone().count(); + let tx_range_end = *tx_range.end(); - provider.prune_table_with_iterator_in_batches::( - range, + let mut last_pruned_transaction = tx_range_end; + let (deleted, done) = provider.prune_table_with_range::( + tx_range, self.batch_sizes.receipts, - |rows| { - trace!( - target: "pruner", - %rows, - progress = format!("{:.1}%", 100.0 * rows as f64 / total as f64), - "Pruned receipts" - ); - }, |_| false, + |row| last_pruned_transaction = row.0, )?; + trace!(target: "pruner", %deleted, %done, "Pruned receipts"); + + let last_pruned_block = provider + .transaction_block(last_pruned_transaction)? + .ok_or(PrunerError::InconsistentData("Block for transaction is not found"))? 
+ // If there's more receipts to prune, set the checkpoint block number to previous, + // so we could finish pruning its receipts on the next run. + .checked_sub(if done { 0 } else { 1 }); + + let prune_checkpoint = PruneCheckpoint { + block_number: last_pruned_block, + tx_number: Some(last_pruned_transaction), + prune_mode, + }; - provider.save_prune_checkpoint( - PrunePart::Receipts, - PruneCheckpoint { block_number: to_block, prune_mode }, - )?; + provider.save_prune_checkpoint(PrunePart::Receipts, prune_checkpoint)?; // `PrunePart::Receipts` overrides `PrunePart::ContractLogs`, so we can preemptively // limit their pruning start point. - provider.save_prune_checkpoint( - PrunePart::ContractLogs, - PruneCheckpoint { block_number: to_block, prune_mode }, - )?; + provider.save_prune_checkpoint(PrunePart::ContractLogs, prune_checkpoint)?; - Ok(()) + Ok(done) } - /// Prune receipts up to the provided block by filtering logs. Works as in inclusion list, and - /// removes every receipt not belonging to it. + /// Prune receipts up to the provided block, inclusive, by filtering logs. Works as in inclusion + /// list, and removes every receipt not belonging to it. Respects the batch size. #[instrument(level = "trace", skip(self, provider), target = "pruner")] fn prune_receipts_by_logs( &self, @@ -298,14 +419,25 @@ impl Pruner { .map(|(bn, _)| bn) .unwrap_or_default(); - // Figure out what receipts have already been pruned, so we can have an accurate - // `address_filter` - let pruned = provider + // Get status checkpoint from latest run + let mut last_pruned_block = provider .get_prune_checkpoint(PrunePart::ContractLogs)? - .map(|checkpoint| checkpoint.block_number); + .and_then(|checkpoint| checkpoint.block_number); + let initial_last_pruned_block = last_pruned_block; + + let mut from_tx_number = match initial_last_pruned_block { + Some(block) => provider + .block_body_indices(block)? + .map(|block| block.last_tx_num() + 1) + .unwrap_or(0), + None => 0, + }; + + // Figure out what receipts have already been pruned, so we can have an accurate + // `address_filter` let address_filter = - self.modes.receipts_log_filter.group_by_block(tip_block_number, pruned)?; + self.modes.receipts_log_filter.group_by_block(tip_block_number, last_pruned_block)?; // Splits all transactions in different block ranges. Each block range will have its own // filter address list and will check it while going through the table @@ -334,9 +466,13 @@ impl Pruner { while let Some((start_block, addresses)) = blocks_iter.next() { filtered_addresses.extend_from_slice(addresses); - // This will clear all receipts before the first appearance of a contract log + // This will clear all receipts before the first appearance of a contract log or since + // the block after the last pruned one. 
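+            // For example (hypothetical numbers): if the last pruned block is 99 and the first
+            // filtered contract only has logs from block 150 onwards, this seeds the range
+            // (100, 149, 0); the zero address count means every receipt in that range is deleted.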
if block_ranges.is_empty() { - block_ranges.push((0, *start_block - 1, 0)); + let init = last_pruned_block.map(|b| b + 1).unwrap_or_default(); + if init < *start_block { + block_ranges.push((init, *start_block - 1, 0)); + } } let end_block = @@ -347,86 +483,107 @@ impl Pruner { block_ranges.push((*start_block, end_block, filtered_addresses.len())); } + trace!( + target: "pruner", + ?block_ranges, + ?filtered_addresses, + "Calculated block ranges and filtered addresses", + ); + + let mut limit = self.batch_sizes.receipts; + let mut done = true; + let mut last_pruned_transaction = None; for (start_block, end_block, num_addresses) in block_ranges { - let range = match self.get_next_tx_num_range_from_checkpoint( - provider, - PrunePart::ContractLogs, - end_block, - )? { - Some(range) => range, + let block_range = start_block..=end_block; + + // Calculate the transaction range from this block range + let tx_range_end = match provider.block_body_indices(end_block)? { + Some(body) => body.last_tx_num(), None => { trace!( - target: "pruner", - block_range = format!("{start_block}..={end_block}"), - "No receipts to prune." + target: "pruner", + ?block_range, + "No receipts to prune." ); continue } }; - - let total = range.clone().count(); - let mut processed = 0; - - provider.prune_table_with_iterator_in_batches::( - range, - self.batch_sizes.receipts, - |rows| { - processed += rows; - trace!( - target: "pruner", - %rows, - block_range = format!("{start_block}..={end_block}"), - progress = format!("{:.1}%", 100.0 * processed as f64 / total as f64), - "Pruned receipts" - ); - }, - |receipt| { - num_addresses > 0 && + let tx_range = from_tx_number..=tx_range_end; + + // Delete receipts, except the ones in the inclusion list + let mut last_skipped_transaction = 0; + let deleted; + (deleted, done) = provider.prune_table_with_range::( + tx_range, + limit, + |(tx_num, receipt)| { + let skip = num_addresses > 0 && receipt.logs.iter().any(|log| { filtered_addresses[..num_addresses].contains(&&log.address) - }) + }); + + if skip { + last_skipped_transaction = *tx_num; + } + skip }, + |row| last_pruned_transaction = Some(row.0), )?; + trace!(target: "pruner", %deleted, %done, ?block_range, "Pruned receipts"); + + limit = limit.saturating_sub(deleted); + + // For accurate checkpoints we need to know that we have checked every transaction. + // Example: we reached the end of the range, and the last receipt is supposed to skip + // its deletion. + last_pruned_transaction = + Some(last_pruned_transaction.unwrap_or_default().max(last_skipped_transaction)); + last_pruned_block = Some( + provider + .transaction_block(last_pruned_transaction.expect("qed"))? + .ok_or(PrunerError::InconsistentData("Block for transaction is not found"))? + // If there's more receipts to prune, set the checkpoint block number to + // previous, so we could finish pruning its receipts on the + // next run. + .saturating_sub(if done { 0 } else { 1 }), + ); - // If this is the last block range, avoid writing an unused checkpoint - if end_block != to_block { - // This allows us to query for the transactions in the next block range with - // [`get_next_tx_num_range_from_checkpoint`]. It's just a temporary intermediate - // checkpoint, which should be adjusted in the end. 
-            provider.save_prune_checkpoint( - PrunePart::ContractLogs, - PruneCheckpoint { - block_number: end_block, - prune_mode: PruneMode::Before(end_block + 1), - }, - )?; + if limit == 0 { + done &= end_block == to_block; + break } + + from_tx_number = last_pruned_transaction.expect("qed") + 1; } // If there are contracts using `PruneMode::Distance(_)` there will be receipts before - // `to_block` that become eligible to be pruned in future runs. Therefore, our - // checkpoint is not actually `to_block`, but the `lowest_block_with_distance` from any - // contract. This ensures that in future pruner runs we can - // prune all these receipts between the previous `lowest_block_with_distance` and the new - // one using `get_next_tx_num_range_from_checkpoint`. - let checkpoint_block = self + // `to_block` that become eligible to be pruned in future runs. Therefore, our checkpoint is + // not actually `to_block`, but the `lowest_block_with_distance` from any contract. + // This ensures that in future pruner runs we can prune all these receipts between the + // previous `lowest_block_with_distance` and the new one using + // `get_next_tx_num_range_from_checkpoint`. + // + // Only applies if we were able to prune everything intended for this run, otherwise the + // checkpoint is the `last_pruned_block`. + let prune_mode_block = self .modes .receipts_log_filter - .lowest_block_with_distance(tip_block_number, pruned)? + .lowest_block_with_distance(tip_block_number, initial_last_pruned_block)? .unwrap_or(to_block); provider.save_prune_checkpoint( PrunePart::ContractLogs, PruneCheckpoint { - block_number: checkpoint_block - 1, - prune_mode: PruneMode::Before(checkpoint_block), + block_number: Some(prune_mode_block.min(last_pruned_block.unwrap_or(u64::MAX))), + tx_number: last_pruned_transaction, + prune_mode: PruneMode::Before(prune_mode_block), }, )?; - - Ok(()) + Ok(done) } - /// Prune transaction lookup entries up to the provided block, inclusive. + /// Prune transaction lookup entries up to the provided block, inclusive, respecting the batch + /// size. #[instrument(level = "trace", skip(self, provider), target = "pruner")] fn prune_transaction_lookup( &self, @@ -434,7 +591,7 @@ impl Pruner { to_block: BlockNumber, prune_mode: PruneMode, ) -> PrunerResult { - let range = match self.get_next_tx_num_range_from_checkpoint( + let (start, end) = match self.get_next_tx_num_range_from_checkpoint( provider, PrunePart::TransactionLookup, to_block, @@ -442,52 +599,54 @@ impl Pruner { Some(range) => range, None => { trace!(target: "pruner", "No transaction lookup entries to prune"); - return Ok(()) - } - }; - let last_tx_num = *range.end(); - let total = range.clone().count(); - let mut processed = 0; - - for i in range.step_by(self.batch_sizes.transaction_lookup) { - // The `min` ensures that the transaction range doesn't exceed the last transaction - // number. `last_tx_num + 1` is used to include the last transaction in the range. - let tx_range = i..(i + self.batch_sizes.transaction_lookup as u64).min(last_tx_num + 1); - - // Retrieve transactions in the range and calculate their hashes in parallel - let mut hashes = provider - .transactions_by_tx_range(tx_range.clone())?
- .into_par_iter() - .map(|transaction| transaction.hash()) - .collect::>(); - - // Number of transactions retrieved from the database should match the tx range count - let tx_count = tx_range.clone().count(); - if hashes.len() != tx_count { - return Err(PrunerError::InconsistentData( - "Unexpected number of transaction hashes retrieved by transaction number range", - )) + return Ok(true) } + } + .into_inner(); + let tx_range = start..=(end.min(start + self.batch_sizes.transaction_lookup as u64 - 1)); + let tx_range_end = *tx_range.end(); + + // Retrieve transactions in the range and calculate their hashes in parallel + let hashes = provider + .transactions_by_tx_range(tx_range.clone())? + .into_par_iter() + .map(|transaction| transaction.hash()) + .collect::>(); + + // Number of transactions retrieved from the database should match the tx range count + let tx_count = tx_range.clone().count(); + if hashes.len() != tx_count { + return Err(PrunerError::InconsistentData( + "Unexpected number of transaction hashes retrieved by transaction number range", + )) + } - // Pre-sort hashes to prune them in order - hashes.sort_unstable(); + let mut last_pruned_transaction = tx_range_end; + let (deleted, done) = provider.prune_table_with_iterator::( + hashes, + self.batch_sizes.transaction_lookup, + |row| last_pruned_transaction = row.1, + )?; + trace!(target: "pruner", %deleted, %done, "Pruned transaction lookup"); - let rows = provider.prune_table_with_iterator::(hashes)?; - processed += rows; - trace!( - target: "pruner", - %rows, - progress = format!("{:.1}%", 100.0 * processed as f64 / total as f64), - "Pruned transaction lookup" - ); - } + let last_pruned_block = provider + .transaction_block(last_pruned_transaction)? + .ok_or(PrunerError::InconsistentData("Block for transaction is not found"))? + // If there's more transaction lookup entries to prune, set the checkpoint block number + // to previous, so we could finish pruning its transaction lookup entries on the next + // run. + .checked_sub(if done { 0 } else { 1 }); provider.save_prune_checkpoint( PrunePart::TransactionLookup, - PruneCheckpoint { block_number: to_block, prune_mode }, + PruneCheckpoint { + block_number: last_pruned_block, + tx_number: Some(last_pruned_transaction), + prune_mode, + }, )?; - Ok(()) + Ok(done) } /// Prune transaction senders up to the provided block, inclusive. @@ -498,7 +657,7 @@ impl Pruner { to_block: BlockNumber, prune_mode: PruneMode, ) -> PrunerResult { - let range = match self.get_next_tx_num_range_from_checkpoint( + let tx_range = match self.get_next_tx_num_range_from_checkpoint( provider, PrunePart::SenderRecovery, to_block, @@ -506,30 +665,37 @@ impl Pruner { Some(range) => range, None => { trace!(target: "pruner", "No transaction senders to prune"); - return Ok(()) + return Ok(true) } }; - let total = range.clone().count(); + let tx_range_end = *tx_range.end(); - provider.prune_table_with_range_in_batches::( - range, + let mut last_pruned_transaction = tx_range_end; + let (deleted, done) = provider.prune_table_with_range::( + tx_range, self.batch_sizes.transaction_senders, - |rows, _| { - trace!( - target: "pruner", - %rows, - progress = format!("{:.1}%", 100.0 * rows as f64 / total as f64), - "Pruned transaction senders" - ); - }, + |_| false, + |row| last_pruned_transaction = row.0, )?; + trace!(target: "pruner", %deleted, %done, "Pruned transaction senders"); + + let last_pruned_block = provider + .transaction_block(last_pruned_transaction)? 
+            .ok_or(PrunerError::InconsistentData("Block for transaction is not found"))? + // If there's more transaction senders to prune, set the checkpoint block number to + // previous, so we could finish pruning its transaction senders on the next run. + .checked_sub(if done { 0 } else { 1 }); provider.save_prune_checkpoint( PrunePart::SenderRecovery, - PruneCheckpoint { block_number: to_block, prune_mode }, + PruneCheckpoint { + block_number: last_pruned_block, + tx_number: Some(last_pruned_transaction), + prune_mode, + }, )?; - Ok(()) + Ok(done) } /// Prune account history up to the provided block, inclusive. @@ -540,48 +706,52 @@ impl Pruner { to_block: BlockNumber, prune_mode: PruneMode, ) -> PrunerResult { - let from_block = provider - .get_prune_checkpoint(PrunePart::AccountHistory)? - .map(|checkpoint| checkpoint.block_number + 1) - .unwrap_or_default(); - let range = from_block..=to_block; - let total = range.clone().count(); + let range = match self.get_next_block_range_from_checkpoint( + provider, + PrunePart::AccountHistory, + to_block, + )? { + Some(range) => range, + None => { + trace!(target: "pruner", "No account history to prune"); + return Ok(true) + } + }; + let range_end = *range.end(); - provider.prune_table_with_range_in_batches::( + let mut last_changeset_pruned_block = None; + let (rows, done) = provider.prune_table_with_range::( range, self.batch_sizes.account_history, - |keys, rows| { - trace!( - target: "pruner", - %keys, - %rows, - progress = format!("{:.1}%", 100.0 * keys as f64 / total as f64), - "Pruned account history (changesets)" - ); - }, + |_| false, + |row| last_changeset_pruned_block = Some(row.0), )?; + trace!(target: "pruner", %rows, %done, "Pruned account history (changesets)"); + + let last_changeset_pruned_block = last_changeset_pruned_block + // If there's more account changesets to prune, set the checkpoint block number + // to previous, so we could finish pruning its account changesets on the next run. + .map(|block_number| if done { block_number } else { block_number.saturating_sub(1) }) + .unwrap_or(range_end); - self.prune_history_indices::( + let (processed, deleted) = self.prune_history_indices::( provider, - to_block, + last_changeset_pruned_block, |a, b| a.key == b.key, |key| ShardedKey::last(key.key), - self.batch_sizes.account_history, - |rows| { - trace!( - target: "pruner", - rows, - "Pruned account history (indices)" - ); - }, )?; + trace!(target: "pruner", %processed, %deleted, %done, "Pruned account history (indices)"); provider.save_prune_checkpoint( PrunePart::AccountHistory, - PruneCheckpoint { block_number: to_block, prune_mode }, + PruneCheckpoint { + block_number: Some(last_changeset_pruned_block), + tx_number: None, + prune_mode, + }, )?; - Ok(()) + Ok(done) } /// Prune storage history up to the provided block, inclusive. @@ -592,64 +762,70 @@ impl Pruner { to_block: BlockNumber, prune_mode: PruneMode, ) -> PrunerResult { - let from_block = provider - .get_prune_checkpoint(PrunePart::StorageHistory)? - .map(|checkpoint| checkpoint.block_number + 1) - .unwrap_or_default(); - let block_range = from_block..=to_block; - let range = BlockNumberAddress::range(block_range); + let range = match self.get_next_block_range_from_checkpoint( + provider, + PrunePart::StorageHistory, + to_block, + )?
+        {
+            Some(range) => range,
+            None => {
+                trace!(target: "pruner", "No storage history to prune");
+                return Ok(true)
+            }
+        };
+        let range_end = *range.end();

-        provider.prune_table_with_range_in_batches::<tables::StorageChangeSet>(
-            range,
+        let mut last_changeset_pruned_block = None;
+        let (rows, done) = provider.prune_table_with_range::<tables::StorageChangeSet>(
+            BlockNumberAddress::range(range),
             self.batch_sizes.storage_history,
-            |keys, rows| {
-                trace!(
-                    target: "pruner",
-                    %keys,
-                    %rows,
-                    "Pruned storage history (changesets)"
-                );
-            },
+            |_| false,
+            |row| last_changeset_pruned_block = Some(row.0.block_number()),
         )?;
+        trace!(target: "pruner", %rows, %done, "Pruned storage history (changesets)");
+
+        let last_changeset_pruned_block = last_changeset_pruned_block
+            // If there's more storage changesets to prune, set the checkpoint block number
+            // to previous, so we could finish pruning its storage changesets on the next run.
+            .map(|block_number| if done { block_number } else { block_number.saturating_sub(1) })
+            .unwrap_or(range_end);

-        self.prune_history_indices::<tables::StorageHistory, _>(
+        let (processed, deleted) = self.prune_history_indices::<tables::StorageHistory, _>(
             provider,
-            to_block,
+            last_changeset_pruned_block,
             |a, b| a.address == b.address && a.sharded_key.key == b.sharded_key.key,
             |key| StorageShardedKey::last(key.address, key.sharded_key.key),
-            self.batch_sizes.storage_history,
-            |rows| {
-                trace!(
-                    target: "pruner",
-                    rows,
-                    "Pruned storage history (indices)"
-                );
-            },
         )?;
+        trace!(target: "pruner", %processed, %deleted, %done, "Pruned storage history (indices)");

         provider.save_prune_checkpoint(
             PrunePart::StorageHistory,
-            PruneCheckpoint { block_number: to_block, prune_mode },
+            PruneCheckpoint {
+                block_number: Some(last_changeset_pruned_block),
+                tx_number: None,
+                prune_mode,
+            },
         )?;

-        Ok(())
+        Ok(done)
     }

     /// Prune history indices up to the provided block, inclusive.
+    ///
+    /// Returns total number of processed (walked) and deleted entities.
     fn prune_history_indices<T, SK>(
         &self,
         provider: &DatabaseProviderRW<'_, DB>,
         to_block: BlockNumber,
         key_matches: impl Fn(&T::Key, &T::Key) -> bool,
         last_key: impl Fn(&T::Key) -> T::Key,
-        batch_size: usize,
-        batch_callback: impl Fn(usize),
-    ) -> PrunerResult
+    ) -> Result<(usize, usize), PrunerError>
     where
         T: Table<Value = BlockNumberList>,
         T::Key: AsRef<ShardedKey<SK>>,
     {
         let mut processed = 0;
+        let mut deleted = 0;
         let mut cursor = provider.tx_ref().cursor_write::<T>()?;

         // Prune history table:
@@ -665,6 +841,7 @@ impl Pruner {
             // completely.
             if key.as_ref().highest_block_number <= to_block {
                 cursor.delete_current()?;
+                deleted += 1;
                 if key.as_ref().highest_block_number == to_block {
                     // Shard contains only block numbers up to the target one, so we can skip to
                     // the last shard for this key. It is guaranteed that further shards for this
@@ -694,6 +871,7 @@ impl Pruner {
                         // has previous shards, replace it with the previous shard.
                         Some((prev_key, prev_value)) if key_matches(&prev_key, &key) => {
                             cursor.delete_current()?;
+                            deleted += 1;
                             // Upsert will replace the last shard for this sharded key with
                             // the previous value.
                             cursor.upsert(key.clone(), prev_value)?;
@@ -708,6 +886,7 @@ impl Pruner {
                         }
                         // Delete shard.
                         cursor.delete_current()?;
+                        deleted += 1;
                     }
                 }
             }
@@ -715,6 +894,7 @@ impl Pruner {
             // just delete it.
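            // (A key's "last" shard is the one produced by the `last_key` closure, e.g.
            // `ShardedKey::last`, which uses the maximum block number as its key suffix; the
            // branches above preserve that invariant while deleting.)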
else { cursor.delete_current()?; + deleted += 1; } } else { cursor.upsert(key.clone(), BlockNumberList::new_pre_sorted(new_blocks))?; @@ -728,17 +908,9 @@ impl Pruner { } processed += 1; - - if processed % batch_size == 0 { - batch_callback(batch_size); - } } - if processed % batch_size != 0 { - batch_callback(processed % batch_size); - } - - Ok(()) + Ok((processed, deleted)) } } @@ -746,17 +918,26 @@ impl Pruner { mod tests { use crate::{pruner::BatchSizes, Pruner}; use assert_matches::assert_matches; - use reth_db::{tables, test_utils::create_test_rw_db, BlockNumberList}; + use itertools::{ + FoldWhile::{Continue, Done}, + Itertools, + }; + use reth_db::{ + cursor::DbCursorRO, tables, test_utils::create_test_rw_db, transaction::DbTx, + BlockNumberList, + }; use reth_interfaces::test_utils::{ generators, generators::{ - random_block_range, random_changeset_range, random_eoa_account_range, random_receipt, + random_block_range, random_changeset_range, random_eoa_account, + random_eoa_account_range, random_log, random_receipt, }, }; use reth_primitives::{ - BlockNumber, PruneCheckpoint, PruneMode, PruneModes, PrunePart, H256, MAINNET, + BlockNumber, PruneCheckpoint, PruneMode, PruneModes, PrunePart, ReceiptsLogPruneConfig, + TxNumber, H256, MAINNET, }; - use reth_provider::PruneCheckpointReader; + use reth_provider::{PruneCheckpointReader, TransactionsProvider}; use reth_stages::test_utils::TestTransaction; use std::{collections::BTreeMap, ops::AddAssign}; @@ -819,24 +1000,60 @@ mod tests { }, ); + let next_tx_number_to_prune = tx + .inner() + .get_prune_checkpoint(PrunePart::Receipts) + .unwrap() + .and_then(|checkpoint| checkpoint.tx_number) + .map(|tx_number| tx_number + 1) + .unwrap_or_default(); + + let last_pruned_tx_number = blocks + .iter() + .map(|block| block.body.len()) + .sum::() + .min(next_tx_number_to_prune as usize + pruner.batch_sizes.receipts - 1); + + let last_pruned_block_number = blocks + .iter() + .fold_while((0, 0), |(_, mut tx_count), block| { + tx_count += block.body.len(); + + if tx_count > last_pruned_tx_number { + Done((block.number, tx_count)) + } else { + Continue((block.number, tx_count)) + } + }) + .into_inner() + .0; + let provider = tx.inner_rw(); - assert_matches!(pruner.prune_receipts(&provider, to_block, prune_mode), Ok(())); + let result = pruner.prune_receipts(&provider, to_block, prune_mode); + assert_matches!(result, Ok(_)); + let done = result.unwrap(); provider.commit().expect("commit"); + let last_pruned_block_number = + last_pruned_block_number.checked_sub(if done { 0 } else { 1 }); + assert_eq!( tx.table::().unwrap().len(), - blocks[to_block as usize + 1..].iter().map(|block| block.body.len()).sum::() + blocks.iter().map(|block| block.body.len()).sum::() - + (last_pruned_tx_number + 1) ); assert_eq!( tx.inner().get_prune_checkpoint(PrunePart::Receipts).unwrap(), - Some(PruneCheckpoint { block_number: to_block, prune_mode }) + Some(PruneCheckpoint { + block_number: last_pruned_block_number, + tx_number: Some(last_pruned_tx_number as TxNumber), + prune_mode + }) ); }; - // Pruning first time ever, no previous checkpoint is present - test_prune(10); - // Prune second time, previous checkpoint is present, should continue pruning from where - // ended last time + test_prune(15); + test_prune(15); test_prune(20); } @@ -879,27 +1096,59 @@ mod tests { }, ); + let next_tx_number_to_prune = tx + .inner() + .get_prune_checkpoint(PrunePart::TransactionLookup) + .unwrap() + .and_then(|checkpoint| checkpoint.tx_number) + .map(|tx_number| tx_number + 1) + 
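+                // +1 because the checkpoint stores the number of the last pruned transaction
+                // (`TxNumber` is 0-indexed), so pruning resumes at the transaction after it.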
.unwrap_or_default(); + + let last_pruned_tx_number = + blocks.iter().map(|block| block.body.len()).sum::().min( + next_tx_number_to_prune as usize + pruner.batch_sizes.transaction_lookup - 1, + ); + + let last_pruned_block_number = blocks + .iter() + .fold_while((0, 0), |(_, mut tx_count), block| { + tx_count += block.body.len(); + + if tx_count > last_pruned_tx_number { + Done((block.number, tx_count)) + } else { + Continue((block.number, tx_count)) + } + }) + .into_inner() + .0; + let provider = tx.inner_rw(); - assert_matches!( - pruner.prune_transaction_lookup(&provider, to_block, prune_mode), - Ok(()) - ); + let result = pruner.prune_transaction_lookup(&provider, to_block, prune_mode); + assert_matches!(result, Ok(_)); + let done = result.unwrap(); provider.commit().expect("commit"); + let last_pruned_block_number = + last_pruned_block_number.checked_sub(if done { 0 } else { 1 }); + assert_eq!( tx.table::().unwrap().len(), - blocks[to_block as usize + 1..].iter().map(|block| block.body.len()).sum::() + blocks.iter().map(|block| block.body.len()).sum::() - + (last_pruned_tx_number + 1) ); assert_eq!( tx.inner().get_prune_checkpoint(PrunePart::TransactionLookup).unwrap(), - Some(PruneCheckpoint { block_number: to_block, prune_mode }) + Some(PruneCheckpoint { + block_number: last_pruned_block_number, + tx_number: Some(last_pruned_tx_number as TxNumber), + prune_mode + }) ); }; - // Pruning first time ever, no previous checkpoint is present - test_prune(10); - // Prune second time, previous checkpoint is present, should continue pruning from where - // ended last time + test_prune(15); + test_prune(15); test_prune(20); } @@ -945,27 +1194,59 @@ mod tests { }, ); + let next_tx_number_to_prune = tx + .inner() + .get_prune_checkpoint(PrunePart::SenderRecovery) + .unwrap() + .and_then(|checkpoint| checkpoint.tx_number) + .map(|tx_number| tx_number + 1) + .unwrap_or_default(); + + let last_pruned_tx_number = + blocks.iter().map(|block| block.body.len()).sum::().min( + next_tx_number_to_prune as usize + pruner.batch_sizes.transaction_senders - 1, + ); + + let last_pruned_block_number = blocks + .iter() + .fold_while((0, 0), |(_, mut tx_count), block| { + tx_count += block.body.len(); + + if tx_count > last_pruned_tx_number { + Done((block.number, tx_count)) + } else { + Continue((block.number, tx_count)) + } + }) + .into_inner() + .0; + let provider = tx.inner_rw(); - assert_matches!( - pruner.prune_transaction_senders(&provider, to_block, prune_mode), - Ok(()) - ); + let result = pruner.prune_transaction_senders(&provider, to_block, prune_mode); + assert_matches!(result, Ok(_)); + let done = result.unwrap(); provider.commit().expect("commit"); + let last_pruned_block_number = + last_pruned_block_number.checked_sub(if done { 0 } else { 1 }); + assert_eq!( tx.table::().unwrap().len(), - blocks[to_block as usize + 1..].iter().map(|block| block.body.len()).sum::() + blocks.iter().map(|block| block.body.len()).sum::() - + (last_pruned_tx_number + 1) ); assert_eq!( tx.inner().get_prune_checkpoint(PrunePart::SenderRecovery).unwrap(), - Some(PruneCheckpoint { block_number: to_block, prune_mode }) + Some(PruneCheckpoint { + block_number: last_pruned_block_number, + tx_number: Some(last_pruned_tx_number as TxNumber), + prune_mode + }) ); }; - // Pruning first time ever, no previous checkpoint is present - test_prune(10); - // Prune second time, previous checkpoint is present, should continue pruning from where - // ended last time + test_prune(15); + test_prune(15); test_prune(20); } @@ -974,8 +1255,7 
@@ mod tests { let tx = TestTransaction::default(); let mut rng = generators::rng(); - let block_num = 7000; - let blocks = random_block_range(&mut rng, 0..=block_num, H256::zero(), 0..1); + let blocks = random_block_range(&mut rng, 0..=7000, H256::zero(), 0..1); tx.insert_blocks(blocks.iter(), None).expect("insert blocks"); let accounts = @@ -1007,7 +1287,7 @@ mod tests { let original_shards = tx.table::().unwrap(); - let test_prune = |to_block: BlockNumber| { + let test_prune = |to_block: BlockNumber, run: usize, expect_done: bool| { let prune_mode = PruneMode::Before(to_block); let pruner = Pruner::new( tx.inner_raw(), @@ -1016,29 +1296,68 @@ mod tests { PruneModes { account_history: Some(prune_mode), ..Default::default() }, BatchSizes { // Less than total amount of blocks to prune to test the batching logic - account_history: 10, + account_history: 2000, ..Default::default() }, ); let provider = tx.inner_rw(); - assert_matches!(pruner.prune_account_history(&provider, to_block, prune_mode), Ok(())); + let result = pruner.prune_account_history(&provider, to_block, prune_mode); + assert_matches!(result, Ok(_)); + let done = result.unwrap(); + assert_eq!(done, expect_done); provider.commit().expect("commit"); + let changesets = changesets + .iter() + .enumerate() + .flat_map(|(block_number, changeset)| { + changeset.into_iter().map(move |change| (block_number, change)) + }) + .collect::>(); + + let pruned = changesets + .iter() + .enumerate() + .skip_while(|(i, (block_number, _))| { + *i < pruner.batch_sizes.account_history * run && + *block_number <= to_block as usize + }) + .next() + .map(|(i, _)| i) + .unwrap_or_default(); + + let mut pruned_changesets = changesets + .iter() + // Skip what we've pruned so far, subtracting one to get last pruned block number + // further down + .skip(pruned.saturating_sub(1)); + + let last_pruned_block_number = pruned_changesets + .next() + .map(|(block_number, _)| if done { *block_number } else { block_number.saturating_sub(1) } as BlockNumber) + .unwrap_or(to_block); + + let pruned_changesets = + pruned_changesets.fold(BTreeMap::new(), |mut acc, (block_number, change)| { + acc.entry(block_number).or_insert_with(Vec::new).push(change); + acc + }); + assert_eq!( tx.table::().unwrap().len(), - changesets[to_block as usize + 1..].iter().flatten().count() + pruned_changesets.values().flatten().count() ); let actual_shards = tx.table::().unwrap(); let expected_shards = original_shards .iter() - .filter(|(key, _)| key.highest_block_number > to_block) + .filter(|(key, _)| key.highest_block_number > last_pruned_block_number) .map(|(key, blocks)| { let new_blocks = blocks .iter(0) - .skip_while(|block| *block <= to_block as usize) + .skip_while(|block| *block <= last_pruned_block_number as usize) .collect::>(); (key.clone(), BlockNumberList::new_pre_sorted(new_blocks)) }) @@ -1048,15 +1367,17 @@ mod tests { assert_eq!( tx.inner().get_prune_checkpoint(PrunePart::AccountHistory).unwrap(), - Some(PruneCheckpoint { block_number: to_block, prune_mode }) + Some(PruneCheckpoint { + block_number: Some(last_pruned_block_number), + tx_number: None, + prune_mode + }) ); }; - // Prune first time: no previous checkpoint is present - test_prune(3000); - // Prune second time: previous checkpoint is present, should continue pruning from where - // ended last time - test_prune(4500); + test_prune(1700, 1, false); + test_prune(1700, 2, true); + test_prune(2000, 3, true); } #[test] @@ -1064,8 +1385,7 @@ mod tests { let tx = TestTransaction::default(); let mut rng = 
generators::rng(); - let block_num = 7000; - let blocks = random_block_range(&mut rng, 0..=block_num, H256::zero(), 0..1); + let blocks = random_block_range(&mut rng, 0..=7000, H256::zero(), 0..1); tx.insert_blocks(blocks.iter(), None).expect("insert blocks"); let accounts = @@ -1097,7 +1417,7 @@ mod tests { let original_shards = tx.table::().unwrap(); - let test_prune = |to_block: BlockNumber| { + let test_prune = |to_block: BlockNumber, run: usize, expect_done: bool| { let prune_mode = PruneMode::Before(to_block); let pruner = Pruner::new( tx.inner_raw(), @@ -1106,33 +1426,72 @@ mod tests { PruneModes { storage_history: Some(prune_mode), ..Default::default() }, BatchSizes { // Less than total amount of blocks to prune to test the batching logic - storage_history: 10, + storage_history: 2000, ..Default::default() }, ); let provider = tx.inner_rw(); - assert_matches!(pruner.prune_storage_history(&provider, to_block, prune_mode), Ok(())); + let result = pruner.prune_storage_history(&provider, to_block, prune_mode); + assert_matches!(result, Ok(_)); + let done = result.unwrap(); + assert_eq!(done, expect_done); provider.commit().expect("commit"); + let changesets = changesets + .iter() + .enumerate() + .flat_map(|(block_number, changeset)| { + changeset.into_iter().flat_map(move |(address, _, entries)| { + entries.into_iter().map(move |entry| (block_number, address, entry)) + }) + }) + .collect::>(); + + let pruned = changesets + .iter() + .enumerate() + .skip_while(|(i, (block_number, _, _))| { + *i < pruner.batch_sizes.storage_history * run && + *block_number <= to_block as usize + }) + .next() + .map(|(i, _)| i) + .unwrap_or_default(); + + let mut pruned_changesets = changesets + .iter() + // Skip what we've pruned so far, subtracting one to get last pruned block number + // further down + .skip(pruned.saturating_sub(1)); + + let last_pruned_block_number = pruned_changesets + .next() + .map(|(block_number, _, _)| if done { *block_number } else { block_number.saturating_sub(1) } as BlockNumber) + .unwrap_or(to_block); + + let pruned_changesets = pruned_changesets.fold( + BTreeMap::new(), + |mut acc, (block_number, address, entry)| { + acc.entry((block_number, address)).or_insert_with(Vec::new).push(entry); + acc + }, + ); + assert_eq!( tx.table::().unwrap().len(), - changesets[to_block as usize + 1..] 
- .iter() - .flatten() - .flat_map(|(_, _, entries)| entries) - .count() + pruned_changesets.values().flatten().count() ); let actual_shards = tx.table::().unwrap(); let expected_shards = original_shards .iter() - .filter(|(key, _)| key.sharded_key.highest_block_number > to_block) + .filter(|(key, _)| key.sharded_key.highest_block_number > last_pruned_block_number) .map(|(key, blocks)| { let new_blocks = blocks .iter(0) - .skip_while(|block| *block <= to_block as usize) + .skip_while(|block| *block <= last_pruned_block_number as usize) .collect::>(); (key.clone(), BlockNumberList::new_pre_sorted(new_blocks)) }) @@ -1142,14 +1501,116 @@ mod tests { assert_eq!( tx.inner().get_prune_checkpoint(PrunePart::StorageHistory).unwrap(), - Some(PruneCheckpoint { block_number: to_block, prune_mode }) + Some(PruneCheckpoint { + block_number: Some(last_pruned_block_number), + tx_number: None, + prune_mode + }) ); }; - // Prune first time: no previous checkpoint is present - test_prune(3000); - // Prune second time: previous checkpoint is present, should continue pruning from where - // ended last time - test_prune(4500); + test_prune(2300, 1, false); + test_prune(2300, 2, true); + test_prune(3000, 3, true); + } + + #[test] + fn prune_receipts_by_logs() { + let tx = TestTransaction::default(); + let mut rng = generators::rng(); + + let tip = 300; + let blocks = random_block_range(&mut rng, 0..=tip, H256::zero(), 1..5); + tx.insert_blocks(blocks.iter(), None).expect("insert blocks"); + + let mut receipts = Vec::new(); + + let (deposit_contract_addr, _) = random_eoa_account(&mut rng); + for block in &blocks { + assert!(!block.body.is_empty()); + for (txi, transaction) in block.body.iter().enumerate() { + let mut receipt = random_receipt(&mut rng, transaction, Some(1)); + receipt.logs.push(random_log( + &mut rng, + if txi == (block.body.len() - 1) { Some(deposit_contract_addr) } else { None }, + Some(1), + )); + receipts.push((receipts.len() as u64, receipt)); + } + } + tx.insert_receipts(receipts).expect("insert receipts"); + + assert_eq!( + tx.table::().unwrap().len(), + blocks.iter().map(|block| block.body.len()).sum::() + ); + assert_eq!( + tx.table::().unwrap().len(), + tx.table::().unwrap().len() + ); + + let run_prune = || { + let provider = tx.inner_rw(); + + let prune_before_block: usize = 20; + let prune_mode = PruneMode::Before(prune_before_block as u64); + let receipts_log_filter = + ReceiptsLogPruneConfig(BTreeMap::from([(deposit_contract_addr, prune_mode)])); + let pruner = Pruner::new( + tx.inner_raw(), + MAINNET.clone(), + 5, + PruneModes { + receipts_log_filter: receipts_log_filter.clone(), + ..Default::default() + }, + BatchSizes { + // Less than total amount of blocks to prune to test the batching logic + receipts: 10, + ..Default::default() + }, + ); + + let result = pruner.prune_receipts_by_logs(&provider, tip); + assert_matches!(result, Ok(_)); + let done = result.unwrap(); + provider.commit().expect("commit"); + + let (pruned_block, pruned_tx) = tx + .inner() + .get_prune_checkpoint(PrunePart::ContractLogs) + .unwrap() + .and_then(|checkpoint| { + Some((checkpoint.block_number.unwrap(), checkpoint.tx_number.unwrap())) + }) + .unwrap_or_default(); + + // All receipts are in the end of the block + let unprunable = pruned_block.saturating_sub(prune_before_block as u64 - 1); + + assert_eq!( + tx.table::().unwrap().len(), + blocks.iter().map(|block| block.body.len()).sum::() - + ((pruned_tx + 1) - unprunable) as usize + ); + + return done + }; + + while !run_prune() {} + + let provider = 
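+        // Pruning has converged; below we walk every remaining receipt and assert it is either
+        // a deposit-contract receipt or inside the unprunable window (`tip - 128`).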
tx.inner(); + let mut cursor = provider.tx_ref().cursor_read::().unwrap(); + let walker = cursor.walk(None).unwrap(); + for receipt in walker { + let (tx_num, receipt) = receipt.unwrap(); + + // Either we only find our contract, or the receipt is part of the unprunable receipts + // set by tip - 128 + assert!( + receipt.logs.iter().any(|l| l.address == deposit_contract_addr) || + provider.transaction_block(tx_num).unwrap().unwrap() > tip - 128, + ); + } } } diff --git a/crates/revm/src/executor.rs b/crates/revm/src/executor.rs index 990e674bb7c5..6ba25f4192aa 100644 --- a/crates/revm/src/executor.rs +++ b/crates/revm/src/executor.rs @@ -251,6 +251,13 @@ where // append gas used cumulative_gas_used += result.gas_used(); + tracing::trace!( + target: "revm::executor", + hash = ?transaction.hash, + gas_used = result.gas_used(), + "transaction executed" + ); + // Push transaction changeset and calculate header bloom filter for receipt. post_state.add_receipt( block.number, diff --git a/crates/stages/src/stages/sender_recovery.rs b/crates/stages/src/stages/sender_recovery.rs index 9f72c69d6e3b..002f0b6708a9 100644 --- a/crates/stages/src/stages/sender_recovery.rs +++ b/crates/stages/src/stages/sender_recovery.rs @@ -212,11 +212,7 @@ fn stage_checkpoint( ) -> Result { let pruned_entries = provider .get_prune_checkpoint(PrunePart::SenderRecovery)? - .map(|checkpoint| provider.block_body_indices(checkpoint.block_number)) - .transpose()? - .flatten() - // +1 is needed because TxNumber is 0-indexed - .map(|body| body.last_tx_num() + 1) + .and_then(|checkpoint| checkpoint.tx_number) .unwrap_or_default(); Ok(EntitiesCheckpoint { // If `TxSenders` table was pruned, we will have a number of entries in it not matching @@ -409,7 +405,13 @@ mod tests { .save_prune_checkpoint( PrunePart::SenderRecovery, PruneCheckpoint { - block_number: max_pruned_block as BlockNumber, + block_number: Some(max_pruned_block), + tx_number: Some( + blocks[..=max_pruned_block as usize] + .iter() + .map(|block| block.body.len() as u64) + .sum::(), + ), prune_mode: PruneMode::Full, }, ) diff --git a/crates/stages/src/stages/tx_lookup.rs b/crates/stages/src/stages/tx_lookup.rs index 211266d45d81..65f5772b74ee 100644 --- a/crates/stages/src/stages/tx_lookup.rs +++ b/crates/stages/src/stages/tx_lookup.rs @@ -13,7 +13,7 @@ use reth_primitives::{ stage::{EntitiesCheckpoint, StageCheckpoint, StageId}, PrunePart, TransactionSignedNoHash, TxNumber, H256, }; -use reth_provider::{BlockReader, DatabaseProviderRW, PruneCheckpointReader}; +use reth_provider::{DatabaseProviderRW, PruneCheckpointReader}; use tokio::sync::mpsc; use tracing::*; @@ -186,11 +186,7 @@ fn stage_checkpoint( ) -> Result { let pruned_entries = provider .get_prune_checkpoint(PrunePart::TransactionLookup)? - .map(|checkpoint| provider.block_body_indices(checkpoint.block_number)) - .transpose()? 
- .flatten() - // +1 is needed because TxNumber is 0-indexed - .map(|body| body.last_tx_num() + 1) + .and_then(|checkpoint| checkpoint.tx_number) .unwrap_or_default(); Ok(EntitiesCheckpoint { // If `TxHashNumber` table was pruned, we will have a number of entries in it not matching @@ -365,7 +361,13 @@ mod tests { .save_prune_checkpoint( PrunePart::TransactionLookup, PruneCheckpoint { - block_number: max_pruned_block as BlockNumber, + block_number: Some(max_pruned_block), + tx_number: Some( + blocks[..=max_pruned_block as usize] + .iter() + .map(|block| block.body.len() as u64) + .sum::(), + ), prune_mode: PruneMode::Full, }, ) diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 506cc59ebab3..b45417909b95 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -111,14 +111,18 @@ impl ProviderFactory { // If we pruned account or storage history, we can't return state on every historical block. // Instead, we should cap it at the latest prune checkpoint for corresponding prune part. - if let Some(prune_checkpoint) = account_history_prune_checkpoint { + if let Some(prune_checkpoint_block_number) = + account_history_prune_checkpoint.and_then(|checkpoint| checkpoint.block_number) + { state_provider = state_provider.with_lowest_available_account_history_block_number( - prune_checkpoint.block_number + 1, + prune_checkpoint_block_number + 1, ); } - if let Some(prune_checkpoint) = storage_history_prune_checkpoint { + if let Some(prune_checkpoint_block_number) = + storage_history_prune_checkpoint.and_then(|checkpoint| checkpoint.block_number) + { state_provider = state_provider.with_lowest_available_storage_history_block_number( - prune_checkpoint.block_number + 1, + prune_checkpoint_block_number + 1, ); } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index b013ee697737..85c497cc4432 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -17,7 +17,7 @@ use reth_db::{ sharded_key, storage_sharded_key::StorageShardedKey, AccountBeforeTx, BlockNumberAddress, ShardedKey, StoredBlockBodyIndices, StoredBlockOmmers, StoredBlockWithdrawals, }, - table::Table, + table::{Table, TableRow}, tables, transaction::{DbTx, DbTxMut}, BlockNumberList, DatabaseError, @@ -624,85 +624,61 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> DatabaseProvider<'this, TX> { } /// Prune the table for the specified pre-sorted key iterator. - /// Returns number of rows pruned. - pub fn prune_table_with_iterator( - &self, - keys: impl IntoIterator, - ) -> std::result::Result { - self.prune_table_with_iterator_in_batches::(keys, usize::MAX, |_| {}, |_| false) - } - - /// Prune the table for the specified pre-sorted key iterator, calling `chunk_callback` after - /// every `batch_size` pruned rows with number of total rows pruned. - /// - /// `skip_filter` can be used to skip pruning certain elements. /// /// Returns number of rows pruned. 
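    // The limit-aware variant below deletes at most `limit` rows per call and returns, along
    // with the deleted count, a `bool` that is `true` once the key iterator has been exhausted,
    // i.e. once this prune part is done for now.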
-    pub fn prune_table_with_iterator_in_batches<T: Table>(
+    pub fn prune_table_with_iterator<T: Table>(
         &self,
         keys: impl IntoIterator<Item = T::Key>,
-        batch_size: usize,
-        mut batch_callback: impl FnMut(usize),
-        skip_filter: impl Fn(&T::Value) -> bool,
-    ) -> std::result::Result<usize, DatabaseError> {
+        limit: usize,
+        mut delete_callback: impl FnMut(TableRow<T>),
+    ) -> std::result::Result<(usize, bool), DatabaseError> {
         let mut cursor = self.tx.cursor_write::<T>()?;
         let mut deleted = 0;

-        for key in keys {
-            if let Some((_, value)) = cursor.seek_exact(key)? {
-                if !skip_filter(&value) {
-                    cursor.delete_current()?;
-                    deleted += 1;
-                }
+        let mut keys = keys.into_iter();
+        for key in &mut keys {
+            let row = cursor.seek_exact(key.clone())?;
+            if let Some(row) = row {
+                cursor.delete_current()?;
+                deleted += 1;
+                delete_callback(row);
             }

-            if deleted % batch_size == 0 {
-                batch_callback(deleted);
+            if deleted == limit {
+                break
             }
         }

-        if deleted % batch_size != 0 {
-            batch_callback(deleted);
-        }
-
-        Ok(deleted)
+        Ok((deleted, keys.next().is_none()))
     }

-    /// Prune the table for the specified key range, calling `chunk_callback` after every
-    /// `batch_size` pruned rows with number of total unique keys and total rows pruned. For dupsort
-    /// tables, these numbers will be different as one key can correspond to multiple rows.
+    /// Prune the table for the specified key range.
     ///
-    /// Returns number of rows pruned.
-    pub fn prune_table_with_range_in_batches<T: Table>(
+    /// Returns the number of rows pruned and a flag that is `true` once the whole range has
+    /// been processed.
+    pub fn prune_table_with_range<T: Table>(
         &self,
-        keys: impl RangeBounds<T::Key>,
-        batch_size: usize,
-        mut batch_callback: impl FnMut(usize, usize),
-    ) -> std::result::Result<(), DatabaseError> {
+        keys: impl RangeBounds<T::Key> + Clone + Debug,
+        limit: usize,
+        mut skip_filter: impl FnMut(&TableRow<T>) -> bool,
+        mut delete_callback: impl FnMut(TableRow<T>),
+    ) -> std::result::Result<(usize, bool), DatabaseError> {
         let mut cursor = self.tx.cursor_write::<T>()?;
-        let mut walker = cursor.walk_range(keys)?;
-        let mut deleted_keys = 0;
-        let mut deleted_rows = 0;
-        let mut previous_key = None;
-
-        while let Some((key, _)) = walker.next().transpose()? {
-            walker.delete_current()?;
-            deleted_rows += 1;
-            if previous_key.as_ref().map(|previous_key| previous_key != &key).unwrap_or(true) {
-                deleted_keys += 1;
-                previous_key = Some(key);
-            }
+        let mut walker = cursor.walk_range(keys.clone())?;
+        let mut deleted = 0;

-            if deleted_rows % batch_size == 0 {
-                batch_callback(deleted_keys, deleted_rows);
+        while let Some(row) = walker.next().transpose()? {
+            if !skip_filter(&row) {
+                walker.delete_current()?;
+                deleted += 1;
+                delete_callback(row);
             }
-        }

-        if deleted_rows % batch_size != 0 {
-            batch_callback(deleted_keys, deleted_rows);
+            if deleted == limit {
+                break
+            }
         }

-        Ok(())
+        Ok((deleted, walker.next().transpose()?.is_none()))
     }

     /// Load shard and remove it.
If list is empty, last shard was full or From 1eee5ee80a7148a8ab8c83a2a5c498f07e1db48c Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 23 Aug 2023 18:45:53 +0100 Subject: [PATCH 511/722] feat(pruner, primitives): move prune batch sizes to `ChainSpec` (#4318) Co-authored-by: joshieDo --- bin/reth/src/node/mod.rs | 3 +- crates/config/src/config.rs | 4 +- .../consensus/beacon/src/engine/test_utils.rs | 6 +- crates/primitives/src/chain/spec.rs | 22 ++- crates/primitives/src/lib.rs | 4 +- crates/primitives/src/prune/batch_sizes.rs | 83 ++++++++++ crates/primitives/src/prune/mod.rs | 2 + crates/prune/src/lib.rs | 2 +- crates/prune/src/pruner.rs | 146 +++++++----------- 9 files changed, 165 insertions(+), 107 deletions(-) create mode 100644 crates/primitives/src/prune/batch_sizes.rs diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 06c90465d418..bfdd6a711cbb 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -51,7 +51,6 @@ use reth_provider::{ providers::BlockchainProvider, BlockHashReader, BlockReader, CanonStateSubscriptions, HeaderProvider, ProviderFactory, StageCheckpointReader, }; -use reth_prune::BatchSizes; use reth_revm::Factory; use reth_revm_inspectors::stack::Hook; use reth_rpc_engine_api::EngineApi; @@ -429,7 +428,7 @@ impl NodeCommand { self.chain.clone(), prune_config.block_interval, prune_config.parts, - BatchSizes::default(), + self.chain.prune_batch_sizes, ) }); diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index cc54312553ef..15dcb42f1adc 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -283,14 +283,14 @@ impl Default for IndexHistoryConfig { #[serde(default)] pub struct PruneConfig { /// Minimum pruning interval measured in blocks. - pub block_interval: u64, + pub block_interval: usize, /// Pruning configuration for every part of the data that can be pruned. 
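    // Each part takes an optional `PruneMode` (`Full`, `Distance(N)` or `Before(N)`).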
pub parts: PruneModes, } impl Default for PruneConfig { fn default() -> Self { - Self { block_interval: 10, parts: PruneModes::default() } + Self { block_interval: 5, parts: PruneModes::default() } } } diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 78b3825e6b39..2aafd028da3f 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -20,12 +20,12 @@ use reth_interfaces::{ test_utils::{NoopFullBlockClient, TestConsensus}, }; use reth_payload_builder::test_utils::spawn_test_payload_service; -use reth_primitives::{BlockNumber, ChainSpec, PruneModes, H256, U256}; +use reth_primitives::{BlockNumber, ChainSpec, PruneBatchSizes, PruneModes, H256, U256}; use reth_provider::{ providers::BlockchainProvider, test_utils::TestExecutorFactory, BlockExecutor, ExecutorFactory, ProviderFactory, StateProvider, }; -use reth_prune::{BatchSizes, Pruner}; +use reth_prune::Pruner; use reth_revm::Factory; use reth_rpc_types::engine::{ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus}; use reth_stages::{ @@ -470,7 +470,7 @@ where self.base_config.chain_spec.clone(), 5, PruneModes::default(), - BatchSizes::default(), + PruneBatchSizes::default(), ); let (mut engine, handle) = BeaconConsensusEngine::new( diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index f82d93052c22..6b4bfffa0d2d 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -7,7 +7,7 @@ use crate::{ header::Head, proofs::genesis_state_root, Address, BlockNumber, Chain, ForkFilter, ForkHash, ForkId, Genesis, Hardfork, Header, - SealedHeader, H160, H256, U256, + PruneBatchSizes, SealedHeader, H160, H256, U256, }; use hex_literal::hex; use once_cell::sync::Lazy; @@ -63,7 +63,8 @@ pub static MAINNET: Lazy> = Lazy::new(|| { 11052984, H256(hex!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5")), )), - ..Default::default() + base_fee_params: BaseFeeParams::ethereum(), + prune_batch_sizes: PruneBatchSizes::mainnet(), } .into() }); @@ -104,7 +105,8 @@ pub static GOERLI: Lazy> = Lazy::new(|| { 4367322, H256(hex!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5")), )), - ..Default::default() + base_fee_params: BaseFeeParams::ethereum(), + prune_batch_sizes: PruneBatchSizes::testnet(), } .into() }); @@ -149,7 +151,8 @@ pub static SEPOLIA: Lazy> = Lazy::new(|| { 1273020, H256(hex!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5")), )), - ..Default::default() + base_fee_params: BaseFeeParams::ethereum(), + prune_batch_sizes: PruneBatchSizes::testnet(), } .into() }); @@ -203,7 +206,7 @@ pub struct BaseFeeParams { } impl BaseFeeParams { - /// Get the base fee parameters for ethereum mainnet + /// Get the base fee parameters for Ethereum mainnet pub const fn ethereum() -> BaseFeeParams { BaseFeeParams { max_change_denominator: EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR, @@ -247,12 +250,18 @@ pub struct ChainSpec { /// The active hard forks and their activation conditions pub hardforks: BTreeMap, - /// The deposit contract deployed for PoS. + /// The deposit contract deployed for PoS #[serde(skip, default)] pub deposit_contract: Option, /// The parameters that configure how a block's base fee is computed pub base_fee_params: BaseFeeParams, + + /// The batch sizes for pruner, per block. 
In the actual pruner run it will be multiplied by + /// the amount of blocks between pruner runs to account for the difference in amount of new + /// data coming in. + #[serde(default)] + pub prune_batch_sizes: PruneBatchSizes, } impl Default for ChainSpec { @@ -266,6 +275,7 @@ impl Default for ChainSpec { hardforks: Default::default(), deposit_contract: Default::default(), base_fee_params: BaseFeeParams::ethereum(), + prune_batch_sizes: Default::default(), } } } diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 898ccc40af8b..22435f436b3a 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -81,8 +81,8 @@ pub use net::{ }; pub use peer::{PeerId, WithPeerId}; pub use prune::{ - PruneCheckpoint, PruneMode, PruneModes, PrunePart, PrunePartError, ReceiptsLogPruneConfig, - MINIMUM_PRUNING_DISTANCE, + PruneBatchSizes, PruneCheckpoint, PruneMode, PruneModes, PrunePart, PrunePartError, + ReceiptsLogPruneConfig, MINIMUM_PRUNING_DISTANCE, }; pub use receipt::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef}; pub use revm_primitives::JumpMap; diff --git a/crates/primitives/src/prune/batch_sizes.rs b/crates/primitives/src/prune/batch_sizes.rs new file mode 100644 index 000000000000..9498ea627b4b --- /dev/null +++ b/crates/primitives/src/prune/batch_sizes.rs @@ -0,0 +1,83 @@ +use paste::paste; +use serde::{Deserialize, Serialize}; + +/// Batch sizes for configuring the pruner. +/// The batch size for each prune part should be both large enough to prune the data which was +/// generated with each new block, and small enough to not generate an excessive load on the +/// database due to deletion of too many rows at once. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +pub struct PruneBatchSizes { + /// Maximum number of receipts to prune, per block. + receipts: usize, + /// Maximum number of transaction lookup entries to prune, per block. + transaction_lookup: usize, + /// Maximum number of transaction senders to prune, per block. + transaction_senders: usize, + /// Maximum number of account history entries to prune, per block. + /// Measured in the number of `AccountChangeSet` table rows. + account_history: usize, + /// Maximum number of storage history entries to prune, per block. + /// Measured in the number of `StorageChangeSet` table rows. + storage_history: usize, +} + +macro_rules! impl_prune_batch_size_methods { + ($(($human_name:expr, $name:ident)),+) => { + paste! { + impl PruneBatchSizes { + $( + #[doc = concat!("Maximum number of ", $human_name, " to prune, accounting for the block interval.")] + pub fn $name(&self, block_interval: usize) -> usize { + self.$name * block_interval + } + + #[doc = concat!("Set the maximum number of ", $human_name, " to prune per block.")] + pub fn [](mut self, batch_size: usize) -> Self { + self.$name = batch_size; + self + } + )+ + } + } + }; +} + +impl_prune_batch_size_methods!( + ("receipts", receipts), + ("transaction lookup entries", transaction_lookup), + ("transaction senders", transaction_senders), + ("account history entries", account_history), + ("storage history entries", storage_history) +); + +impl PruneBatchSizes { + /// Default prune batch sizes for Ethereum mainnet. + /// These settings are sufficient to prune more data than generated with each new block. 
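+    // For example (using the accessors generated by the macro above):
+    // `PruneBatchSizes::mainnet().receipts(5)` yields 250 * 5 = 1250 receipts per pruner run.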
+ pub const fn mainnet() -> Self { + Self { + receipts: 250, + transaction_lookup: 250, + transaction_senders: 250, + account_history: 1000, + storage_history: 1000, + } + } + + /// Default prune batch sizes for Ethereum testnets. + /// These settings are sufficient to prune more data than generated with each new block. + pub const fn testnet() -> Self { + Self { + receipts: 100, + transaction_lookup: 100, + transaction_senders: 100, + account_history: 500, + storage_history: 500, + } + } +} + +impl Default for PruneBatchSizes { + fn default() -> Self { + Self::mainnet() + } +} diff --git a/crates/primitives/src/prune/mod.rs b/crates/primitives/src/prune/mod.rs index 48bdacdb9e89..a2249f1c5b2b 100644 --- a/crates/primitives/src/prune/mod.rs +++ b/crates/primitives/src/prune/mod.rs @@ -1,9 +1,11 @@ +mod batch_sizes; mod checkpoint; mod mode; mod part; mod target; use crate::{Address, BlockNumber}; +pub use batch_sizes::PruneBatchSizes; pub use checkpoint::PruneCheckpoint; pub use mode::PruneMode; pub use part::{PrunePart, PrunePartError}; diff --git a/crates/prune/src/lib.rs b/crates/prune/src/lib.rs index 56999c50c135..9c08cfff461a 100644 --- a/crates/prune/src/lib.rs +++ b/crates/prune/src/lib.rs @@ -4,4 +4,4 @@ mod pruner; use crate::metrics::Metrics; pub use error::PrunerError; -pub use pruner::{BatchSizes, Pruner, PrunerResult, PrunerWithResult}; +pub use pruner::{Pruner, PrunerResult, PrunerWithResult}; diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs index 4332534d39f8..df2e8ce633dc 100644 --- a/crates/prune/src/pruner.rs +++ b/crates/prune/src/pruner.rs @@ -12,8 +12,8 @@ use reth_db::{ BlockNumberList, }; use reth_primitives::{ - BlockNumber, ChainSpec, PruneCheckpoint, PruneMode, PruneModes, PrunePart, TxNumber, - MINIMUM_PRUNING_DISTANCE, + BlockNumber, ChainSpec, PruneBatchSizes, PruneCheckpoint, PruneMode, PruneModes, PrunePart, + TxNumber, MINIMUM_PRUNING_DISTANCE, }; use reth_provider::{ BlockReader, DatabaseProviderRW, ProviderFactory, PruneCheckpointReader, PruneCheckpointWriter, @@ -31,46 +31,19 @@ pub type PrunerResult = Result; /// The pruner type itself with the result of [Pruner::run] pub type PrunerWithResult = (Pruner, PrunerResult); -pub struct BatchSizes { - /// Maximum number of receipts to prune in one run. - receipts: usize, - /// Maximum number of transaction lookup entries to prune in one run. - transaction_lookup: usize, - /// Maximum number of transaction senders to prune in one run. - transaction_senders: usize, - /// Maximum number of account history entries to prune in one run. - /// Measured in the number of [tables::AccountChangeSet] rows. - account_history: usize, - /// Maximum number of storage history entries to prune in one run. - /// Measured in the number of [tables::StorageChangeSet] rows. - storage_history: usize, -} - -impl Default for BatchSizes { - fn default() -> Self { - Self { - receipts: 1000, - transaction_lookup: 1000, - transaction_senders: 1000, - account_history: 1000, - storage_history: 1000, - } - } -} - /// Pruning routine. Main pruning logic happens in [Pruner::run]. pub struct Pruner { metrics: Metrics, provider_factory: ProviderFactory, /// Minimum pruning interval measured in blocks. All prune parts are checked and, if needed, /// pruned, when the chain advances by the specified number of blocks. - min_block_interval: u64, + min_block_interval: usize, /// Last pruned block number. Used in conjunction with `min_block_interval` to determine /// when the pruning needs to be initiated. 
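    // E.g. with `min_block_interval = 5`, a tip at block 105 triggers a run when the last one
    // happened at block 100 or earlier (`is_pruning_needed` uses a saturating subtraction, so a
    // reverted chain simply skips pruning).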
last_pruned_block_number: Option, modes: PruneModes, - /// Maximum entries to prune per one run, per prune part. - batch_sizes: BatchSizes, + /// Maximum entries to prune per block, per prune part. + batch_sizes: PruneBatchSizes, } impl Pruner { @@ -78,9 +51,9 @@ impl Pruner { pub fn new( db: DB, chain_spec: Arc, - min_block_interval: u64, + min_block_interval: usize, modes: PruneModes, - batch_sizes: BatchSizes, + batch_sizes: PruneBatchSizes, ) -> Self { Self { metrics: Metrics::default(), @@ -263,7 +236,8 @@ impl Pruner { // Saturating subtraction is needed for the case when the chain was reverted, meaning // current block number might be less than the previously pruned block number. If // that's the case, no pruning is needed as outdated data is also reverted. - tip_block_number.saturating_sub(last_pruned_block_number) >= self.min_block_interval + tip_block_number.saturating_sub(last_pruned_block_number) >= + self.min_block_interval as u64 }) { debug!( target: "pruner", @@ -372,7 +346,7 @@ impl Pruner { let mut last_pruned_transaction = tx_range_end; let (deleted, done) = provider.prune_table_with_range::( tx_range, - self.batch_sizes.receipts, + self.batch_sizes.receipts(self.min_block_interval), |_| false, |row| last_pruned_transaction = row.0, )?; @@ -490,7 +464,7 @@ impl Pruner { "Calculated block ranges and filtered addresses", ); - let mut limit = self.batch_sizes.receipts; + let mut limit = self.batch_sizes.receipts(self.min_block_interval); let mut done = true; let mut last_pruned_transaction = None; for (start_block, end_block, num_addresses) in block_ranges { @@ -603,7 +577,10 @@ impl Pruner { } } .into_inner(); - let tx_range = start..=(end.min(start + self.batch_sizes.transaction_lookup as u64 - 1)); + let tx_range = start..= + (end.min( + start + self.batch_sizes.transaction_lookup(self.min_block_interval) as u64 - 1, + )); let tx_range_end = *tx_range.end(); // Retrieve transactions in the range and calculate their hashes in parallel @@ -624,7 +601,7 @@ impl Pruner { let mut last_pruned_transaction = tx_range_end; let (deleted, done) = provider.prune_table_with_iterator::( hashes, - self.batch_sizes.transaction_lookup, + self.batch_sizes.transaction_lookup(self.min_block_interval), |row| last_pruned_transaction = row.1, )?; trace!(target: "pruner", %deleted, %done, "Pruned transaction lookup"); @@ -673,7 +650,7 @@ impl Pruner { let mut last_pruned_transaction = tx_range_end; let (deleted, done) = provider.prune_table_with_range::( tx_range, - self.batch_sizes.transaction_senders, + self.batch_sizes.transaction_senders(self.min_block_interval), |_| false, |row| last_pruned_transaction = row.0, )?; @@ -722,7 +699,7 @@ impl Pruner { let mut last_changeset_pruned_block = None; let (rows, done) = provider.prune_table_with_range::( range, - self.batch_sizes.account_history, + self.batch_sizes.account_history(self.min_block_interval), |_| false, |row| last_changeset_pruned_block = Some(row.0), )?; @@ -778,7 +755,7 @@ impl Pruner { let mut last_changeset_pruned_block = None; let (rows, done) = provider.prune_table_with_range::( BlockNumberAddress::range(range), - self.batch_sizes.storage_history, + self.batch_sizes.storage_history(self.min_block_interval), |_| false, |row| last_changeset_pruned_block = Some(row.0.block_number()), )?; @@ -916,7 +893,7 @@ impl Pruner { #[cfg(test)] mod tests { - use crate::{pruner::BatchSizes, Pruner}; + use crate::Pruner; use assert_matches::assert_matches; use itertools::{ FoldWhile::{Continue, Done}, @@ -934,8 +911,8 @@ mod tests { }, }; use 
reth_primitives::{ - BlockNumber, PruneCheckpoint, PruneMode, PruneModes, PrunePart, ReceiptsLogPruneConfig, - TxNumber, H256, MAINNET, + BlockNumber, PruneBatchSizes, PruneCheckpoint, PruneMode, PruneModes, PrunePart, + ReceiptsLogPruneConfig, TxNumber, H256, MAINNET, }; use reth_provider::{PruneCheckpointReader, TransactionsProvider}; use reth_stages::test_utils::TestTransaction; @@ -945,14 +922,14 @@ mod tests { fn is_pruning_needed() { let db = create_test_rw_db(); let pruner = - Pruner::new(db, MAINNET.clone(), 5, PruneModes::default(), BatchSizes::default()); + Pruner::new(db, MAINNET.clone(), 5, PruneModes::default(), PruneBatchSizes::default()); // No last pruned block number was set before let first_block_number = 1; assert!(pruner.is_pruning_needed(first_block_number)); // Delta is not less than min block interval - let second_block_number = first_block_number + pruner.min_block_interval; + let second_block_number = first_block_number + pruner.min_block_interval as u64; assert!(pruner.is_pruning_needed(second_block_number)); // Delta is less than min block interval @@ -991,13 +968,10 @@ mod tests { let pruner = Pruner::new( tx.inner_raw(), MAINNET.clone(), - 5, + 1, PruneModes { receipts: Some(prune_mode), ..Default::default() }, - BatchSizes { - // Less than total amount of blocks to prune to test the batching logic - receipts: 10, - ..Default::default() - }, + // Less than total amount of blocks to prune to test the batching logic + PruneBatchSizes::default().with_receipts(10), ); let next_tx_number_to_prune = tx @@ -1008,11 +982,12 @@ mod tests { .map(|tx_number| tx_number + 1) .unwrap_or_default(); - let last_pruned_tx_number = blocks - .iter() - .map(|block| block.body.len()) - .sum::() - .min(next_tx_number_to_prune as usize + pruner.batch_sizes.receipts - 1); + let last_pruned_tx_number = + blocks.iter().map(|block| block.body.len()).sum::().min( + next_tx_number_to_prune as usize + + pruner.batch_sizes.receipts(pruner.min_block_interval) - + 1, + ); let last_pruned_block_number = blocks .iter() @@ -1087,13 +1062,10 @@ mod tests { let pruner = Pruner::new( tx.inner_raw(), MAINNET.clone(), - 5, + 1, PruneModes { transaction_lookup: Some(prune_mode), ..Default::default() }, - BatchSizes { - // Less than total amount of blocks to prune to test the batching logic - transaction_lookup: 10, - ..Default::default() - }, + // Less than total amount of blocks to prune to test the batching logic + PruneBatchSizes::default().with_transaction_lookup(10), ); let next_tx_number_to_prune = tx @@ -1106,7 +1078,9 @@ mod tests { let last_pruned_tx_number = blocks.iter().map(|block| block.body.len()).sum::().min( - next_tx_number_to_prune as usize + pruner.batch_sizes.transaction_lookup - 1, + next_tx_number_to_prune as usize + + pruner.batch_sizes.transaction_lookup(pruner.min_block_interval) - + 1, ); let last_pruned_block_number = blocks @@ -1185,13 +1159,10 @@ mod tests { let pruner = Pruner::new( tx.inner_raw(), MAINNET.clone(), - 5, + 1, PruneModes { sender_recovery: Some(prune_mode), ..Default::default() }, - BatchSizes { - // Less than total amount of blocks to prune to test the batching logic - transaction_senders: 10, - ..Default::default() - }, + // Less than total amount of blocks to prune to test the batching logic + PruneBatchSizes::default().with_transaction_senders(10), ); let next_tx_number_to_prune = tx @@ -1204,7 +1175,9 @@ mod tests { let last_pruned_tx_number = blocks.iter().map(|block| block.body.len()).sum::().min( - next_tx_number_to_prune as usize + 
pruner.batch_sizes.transaction_senders - 1, + next_tx_number_to_prune as usize + + pruner.batch_sizes.transaction_senders(pruner.min_block_interval) - + 1, ); let last_pruned_block_number = blocks @@ -1292,13 +1265,10 @@ mod tests { let pruner = Pruner::new( tx.inner_raw(), MAINNET.clone(), - 5, + 1, PruneModes { account_history: Some(prune_mode), ..Default::default() }, - BatchSizes { - // Less than total amount of blocks to prune to test the batching logic - account_history: 2000, - ..Default::default() - }, + // Less than total amount of blocks to prune to test the batching logic + PruneBatchSizes::default().with_account_history(2000), ); let provider = tx.inner_rw(); @@ -1320,7 +1290,7 @@ mod tests { .iter() .enumerate() .skip_while(|(i, (block_number, _))| { - *i < pruner.batch_sizes.account_history * run && + *i < pruner.batch_sizes.account_history(pruner.min_block_interval) * run && *block_number <= to_block as usize }) .next() @@ -1422,13 +1392,10 @@ mod tests { let pruner = Pruner::new( tx.inner_raw(), MAINNET.clone(), - 5, + 1, PruneModes { storage_history: Some(prune_mode), ..Default::default() }, - BatchSizes { - // Less than total amount of blocks to prune to test the batching logic - storage_history: 2000, - ..Default::default() - }, + // Less than total amount of blocks to prune to test the batching logic + PruneBatchSizes::default().with_storage_history(2000), ); let provider = tx.inner_rw(); @@ -1452,7 +1419,7 @@ mod tests { .iter() .enumerate() .skip_while(|(i, (block_number, _, _))| { - *i < pruner.batch_sizes.storage_history * run && + *i < pruner.batch_sizes.storage_history(pruner.min_block_interval) * run && *block_number <= to_block as usize }) .next() @@ -1564,11 +1531,8 @@ mod tests { receipts_log_filter: receipts_log_filter.clone(), ..Default::default() }, - BatchSizes { - // Less than total amount of blocks to prune to test the batching logic - receipts: 10, - ..Default::default() - }, + // Less than total amount of blocks to prune to test the batching logic + PruneBatchSizes::default().with_storage_history(10), ); let result = pruner.prune_receipts_by_logs(&provider, tip); From cb6cf590685596ca1e7255189f50ea01ec272dcb Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 23 Aug 2023 21:07:47 +0100 Subject: [PATCH 512/722] feat(grafana): filter by instance, bump version (#4333) --- etc/grafana/dashboards/overview.json | 265 ++++++++++++++++++++++----- 1 file changed, 219 insertions(+), 46 deletions(-) diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 39f6f07592bc..01e9e67e36ba 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -27,7 +27,13 @@ "type": "grafana", "id": "grafana", "name": "Grafana", - "version": "10.0.2" + "version": "10.1.0" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph (old)", + "version": "" }, { "type": "panel", @@ -159,7 +165,7 @@ "showThresholdLabels": false, "showThresholdMarkers": true }, - "pluginVersion": "10.0.1", + "pluginVersion": "10.1.0", "targets": [ { "datasource": { @@ -226,7 +232,7 @@ "showUnfilled": true, "valueMode": "color" }, - "pluginVersion": "10.0.1", + "pluginVersion": "10.1.0", "targets": [ { "datasource": { @@ -270,6 +276,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -361,6 +368,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -470,6 +478,7 @@ 
"tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -609,7 +618,7 @@ "unit": "percentunit" } }, - "pluginVersion": "10.0.1", + "pluginVersion": "10.1.0", "targets": [ { "datasource": { @@ -727,6 +736,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -1006,7 +1016,7 @@ }, "showHeader": true }, - "pluginVersion": "10.0.1", + "pluginVersion": "10.1.0", "targets": [ { "datasource": { @@ -1066,6 +1076,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -1207,6 +1218,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -1300,6 +1312,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -1418,6 +1431,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -1718,6 +1732,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -1825,6 +1840,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -1980,6 +1996,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -2098,6 +2115,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -2218,6 +2236,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -2417,6 +2436,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -2532,6 +2552,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -2637,6 +2658,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -2776,6 +2798,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -2894,6 +2917,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -3011,6 +3035,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -3148,6 +3173,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -3242,6 +3268,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -3389,6 +3416,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -3549,6 +3577,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -3643,6 +3672,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -3737,6 +3767,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -3846,6 
+3877,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -3939,6 +3971,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -4044,6 +4077,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -4152,6 +4186,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 3, "pointSize": 5, @@ -4245,6 +4280,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 3, "pointSize": 5, @@ -4338,6 +4374,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 3, "pointSize": 5, @@ -4443,6 +4480,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -4503,7 +4541,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_jemalloc_active", + "expr": "reth_jemalloc_active{instance=~\"$instance\"}", "instant": false, "legendFormat": "Active", "range": true, @@ -4515,7 +4553,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_jemalloc_allocated", + "expr": "reth_jemalloc_allocated{instance=~\"$instance\"}", "hide": false, "instant": false, "legendFormat": "Allocated", @@ -4528,7 +4566,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_jemalloc_mapped", + "expr": "reth_jemalloc_mapped{instance=~\"$instance\"}", "hide": false, "instant": false, "legendFormat": "Mapped", @@ -4541,7 +4579,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_jemalloc_metadata", + "expr": "reth_jemalloc_metadata{instance=~\"$instance\"}", "hide": false, "instant": false, "legendFormat": "Metadata", @@ -4554,7 +4592,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_jemalloc_resident", + "expr": "reth_jemalloc_resident{instance=~\"$instance\"}", "hide": false, "instant": false, "legendFormat": "Resident", @@ -4567,7 +4605,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_jemalloc_retained", + "expr": "reth_jemalloc_retained{instance=~\"$instance\"}", "hide": false, "instant": false, "legendFormat": "Retained", @@ -4603,6 +4641,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -4663,7 +4702,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_process_resident_memory_bytes", + "expr": "reth_process_resident_memory_bytes{instance=~\"$instance\"}", "instant": false, "legendFormat": "Resident", "range": true, @@ -4698,6 +4737,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -4757,8 +4797,8 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "editorMode": "code", - "expr": "avg(rate(reth_process_cpu_seconds_total[1m]))", + "editorMode": "builder", + "expr": "avg(rate(reth_process_cpu_seconds_total{instance=~\"$instance\"}[1m]))", "instant": false, "legendFormat": "Process", "range": true, @@ -4793,6 +4833,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -4852,8 +4893,8 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "editorMode": "code", - "expr": "reth_process_open_fds", + "editorMode": "builder", 
+ "expr": "reth_process_open_fds{instance=~\"$instance\"}", "instant": false, "legendFormat": "Open", "range": true, @@ -4900,6 +4941,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -4995,6 +5037,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -5074,60 +5117,188 @@ "x": 0, "y": 204 }, - "id": 97, + "id": 108, "panels": [], "title": "RPC server", "type": "row" }, { - "title": "Active Requests", - "description": "The number of active requests.", - "type": "graph", + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, + "description": "The number of active requests.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 205 + }, + "hiddenSeries": false, + "id": 109, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.1.0", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, "targets": [ { - "expr": "reth_rpc_server_requests_started - reth_rpc_server_requests_finished", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_rpc_server_requests_started{instance=~\"$instance\"} - reth_rpc_server_requests_finished{instance=~\"$instance\"}", "format": "time_series", "legendFormat": "Active Requests", + "range": true, "refId": "A" } ], - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 205 + "thresholds": [], + "timeRegions": [], + "title": "Active Requests", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false } }, { - "title": "Active Websocket Connections", - "description": "The number of active websocket connections.", - "type": "graph", + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, + "description": "The number of active websocket connections.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 205 + }, + "hiddenSeries": false, + "id": 110, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.1.0", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, "targets": [ { - "expr": "reth_rpc_server_ws_session_opened - reth_rpc_server_ws_session_closed", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_rpc_server_ws_session_opened{instance=~\"$instance\"} - reth_rpc_server_ws_session_closed{instance=~\"$instance\"}", 
"format": "time_series", "legendFormat": "Active Websocket Connections", + "range": true, "refId": "A" } ], - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 205 + "thresholds": [], + "timeRegions": [], + "title": "Active Websocket Connections", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false } }, { - "title": "Request Latency time", - "type": "heatmap", "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" @@ -5154,7 +5325,7 @@ "x": 0, "y": 213 }, - "id": 42, + "id": 111, "maxDataPoints": 25, "options": { "calculate": false, @@ -5196,7 +5367,7 @@ "unit": "percentunit" } }, - "pluginVersion": "10.0.2", + "pluginVersion": "10.1.0", "targets": [ { "datasource": { @@ -5212,11 +5383,11 @@ "range": true, "refId": "A" } - ] + ], + "title": "Request Latency time", + "type": "heatmap" }, { - "title": "Call Latency time", - "type": "heatmap", "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" @@ -5243,7 +5414,7 @@ "x": 12, "y": 213 }, - "id": 42, + "id": 112, "maxDataPoints": 25, "options": { "calculate": false, @@ -5285,7 +5456,7 @@ "unit": "percentunit" } }, - "pluginVersion": "10.0.2", + "pluginVersion": "10.1.0", "targets": [ { "datasource": { @@ -5301,7 +5472,9 @@ "range": true, "refId": "A" } - ] + ], + "title": "Call Latency time", + "type": "heatmap" } ], "refresh": "30s", @@ -5343,6 +5516,6 @@ "timezone": "", "title": "reth", "uid": "2k8BXz24x", - "version": 4, + "version": 5, "weekStart": "" } \ No newline at end of file From fca6404fc794f2ca967eb11ddcf02bb18b5126e5 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 23 Aug 2023 23:08:14 +0100 Subject: [PATCH 513/722] feat(book): basic pruning chapter (#4334) --- book/SUMMARY.md | 1 + book/run/pruning.md | 60 ++++++++++++++++++++++++++++++++++++++++++ book/run/run-a-node.md | 1 + 3 files changed, 62 insertions(+) create mode 100644 book/run/pruning.md diff --git a/book/SUMMARY.md b/book/SUMMARY.md index 6c061cebd7ab..9a8b903153a8 100644 --- a/book/SUMMARY.md +++ b/book/SUMMARY.md @@ -13,6 +13,7 @@ 1. [Metrics](./run/observability.md) 1. [Configuring Reth](./run/config.md) 1. [Transaction types](./run/transactions.md) + 1. [Pruning](./run/pruning.md) 1. [Troubleshooting](./run/troubleshooting.md) 1. [Interacting with Reth over JSON-RPC](./jsonrpc/intro.md) 1. [eth](./jsonrpc/eth.md) diff --git a/book/run/pruning.md b/book/run/pruning.md new file mode 100644 index 000000000000..81c55e8261ce --- /dev/null +++ b/book/run/pruning.md @@ -0,0 +1,60 @@ +# Pruning + +> WARNING: pruning and full node are experimental features of Reth, +> and available only on `main` branch of the main repository now. + +By default, Reth runs as an archive node. Such nodes have all historical blocks and the state at each of these blocks +available for querying and tracing. + +Reth also supports pruning of historical data and running as a full node. This chapter will walk through +the steps for running Reth as a full node, what caveats to expect and how to configure your own pruned node. + +## Basic concepts + +- Archive node – Reth node that has all historical data from genesis. +- Pruned node – Reth node that has its historical data pruned partially or fully through +a [custom configuration](./config.md#the-prune-section). 
+- Full node – Reth node that has the latest state and historical data for only the last 128 blocks available
+for querying in the same way as an archive node.
+
+The node type that was chosen when first [running a node](./run-a-node.md) **cannot** be changed after
+the initial sync. Turning Archive into Pruned, or Pruned into Full is not supported.
+
+## Modes
+### Archive Node
+
+The default mode. Follow the steps from the previous chapter on [how to run on mainnet or official testnets](./mainnet.md).
+
+### Full Node
+
+To run Reth as a full node, follow the steps from the previous chapter on
+[how to run on mainnet or official testnets](./mainnet.md), and add a `--full` flag. For example:
+```bash
+RUST_LOG=info reth node \
+    --full \
+    --authrpc.jwtsecret /path/to/secret \
+    --authrpc.addr 127.0.0.1 \
+    --authrpc.port 8551
+```
+
+### Pruned Node
+
+To run Reth as a pruned node configured through a [custom configuration](./config.md#the-prune-section),
+modify the `reth.toml` file and run Reth in the same way as an archive node by following the steps from
+the previous chapter on [how to run on mainnet or official testnets](./mainnet.md).
+
+## RPC support
+
+As mentioned in the [pruning configuration chapter](./config.md#the-prune-section), there are several parts
+which can be pruned independently of each other:
+- Sender Recovery
+- Transaction Lookup
+- Receipts
+- Account History
+- Storage History
+
+Pruning each of these parts disables different RPC methods, because the historical data or lookup indexes
+become unavailable.
+
+> TODO: `prune parts / RPC methods` table that shows which RPCs become unavailable when certain parts of the data
+> are pruned
\ No newline at end of file
diff --git a/book/run/run-a-node.md b/book/run/run-a-node.md
index 570b28d80236..54ffe873fa64 100644
--- a/book/run/run-a-node.md
+++ b/book/run/run-a-node.md
@@ -7,6 +7,7 @@ In this chapter we'll go through a few different topics you'll encounter when ru
 1. [Logs and Observability](./observability.md)
 1. [Configuring reth.toml](./config.md)
 1. [Transaction types](./transactions.md)
+1. [Pruning](./pruning.md)
 1. [Troubleshooting](./troubleshooting.md)
 
 In the future, we also intend to support the [OP Stack](https://stack.optimism.io/docs/understand/explainer/), which will allow you to run Reth as a Layer 2 client. More there soon!

From bfa130d1875f1e1510e181e09fced33578d70563 Mon Sep 17 00:00:00 2001
From: Alexey Shekhirin
Date: Wed, 23 Aug 2023 23:08:27 +0100
Subject: [PATCH 514/722] feat(bin): unhide and enable full flag (#4327)

---
 bin/reth/src/args/pruning_args.rs | 11 ++++-------
 book/cli/node.md                  |  4 ++++
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/bin/reth/src/args/pruning_args.rs b/bin/reth/src/args/pruning_args.rs
index 4b288a4e45e1..ccb6dda741fa 100644
--- a/bin/reth/src/args/pruning_args.rs
+++ b/bin/reth/src/args/pruning_args.rs
@@ -13,30 +13,27 @@ use std::sync::Arc;
 pub struct PruningArgs {
     /// Run full node. Only the most recent 128 block states are stored. This flag takes
     /// priority over pruning configuration in reth.toml.
-    // TODO(alexey): unhide when pruning is ready for production use
-    #[arg(long, hide = true, default_value_t = false)]
+    #[arg(long, default_value_t = false)]
     pub full: bool,
 }
 
 impl PruningArgs {
     /// Returns pruning configuration.
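+    ///
+    /// For `--full`, the configuration built below keeps only the most recent
+    /// [`MINIMUM_PRUNING_DISTANCE`] (128) blocks of sender recovery, account history, and
+    /// storage history, never prunes the transaction lookup table, and, when the chain spec
+    /// defines a deposit contract, prunes receipts before its deployment block while keeping
+    /// a receipts log filter for the contract address.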
-    pub fn prune_config(&self, _chain_spec: Arc<ChainSpec>) -> eyre::Result<Option<PruneConfig>> {
+    pub fn prune_config(&self, chain_spec: Arc<ChainSpec>) -> eyre::Result<Option<PruneConfig>> {
         Ok(if self.full {
-            eyre::bail!("full node is not supported yet, keep an eye on next releases");
-            #[allow(unreachable_code)]
             Some(PruneConfig {
                 block_interval: 5,
                 parts: PruneModes {
                     sender_recovery: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)),
                     transaction_lookup: None,
-                    receipts: _chain_spec
+                    receipts: chain_spec
                         .deposit_contract
                         .as_ref()
                         .map(|contract| PruneMode::Before(contract.block)),
                     account_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)),
                     storage_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)),
                     receipts_log_filter: ReceiptsLogPruneConfig(
-                        _chain_spec
+                        chain_spec
                             .deposit_contract
                             .as_ref()
                             .map(|contract| (contract.address, PruneMode::Before(contract.block)))
diff --git a/book/cli/node.md b/book/cli/node.md
index d0826f9b214e..2aa71d1ccefa 100644
--- a/book/cli/node.md
+++ b/book/cli/node.md
@@ -332,6 +332,10 @@ Dev testnet:
           Parses strings using [humantime::parse_duration]
           --dev.block-time 12s
 
+
+Pruning:
+      --full
+          Run full node. Only the most recent 128 block states are stored. This flag takes priority over pruning configuration in reth.toml
 
 Logging:
       --log.persistent

From b473f20279141865a1571694ea3fd67747eb34fa Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Thu, 24 Aug 2023 14:23:03 +0200
Subject: [PATCH 515/722] chore(deps): bump enr 0.9 (#4230)

---
 Cargo.lock                    | 158 +++++++++++++++------------------
 Cargo.toml                    |   9 +-
 crates/net/discv4/Cargo.toml  |   2 +-
 crates/net/dns/Cargo.toml     |   2 +-
 crates/net/network/Cargo.toml |   4 +-
 5 files changed, 77 insertions(+), 98 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index c12a766d326f..a2375940284d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -755,7 +755,7 @@ dependencies = [
  "num-bigint",
  "num-integer",
  "num-traits",
- "num_enum 0.7.0",
+ "num_enum",
  "once_cell",
  "pollster",
  "rand 0.8.5",
@@ -974,6 +974,20 @@ dependencies = [
  "thiserror",
 ]
 
+[[package]]
+name = "cargo_metadata"
+version = "0.17.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e7daec1a2a2129eeba1644b220b4647ec537b0b5d4bfd6876fcc5a540056b592"
+dependencies = [
+ "camino",
+ "cargo-platform",
+ "semver 1.0.18",
+ "serde",
+ "serde_json",
+ "thiserror",
+]
+
 [[package]]
 name = "cassowary"
 version = "0.3.0"
@@ -1238,6 +1252,18 @@ dependencies = [
  "windows-sys 0.45.0",
 ]
 
+[[package]]
+name = "const-hex"
+version = "1.6.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ca268df6cd88e646b564e6aff1a016834e5f42077c736ef6b6789c31ef9ec5dc"
+dependencies = [
+ "cfg-if",
+ "cpufeatures",
+ "hex",
+ "serde",
+]
+
 [[package]]
 name = "const-oid"
 version = "0.9.5"
@@ -1821,7 +1847,7 @@ dependencies = [
  "aes-gcm",
  "arrayvec",
  "delay_map",
- "enr 0.9.0",
+ "enr",
  "fnv",
  "futures",
  "hashlink",
@@ -1997,25 +2023,6 @@ version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d"
 
-[[package]]
-name = "enr"
-version = "0.8.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf56acd72bb22d2824e66ae8e9e5ada4d0de17a69c7fd35569dde2ada8ec9116"
-dependencies = [
- "base64 0.13.1",
- "bytes",
- "hex",
- "k256",
- "log",
- "rand 0.8.5",
- "rlp",
- "secp256k1",
- "serde",
- "sha3",
- "zeroize",
-]
-
 [[package]]
 name = "enr"
 version = "0.9.0"
@@ -2030,6 +2037,7 @@ dependencies = [
  "log",
  "rand 0.8.5",
  "rlp",
+ "secp256k1",
  "serde",
"serde-hex", "sha3", @@ -2194,17 +2202,16 @@ dependencies = [ [[package]] name = "ethers-contract" -version = "2.0.8" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d4719a44c3d37ab07c6dea99ab174068d8c35e441b60b6c20ce4e48357273e8" +checksum = "02bb80fd2c22631a5eb8a02cbf373cc5fd86937fc966bb670b9a884580c8e71c" dependencies = [ + "const-hex", "ethers-contract-abigen", "ethers-contract-derive", "ethers-core", "ethers-providers", - "ethers-signers", "futures-util", - "hex", "once_cell", "pin-project", "serde", @@ -2214,15 +2221,15 @@ dependencies = [ [[package]] name = "ethers-contract-abigen" -version = "2.0.8" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "155ea1b84d169d231317ed86e307af6f2bed6b40dd17e5e94bc84da21cadb21c" +checksum = "22c54db0d393393e732a5b20273e4f8ab89f0cce501c84e75fab9c126799a6e6" dependencies = [ "Inflector", + "const-hex", "dunce", "ethers-core", "eyre", - "hex", "prettyplease", "proc-macro2 1.0.66", "quote 1.0.33", @@ -2236,14 +2243,14 @@ dependencies = [ [[package]] name = "ethers-contract-derive" -version = "2.0.8" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8567ff196c4a37c1a8c90ec73bda0ad2062e191e4f0a6dc4d943e2ec4830fc88" +checksum = "62ee4f216184a1304b707ed258f4f70aa40bf7e1522ab8963d127a8d516eaa1a" dependencies = [ "Inflector", + "const-hex", "ethers-contract-abigen", "ethers-core", - "hex", "proc-macro2 1.0.66", "quote 1.0.33", "serde_json", @@ -2252,20 +2259,20 @@ dependencies = [ [[package]] name = "ethers-core" -version = "2.0.8" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60ca2514feb98918a0a31de7e1983c29f2267ebf61b2dc5d4294f91e5b866623" +checksum = "8c29523f73c12753165781c6e5dc11c84d3e44c080a15f7c6cfbd70b514cb6f1" dependencies = [ "arrayvec", "bytes", - "cargo_metadata", + "cargo_metadata 0.17.0", "chrono", + "const-hex", "elliptic-curve", "ethabi", "generic-array", - "hex", "k256", - "num_enum 0.6.1", + "num_enum", "once_cell", "open-fastrlp", "rand 0.8.5", @@ -2282,9 +2289,9 @@ dependencies = [ [[package]] name = "ethers-etherscan" -version = "2.0.8" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22b3a8269d3df0ed6364bc05b4735b95f4bf830ce3aef87d5e760fb0e93e5b91" +checksum = "4aab5af432b3fe5b7756b60df5c9ddeb85a13414575ad8a9acd707c24f0a77a5" dependencies = [ "ethers-core", "reqwest", @@ -2297,9 +2304,9 @@ dependencies = [ [[package]] name = "ethers-middleware" -version = "2.0.8" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0c339aad74ae5c451d27e0e49c7a3c7d22620b119b4f9291d7aa21f72d7f366" +checksum = "356151d5ded56d4918146366abc9dfc9df367cf0096492a7a5477b21b7693615" dependencies = [ "async-trait", "auto_impl", @@ -2324,24 +2331,25 @@ dependencies = [ [[package]] name = "ethers-providers" -version = "2.0.8" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b411b119f1cf0efb69e2190883dee731251882bb21270f893ee9513b3a697c48" +checksum = "00c84664b294e47fc2860d6db0db0246f79c4c724e552549631bb9505b834bee" dependencies = [ "async-trait", "auto_impl", "base64 0.21.2", "bytes", - "enr 0.8.1", + "const-hex", + "enr", "ethers-core", "futures-channel", "futures-core", "futures-timer", "futures-util", "hashers", - "hex", "http", "instant", + "jsonwebtoken", "once_cell", "pin-project", "reqwest", @@ -2361,17 +2369,17 @@ dependencies 
= [ [[package]] name = "ethers-signers" -version = "2.0.8" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4864d387456a9c09a1157fa10e1528b29d90f1d859443acf06a1b23365fb518c" +checksum = "170b299698702ef1f53d2275af7d6d97409cfa4f9398ee9ff518f6bc9102d0ad" dependencies = [ "async-trait", "coins-bip32", "coins-bip39", + "const-hex", "elliptic-curve", "eth-keystore", "ethers-core", - "hex", "rand 0.8.5", "sha2 0.10.7", "thiserror", @@ -3390,7 +3398,7 @@ dependencies = [ "socket2 0.5.3", "widestring", "windows-sys 0.48.0", - "winreg 0.50.0", + "winreg", ] [[package]] @@ -4205,34 +4213,13 @@ dependencies = [ "libc", ] -[[package]] -name = "num_enum" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a015b430d3c108a207fd776d2e2196aaf8b1cf8cf93253e3a097ff3085076a1" -dependencies = [ - "num_enum_derive 0.6.1", -] - [[package]] name = "num_enum" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70bf6736f74634d299d00086f02986875b3c2d924781a6a2cb6c201e73da0ceb" dependencies = [ - "num_enum_derive 0.7.0", -] - -[[package]] -name = "num_enum_derive" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" -dependencies = [ - "proc-macro-crate", - "proc-macro2 1.0.66", - "quote 1.0.33", - "syn 2.0.29", + "num_enum_derive", ] [[package]] @@ -5148,9 +5135,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.18" +version = "0.11.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" +checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" dependencies = [ "base64 0.21.2", "bytes", @@ -5177,7 +5164,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "winreg 0.10.1", + "winreg", ] [[package]] @@ -5438,7 +5425,7 @@ name = "reth-discv4" version = "0.1.0-alpha.7" dependencies = [ "discv5", - "enr 0.8.1", + "enr", "generic-array", "hex", "parking_lot 0.12.1", @@ -5463,7 +5450,7 @@ version = "0.1.0-alpha.7" dependencies = [ "async-trait", "data-encoding", - "enr 0.8.1", + "enr", "linked_hash_set", "parking_lot 0.12.1", "reth-net-common", @@ -5702,7 +5689,7 @@ dependencies = [ "aquamarine", "async-trait", "auto_impl", - "enr 0.8.1", + "enr", "ethers-core", "ethers-middleware", "ethers-providers", @@ -7360,7 +7347,7 @@ version = "4.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f0528a7ad0bc85f826aa831434a37833aea622a5ae155f5b5dd431b25244213" dependencies = [ - "cargo_metadata", + "cargo_metadata 0.15.4", "proc-macro2 1.0.66", "quote 1.0.33", "serde", @@ -7563,9 +7550,9 @@ dependencies = [ [[package]] name = "tokio-tungstenite" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec509ac96e9a0c43427c74f003127d953a265737636129424288d27cb5c4b12c" +checksum = "2b2dbec703c26b00d74844519606ef15d09a7d6857860f84ad223dec002ddea2" dependencies = [ "futures-util", "log", @@ -7941,9 +7928,9 @@ dependencies = [ [[package]] name = "tungstenite" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15fba1a6d6bb030745759a9a2a588bfe8490fc8b4751a277db3a0be1c9ebbf67" +checksum = "e862a1c4128df0112ab625f55cd5c934bcb4312ba80b39ae4b4835a3fd58e649" dependencies = [ "byteorder", 
"bytes", @@ -8461,15 +8448,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "winreg" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" -dependencies = [ - "winapi", -] - [[package]] name = "winreg" version = "0.50.0" diff --git a/Cargo.toml b/Cargo.toml index 1c1c7d9862b0..f9389a0f1abe 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -106,10 +106,10 @@ reth-rpc-types-compat = { path = "./crates/rpc/rpc-types-compat"} ## eth -ethers-core = { version = "2.0.8", default-features = false } -ethers-providers = { version = "2.0.8", default-features = false } -ethers-signers = { version = "2.0.8", default-features = false } -ethers-middleware = { version = "2.0.8", default-features = false } +ethers-core = { version = "2.0", default-features = false } +ethers-providers = { version = "2.0", default-features = false } +ethers-signers = { version = "2.0", default-features = false } +ethers-middleware = { version = "2.0", default-features = false } ## misc bytes = "1.4" @@ -148,6 +148,7 @@ jsonrpsee-types = { version = "0.20" } ## crypto secp256k1 = { version = "0.27.0", default-features = false, features = ["global-context", "rand-std", "recovery"] } +enr = { version = "0.9", default-features = false, features = ["k256"] } # for eip-4844 c-kzg = { git = "https://github.com/ethereum/c-kzg-4844" } diff --git a/crates/net/discv4/Cargo.toml b/crates/net/discv4/Cargo.toml index bd9a1f95dc54..310da6c8a61b 100644 --- a/crates/net/discv4/Cargo.toml +++ b/crates/net/discv4/Cargo.toml @@ -21,7 +21,7 @@ reth-net-nat = { path = "../nat" } # ethereum discv5 = { git = "https://github.com/sigp/discv5" } secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery", "serde"] } -enr = { version = "0.8.1", default-features = false, features = ["rust-secp256k1"] } +enr = { workspace = true, default-features = false, features = ["rust-secp256k1"] } # async/futures tokio = { workspace = true, features = ["io-util", "net", "time"] } diff --git a/crates/net/dns/Cargo.toml b/crates/net/dns/Cargo.toml index eeb6d81fd7a7..b4ef53c89473 100644 --- a/crates/net/dns/Cargo.toml +++ b/crates/net/dns/Cargo.toml @@ -16,7 +16,7 @@ reth-rlp.workspace = true # ethereum secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery", "serde"] } -enr = { version = "0.8.1", default-features = false, features = ["rust-secp256k1"] } +enr = { workspace = true, default-features = false, features = ["rust-secp256k1"] } # async/futures tokio = { workspace = true, features = ["io-util", "net", "time"] } diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index a4acb5f06589..8ac11a46a167 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -62,7 +62,7 @@ linked-hash-map = "0.5.6" rand.workspace = true secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } -enr = { version = "0.8.1", features = ["rust-secp256k1"], optional = true } +enr = { workspace = true, features = ["rust-secp256k1"], optional = true } ethers-core = { workspace = true, default-features = false, optional = true } tempfile = { version = "3.3", optional = true } @@ -84,7 +84,7 @@ ethers-providers = { workspace = true, default-features = false, features = ["ws ethers-signers = { workspace = true, default-features = false } ethers-middleware = { workspace = true, default-features = false } -enr = { version = "0.8.1", features = ["serde", 
"rust-secp256k1"] } +enr = { workspace = true, features = ["serde", "rust-secp256k1"] } # misc hex = "0.4" From c2436a955f40457fd1888fbacebaeee35af9e60f Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Thu, 24 Aug 2023 15:14:51 +0200 Subject: [PATCH 516/722] feat(rpc): add missing `DebugApi` methods (#4321) --- crates/rpc/rpc-api/src/debug.rs | 240 +++++++++++++++++++++++++++++++- crates/rpc/rpc/src/debug.rs | 211 +++++++++++++++++++++++++++- 2 files changed, 449 insertions(+), 2 deletions(-) diff --git a/crates/rpc/rpc-api/src/debug.rs b/crates/rpc/rpc-api/src/debug.rs index 880fa82c10e2..8422a1753502 100644 --- a/crates/rpc/rpc-api/src/debug.rs +++ b/crates/rpc/rpc-api/src/debug.rs @@ -1,5 +1,5 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::{BlockId, BlockNumberOrTag, Bytes, H256}; +use reth_primitives::{BlockId, BlockNumberOrTag, Bytes, H160, H256}; use reth_rpc_types::{ trace::geth::{ BlockTraceResult, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, @@ -127,4 +127,242 @@ pub trait DebugApi { /// be printed to stderr. #[method(name = "backtraceAt")] async fn debug_backtrace_at(&self, location: &str) -> RpcResult<()>; + + /// Enumerates all accounts at a given block with paging capability. `maxResults` are returned + /// in the page and the items have keys that come after the `start` key (hashed address). + /// + /// If incompletes is false, then accounts for which the key preimage (i.e: the address) doesn't + /// exist in db are skipped. NB: geth by default does not store preimages. + #[method(name = "accountRange")] + async fn debug_account_range( + &self, + block_number: BlockNumberOrTag, + start: Bytes, + max_results: u64, + nocode: bool, + nostorage: bool, + incompletes: bool, + ) -> RpcResult<()>; + + /// Turns on block profiling for the given duration and writes profile data to disk. It uses a + /// profile rate of 1 for most accurate information. If a different rate is desired, set the + /// rate and write the profile manually using `debug_writeBlockProfile`. + #[method(name = "blockProfile")] + async fn debug_block_profile(&self, file: String, seconds: u64) -> RpcResult<()>; + + /// Flattens the entire key-value database into a single level, removing all unused slots and + /// merging all keys. + #[method(name = "chaindbCompact")] + async fn debug_chaindb_compact(&self) -> RpcResult<()>; + + /// Returns leveldb properties of the key-value database. + #[method(name = "chaindbProperty")] + async fn debug_chaindb_property(&self, property: String) -> RpcResult<()>; + + /// Turns on CPU profiling for the given duration and writes profile data to disk. + #[method(name = "cpuProfile")] + async fn debug_cpu_profile(&self, file: String, seconds: u64) -> RpcResult<()>; + + /// Retrieves an ancient binary blob from the freezer. The freezer is a collection of + /// append-only immutable files. The first argument `kind` specifies which table to look up data + /// from. The list of all table kinds are as follows: + #[method(name = "dbAncient")] + async fn debug_db_ancient(&self, kind: String, number: u64) -> RpcResult<()>; + + /// Returns the number of ancient items in the ancient store. + #[method(name = "dbAncients")] + async fn debug_db_ancients(&self) -> RpcResult<()>; + + /// Returns the raw value of a key stored in the database. 
+ #[method(name = "dbGet")] + async fn debug_db_get(&self, key: String) -> RpcResult<()>; + + /// Retrieves the state that corresponds to the block number and returns a list of accounts + /// (including storage and code). + #[method(name = "dumpBlock")] + async fn debug_dump_block(&self, number: BlockId) -> RpcResult<()>; + + /// Forces garbage collection. + #[method(name = "freeOSMemory")] + async fn debug_free_os_memory(&self) -> RpcResult<()>; + + /// Forces a temporary client freeze, normally when the server is overloaded. + #[method(name = "freezeClient")] + async fn debug_freeze_client(&self, node: String) -> RpcResult<()>; + + /// Returns garbage collection statistics. + #[method(name = "gcStats")] + async fn debug_gc_stats(&self) -> RpcResult<()>; + + /// Returns the first number where the node has accessible state on disk. This is the + /// post-state of that block and the pre-state of the next block. The (from, to) parameters + /// are the sequence of blocks to search, which can go either forwards or backwards. + /// + /// Note: to get the last state pass in the range of blocks in reverse, i.e. (last, first). + #[method(name = "getAccessibleState")] + async fn debug_get_accessible_state( + &self, + from: BlockNumberOrTag, + to: BlockNumberOrTag, + ) -> RpcResult<()>; + + /// Returns all accounts that have changed between the two blocks specified. A change is defined + /// as a difference in nonce, balance, code hash, or storage hash. With one parameter, returns + /// the list of accounts modified in the specified block. + #[method(name = "getModifiedAccountsByHash")] + async fn debug_get_modified_accounts_by_hash( + &self, + start_hash: H256, + end_hash: H256, + ) -> RpcResult<()>; + + /// Returns all accounts that have changed between the two blocks specified. A change is defined + /// as a difference in nonce, balance, code hash or storage hash. + #[method(name = "getModifiedAccountsByNumber")] + async fn debug_get_modified_accounts_by_number( + &self, + start_number: u64, + end_number: u64, + ) -> RpcResult<()>; + + /// Turns on Go runtime tracing for the given duration and writes trace data to disk. + #[method(name = "goTrace")] + async fn debug_go_trace(&self, file: String, seconds: u64) -> RpcResult<()>; + + /// Executes a block (bad- or canon- or side-), and returns a list of intermediate roots: the + /// stateroot after each transaction. + #[method(name = "intermediateRoots")] + async fn debug_intermediate_roots( + &self, + block_hash: H256, + opts: Option, + ) -> RpcResult<()>; + + /// Returns detailed runtime memory statistics. + #[method(name = "memStats")] + async fn debug_mem_stats(&self) -> RpcResult<()>; + + /// Turns on mutex profiling for `nsec` seconds and writes profile data to file. It uses a + /// profile rate of 1 for most accurate information. If a different rate is desired, set the + /// rate and write the profile manually. + #[method(name = "mutexProfile")] + async fn debug_mutex_profile(&self, file: String, nsec: u64) -> RpcResult<()>; + + /// Returns the preimage for a sha3 hash, if known. + #[method(name = "preimage")] + async fn debug_preimage(&self, hash: H256) -> RpcResult<()>; + + /// Retrieves a block and returns its pretty printed form. + #[method(name = "printBlock")] + async fn debug_print_block(&self, number: u64) -> RpcResult<()>; + + /// Fetches and retrieves the seed hash of the block by number. 
+ #[method(name = "seedHash")] + async fn debug_seed_hash(&self, number: u64) -> RpcResult; + + /// Sets the rate (in samples/sec) of goroutine block profile data collection. A non-zero rate + /// enables block profiling, setting it to zero stops the profile. Collected profile data can be + /// written using `debug_writeBlockProfile`. + #[method(name = "setBlockProfileRate")] + async fn debug_set_block_profile_rate(&self, rate: u64) -> RpcResult<()>; + + /// Sets the garbage collection target percentage. A negative value disables garbage collection. + #[method(name = "setGCPercent")] + async fn debug_set_gc_percent(&self, v: i32) -> RpcResult<()>; + + /// Sets the current head of the local chain by block number. Note, this is a destructive action + /// and may severely damage your chain. Use with extreme caution. + #[method(name = "setHead")] + async fn debug_set_head(&self, number: u64) -> RpcResult<()>; + + /// Sets the rate of mutex profiling. + #[method(name = "setMutexProfileFraction")] + async fn debug_set_mutex_profile_fraction(&self, rate: i32) -> RpcResult<()>; + + /// Configures how often in-memory state tries are persisted to disk. The interval needs to be + /// in a format parsable by a time.Duration. Note that the interval is not wall-clock time. + /// Rather it is accumulated block processing time after which the state should be flushed. + #[method(name = "setTrieFlushInterval")] + async fn debug_set_trie_flush_interval(&self, interval: String) -> RpcResult<()>; + + /// Returns a printed representation of the stacks of all goroutines. + #[method(name = "stacks")] + async fn debug_stacks(&self) -> RpcResult<()>; + + /// Used to obtain info about a block. + #[method(name = "standardTraceBadBlockToFile")] + async fn debug_standard_trace_bad_block_to_file( + &self, + block: BlockNumberOrTag, + opts: Option, + ) -> RpcResult<()>; + + /// This method is similar to `debug_standardTraceBlockToFile`, but can be used to obtain info + /// about a block which has been rejected as invalid (for some reason). + #[method(name = "standardTraceBlockToFile")] + async fn debug_standard_trace_block_to_file( + &self, + block: BlockNumberOrTag, + opts: Option, + ) -> RpcResult<()>; + + /// Turns on CPU profiling indefinitely, writing to the given file. + #[method(name = "startCPUProfile")] + async fn debug_start_cpu_profile(&self, file: String) -> RpcResult<()>; + + /// Starts writing a Go runtime trace to the given file. + #[method(name = "startGoTrace")] + async fn debug_start_go_trace(&self, file: String) -> RpcResult<()>; + + /// Stops an ongoing CPU profile. + #[method(name = "stopCPUProfile")] + async fn debug_stop_cpu_profile(&self) -> RpcResult<()>; + + /// Stops writing the Go runtime trace. + #[method(name = "stopGoTrace")] + async fn debug_stop_go_trace(&self) -> RpcResult<()>; + + /// Returns the storage at the given block height and transaction index. The result can be + /// paged by providing a `maxResult` to cap the number of storage slots returned as well as + /// specifying the offset via `keyStart` (hash of storage key). + #[method(name = "storageRangeAt")] + async fn debug_storage_range_at( + &self, + block_hash: H256, + tx_idx: usize, + contract_address: H160, + key_start: H256, + max_result: u64, + ) -> RpcResult<()>; + + /// Returns the structured logs created during the execution of EVM against a block pulled + /// from the pool of bad ones and returns them as a JSON object. For the second parameter see + /// TraceConfig reference. 
+ #[method(name = "traceBadBlock")] + async fn debug_trace_bad_block( + &self, + block_hash: H256, + opts: Option, + ) -> RpcResult<()>; + + /// Sets the logging verbosity ceiling. Log messages with level up to and including the given + /// level will be printed. + #[method(name = "verbosity")] + async fn debug_verbosity(&self, level: usize) -> RpcResult<()>; + + /// Sets the logging verbosity pattern. + #[method(name = "vmodule")] + async fn debug_vmodule(&self, pattern: String) -> RpcResult<()>; + + /// Writes a goroutine blocking profile to the given file. + #[method(name = "writeBlockProfile")] + async fn debug_write_block_profile(&self, file: String) -> RpcResult<()>; + + /// Writes an allocation profile to the given file. + #[method(name = "writeMemProfile")] + async fn debug_write_mem_profile(&self, file: String) -> RpcResult<()>; + + /// Writes a goroutine blocking profile to the given file. + #[method(name = "writeMutexProfile")] + async fn debug_write_mutex_profile(&self, file: String) -> RpcResult<()>; } diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 7e987ca9c70f..a18a5cf6ff06 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -12,7 +12,9 @@ use crate::{ }; use async_trait::async_trait; use jsonrpsee::core::RpcResult; -use reth_primitives::{Account, Block, BlockId, BlockNumberOrTag, Bytes, TransactionSigned, H256}; +use reth_primitives::{ + Account, Block, BlockId, BlockNumberOrTag, Bytes, TransactionSigned, H160, H256, +}; use reth_provider::{BlockReaderIdExt, HeaderProvider, StateProviderBox}; use reth_revm::{ database::{State, SubState}, @@ -653,6 +655,213 @@ where Ok(()) } + async fn debug_account_range( + &self, + _block_number: BlockNumberOrTag, + _start: Bytes, + _max_results: u64, + _nocode: bool, + _nostorage: bool, + _incompletes: bool, + ) -> RpcResult<()> { + Ok(()) + } + + async fn debug_block_profile(&self, _file: String, _seconds: u64) -> RpcResult<()> { + Ok(()) + } + + async fn debug_chaindb_compact(&self) -> RpcResult<()> { + Ok(()) + } + + async fn debug_chaindb_property(&self, _property: String) -> RpcResult<()> { + Ok(()) + } + + async fn debug_cpu_profile(&self, _file: String, _seconds: u64) -> RpcResult<()> { + Ok(()) + } + + async fn debug_db_ancient(&self, _kind: String, _number: u64) -> RpcResult<()> { + Ok(()) + } + + async fn debug_db_ancients(&self) -> RpcResult<()> { + Ok(()) + } + + async fn debug_db_get(&self, _key: String) -> RpcResult<()> { + Ok(()) + } + + async fn debug_dump_block(&self, _number: BlockId) -> RpcResult<()> { + Ok(()) + } + + async fn debug_free_os_memory(&self) -> RpcResult<()> { + Ok(()) + } + + async fn debug_freeze_client(&self, _node: String) -> RpcResult<()> { + Ok(()) + } + + async fn debug_gc_stats(&self) -> RpcResult<()> { + Ok(()) + } + + async fn debug_get_accessible_state( + &self, + _from: BlockNumberOrTag, + _to: BlockNumberOrTag, + ) -> RpcResult<()> { + Ok(()) + } + + async fn debug_get_modified_accounts_by_hash( + &self, + _start_hash: H256, + _end_hash: H256, + ) -> RpcResult<()> { + Ok(()) + } + + async fn debug_get_modified_accounts_by_number( + &self, + _start_number: u64, + _end_number: u64, + ) -> RpcResult<()> { + Ok(()) + } + + async fn debug_go_trace(&self, _file: String, _seconds: u64) -> RpcResult<()> { + Ok(()) + } + + async fn debug_intermediate_roots( + &self, + _block_hash: H256, + _opts: Option, + ) -> RpcResult<()> { + Ok(()) + } + + async fn debug_mem_stats(&self) -> RpcResult<()> { + Ok(()) + } + + async fn debug_mutex_profile(&self, 
+        Ok(())
+    }
+
+    async fn debug_preimage(&self, _hash: H256) -> RpcResult<()> {
+        Ok(())
+    }
+
+    async fn debug_print_block(&self, _number: u64) -> RpcResult<()> {
+        Ok(())
+    }
+
+    async fn debug_seed_hash(&self, _number: u64) -> RpcResult<H256> {
+        Ok(Default::default())
+    }
+
+    async fn debug_set_block_profile_rate(&self, _rate: u64) -> RpcResult<()> {
+        Ok(())
+    }
+
+    async fn debug_set_gc_percent(&self, _v: i32) -> RpcResult<()> {
+        Ok(())
+    }
+
+    async fn debug_set_head(&self, _number: u64) -> RpcResult<()> {
+        Ok(())
+    }
+
+    async fn debug_set_mutex_profile_fraction(&self, _rate: i32) -> RpcResult<()> {
+        Ok(())
+    }
+
+    async fn debug_set_trie_flush_interval(&self, _interval: String) -> RpcResult<()> {
+        Ok(())
+    }
+
+    async fn debug_stacks(&self) -> RpcResult<()> {
+        Ok(())
+    }
+
+    async fn debug_standard_trace_bad_block_to_file(
+        &self,
+        _block: BlockNumberOrTag,
+        _opts: Option<GethDebugTracingOptions>,
+    ) -> RpcResult<()> {
+        Ok(())
+    }
+
+    async fn debug_standard_trace_block_to_file(
+        &self,
+        _block: BlockNumberOrTag,
+        _opts: Option<GethDebugTracingOptions>,
+    ) -> RpcResult<()> {
+        Ok(())
+    }
+
+    async fn debug_start_cpu_profile(&self, _file: String) -> RpcResult<()> {
+        Ok(())
+    }
+
+    async fn debug_start_go_trace(&self, _file: String) -> RpcResult<()> {
+        Ok(())
+    }
+
+    async fn debug_stop_cpu_profile(&self) -> RpcResult<()> {
+        Ok(())
+    }
+
+    async fn debug_stop_go_trace(&self) -> RpcResult<()> {
+        Ok(())
+    }
+
+    async fn debug_storage_range_at(
+        &self,
+        _block_hash: H256,
+        _tx_idx: usize,
+        _contract_address: H160,
+        _key_start: H256,
+        _max_result: u64,
+    ) -> RpcResult<()> {
+        Ok(())
+    }
+
+    async fn debug_trace_bad_block(
+        &self,
+        _block_hash: H256,
+        _opts: Option<GethDebugTracingOptions>,
+    ) -> RpcResult<()> {
+        Ok(())
+    }
+
+    async fn debug_verbosity(&self, _level: usize) -> RpcResult<()> {
+        Ok(())
+    }
+
+    async fn debug_vmodule(&self, _pattern: String) -> RpcResult<()> {
+        Ok(())
+    }
+
+    async fn debug_write_block_profile(&self, _file: String) -> RpcResult<()> {
+        Ok(())
+    }
+
+    async fn debug_write_mem_profile(&self, _file: String) -> RpcResult<()> {
+        Ok(())
+    }
+
+    async fn debug_write_mutex_profile(&self, _file: String) -> RpcResult<()> {
+        Ok(())
+    }
+
     /// Handler for `debug_getRawBlock`
     async fn raw_block(&self, block_id: BlockId) -> RpcResult<Bytes> {
         let block = self.inner.provider.block_by_id(block_id).to_rpc_result()?;

From 2801e686f1e5ab42f3190c20d1a002c4854fb8cf Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Thu, 24 Aug 2023 16:15:28 +0200
Subject: [PATCH 517/722] chore: add TxEip4844::validate_blob (#4345)

---
 crates/primitives/src/transaction/eip4844.rs | 104 ++++++++++---------
 1 file changed, 57 insertions(+), 47 deletions(-)

diff --git a/crates/primitives/src/transaction/eip4844.rs b/crates/primitives/src/transaction/eip4844.rs
index 3db0bd36a1c6..b569b3a4070d 100644
--- a/crates/primitives/src/transaction/eip4844.rs
+++ b/crates/primitives/src/transaction/eip4844.rs
@@ -105,6 +105,61 @@ impl TxEip4844 {
         }
     }
 
+    /// Verifies that the given blob data, commitments, and proofs are all valid for this
+    /// transaction.
+    ///
+    /// Takes as input the [KzgSettings], which should contain the parameters derived from the
+    /// KZG trusted setup.
+    ///
+    /// This ensures that the blob transaction payload has the same number of blob data elements,
+    /// commitments, and proofs. Each blob data element is verified against its commitment and
+    /// proof.
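+    ///
+    /// A versioned hash is the commitment's SHA-256 digest with the first byte overwritten by
+    /// the version byte (`0x01` for KZG, per EIP-4844). A minimal sketch of the mapping that
+    /// `kzg_to_versioned_hash` performs, assuming the `sha2` crate:
+    ///
+    /// ```ignore
+    /// use sha2::{Digest, Sha256};
+    ///
+    /// /// Maps a 48-byte KZG commitment to its EIP-4844 versioned hash.
+    /// fn versioned_hash(commitment: &[u8; 48]) -> [u8; 32] {
+    ///     let mut hash: [u8; 32] = Sha256::digest(commitment).into();
+    ///     hash[0] = 0x01; // VERSIONED_HASH_VERSION_KZG
+    ///     hash
+    /// }
+    /// ```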
+    ///
+    /// Returns `false` if any blob KZG proof in the response fails to verify, or if the versioned
+    /// hashes in the transaction do not match the actual commitment versioned hashes.
+    pub fn validate_blob(
+        &self,
+        sidecar: &BlobTransactionSidecar,
+        proof_settings: &KzgSettings,
+    ) -> Result<bool, BlobTransactionValidationError> {
+        // Ensure the versioned hashes and commitments have the same length
+        if self.blob_versioned_hashes.len() != sidecar.commitments.len() {
+            return Err(kzg::Error::MismatchLength(format!(
+                "There are {} versioned commitment hashes and {} commitments",
+                self.blob_versioned_hashes.len(),
+                sidecar.commitments.len()
+            ))
+            .into())
+        }
+
+        // zip and iterate, calculating versioned hashes
+        for (versioned_hash, commitment) in
+            self.blob_versioned_hashes.iter().zip(sidecar.commitments.iter())
+        {
+            // convert to KzgCommitment
+            let commitment = KzgCommitment::from(*commitment.deref());
+
+            // Calculate the versioned hash
+            //
+            // TODO: should this method distinguish the type of validation failure? For example
+            // whether a certain versioned hash does not match, or whether the blob proof
+            // validation failed?
+            let calculated_versioned_hash = kzg_to_versioned_hash(commitment);
+            if *versioned_hash != calculated_versioned_hash {
+                return Ok(false)
+            }
+        }
+
+        // Verify as a batch
+        KzgProof::verify_blob_kzg_proof_batch(
+            sidecar.blobs.as_slice(),
+            sidecar.commitments.as_slice(),
+            sidecar.proofs.as_slice(),
+            proof_settings,
+        )
+        .map_err(Into::into)
+    }
+
     /// Returns the total gas for all blobs in this transaction.
     #[inline]
     pub fn blob_gas(&self) -> u64 {
@@ -308,57 +363,12 @@ impl BlobTransaction {
     /// Verifies that the transaction's blob data, commitments, and proofs are all valid.
     ///
-    /// Takes as input the [KzgSettings], which should contain the parameters derived from the
-    /// KZG trusted setup.
-    ///
-    /// This ensures that the blob transaction payload has the same number of blob data elements,
-    /// commitments, and proofs. Each blob data element is verified against its commitment and
-    /// proof.
-    ///
-    /// Returns `false` if any blob KZG proof in the response fails to verify, or if the versioned
-    /// hashes in the transaction do not match the actual commitment versioned hashes.
+    /// See also [TxEip4844::validate_blob]
     pub fn validate(
         &self,
         proof_settings: &KzgSettings,
     ) -> Result<bool, BlobTransactionValidationError> {
-        let inner_tx = &self.transaction;
-
-        // Ensure the versioned hashes and commitments have the same length
-        if inner_tx.blob_versioned_hashes.len() != self.sidecar.commitments.len() {
-            return Err(kzg::Error::MismatchLength(format!(
-                "There are {} versioned commitment hashes and {} commitments",
-                inner_tx.blob_versioned_hashes.len(),
-                self.sidecar.commitments.len()
-            ))
-            .into())
-        }
-
-        // zip and iterate, calculating versioned hashes
-        for (versioned_hash, commitment) in
-            inner_tx.blob_versioned_hashes.iter().zip(self.sidecar.commitments.iter())
-        {
-            // convert to KzgCommitment
-            let commitment = KzgCommitment::from(*commitment.deref());
-
-            // Calculate the versioned hash
-            //
-            // TODO: should this method distinguish the type of validation failure? For example
-            // whether a certain versioned hash does not match, or whether the blob proof
-            // validation failed?
-            let calculated_versioned_hash = kzg_to_versioned_hash(commitment);
-            if *versioned_hash != calculated_versioned_hash {
-                return Ok(false)
-            }
-        }
-
-        // Verify as a batch
-        KzgProof::verify_blob_kzg_proof_batch(
-            self.sidecar.blobs.as_slice(),
-            self.sidecar.commitments.as_slice(),
-            self.sidecar.proofs.as_slice(),
-            proof_settings,
-        )
-        .map_err(Into::into)
+        self.transaction.validate_blob(&self.sidecar, proof_settings)
     }
 
     /// Splits the [BlobTransaction] into its [TransactionSigned] and [BlobTransactionSidecar]

From 20ea9c9237692732b18e0f6a82eb06d8ddbeff82 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Thu, 24 Aug 2023 16:56:54 +0200
Subject: [PATCH 518/722] chore: change field order (#4348)

---
 crates/rpc/rpc-types/src/eth/trace/parity.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crates/rpc/rpc-types/src/eth/trace/parity.rs b/crates/rpc/rpc-types/src/eth/trace/parity.rs
index 025c922d96c1..ecb73c232c5e 100644
--- a/crates/rpc/rpc-types/src/eth/trace/parity.rs
+++ b/crates/rpc/rpc-types/src/eth/trace/parity.rs
@@ -71,8 +71,8 @@ pub enum Delta {
 #[serde(rename_all = "camelCase")]
 pub struct AccountDiff {
     pub balance: Delta<U256>,
-    pub nonce: Delta<U64>,
     pub code: Delta<Bytes>,
+    pub nonce: Delta<U64>,
     pub storage: BTreeMap<H256, Delta<H256>>,
 }

From 97913049df99ae71817f8b09c37feee98482a770 Mon Sep 17 00:00:00 2001
From: Alexey Shekhirin
Date: Thu, 24 Aug 2023 15:52:19 +0100
Subject: [PATCH 519/722] feat(bin): db freelist metric (#4346)

---
 bin/reth/src/prometheus_exporter.rs          | 22 +++++++++++++-------
 crates/storage/libmdbx-rs/src/environment.rs | 14 +++++--------
 2 files changed, 19 insertions(+), 17 deletions(-)

diff --git a/bin/reth/src/prometheus_exporter.rs b/bin/reth/src/prometheus_exporter.rs
index 650c433522f3..bb612d53825f 100644
--- a/bin/reth/src/prometheus_exporter.rs
+++ b/bin/reth/src/prometheus_exporter.rs
@@ -4,12 +4,13 @@ use hyper::{
     service::{make_service_fn, service_fn},
     Body, Request, Response, Server,
 };
-use metrics::gauge;
+use metrics::{describe_gauge, gauge};
 use metrics_exporter_prometheus::{PrometheusBuilder, PrometheusHandle};
 use metrics_util::layers::{PrefixLayer, Stack};
 use reth_db::{database::Database, tables, DatabaseEnv};
-use reth_metrics::metrics::{describe_counter, Unit};
+use reth_metrics::metrics::Unit;
 use std::{convert::Infallible, net::SocketAddr, sync::Arc};
+use tracing::error;
 
 pub(crate) trait Hook: Fn() + Send + Sync {}
 impl<T: Fn() + Send + Sync> Hook for T {}
@@ -102,7 +103,13 @@
         }
 
         Ok::<(), eyre::Report>(())
-    });
+    }).map_err(|error| error!(?error, "Failed to read db table stats"));
+
+        if let Ok(freelist) =
+            db.freelist().map_err(|error| error!(?error, "Failed to read db.freelist"))
+        {
+            gauge!("db.freelist", freelist as f64);
+        }
     };
 
     // Clone `process` to move it into the hook and use the original `process` for describe below.
@@ -116,8 +123,10 @@
 
     // We describe the metrics after the recorder is installed, otherwise this information is not
     // registered
-    describe_counter!("db.table_size", Unit::Bytes, "The size of a database table (in bytes)");
-    describe_counter!("db.table_pages", "The number of database pages for a table");
+    describe_gauge!("db.table_size", Unit::Bytes, "The size of a database table (in bytes)");
+    describe_gauge!("db.table_pages", "The number of database pages for a table");
+    describe_gauge!("db.table_entries", "The number of entries for a table");
+    describe_gauge!("db.freelist", "The number of pages on the freelist");
     process.describe();
     describe_memory_stats();
 
@@ -127,7 +136,6 @@
 #[cfg(all(feature = "jemalloc", unix))]
 fn collect_memory_stats() {
     use jemalloc_ctl::{epoch, stats};
-    use tracing::error;
 
     if epoch::advance().map_err(|error| error!(?error, "Failed to advance jemalloc epoch")).is_err()
     {
@@ -173,8 +181,6 @@
 
 #[cfg(all(feature = "jemalloc", unix))]
 fn describe_memory_stats() {
-    use reth_metrics::metrics::describe_gauge;
-
     describe_gauge!(
         "jemalloc.active",
         Unit::Bytes,
diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs
index 4a37b941bd97..ff09838e3c3c 100644
--- a/crates/storage/libmdbx-rs/src/environment.rs
+++ b/crates/storage/libmdbx-rs/src/environment.rs
@@ -186,8 +186,8 @@ where
     ///
     /// Note:
     ///
-    /// * LMDB stores all the freelists in the designated database 0 in each environment, and the
-    ///   freelist count is stored at the beginning of the value as `libc::size_t` in the native
+    /// * MDBX stores all the freelists in the designated database 0 in each environment, and the
+    ///   freelist count is stored at the beginning of the value as `libc::uint32_t` in the native
     ///   byte order.
     ///
     /// * It will create a read transaction to traverse the freelist database.
@@ -199,16 +199,12 @@ where
 
         for result in cursor {
             let (_key, value) = result?;
-            if value.len() < mem::size_of::<usize>() {
+            if value.len() < size_of::<u32>() {
                 return Err(Error::Corrupted)
             }
 
-            let s = &value[..mem::size_of::<usize>()];
-            if cfg!(target_pointer_width = "64") {
-                freelist += NativeEndian::read_u64(s) as usize;
-            } else {
-                freelist += NativeEndian::read_u32(s) as usize;
-            }
+            let s = &value[..size_of::<u32>()];
+            freelist += NativeEndian::read_u32(s) as usize;
         }
 
         Ok(freelist)

From b65e808ebf215b3546b749188e8fe73f5656aa76 Mon Sep 17 00:00:00 2001
From: Alexey Shekhirin
Date: Thu, 24 Aug 2023 16:30:06 +0100
Subject: [PATCH 520/722] feat(book): node sizes (archive, full, pruned) (#4344)

---
 book/installation/installation.md |  7 ++++---
 book/run/pruning.md               | 28 ++++++++++++++++++++++++++++
 2 files changed, 32 insertions(+), 3 deletions(-)

diff --git a/book/installation/installation.md b/book/installation/installation.md
index a7fd324bf133..0dd779cb2768 100644
--- a/book/installation/installation.md
+++ b/book/installation/installation.md
@@ -33,10 +33,11 @@ Prior to purchasing an NVMe drive, it is advisable to research and determine whe
 
 ### Disk
 
-There are multiple types of disks to sync Reth, with varying size requirements, depending on the syncing mode:
+There are multiple types of disks to sync Reth, with varying size requirements, depending on the syncing mode.
+As of August 2023 at block number 17.9M: -* Archive Node: At least 2.1TB is required (as of July 2023, at block number 17.7M) -* Full Node: TBD +* Archive Node: At least 2.1TB is required +* Full Node: At least 1TB is required NVMe drives are recommended for the best performance, with SSDs being a cheaper alternative. HDDs are the cheapest option, but they will take the longest to sync, and are not recommended. diff --git a/book/run/pruning.md b/book/run/pruning.md index 81c55e8261ce..48382f000bda 100644 --- a/book/run/pruning.md +++ b/book/run/pruning.md @@ -43,6 +43,34 @@ To run Reth as a pruned node configured through a [custom configuration](./confi modify the `reth.toml` file and run Reth in the same way as archive node by following the steps from the previous chapter on [how to run on mainnet or official testnets](./mainnet.md). +## Size + +All numbers are as of August 2023 at block number 17.9M for mainnet. + +### Archive + +Archive node occupies at least 2.1TB. + +You can track the growth of Reth archive node size with our +[public Grafana dashboard](https://reth.paradigm.xyz/d/2k8BXz24k/reth?orgId=1&refresh=30s&viewPanel=52). + +### Full + +Full node occupies 1TB at the peak, and slowly goes down to 920GB. + +### Pruned + +Different parts take up different amounts of disk space. +If pruned fully, this is the total freed space you'll get, per part: + +| Part | Size | +|--------------------|-------| +| Sender Recovery | 70GB | +| Transaction Lookup | 140GB | +| Receipts | 240GB | +| Account History | 230GB | +| Storage History | 680GB | + ## RPC support As it was mentioned in the [pruning configuration chapter](./config.md#the-prune-section), there are several parts From fea753beb27fefe55c6b897518777fdd31347591 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 24 Aug 2023 17:31:39 +0200 Subject: [PATCH 521/722] fix: compare prev value against new value (#4347) --- .../revm/revm-inspectors/src/tracing/types.rs | 42 ++++++++++++++----- 1 file changed, 31 insertions(+), 11 deletions(-) diff --git a/crates/revm/revm-inspectors/src/tracing/types.rs b/crates/revm/revm-inspectors/src/tracing/types.rs index 6c877830541f..6a64738854d3 100644 --- a/crates/revm/revm-inspectors/src/tracing/types.rs +++ b/crates/revm/revm-inspectors/src/tracing/types.rs @@ -310,31 +310,51 @@ impl CallTraceNode { // iterate over all storage diffs for change in self.trace.steps.iter().filter_map(|s| s.storage_change) { let StorageChange { key, value, had_value } = change; - let value = H256::from(value); + let h256_value = H256::from(value); match acc.storage.entry(key.into()) { Entry::Vacant(entry) => { if let Some(had_value) = had_value { - entry.insert(Delta::Changed(ChangedType { - from: had_value.into(), - to: value, - })); + if value != had_value { + entry.insert(Delta::Changed(ChangedType { + from: had_value.into(), + to: h256_value, + })); + } } else { - entry.insert(Delta::Added(value)); + entry.insert(Delta::Added(h256_value)); } } Entry::Occupied(mut entry) => { let value = match entry.get() { - Delta::Unchanged => Delta::Added(value), + Delta::Unchanged => { + if let Some(had_value) = had_value { + if value != had_value { + Delta::Changed(ChangedType { + from: had_value.into(), + to: h256_value, + }) + } else { + Delta::Unchanged + } + } else { + Delta::Added(h256_value) + } + } Delta::Added(added) => { - if added == &value { + if added == &h256_value { Delta::Added(*added) } else { - Delta::Changed(ChangedType { from: *added, to: value }) + Delta::Changed(ChangedType { from: *added, to: 
h256_value }) } } - Delta::Removed(_) => Delta::Added(value), + Delta::Removed(_) => Delta::Added(h256_value), Delta::Changed(c) => { - Delta::Changed(ChangedType { from: c.from, to: value }) + if c.from == h256_value { + // remains unchanged if the value is the same + Delta::Unchanged + } else { + Delta::Changed(ChangedType { from: c.from, to: h256_value }) + } } }; entry.insert(value); From 4f118cbbe1ed7546bce31eec8d2da74a63a566a2 Mon Sep 17 00:00:00 2001 From: Bjerg Date: Thu, 24 Aug 2023 17:34:44 +0200 Subject: [PATCH 522/722] docs: correct `docker compose logs` command (#4189) --- book/installation/docker.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/book/installation/docker.md b/book/installation/docker.md index 44cd711567d5..422e692033ba 100644 --- a/book/installation/docker.md +++ b/book/installation/docker.md @@ -96,7 +96,7 @@ docker compose -f etc/docker-compose.yml -f etc/lighthouse.yml up -d To check if Reth is running correctly, run: ```bash -docker compose logs -f reth +docker compose -f etc/docker-compose.yml -f etc/lighthouse.yml logs -f reth ``` The default `docker-compose.yml` file will create three containers: @@ -124,4 +124,4 @@ docker exec -it reth bash **If Reth is running with Docker Compose, replace `reth` with `reth-reth-1` in the above command** -Refer to the [CLI docs](../cli/cli.md) to interact with Reth once inside the Reth container. \ No newline at end of file +Refer to the [CLI docs](../cli/cli.md) to interact with Reth once inside the Reth container. From f9b6a64ccb8721c3effb7633d97609e5bebfa7c1 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 24 Aug 2023 16:38:57 +0100 Subject: [PATCH 523/722] feat(grafana): DB freelist chart (#4349) --- etc/grafana/dashboards/overview.json | 195 ++++++++++++++++++++------- 1 file changed, 145 insertions(+), 50 deletions(-) diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 01e9e67e36ba..c230db758587 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -1036,13 +1036,108 @@ "title": "Overflow pages by table", "type": "table" }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The number of pages on the MDBX freelist", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 42 + }, + "id": 113, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": 
"sum(reth_db_freelist{instance=~\"$instance\"}) by (job)", + "legendFormat": "Pages ({{job}})", + "range": true, + "refId": "A" + } + ], + "title": "Freelist", + "type": "timeseries" + }, { "collapsed": false, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 42 + "y": 50 }, "id": 46, "panels": [], @@ -1111,7 +1206,7 @@ "h": 8, "w": 24, "x": 0, - "y": 43 + "y": 51 }, "id": 56, "options": { @@ -1184,7 +1279,7 @@ "h": 1, "w": 24, "x": 0, - "y": 51 + "y": 59 }, "id": 6, "panels": [], @@ -1256,7 +1351,7 @@ "h": 8, "w": 8, "x": 0, - "y": 52 + "y": 60 }, "id": 18, "options": { @@ -1350,7 +1445,7 @@ "h": 8, "w": 8, "x": 8, - "y": 52 + "y": 60 }, "id": 16, "options": { @@ -1470,7 +1565,7 @@ "h": 8, "w": 8, "x": 16, - "y": 52 + "y": 60 }, "id": 8, "options": { @@ -1549,7 +1644,7 @@ "h": 8, "w": 8, "x": 0, - "y": 60 + "y": 68 }, "id": 54, "options": { @@ -1770,7 +1865,7 @@ "h": 8, "w": 14, "x": 8, - "y": 60 + "y": 68 }, "id": 103, "options": { @@ -1807,7 +1902,7 @@ "h": 1, "w": 24, "x": 0, - "y": 68 + "y": 76 }, "id": 24, "panels": [], @@ -1903,7 +1998,7 @@ "h": 8, "w": 12, "x": 0, - "y": 69 + "y": 77 }, "id": 26, "options": { @@ -2035,7 +2130,7 @@ "h": 8, "w": 12, "x": 12, - "y": 69 + "y": 77 }, "id": 33, "options": { @@ -2153,7 +2248,7 @@ "h": 8, "w": 12, "x": 0, - "y": 77 + "y": 85 }, "id": 36, "options": { @@ -2202,7 +2297,7 @@ "h": 1, "w": 24, "x": 0, - "y": 85 + "y": 93 }, "id": 32, "panels": [], @@ -2308,7 +2403,7 @@ "h": 8, "w": 12, "x": 0, - "y": 86 + "y": 94 }, "id": 30, "options": { @@ -2472,7 +2567,7 @@ "h": 8, "w": 12, "x": 12, - "y": 86 + "y": 94 }, "id": 28, "options": { @@ -2590,7 +2685,7 @@ "h": 8, "w": 12, "x": 0, - "y": 94 + "y": 102 }, "id": 35, "options": { @@ -2714,7 +2809,7 @@ "h": 8, "w": 12, "x": 12, - "y": 94 + "y": 102 }, "id": 73, "options": { @@ -2764,7 +2859,7 @@ "h": 1, "w": 24, "x": 0, - "y": 102 + "y": 110 }, "id": 89, "panels": [], @@ -2837,7 +2932,7 @@ "h": 8, "w": 12, "x": 0, - "y": 103 + "y": 111 }, "id": 91, "options": { @@ -2955,7 +3050,7 @@ "h": 8, "w": 12, "x": 12, - "y": 103 + "y": 111 }, "id": 92, "options": { @@ -3091,7 +3186,7 @@ "h": 8, "w": 12, "x": 0, - "y": 111 + "y": 119 }, "id": 102, "options": { @@ -3211,7 +3306,7 @@ "h": 8, "w": 12, "x": 12, - "y": 111 + "y": 119 }, "id": 94, "options": { @@ -3336,7 +3431,7 @@ "h": 8, "w": 12, "x": 0, - "y": 119 + "y": 127 }, "id": 93, "options": { @@ -3480,7 +3575,7 @@ "h": 8, "w": 12, "x": 12, - "y": 119 + "y": 127 }, "id": 95, "options": { @@ -3543,7 +3638,7 @@ "h": 1, "w": 24, "x": 0, - "y": 127 + "y": 135 }, "id": 79, "panels": [], @@ -3615,7 +3710,7 @@ "h": 8, "w": 12, "x": 0, - "y": 128 + "y": 136 }, "id": 74, "options": { @@ -3710,7 +3805,7 @@ "h": 8, "w": 12, "x": 12, - "y": 128 + "y": 136 }, "id": 80, "options": { @@ -3805,7 +3900,7 @@ "h": 8, "w": 12, "x": 0, - "y": 136 + "y": 144 }, "id": 81, "options": { @@ -3843,7 +3938,7 @@ "h": 1, "w": 24, "x": 0, - "y": 144 + "y": 152 }, "id": 87, "panels": [], @@ -3915,7 +4010,7 @@ "h": 8, "w": 12, "x": 0, - "y": 145 + "y": 153 }, "id": 83, "options": { @@ -4009,7 +4104,7 @@ "h": 8, "w": 12, "x": 12, - "y": 145 + "y": 153 }, "id": 84, "options": { @@ -4115,7 +4210,7 @@ "h": 8, "w": 12, "x": 0, - "y": 153 + "y": 161 }, "id": 85, "options": { @@ -4152,7 +4247,7 @@ "h": 1, "w": 24, "x": 0, - "y": 161 + "y": 169 }, "id": 68, "panels": [], @@ -4224,7 +4319,7 @@ "h": 8, "w": 12, "x": 0, - "y": 162 + "y": 170 }, "id": 60, "options": { @@ -4318,7 +4413,7 @@ "h": 8, "w": 12, "x": 12, - "y": 162 + "y": 170 }, "id": 62, "options": { @@ -4412,7 +4507,7 @@ "h": 8, "w": 12, "x": 0, 
- "y": 170 + "y": 178 }, "id": 64, "options": { @@ -4449,7 +4544,7 @@ "h": 1, "w": 24, "x": 0, - "y": 178 + "y": 186 }, "id": 97, "panels": [], @@ -4519,7 +4614,7 @@ "h": 8, "w": 12, "x": 0, - "y": 179 + "y": 187 }, "id": 98, "options": { @@ -4680,7 +4775,7 @@ "h": 8, "w": 12, "x": 12, - "y": 179 + "y": 187 }, "id": 101, "options": { @@ -4776,7 +4871,7 @@ "h": 8, "w": 12, "x": 0, - "y": 187 + "y": 195 }, "id": 99, "options": { @@ -4872,7 +4967,7 @@ "h": 8, "w": 12, "x": 12, - "y": 187 + "y": 195 }, "id": 100, "options": { @@ -4910,7 +5005,7 @@ "h": 1, "w": 24, "x": 0, - "y": 195 + "y": 203 }, "id": 105, "panels": [], @@ -4981,7 +5076,7 @@ "h": 8, "w": 12, "x": 0, - "y": 196 + "y": 204 }, "id": 106, "options": { @@ -5077,7 +5172,7 @@ "h": 8, "w": 12, "x": 12, - "y": 196 + "y": 204 }, "id": 107, "options": { @@ -5115,7 +5210,7 @@ "h": 1, "w": 24, "x": 0, - "y": 204 + "y": 212 }, "id": 108, "panels": [], @@ -5138,7 +5233,7 @@ "h": 8, "w": 12, "x": 0, - "y": 205 + "y": 213 }, "hiddenSeries": false, "id": 109, @@ -5226,7 +5321,7 @@ "h": 8, "w": 12, "x": 12, - "y": 205 + "y": 213 }, "hiddenSeries": false, "id": 110, @@ -5323,7 +5418,7 @@ "h": 8, "w": 12, "x": 0, - "y": 213 + "y": 221 }, "id": 111, "maxDataPoints": 25, @@ -5412,7 +5507,7 @@ "h": 8, "w": 12, "x": 12, - "y": 213 + "y": 221 }, "id": 112, "maxDataPoints": 25, @@ -5516,6 +5611,6 @@ "timezone": "", "title": "reth", "uid": "2k8BXz24x", - "version": 5, + "version": 6, "weekStart": "" } \ No newline at end of file From 0beaf85f4bfbed26956fae488268db3ec626c9ba Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 24 Aug 2023 17:44:25 +0200 Subject: [PATCH 524/722] feat: remove finalized blobs (#4342) --- crates/transaction-pool/src/blobstore/mod.rs | 2 +- .../transaction-pool/src/blobstore/tracker.rs | 11 ++-- crates/transaction-pool/src/maintain.rs | 52 ++++++++++++++++++- 3 files changed, 56 insertions(+), 9 deletions(-) diff --git a/crates/transaction-pool/src/blobstore/mod.rs b/crates/transaction-pool/src/blobstore/mod.rs index bf0db1046af3..786bbcd4f4d8 100644 --- a/crates/transaction-pool/src/blobstore/mod.rs +++ b/crates/transaction-pool/src/blobstore/mod.rs @@ -4,7 +4,7 @@ pub use mem::InMemoryBlobStore; pub use noop::NoopBlobStore; use reth_primitives::{BlobTransactionSidecar, H256}; use std::fmt; -pub use tracker::BlobStoreCanonTracker; +pub use tracker::{BlobStoreCanonTracker, BlobStoreUpdates}; mod mem; mod noop; diff --git a/crates/transaction-pool/src/blobstore/tracker.rs b/crates/transaction-pool/src/blobstore/tracker.rs index 0d1f783331a7..20461e112657 100644 --- a/crates/transaction-pool/src/blobstore/tracker.rs +++ b/crates/transaction-pool/src/blobstore/tracker.rs @@ -13,7 +13,7 @@ pub struct BlobStoreCanonTracker { impl BlobStoreCanonTracker { /// Adds a block to the blob store maintenance. - pub(crate) fn add_block( + pub fn add_block( &mut self, block_number: BlockNumber, blob_txs: impl IntoIterator, @@ -22,7 +22,7 @@ impl BlobStoreCanonTracker { } /// Adds all blocks to the tracked list of blocks. - pub(crate) fn add_blocks( + pub fn add_blocks( &mut self, blocks: impl IntoIterator)>, ) { @@ -32,7 +32,7 @@ impl BlobStoreCanonTracker { } /// Adds all blob transactions from the given chain to the tracker. 
-    pub(crate) fn add_new_chain_blocks(&mut self, blocks: &ChainBlocks<'_>) {
+    pub fn add_new_chain_blocks(&mut self, blocks: &ChainBlocks<'_>) {
         let blob_txs = blocks.iter().map(|(num, blocks)| {
             let iter =
                 blocks.body.iter().filter(|tx| tx.transaction.is_eip4844()).map(|tx| tx.hash);
@@ -42,8 +42,7 @@ impl BlobStoreCanonTracker {
     }
 
     /// Invoked when a block is finalized.
-    #[allow(unused)]
-    pub(crate) fn on_finalized_block(&mut self, number: BlockNumber) -> BlobStoreUpdates {
+    pub fn on_finalized_block(&mut self, number: BlockNumber) -> BlobStoreUpdates {
         let mut finalized = Vec::new();
         while let Some(entry) = self.blob_txs_in_blocks.first_entry() {
             if *entry.key() <= number {
@@ -63,7 +62,7 @@ impl BlobStoreCanonTracker {
 
 /// Updates that should be applied to the blob store.
 #[derive(Debug, Eq, PartialEq)]
-pub(crate) enum BlobStoreUpdates {
+pub enum BlobStoreUpdates {
     /// No updates.
     None,
     /// Delete the given finalized transactions from the blob store.
diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs
index 2e9936fe95b2..fd3c83a4854c 100644
--- a/crates/transaction-pool/src/maintain.rs
+++ b/crates/transaction-pool/src/maintain.rs
@@ -1,7 +1,7 @@
 //! Support for maintaining the state of the transaction pool
 
 use crate::{
-    blobstore::BlobStoreCanonTracker,
+    blobstore::{BlobStoreCanonTracker, BlobStoreUpdates},
     metrics::MaintainPoolMetrics,
     traits::{CanonicalStateUpdate, ChangedAccount, TransactionPoolExt},
     BlockInfo, TransactionPool,
@@ -10,7 +10,9 @@ use futures_util::{
     future::{BoxFuture, Fuse, FusedFuture},
     FutureExt, Stream, StreamExt,
 };
-use reth_primitives::{Address, BlockHash, BlockNumberOrTag, FromRecoveredTransaction};
+use reth_primitives::{
+    Address, BlockHash, BlockNumber, BlockNumberOrTag, FromRecoveredTransaction,
+};
 use reth_provider::{
     BlockReaderIdExt, CanonStateNotification, ChainSpecProvider, PostState, StateProviderFactory,
 };
@@ -97,6 +99,10 @@ pub async fn maintain_transaction_pool(
     // keeps track of mined blob transactions so we can clean finalized transactions
     let mut blob_store_tracker = BlobStoreCanonTracker::default();
 
+    // keeps track of the latest finalized block
+    let mut last_finalized_block =
+        FinalizedBlockTracker::new(client.finalized_block_number().ok().flatten());
+
     // keeps track of any dirty accounts that we know are out of sync with the pool
     let mut dirty_addresses = HashSet::new();
 
@@ -154,6 +160,19 @@ pub async fn maintain_transaction_pool(
             task_spawner.spawn_blocking(fut);
         }
 
+        // check if we have a new finalized block
+        if let Some(finalized) =
+            last_finalized_block.update(client.finalized_block_number().ok().flatten())
+        {
+            match blob_store_tracker.on_finalized_block(finalized) {
+                BlobStoreUpdates::None => {}
+                BlobStoreUpdates::Finalized(blobs) => {
+                    // remove all finalized blobs from the blob store
+                    pool.delete_blobs(blobs);
+                }
+            }
+        }
+
         // outcomes of the futures we are waiting on
         let mut event = None;
         let mut reloaded = None;
@@ -360,6 +379,35 @@ pub async fn maintain_transaction_pool(
     }
 }
 
+struct FinalizedBlockTracker {
+    last_finalized_block: Option<BlockNumber>,
+}
+
+impl FinalizedBlockTracker {
+    fn new(last_finalized_block: Option<BlockNumber>) -> Self {
+        Self { last_finalized_block }
+    }
+
+    /// Updates the tracked finalized block and returns the new finalized block if it changed
+    fn update(&mut self, finalized_block: Option<BlockNumber>) -> Option<BlockNumber> {
+        match (self.last_finalized_block, finalized_block) {
+            (Some(last), Some(finalized)) => {
+                self.last_finalized_block = Some(finalized);
+                if last < finalized {
+                    Some(finalized)
+                } else {
+                    None
+                }
+            }
+            (None, Some(finalized)) => {
+                self.last_finalized_block = Some(finalized);
+                Some(finalized)
+            }
+            _ => None,
+        }
+    }
+}
+
 /// Keeps track of the pool's state, whether the accounts in the pool are in sync with the actual
 /// state.
 #[derive(Debug, Eq, PartialEq)]
From f0346c697e0a26bd8c71dad9b37caa7973b23cd4 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Thu, 24 Aug 2023 19:27:51 +0200
Subject: [PATCH 525/722] feat: add eth extension trait for EthValidator
 (#4343)

---
 crates/rpc/rpc/src/eth/error.rs             |  4 ++
 crates/transaction-pool/src/error.rs        |  8 ++++
 crates/transaction-pool/src/lib.rs          |  7 ++--
 crates/transaction-pool/src/traits.rs       | 19 +++++++++-
 crates/transaction-pool/src/validate/eth.rs | 42 +++++++++++++++++----
 crates/transaction-pool/src/validate/mod.rs | 11 ++++++
 6 files changed, 79 insertions(+), 12 deletions(-)

diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs
index b3cb1f6302ef..044f5621266c 100644
--- a/crates/rpc/rpc/src/eth/error.rs
+++ b/crates/rpc/rpc/src/eth/error.rs
@@ -470,6 +470,9 @@ pub enum RpcPoolError {
     /// Custom pool error
     #[error("{0:?}")]
     PoolTransactionError(Box<dyn PoolTransactionError>),
+    /// Unable to find the blob for an EIP4844 transaction
+    #[error("blob not found for EIP4844 transaction")]
+    MissingEip4844Blob,
     #[error(transparent)]
     Other(Box<dyn std::error::Error + Send + Sync>),
 }
@@ -508,6 +511,7 @@ impl From<InvalidPoolTransactionError> for RpcPoolError {
             InvalidPoolTransactionError::OversizedData(_, _) => RpcPoolError::OversizedData,
             InvalidPoolTransactionError::Underpriced => RpcPoolError::Underpriced,
             InvalidPoolTransactionError::Other(err) => RpcPoolError::PoolTransactionError(err),
+            InvalidPoolTransactionError::MissingEip4844Blob => RpcPoolError::MissingEip4844Blob,
         }
     }
 }
diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs
index 12ccfbebe5ce..fd2ca0bf7e46 100644
--- a/crates/transaction-pool/src/error.rs
+++ b/crates/transaction-pool/src/error.rs
@@ -138,6 +138,9 @@ pub enum InvalidPoolTransactionError {
     /// Thrown if the transaction's fee is below the minimum fee
     #[error("transaction underpriced")]
     Underpriced,
+    /// Thrown if we're unable to find the blob for a transaction that was previously extracted
+    #[error("blob not found for EIP4844 transaction")]
+    MissingEip4844Blob,
     /// Any other error that occurred while inserting/validating that is transaction specific
     #[error("{0:?}")]
     Other(Box<dyn PoolTransactionError>),
@@ -195,6 +198,11 @@ impl InvalidPoolTransactionError {
                 false
             }
             InvalidPoolTransactionError::Other(err) => err.is_bad_transaction(),
+            InvalidPoolTransactionError::MissingEip4844Blob => {
+                // this is only reachable when blob transactions are reinjected and we're unable to
+                // find the previously extracted blob
+                false
+            }
         }
     }
 }
diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs
index 570277656380..42f19157a822 100644
--- a/crates/transaction-pool/src/lib.rs
+++ b/crates/transaction-pool/src/lib.rs
@@ -180,9 +180,10 @@ pub use crate::{
     },
     traits::{
         AllPoolTransactions, BestTransactions, BlockInfo, CanonicalStateUpdate, ChangedAccount,
-        EthPooledTransaction, GetPooledTransactionLimit, NewTransactionEvent,
-        PendingTransactionListenerKind, PoolSize, PoolTransaction, PropagateKind,
-        PropagatedTransactions, TransactionOrigin, TransactionPool, TransactionPoolExt,
+        EthBlobTransactionSidecar, EthPoolTransaction, EthPooledTransaction,
+        GetPooledTransactionLimit, NewTransactionEvent, PendingTransactionListenerKind, PoolSize,
+        PoolTransaction, PropagateKind, PropagatedTransactions, TransactionOrigin, TransactionPool,
+        TransactionPoolExt,
     },
     validate::{
         EthTransactionValidator, TransactionValidationOutcome, TransactionValidationTaskExecutor,
diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs
index 2da408b77c96..7540a6340f51 100644
--- a/crates/transaction-pool/src/traits.rs
+++ b/crates/transaction-pool/src/traits.rs
@@ -640,6 +640,13 @@ pub trait PoolTransaction:
     fn chain_id(&self) -> Option<u64>;
 }
 
+/// An extension trait that provides additional interfaces for the
+/// [EthTransactionValidator](crate::EthTransactionValidator).
+pub trait EthPoolTransaction: PoolTransaction {
+    /// Extracts the blob sidecar from the transaction.
+    fn take_blob(&mut self) -> EthBlobTransactionSidecar;
+}
+
 /// The default [PoolTransaction] for the [Pool](crate::Pool) for Ethereum.
 ///
 /// This type is essentially a wrapper around [TransactionSignedEcRecovered] with additional fields
@@ -659,7 +666,7 @@ pub struct EthPooledTransaction {
 
 /// Represents the blob sidecar of the [EthPooledTransaction].
 #[derive(Debug, Clone, PartialEq, Eq)]
-pub(crate) enum EthBlobTransactionSidecar {
+pub enum EthBlobTransactionSidecar {
     /// This transaction does not have a blob sidecar
     None,
     /// This transaction has a blob sidecar (EIP-4844) but it is missing
@@ -812,6 +819,16 @@ impl PoolTransaction for EthPooledTransaction {
     }
 }
 
+impl EthPoolTransaction for EthPooledTransaction {
+    fn take_blob(&mut self) -> EthBlobTransactionSidecar {
+        if self.is_eip4844() {
+            std::mem::replace(&mut self.blob_sidecar, EthBlobTransactionSidecar::Missing)
+        } else {
+            EthBlobTransactionSidecar::None
+        }
+    }
+}
+
 impl FromRecoveredTransaction for EthPooledTransaction {
     fn from_recovered_transaction(tx: TransactionSignedEcRecovered) -> Self {
         EthPooledTransaction::new(tx)
diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs
index f5899b4f8b02..a61fec1ec7f5 100644
--- a/crates/transaction-pool/src/validate/eth.rs
+++ b/crates/transaction-pool/src/validate/eth.rs
@@ -3,9 +3,10 @@
 use crate::{
     blobstore::BlobStore,
     error::InvalidPoolTransactionError,
-    traits::{PoolTransaction, TransactionOrigin},
+    traits::TransactionOrigin,
     validate::{ValidTransaction, ValidationTask, MAX_INIT_CODE_SIZE, TX_MAX_SIZE},
-    TransactionValidationOutcome, TransactionValidationTaskExecutor, TransactionValidator,
+    EthBlobTransactionSidecar, EthPoolTransaction, TransactionValidationOutcome,
+    TransactionValidationTaskExecutor, TransactionValidator,
 };
 use reth_primitives::{
     constants::{eip4844::KZG_TRUSTED_SETUP, ETHEREUM_BLOCK_GAS_LIMIT},
@@ -32,7 +33,7 @@ pub struct EthTransactionValidator<Client, Tx> {
 impl<Client, Tx> TransactionValidator for EthTransactionValidator<Client, Tx>
 where
     Client: StateProviderFactory,
-    Tx: PoolTransaction,
+    Tx: EthPoolTransaction,
 {
     type Transaction = Tx;
 
@@ -57,7 +58,6 @@ pub(crate) struct EthTransactionValidatorInner<Client, Tx> {
     /// This type fetches account info from the db
     client: Client,
     /// Blobstore used for fetching re-injected blob transactions.
-    #[allow(unused)]
     blob_store: Box<dyn BlobStore>,
     /// tracks activated forks relevant for transaction validation
     fork_tracker: ForkTracker,
@@ -93,14 +93,14 @@ impl<Client, Tx> EthTransactionValidatorInner<Client, Tx> {
 impl<Client, Tx> TransactionValidator for EthTransactionValidatorInner<Client, Tx>
 where
     Client: StateProviderFactory,
-    Tx: PoolTransaction,
+    Tx: EthPoolTransaction,
 {
     type Transaction = Tx;
 
     async fn validate_transaction(
         &self,
         origin: TransactionOrigin,
-        transaction: Self::Transaction,
+        mut transaction: Self::Transaction,
     ) -> TransactionValidationOutcome<Self::Transaction> {
         // Checks for tx_type
         match transaction.tx_type() {
@@ -198,6 +198,8 @@ where
             }
         }
 
+        let mut blob_sidecar = None;
+
         // blob tx checks
         if transaction.is_eip4844() {
             // Cancun fork is required for blob txs
@@ -207,7 +209,31 @@ where
                     InvalidTransactionError::TxTypeNotSupported.into(),
                 )
             }
-            // TODO add checks for blob tx
+
+            // extract the blob from the transaction
+            match transaction.take_blob() {
+                EthBlobTransactionSidecar::None => {
+                    // this should not happen
+                    return TransactionValidationOutcome::Invalid(
+                        transaction,
+                        InvalidTransactionError::TxTypeNotSupported.into(),
+                    )
+                }
+                EthBlobTransactionSidecar::Missing => {
+                    if let Ok(Some(_)) = self.blob_store.get(*transaction.hash()) {
+                        // validated transaction is already in the store
+                    } else {
+                        return TransactionValidationOutcome::Invalid(
+                            transaction,
+                            InvalidPoolTransactionError::MissingEip4844Blob,
+                        )
+                    }
+                }
+                EthBlobTransactionSidecar::Present(blob) => {
+                    //TODO(mattsse): verify the blob
+                    blob_sidecar = Some(blob);
+                }
+            }
         }
 
         let account = match self
@@ -255,7 +281,7 @@ where
         TransactionValidationOutcome::Valid {
             balance: account.balance,
             state_nonce: account.nonce,
-            transaction: ValidTransaction::Valid(transaction),
+            transaction: ValidTransaction::new(transaction, blob_sidecar),
             // by this point assume all external transactions should be propagated
             propagate: match origin {
                 TransactionOrigin::External => true,
diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs
index 1e5cc78f89ad..d99100961a87 100644
--- a/crates/transaction-pool/src/validate/mod.rs
+++ b/crates/transaction-pool/src/validate/mod.rs
@@ -86,6 +86,17 @@ pub enum ValidTransaction<T> {
     },
 }
 
+impl<T> ValidTransaction<T> {
+    /// Creates a new valid transaction with an optional sidecar.
+    pub fn new(transaction: T, sidecar: Option<BlobTransactionSidecar>) -> Self {
+        if let Some(sidecar) = sidecar {
+            Self::ValidWithSidecar { transaction, sidecar }
+        } else {
+            Self::Valid(transaction)
+        }
+    }
+}
+
 impl<T: PoolTransaction> ValidTransaction<T> {
     #[inline]
     pub(crate) fn transaction(&self) -> &T {
From aad1895cc77d04ae0e59f6cb7b56175fc4ac4d02 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Thu, 24 Aug 2023 19:28:05 +0200
Subject: [PATCH 526/722] chore: collect full discarded tx (#4353)

---
 crates/transaction-pool/src/pool/mod.rs    | 10 +++++-----
 crates/transaction-pool/src/pool/txpool.rs |  7 ++++---
 2 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs
index e2bbd4ffe255..4efb0234f066 100644
--- a/crates/transaction-pool/src/pool/mod.rs
+++ b/crates/transaction-pool/src/pool/mod.rs
@@ -351,7 +351,7 @@ where
         let mut listener = self.event_listener.write();
 
         promoted.iter().for_each(|tx| listener.pending(tx.hash(), None));
-        discarded.iter().for_each(|tx| listener.discarded(tx));
+        discarded.iter().for_each(|tx| listener.discarded(tx.hash()));
     }
 
     /// Add a single validated transaction into the pool.
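This patch switches the pool's `discarded` collections from plain hashes to full `Arc<ValidPoolTransaction<T>>` handles, while event listeners keep receiving hashes — hence `listener.discarded(tx.hash())` in the hunk above. A minimal self-contained sketch of that pattern, with illustrative stand-in types (`ValidTx` and `UpdateOutcome` here are not reth's real definitions):

```rust
use std::sync::Arc;

/// Illustrative stand-in for a validated pool transaction.
struct ValidTx {
    hash: [u8; 32],
}

/// Outcome of a pool update. Carrying the full `Arc` instead of just the
/// hash lets callers act on the discarded transaction itself (for example,
/// drop its blob sidecar), and cloning the `Arc` stays cheap.
struct UpdateOutcome {
    discarded: Vec<Arc<ValidTx>>,
}

/// Listeners only need hashes, so the hash is extracted at the notification
/// site, mirroring `listener.discarded(tx.hash())` in the diff.
fn fire_discard_events(outcome: &UpdateOutcome, mut on_discarded: impl FnMut([u8; 32])) {
    outcome.discarded.iter().for_each(|tx| on_discarded(tx.hash));
}

fn main() {
    let outcome = UpdateOutcome { discarded: vec![Arc::new(ValidTx { hash: [0; 32] })] };
    fire_discard_events(&outcome, |hash| println!("discarded tx {:02x?}", &hash[..4]));
}
```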
@@ -568,7 +568,7 @@ where
         mined.iter().for_each(|tx| listener.mined(tx, block_hash));
 
         promoted.iter().for_each(|tx| listener.pending(tx.hash(), None));
-        discarded.iter().for_each(|tx| listener.discarded(tx));
+        discarded.iter().for_each(|tx| listener.discarded(tx.hash()));
     }
 
     /// Fire events for the newly added transaction if there are any.
@@ -581,7 +581,7 @@ where
                 listener.pending(transaction.hash(), replaced.clone());
 
                 promoted.iter().for_each(|tx| listener.pending(tx.hash(), None));
-                discarded.iter().for_each(|tx| listener.discarded(tx));
+                discarded.iter().for_each(|tx| listener.discarded(tx.hash()));
             }
             AddedTransaction::Parked { transaction, replaced, .. } => {
                 listener.queued(transaction.hash());
@@ -755,7 +755,7 @@ pub struct AddedPendingTransaction<T: PoolTransaction> {
     /// transactions promoted to the pending queue
     promoted: Vec<Arc<ValidPoolTransaction<T>>>,
    /// transactions that failed and became discarded
-    discarded: Vec<TxHash>,
+    discarded: Vec<Arc<ValidPoolTransaction<T>>>,
 }
 
 impl<T: PoolTransaction> AddedPendingTransaction<T> {
@@ -871,7 +871,7 @@ pub(crate) struct OnNewCanonicalStateOutcome<T: PoolTransaction> {
     /// Transactions promoted to the ready queue.
     pub(crate) promoted: Vec<Arc<ValidPoolTransaction<T>>>,
     /// transactions that were discarded during the update
-    pub(crate) discarded: Vec<TxHash>,
+    pub(crate) discarded: Vec<Arc<ValidPoolTransaction<T>>>,
 }
 
 impl<T: PoolTransaction> OnNewCanonicalStateOutcome<T> {
diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs
index 8f8ba2de0f3d..29a6fa9e9e6c 100644
--- a/crates/transaction-pool/src/pool/txpool.rs
+++ b/crates/transaction-pool/src/pool/txpool.rs
@@ -416,8 +416,9 @@ impl<T: TransactionOrdering> TxPool<T> {
             match destination {
                 Destination::Discard => {
                     // remove the transaction from the pool and subpool
-                    self.prune_transaction_by_hash(&hash);
-                    outcome.discarded.push(hash);
+                    if let Some(tx) = self.prune_transaction_by_hash(&hash) {
+                        outcome.discarded.push(tx);
+                    }
                     self.metrics.removed_transactions.increment(1);
                 }
                 Destination::Pool(move_to) => {
@@ -1336,7 +1337,7 @@ pub(crate) struct UpdateOutcome<T: PoolTransaction> {
     /// transactions promoted to the pending pool
     pub(crate) promoted: Vec<Arc<ValidPoolTransaction<T>>>,
     /// transactions that failed and were discarded
-    pub(crate) discarded: Vec<TxHash>,
+    pub(crate) discarded: Vec<Arc<ValidPoolTransaction<T>>>,
 }
 
 impl<T: PoolTransaction> Default for UpdateOutcome<T> {
From 50ba82803d40b70e4fd9fc2c0510e2d5db072d74 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Fri, 25 Aug 2023 04:51:23 +0200
Subject: [PATCH 527/722] feat: add validate-blob to extension trait (#4355)

---
 crates/primitives/src/lib.rs                 | 10 +++----
 crates/primitives/src/transaction/eip4844.rs | 30 ++++++++++++++------
 crates/primitives/src/transaction/mod.rs     |  4 ++-
 crates/transaction-pool/src/traits.rs        | 28 +++++++++++++++---
 4 files changed, 53 insertions(+), 19 deletions(-)

diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs
index 22435f436b3a..ffbc27a1718c 100644
--- a/crates/primitives/src/lib.rs
+++ b/crates/primitives/src/lib.rs
@@ -91,11 +91,11 @@ pub use storage::StorageEntry;
 pub use transaction::{
     util::secp256k1::{public_key_to_address, recover_signer, sign_message},
     AccessList, AccessListItem, AccessListWithGasUsed, BlobTransaction, BlobTransactionSidecar,
-    FromRecoveredPooledTransaction, FromRecoveredTransaction, IntoRecoveredTransaction,
-    InvalidTransactionError, PooledTransactionsElement, PooledTransactionsElementEcRecovered,
-    Signature, Transaction, TransactionKind, TransactionMeta, TransactionSigned,
-    TransactionSignedEcRecovered, TransactionSignedNoHash, TxEip1559, TxEip2930, TxEip4844,
-    TxLegacy, TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID,
+    BlobTransactionValidationError, FromRecoveredPooledTransaction, FromRecoveredTransaction,
+    IntoRecoveredTransaction, InvalidTransactionError, PooledTransactionsElement,
+    PooledTransactionsElementEcRecovered, Signature, Transaction, TransactionKind, TransactionMeta,
+    TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxEip1559, TxEip2930,
+    TxEip4844, TxLegacy, TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID,
     LEGACY_TX_TYPE_ID,
 };
 pub use withdrawal::Withdrawal;
diff --git a/crates/primitives/src/transaction/eip4844.rs b/crates/primitives/src/transaction/eip4844.rs
index b569b3a4070d..1278e75e3b8c 100644
--- a/crates/primitives/src/transaction/eip4844.rs
+++ b/crates/primitives/src/transaction/eip4844.rs
@@ -115,13 +115,14 @@ impl TxEip4844 {
     /// commitments, and proofs. Each blob data element is verified against its commitment and
     /// proof.
     ///
-    /// Returns `false` if any blob KZG proof in the response fails to verify, or if the versioned
-    /// hashes in the transaction do not match the actual commitment versioned hashes.
+    /// Returns [BlobTransactionValidationError::InvalidProof] if any blob KZG proof in the response
+    /// fails to verify, or if the versioned hashes in the transaction do not match the actual
+    /// commitment versioned hashes.
     pub fn validate_blob(
         &self,
         sidecar: &BlobTransactionSidecar,
         proof_settings: &KzgSettings,
-    ) -> Result<bool, BlobTransactionValidationError> {
+    ) -> Result<(), BlobTransactionValidationError> {
         // Ensure the versioned hashes and commitments have the same length
         if self.blob_versioned_hashes.len() != sidecar.commitments.len() {
             return Err(kzg::Error::MismatchLength(format!(
@@ -146,18 +147,24 @@ impl TxEip4844 {
             // validation failed?
             let calculated_versioned_hash = kzg_to_versioned_hash(commitment);
             if *versioned_hash != calculated_versioned_hash {
-                return Ok(false)
+                return Err(BlobTransactionValidationError::InvalidProof)
             }
         }
 
         // Verify as a batch
-        KzgProof::verify_blob_kzg_proof_batch(
+        let res = KzgProof::verify_blob_kzg_proof_batch(
             sidecar.blobs.as_slice(),
             sidecar.commitments.as_slice(),
             sidecar.proofs.as_slice(),
             proof_settings,
         )
-        .map_err(Into::into)
+        .map_err(BlobTransactionValidationError::KZGError)?;
+
+        if res {
+            Ok(())
+        } else {
+            Err(BlobTransactionValidationError::InvalidProof)
+        }
     }
 
     /// Returns the total gas for all blobs in this transaction.
@@ -307,12 +314,17 @@ impl TxEip4844 {
 }
 
 /// An error that can occur when validating a [BlobTransaction].
-#[derive(Debug)]
+#[derive(Debug, thiserror::Error)]
 pub enum BlobTransactionValidationError {
+    /// Proof validation failed.
+ #[error("invalid kzg proof")] + InvalidProof, /// An error returned by the [kzg] library + #[error("kzg error: {0:?}")] KZGError(kzg::Error), /// The inner transaction is not a blob transaction - NotBlobTransaction(TxType), + #[error("unable to verify proof for non blob transaction: {0}")] + NotBlobTransaction(u8), } impl From for BlobTransactionValidationError { @@ -367,7 +379,7 @@ impl BlobTransaction { pub fn validate( &self, proof_settings: &KzgSettings, - ) -> Result { + ) -> Result<(), BlobTransactionValidationError> { self.transaction.validate_blob(&self.sidecar, proof_settings) } diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 97afe6c79a81..bce71de9a0f0 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -20,7 +20,9 @@ pub use tx_type::{ pub use eip1559::TxEip1559; pub use eip2930::TxEip2930; -pub use eip4844::{BlobTransaction, BlobTransactionSidecar, TxEip4844}; +pub use eip4844::{ + BlobTransaction, BlobTransactionSidecar, BlobTransactionValidationError, TxEip4844, +}; pub use legacy::TxLegacy; pub use pooled::{PooledTransactionsElement, PooledTransactionsElementEcRecovered}; diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 7540a6340f51..7ad245b7fef4 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -6,10 +6,11 @@ use crate::{ }; use futures_util::{ready, Stream}; use reth_primitives::{ - Address, BlobTransactionSidecar, FromRecoveredPooledTransaction, FromRecoveredTransaction, - IntoRecoveredTransaction, PeerId, PooledTransactionsElement, - PooledTransactionsElementEcRecovered, SealedBlock, Transaction, TransactionKind, - TransactionSignedEcRecovered, TxHash, EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, H256, U256, + Address, BlobTransactionSidecar, BlobTransactionValidationError, + FromRecoveredPooledTransaction, FromRecoveredTransaction, IntoRecoveredTransaction, PeerId, + PooledTransactionsElement, PooledTransactionsElementEcRecovered, SealedBlock, Transaction, + TransactionKind, TransactionSignedEcRecovered, TxHash, EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, + H256, U256, }; use reth_rlp::Encodable; use std::{ @@ -22,6 +23,7 @@ use std::{ use tokio::sync::mpsc::Receiver; use crate::blobstore::BlobStoreError; +use reth_primitives::kzg::KzgSettings; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -645,6 +647,13 @@ pub trait PoolTransaction: pub trait EthPoolTransaction: PoolTransaction { /// Extracts the blob sidecar from the transaction. fn take_blob(&mut self) -> EthBlobTransactionSidecar; + + /// Validates the blob sidecar of the transaction with the given settings. + fn validate_blob( + &self, + blob: &BlobTransactionSidecar, + settings: &KzgSettings, + ) -> Result<(), BlobTransactionValidationError>; } /// The default [PoolTransaction] for the [Pool](crate::Pool) for Ethereum. 
@@ -827,6 +836,17 @@ impl EthPoolTransaction for EthPooledTransaction { EthBlobTransactionSidecar::None } } + + fn validate_blob( + &self, + sidecar: &BlobTransactionSidecar, + settings: &KzgSettings, + ) -> Result<(), BlobTransactionValidationError> { + match &self.transaction.transaction { + Transaction::Eip4844(tx) => tx.validate_blob(sidecar, settings), + _ => Err(BlobTransactionValidationError::NotBlobTransaction(self.tx_type())), + } + } } impl FromRecoveredTransaction for EthPooledTransaction { From 6d0b00a391eaa942975710b10ce1ee4c237d744f Mon Sep 17 00:00:00 2001 From: PatStiles <33334338+PatStiles@users.noreply.github.com> Date: Fri, 25 Aug 2023 03:47:58 -0400 Subject: [PATCH 528/722] chore: use `DEFAULT_DISCOVERY_PORT` constant (#4356) --- Cargo.lock | 2 ++ crates/net/discv4/src/lib.rs | 4 ++-- crates/net/discv4/src/proto.rs | 6 +++--- crates/net/eth-wire/Cargo.toml | 1 + crates/net/eth-wire/src/builder.rs | 3 ++- crates/net/eth-wire/src/ethstream.rs | 5 +++-- crates/net/eth-wire/src/hello.rs | 10 ++++++---- crates/net/eth-wire/src/p2pstream.rs | 3 ++- crates/net/network-api/Cargo.toml | 1 + crates/net/network-api/src/noop.rs | 3 ++- 10 files changed, 24 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a2375940284d..c94fcd909149 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5542,6 +5542,7 @@ dependencies = [ "proptest-derive", "rand 0.8.5", "reth-codecs", + "reth-discv4", "reth-ecies", "reth-metrics", "reth-primitives", @@ -5738,6 +5739,7 @@ name = "reth-network-api" version = "0.1.0-alpha.7" dependencies = [ "async-trait", + "reth-discv4", "reth-eth-wire", "reth-primitives", "reth-rpc-types", diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 20b729330e9d..fa7077bc3122 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -2178,7 +2178,7 @@ mod tests { let v4: Ipv4Addr = "0.0.0.0".parse().unwrap(); let v6 = v4.to_ipv6_mapped(); - let addr: SocketAddr = (v6, 30303).into(); + let addr: SocketAddr = (v6, DEFAULT_DISCOVERY_PORT).into(); let ping = Ping { from: rng_endpoint(&mut rng), @@ -2210,7 +2210,7 @@ mod tests { let v4: Ipv4Addr = "0.0.0.0".parse().unwrap(); let v6 = v4.to_ipv6_mapped(); - let addr: SocketAddr = (v6, 30303).into(); + let addr: SocketAddr = (v6, DEFAULT_DISCOVERY_PORT).into(); let ping = Ping { from: rng_endpoint(&mut rng), diff --git a/crates/net/discv4/src/proto.rs b/crates/net/discv4/src/proto.rs index 88b3bb93644d..b02082f9e86d 100644 --- a/crates/net/discv4/src/proto.rs +++ b/crates/net/discv4/src/proto.rs @@ -496,7 +496,7 @@ mod tests { use super::*; use crate::{ test_utils::{rng_endpoint, rng_ipv4_record, rng_ipv6_record, rng_message}, - SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS, + DEFAULT_DISCOVERY_PORT, SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS, }; use enr::{EnrBuilder, EnrPublicKey}; use rand::{thread_rng, Rng, RngCore}; @@ -773,7 +773,7 @@ mod tests { assert_eq!(enr.0.ip4(), Some(Ipv4Addr::new(127, 0, 0, 1))); assert_eq!(enr.0.id(), Some(String::from("v4"))); - assert_eq!(enr.0.udp4(), Some(30303)); + assert_eq!(enr.0.udp4(), Some(DEFAULT_DISCOVERY_PORT)); assert_eq!(enr.0.tcp4(), None); assert_eq!(enr.0.signature(), &signature[..]); assert_eq!(pubkey.to_vec(), expected_pubkey); @@ -808,7 +808,7 @@ mod tests { assert_eq!(enr.0.ip4(), Some(Ipv4Addr::new(127, 0, 0, 1))); assert_eq!(enr.0.id(), Some(String::from("v4"))); - assert_eq!(enr.0.udp4(), Some(30303)); + assert_eq!(enr.0.udp4(), Some(DEFAULT_DISCOVERY_PORT)); assert_eq!(enr.0.tcp4(), None); assert_eq!(enr.0.signature(), 
&signature[..]); assert_eq!(pubkey.to_vec(), expected_pubkey); diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index 84e62184f882..e6d865246732 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -24,6 +24,7 @@ reth-rlp = { workspace = true, features = [ "ethereum-types", "smol_str", ] } +reth-discv4 = {path = "../discv4" } # metrics reth-metrics.workspace = true diff --git a/crates/net/eth-wire/src/builder.rs b/crates/net/eth-wire/src/builder.rs index 23da3bce69a4..3b5866bdeff0 100644 --- a/crates/net/eth-wire/src/builder.rs +++ b/crates/net/eth-wire/src/builder.rs @@ -4,6 +4,7 @@ use crate::{ capability::Capability, hello::HelloMessage, p2pstream::ProtocolVersion, EthVersion, Status, }; +use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_primitives::{Chain, ForkId, PeerId, H256, U256}; /// Builder for [`Status`](crate::types::Status) messages. @@ -100,7 +101,7 @@ impl HelloBuilder { client_version: "Ethereum/1.0.0".to_string(), capabilities: vec![EthVersion::Eth68.into()], // TODO: default port config - port: 30303, + port: DEFAULT_DISCOVERY_PORT, id: pubkey, }, } diff --git a/crates/net/eth-wire/src/ethstream.rs b/crates/net/eth-wire/src/ethstream.rs index ae5d60edf3e9..70472e9e9fbd 100644 --- a/crates/net/eth-wire/src/ethstream.rs +++ b/crates/net/eth-wire/src/ethstream.rs @@ -323,6 +323,7 @@ mod tests { }; use ethers_core::types::Chain; use futures::{SinkExt, StreamExt}; + use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_ecies::{stream::ECIESStream, util::pk2id}; use reth_primitives::{ForkFilter, Head, H256, U256}; use secp256k1::{SecretKey, SECP256K1}; @@ -591,7 +592,7 @@ mod tests { protocol_version: ProtocolVersion::V5, client_version: "bitcoind/1.0.0".to_string(), capabilities: vec![Capability::new("eth".into(), EthVersion::Eth67 as usize)], - port: 30303, + port: DEFAULT_DISCOVERY_PORT, id: pk2id(&server_key.public_key(SECP256K1)), }; @@ -619,7 +620,7 @@ mod tests { protocol_version: ProtocolVersion::V5, client_version: "bitcoind/1.0.0".to_string(), capabilities: vec![Capability::new("eth".into(), EthVersion::Eth67 as usize)], - port: 30303, + port: DEFAULT_DISCOVERY_PORT, id: pk2id(&client_key.public_key(SECP256K1)), }; diff --git a/crates/net/eth-wire/src/hello.rs b/crates/net/eth-wire/src/hello.rs index c14759fd40ba..acbb2c4337d9 100644 --- a/crates/net/eth-wire/src/hello.rs +++ b/crates/net/eth-wire/src/hello.rs @@ -1,5 +1,6 @@ use crate::{capability::Capability, EthVersion, ProtocolVersion}; use reth_codecs::derive_arbitrary; +use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_primitives::{constants::RETH_CLIENT_VERSION, PeerId}; use reth_rlp::{RlpDecodable, RlpEncodable}; @@ -99,7 +100,7 @@ impl HelloMessageBuilder { capabilities: capabilities.unwrap_or_else(|| { vec![EthVersion::Eth68.into(), EthVersion::Eth67.into(), EthVersion::Eth66.into()] }), - port: port.unwrap_or(30303), + port: port.unwrap_or(DEFAULT_DISCOVERY_PORT), id, } } @@ -107,6 +108,7 @@ impl HelloMessageBuilder { #[cfg(test)] mod tests { + use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_ecies::util::pk2id; use reth_rlp::{Decodable, Encodable, EMPTY_STRING_CODE}; use secp256k1::{SecretKey, SECP256K1}; @@ -123,7 +125,7 @@ mod tests { protocol_version: ProtocolVersion::V5, client_version: "reth/0.1.0".to_string(), capabilities: vec![Capability::new("eth".into(), EthVersion::Eth67 as usize)], - port: 30303, + port: DEFAULT_DISCOVERY_PORT, id, }); @@ -143,7 +145,7 @@ mod tests { protocol_version: ProtocolVersion::V5, client_version: 
"reth/0.1.0".to_string(), capabilities: vec![Capability::new("eth".into(), EthVersion::Eth67 as usize)], - port: 30303, + port: DEFAULT_DISCOVERY_PORT, id, }); @@ -162,7 +164,7 @@ mod tests { protocol_version: ProtocolVersion::V5, client_version: "reth/0.1.0".to_string(), capabilities: vec![Capability::new("eth".into(), EthVersion::Eth67 as usize)], - port: 30303, + port: DEFAULT_DISCOVERY_PORT, id, }); diff --git a/crates/net/eth-wire/src/p2pstream.rs b/crates/net/eth-wire/src/p2pstream.rs index 8b2b9e0fac45..b46d3ecd6a35 100644 --- a/crates/net/eth-wire/src/p2pstream.rs +++ b/crates/net/eth-wire/src/p2pstream.rs @@ -827,6 +827,7 @@ impl Decodable for ProtocolVersion { mod tests { use super::*; use crate::{DisconnectReason, EthVersion}; + use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_ecies::util::pk2id; use secp256k1::{SecretKey, SECP256K1}; use tokio::net::{TcpListener, TcpStream}; @@ -839,7 +840,7 @@ mod tests { protocol_version: ProtocolVersion::V5, client_version: "bitcoind/1.0.0".to_string(), capabilities: vec![EthVersion::Eth67.into()], - port: 30303, + port: DEFAULT_DISCOVERY_PORT, id: pk2id(&server_key.public_key(SECP256K1)), }; (hello, server_key) diff --git a/crates/net/network-api/Cargo.toml b/crates/net/network-api/Cargo.toml index acd433d78521..8ae24e40fb57 100644 --- a/crates/net/network-api/Cargo.toml +++ b/crates/net/network-api/Cargo.toml @@ -13,6 +13,7 @@ description = "Network interfaces" reth-primitives.workspace = true reth-eth-wire = { path = "../eth-wire" } reth-rpc-types.workspace = true +reth-discv4 = { path = "../discv4" } # io serde = { workspace = true, features = ["derive"], optional = true } diff --git a/crates/net/network-api/src/noop.rs b/crates/net/network-api/src/noop.rs index dc1ef17a93ca..2b453b4c4e7f 100644 --- a/crates/net/network-api/src/noop.rs +++ b/crates/net/network-api/src/noop.rs @@ -7,6 +7,7 @@ use crate::{ NetworkError, NetworkInfo, PeerKind, Peers, PeersInfo, Reputation, ReputationChangeKind, }; use async_trait::async_trait; +use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_eth_wire::{DisconnectReason, ProtocolVersion}; use reth_primitives::{Chain, NodeRecord, PeerId}; use reth_rpc_types::{EthProtocolInfo, NetworkStatus}; @@ -22,7 +23,7 @@ pub struct NoopNetwork; #[async_trait] impl NetworkInfo for NoopNetwork { fn local_addr(&self) -> SocketAddr { - (IpAddr::from(std::net::Ipv4Addr::UNSPECIFIED), 30303).into() + (IpAddr::from(std::net::Ipv4Addr::UNSPECIFIED), DEFAULT_DISCOVERY_PORT).into() } async fn network_status(&self) -> Result { From 4a13415f92d72f2cfab279c069c523c92d6f62de Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Fri, 25 Aug 2023 12:18:19 +0100 Subject: [PATCH 529/722] feat(book): advise to `tee` the output of `mdbx_chk` (#4360) --- book/run/troubleshooting.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/book/run/troubleshooting.md b/book/run/troubleshooting.md index 4fb2ef61e0be..e810c9b559d4 100644 --- a/book/run/troubleshooting.md +++ b/book/run/troubleshooting.md @@ -33,6 +33,6 @@ Caused by: git clone https://github.com/paradigmxyz/reth cd reth make db-tools - db-tools/mdbx_chk $(reth db path)/mdbx.dat + db-tools/mdbx_chk $(reth db path)/mdbx.dat | tee mdbx_chk.log ``` - If `mdbx_chk` has detected any errors, please [open an issue](https://github.com/paradigmxyz/reth/issues) and post the output. \ No newline at end of file + If `mdbx_chk` has detected any errors, please [open an issue](https://github.com/paradigmxyz/reth/issues) and post the output from the `mdbx_chk.log` file. 
\ No newline at end of file From 2f808c243549a1d352e9d487992fd068c4c1a1a5 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Fri, 25 Aug 2023 12:24:30 +0100 Subject: [PATCH 530/722] feat(book): rpc methods pruning dependencies (#4358) --- book/run/pruning.md | 99 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 96 insertions(+), 3 deletions(-) diff --git a/book/run/pruning.md b/book/run/pruning.md index 48382f000bda..f65750f9f869 100644 --- a/book/run/pruning.md +++ b/book/run/pruning.md @@ -81,8 +81,101 @@ which can be pruned independently of each other: - Account History - Storage History -Pruning of each of these parts disables different RPC methods, because the historical data or lookup indexes +Pruning of each of these parts disables different RPC methods, because the historical data or lookup indexes become unavailable. -> TODO: `prune parts / RPC methods` table that shows which RPCs becomes unavailable when certain parts of the data -> are pruned \ No newline at end of file +The following tables describe the requirements for prune parts, per RPC method: +- ✅ – if the part is pruned, the RPC method still works +- ❌ - if the part is pruned, the RPC method doesn't work anymore + +### `debug` namespace + +| RPC / Part | Sender Recovery | Transaction Lookup | Receipts | Account History | Storage History | +|----------------------------|-----------------|--------------------|----------|-----------------|-----------------| +| `debug_getRawReceipts` | ✅ | ✅ | ❌ | ✅ | ✅ | +| `debug_traceBlockByHash` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `debug_getRawHeader` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `debug_traceTransaction` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `debug_traceBlock` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `debug_getRawBlock` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `debug_getRawTransaction` | ✅ | ❌ | ✅ | ✅ | ✅ | +| `debug_traceBlockByNumber` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `debug_traceCall` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `debug_traceCallMany` | ✅ | ✅ | ✅ | ❌ | ❌ | + +### `eth` namespace + +| RPC / Part | Sender Recovery | Transaction Lookup | Receipts | Account History | Storage History | +|-------------------------------------------|-----------------|--------------------|----------|-----------------|-----------------| +| `eth_getTransactionByBlockHashAndIndex` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getBlockReceipts` | ✅ | ✅ | ❌ | ✅ | ✅ | +| `eth_blockNumber` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_sendTransaction` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_sendRawTransaction` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_subscribe` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_newPendingTransactionFilter` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getUncleByBlockHashAndIndex` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getBlockTransactionCountByNumber` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_estimateGas` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `eth_signTransaction` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getLogs` | ✅ | ✅ | ❌ | ✅ | ✅ | +| `eth_getFilterChanges` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_newFilter` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_unsubscribe` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_maxPriorityFeePerGas` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getUncleByBlockNumberAndIndex` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getBlockTransactionCountByHash` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_mining` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_uninstallFilter` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_signTypedData` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_sign` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getBalance` | ✅ | ✅ | ✅ | ❌ | ✅ | +| `eth_getTransactionByBlockNumberAndIndex` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getTransactionByHash` | ✅ | ❌ | ✅ | ✅ | ✅ | +| `eth_getStorageAt` | ✅ | ✅ | ✅ | ✅ | ❌ | +| `eth_getTransactionCount` | ✅ | ✅ | ✅ | ❌ | ✅ | +| `eth_syncing` | ✅ 
| ✅ | ✅ | ✅ | ✅ | +| `eth_gasPrice` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getTransactionReceipt` | ✅ | ❌ | ❌ | ✅ | ✅ | +| `eth_accounts` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getUncleCountByBlockNumber` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getBlockByNumber` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_feeHistory` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getBlockByHash` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_newBlockFilter` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getFilterLogs` | ✅ | ✅ | ❌ | ✅ | ✅ | +| `eth_getCode` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_protocolVersion` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_createAccessList` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `eth_chainId` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getUncleCountByBlockHash` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_call` | ✅ | ✅ | ✅ | ❌ | ❌ | + +### `net` namespace +| RPC / Part | Sender Recovery | Transaction Lookup | Receipts | Account History | Storage History | +|-----------------|-----------------|--------------------|----------|-----------------|-----------------| +| `net_listening` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `net_version` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `net_peerCount` | ✅ | ✅ | ✅ | ✅ | ✅ | + +### `trace` namespace + +| RPC / Part | Sender Recovery | Transaction Lookup | Receipts | Account History | Storage History | +|---------------------------------|-----------------|--------------------|----------|-----------------|-----------------| +| `trace_block` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `trace_call` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `trace_callMany` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `trace_rawTransaction` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `trace_replayBlockTransactions` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `trace_replayTransaction` | ✅ | ❌ | ✅ | ❌ | ❌ | +| `trace_get` | ✅ | ❌ | ✅ | ❌ | ❌ | +| `trace_transaction` | ✅ | ❌ | ✅ | ❌ | ❌ | + +### `txpool` namespace + +| RPC / Part | Sender Recovery | Transaction Lookup | Receipts | Account History | Storage History | +|----------------------|-----------------|--------------------|----------|-----------------|-----------------| +| `txpool_status` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `txpool_inspect` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `txpool_content` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `txpool_contentFrom` | ✅ | ✅ | ✅ | ✅ | ✅ | From d0c1fa344258ee8457f983985d3e8f0111391b01 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Fri, 25 Aug 2023 14:21:09 +0100 Subject: [PATCH 531/722] chore(book): sort RPC methods in pruning chapter (#4361) --- book/run/pruning.md | 86 +++++++++++++++++++++++---------------------- 1 file changed, 44 insertions(+), 42 deletions(-) diff --git a/book/run/pruning.md b/book/run/pruning.md index f65750f9f869..259110c32ace 100644 --- a/book/run/pruning.md +++ b/book/run/pruning.md @@ -92,71 +92,73 @@ The following tables describe the requirements for prune parts, per RPC method: | RPC / Part | Sender Recovery | Transaction Lookup | Receipts | Account History | Storage History | |----------------------------|-----------------|--------------------|----------|-----------------|-----------------| -| `debug_getRawReceipts` | ✅ | ✅ | ❌ | ✅ | ✅ | -| `debug_traceBlockByHash` | ✅ | ✅ | ✅ | ❌ | ❌ | -| `debug_getRawHeader` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `debug_traceTransaction` | ✅ | ✅ | ✅ | ❌ | ❌ | -| `debug_traceBlock` | ✅ | ✅ | ✅ | ❌ | ❌ | | `debug_getRawBlock` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `debug_getRawHeader` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `debug_getRawReceipts` | ✅ | ✅ | ❌ | ✅ | ✅ | | `debug_getRawTransaction` | ✅ | ❌ | ✅ | ✅ | ✅ | +| `debug_traceBlock` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `debug_traceBlockByHash` | ✅ | ✅ | ✅ | ❌ | ❌ | | `debug_traceBlockByNumber` | ✅ | ✅ | ✅ | ❌ | ❌ | | `debug_traceCall` | ✅ | ✅ | ✅ | ❌ | ❌ | | `debug_traceCallMany` | ✅ | ✅ | ✅ | ❌ | ❌ | +| 
`debug_traceTransaction` | ✅ | ✅ | ✅ | ❌ | ❌ | + ### `eth` namespace | RPC / Part | Sender Recovery | Transaction Lookup | Receipts | Account History | Storage History | |-------------------------------------------|-----------------|--------------------|----------|-----------------|-----------------| -| `eth_getTransactionByBlockHashAndIndex` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_getBlockReceipts` | ✅ | ✅ | ❌ | ✅ | ✅ | +| `eth_accounts` | ✅ | ✅ | ✅ | ✅ | ✅ | | `eth_blockNumber` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_sendTransaction` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_sendRawTransaction` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_subscribe` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_newPendingTransactionFilter` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_getUncleByBlockHashAndIndex` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_getBlockTransactionCountByNumber` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_call` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `eth_chainId` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_createAccessList` | ✅ | ✅ | ✅ | ❌ | ❌ | | `eth_estimateGas` | ✅ | ✅ | ✅ | ❌ | ❌ | -| `eth_signTransaction` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_getLogs` | ✅ | ✅ | ❌ | ✅ | ✅ | -| `eth_getFilterChanges` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_newFilter` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_unsubscribe` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_maxPriorityFeePerGas` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_getUncleByBlockNumberAndIndex` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_getBlockTransactionCountByHash` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_mining` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_uninstallFilter` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_signTypedData` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_sign` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_feeHistory` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_gasPrice` | ✅ | ✅ | ✅ | ✅ | ✅ | | `eth_getBalance` | ✅ | ✅ | ✅ | ❌ | ✅ | +| `eth_getBlockByHash` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getBlockByNumber` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getBlockReceipts` | ✅ | ✅ | ❌ | ✅ | ✅ | +| `eth_getBlockTransactionCountByHash` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getBlockTransactionCountByNumber` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getCode` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getFilterChanges` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getFilterLogs` | ✅ | ✅ | ❌ | ✅ | ✅ | +| `eth_getLogs` | ✅ | ✅ | ❌ | ✅ | ✅ | +| `eth_getStorageAt` | ✅ | ✅ | ✅ | ✅ | ❌ | +| `eth_getTransactionByBlockHashAndIndex` | ✅ | ✅ | ✅ | ✅ | ✅ | | `eth_getTransactionByBlockNumberAndIndex` | ✅ | ✅ | ✅ | ✅ | ✅ | | `eth_getTransactionByHash` | ✅ | ❌ | ✅ | ✅ | ✅ | -| `eth_getStorageAt` | ✅ | ✅ | ✅ | ✅ | ❌ | | `eth_getTransactionCount` | ✅ | ✅ | ✅ | ❌ | ✅ | -| `eth_syncing` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_gasPrice` | ✅ | ✅ | ✅ | ✅ | ✅ | | `eth_getTransactionReceipt` | ✅ | ❌ | ❌ | ✅ | ✅ | -| `eth_accounts` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getUncleByBlockHashAndIndex` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getUncleByBlockNumberAndIndex` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getUncleCountByBlockHash` | ✅ | ✅ | ✅ | ✅ | ✅ | | `eth_getUncleCountByBlockNumber` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_getBlockByNumber` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_feeHistory` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_getBlockByHash` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_maxPriorityFeePerGas` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_mining` | ✅ | ✅ | ✅ | ✅ | ✅ | | `eth_newBlockFilter` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_getFilterLogs` | ✅ | ✅ | ❌ | ✅ | ✅ | -| `eth_getCode` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_newFilter` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_newPendingTransactionFilter` | ✅ | ✅ | ✅ | ✅ | ✅ | | `eth_protocolVersion` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_createAccessList` | ✅ | ✅ | ✅ | ❌ | ❌ | -| `eth_chainId` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_getUncleCountByBlockHash` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_call` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `eth_sendRawTransaction` | ✅ | ✅ | ✅ | ✅ | ✅ 
| +| `eth_sendTransaction` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_sign` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_signTransaction` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_signTypedData` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_subscribe` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_syncing` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_uninstallFilter` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_unsubscribe` | ✅ | ✅ | ✅ | ✅ | ✅ | ### `net` namespace + | RPC / Part | Sender Recovery | Transaction Lookup | Receipts | Account History | Storage History | |-----------------|-----------------|--------------------|----------|-----------------|-----------------| | `net_listening` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `net_version` | ✅ | ✅ | ✅ | ✅ | ✅ | | `net_peerCount` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `net_version` | ✅ | ✅ | ✅ | ✅ | ✅ | ### `trace` namespace @@ -165,17 +167,17 @@ The following tables describe the requirements for prune parts, per RPC method: | `trace_block` | ✅ | ✅ | ✅ | ❌ | ❌ | | `trace_call` | ✅ | ✅ | ✅ | ❌ | ❌ | | `trace_callMany` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `trace_get` | ✅ | ❌ | ✅ | ❌ | ❌ | | `trace_rawTransaction` | ✅ | ✅ | ✅ | ❌ | ❌ | | `trace_replayBlockTransactions` | ✅ | ✅ | ✅ | ❌ | ❌ | | `trace_replayTransaction` | ✅ | ❌ | ✅ | ❌ | ❌ | -| `trace_get` | ✅ | ❌ | ✅ | ❌ | ❌ | | `trace_transaction` | ✅ | ❌ | ✅ | ❌ | ❌ | ### `txpool` namespace | RPC / Part | Sender Recovery | Transaction Lookup | Receipts | Account History | Storage History | |----------------------|-----------------|--------------------|----------|-----------------|-----------------| -| `txpool_status` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `txpool_inspect` | ✅ | ✅ | ✅ | ✅ | ✅ | | `txpool_content` | ✅ | ✅ | ✅ | ✅ | ✅ | | `txpool_contentFrom` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `txpool_inspect` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `txpool_status` | ✅ | ✅ | ✅ | ✅ | ✅ | From d8b1609299a4ada2bf57fd4feb62e7042d0ccbf6 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 25 Aug 2023 18:33:00 +0300 Subject: [PATCH 532/722] dep: remove kzg patch (#4363) --- Cargo.lock | 24 +++++++++++++++++++++++- Cargo.toml | 3 --- 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c94fcd909149..e7c23acf882d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -720,6 +720,18 @@ dependencies = [ "generic-array", ] +[[package]] +name = "blst" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c94087b935a822949d3291a9989ad2b2051ea141eda0fd4e478a75f6aa3e604b" +dependencies = [ + "cc", + "glob", + "threadpool", + "zeroize", +] + [[package]] name = "boa_ast" version = "0.17.0" @@ -932,9 +944,10 @@ dependencies = [ [[package]] name = "c-kzg" version = "0.1.0" -source = "git+https://github.com/rjected/c-kzg-4844?branch=dan/add-serde-feature#4c95d6b8850f4f22a25fed0cf207560711cefe2b" +source = "git+https://github.com/ethereum/c-kzg-4844#666a9de002035eb7e929bceee3a70dee1b23aa93" dependencies = [ "bindgen 0.64.0 (git+https://github.com/rust-lang/rust-bindgen?rev=0de11f0a521611ac8738b7b01d19dddaf3899e66)", + "blst", "cc", "glob", "hex", @@ -7424,6 +7437,15 @@ dependencies = [ "once_cell", ] +[[package]] +name = "threadpool" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" +dependencies = [ + "num_cpus", +] + [[package]] name = "time" version = "0.3.25" diff --git a/Cargo.toml b/Cargo.toml index f9389a0f1abe..f9da92926d0b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -155,6 +155,3 @@ c-kzg = { git = "https://github.com/ethereum/c-kzg-4844" } ### misc-testing proptest = "1.0" arbitrary = "1.1" - 
-[patch."https://github.com/ethereum/c-kzg-4844"] -c-kzg = { git = "https://github.com/rjected/c-kzg-4844", branch = "dan/add-serde-feature" } \ No newline at end of file From 5d971d626f437312fbe6496dc1c2c102054d1bc2 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 27 Aug 2023 13:29:36 -0700 Subject: [PATCH 533/722] chore(deps): weekly `cargo update` (#4374) Co-authored-by: github-merge-queue --- Cargo.lock | 332 +++++++++++++++++++++++++---------------------------- 1 file changed, 158 insertions(+), 174 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e7c23acf882d..2c58d61bdd99 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -25,9 +25,9 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" dependencies = [ "gimli", ] @@ -119,9 +119,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b8f9420f797f2d9e935edf629310eb938a0d839f984e25327f3c7eed22300c" +checksum = "6748e8def348ed4d14996fa801f4122cd763fff530258cdc03f64b25f89d3a5a" dependencies = [ "memchr", ] @@ -181,24 +181,23 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.3.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163" +checksum = "b1f58811cfac344940f1a400b6e6231ce35171f614f26439e80f8c1465c5cc0c" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", - "is-terminal", "utf8parse", ] [[package]] name = "anstyle" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" +checksum = "15c4c2c83f81532e5845a733998b6971faca23490340a418e9b72a3ec9de12ea" [[package]] name = "anstyle-parse" @@ -220,9 +219,9 @@ dependencies = [ [[package]] name = "anstyle-wincon" -version = "1.0.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c677ab05e09154296dd37acecd46420c17b9713e8366facafa8fc0885167cf4c" +checksum = "58f54d10c6dfa51283a066ceab3ec1ab78d13fae00aa49243a45e4571fb79dfd" dependencies = [ "anstyle", "windows-sys 0.48.0", @@ -511,9 +510,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12" +checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" dependencies = [ "addr2line", "cc", @@ -538,9 +537,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.2" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" +checksum = "414dcefbc63d77c526a76b3afcf6fbb9b5e2791c19c3aa2297733208750c6e53" [[package]] name = "base64ct" @@ -559,9 +558,9 @@ dependencies = [ [[package]] name = "bech32" -version = "0.7.3" +version = "0.9.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dabbe35f96fb9507f7330793dc490461b2962659ac5d427181e451a623751d1" +checksum = "d86b93f97252c47b41663388e6d155714a9d0c398b99f1005cbc5f978b29f445" [[package]] name = "beef" @@ -670,16 +669,6 @@ version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" -[[package]] -name = "bitvec" -version = "0.17.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41262f11d771fd4a61aa3ce019fca363b4b6c282fca9da2a31186d3965a47a5c" -dependencies = [ - "either", - "radium 0.3.0", -] - [[package]] name = "bitvec" version = "1.0.1" @@ -687,7 +676,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ "funty", - "radium 0.7.0", + "radium", "serde", "tap", "wyz", @@ -735,7 +724,7 @@ dependencies = [ [[package]] name = "boa_ast" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#b204dcb0014fe825a06a2683b01a29b886436c46" +source = "git+https://github.com/boa-dev/boa#b47087c02866e914356a990267083c04604d1cb9" dependencies = [ "bitflags 2.4.0", "boa_interner", @@ -748,7 +737,7 @@ dependencies = [ [[package]] name = "boa_engine" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#b204dcb0014fe825a06a2683b01a29b886436c46" +source = "git+https://github.com/boa-dev/boa#b47087c02866e914356a990267083c04604d1cb9" dependencies = [ "bitflags 2.4.0", "boa_ast", @@ -786,7 +775,7 @@ dependencies = [ [[package]] name = "boa_gc" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#b204dcb0014fe825a06a2683b01a29b886436c46" +source = "git+https://github.com/boa-dev/boa#b47087c02866e914356a990267083c04604d1cb9" dependencies = [ "boa_macros", "boa_profiler", @@ -797,7 +786,7 @@ dependencies = [ [[package]] name = "boa_icu_provider" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#b204dcb0014fe825a06a2683b01a29b886436c46" +source = "git+https://github.com/boa-dev/boa#b47087c02866e914356a990267083c04604d1cb9" dependencies = [ "icu_collections", "icu_normalizer", @@ -810,7 +799,7 @@ dependencies = [ [[package]] name = "boa_interner" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#b204dcb0014fe825a06a2683b01a29b886436c46" +source = "git+https://github.com/boa-dev/boa#b47087c02866e914356a990267083c04604d1cb9" dependencies = [ "boa_gc", "boa_macros", @@ -825,7 +814,7 @@ dependencies = [ [[package]] name = "boa_macros" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#b204dcb0014fe825a06a2683b01a29b886436c46" +source = "git+https://github.com/boa-dev/boa#b47087c02866e914356a990267083c04604d1cb9" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", @@ -836,7 +825,7 @@ dependencies = [ [[package]] name = "boa_parser" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#b204dcb0014fe825a06a2683b01a29b886436c46" +source = "git+https://github.com/boa-dev/boa#b47087c02866e914356a990267083c04604d1cb9" dependencies = [ "bitflags 2.4.0", "boa_ast", @@ -856,7 +845,7 @@ dependencies = [ [[package]] name = "boa_profiler" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#b204dcb0014fe825a06a2683b01a29b886436c46" +source = "git+https://github.com/boa-dev/boa#b47087c02866e914356a990267083c04604d1cb9" [[package]] name = "boyer-moore-magiclen" @@ -890,11 +879,12 @@ dependencies = [ [[package]] name = "bs58" -version = "0.4.0" +version = 
"0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" +checksum = "f5353f36341f7451062466f0b755b96ac3a9547e4d7f6b70d603fc721a7d7896" dependencies = [ - "sha2 0.9.9", + "sha2", + "tinyvec", ] [[package]] @@ -1015,9 +1005,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.82" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "305fe645edc1442a0fa8b6726ba61d422798d37a52e12eaecf4b022ebbb88f01" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" dependencies = [ "jobserver", "libc", @@ -1110,9 +1100,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.3.23" +version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03aef18ddf7d879c15ce20f04826ef8418101c7e528014c3eeea13321047dca3" +checksum = "1d5f1946157a96594eb2d2c10eb7ad9a2b27518cb3000209dec700c35df9197d" dependencies = [ "clap_builder", "clap_derive", @@ -1121,9 +1111,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.3.23" +version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8ce6fffb678c9b80a70b6b6de0aad31df727623a70fd9a842c30cd573e2fa98" +checksum = "78116e32a042dd73c2901f0dc30790d20ff3447f3e3472fad359e8c3d282bcd6" dependencies = [ "anstream", "anstyle", @@ -1133,9 +1123,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.3.12" +version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54a9bb5758fc5dfe728d1019941681eccaf0cf8a4189b692a0ee2f2ecf90a050" +checksum = "c9fd1a5729c4548118d7d70ff234a44868d00489a4b6597b0b020918a0e91a1a" dependencies = [ "heck", "proc-macro2 1.0.66", @@ -1145,9 +1135,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" +checksum = "cd7cc57abe963c6d3b9d8be5b06ba7c8957a930305ca90304f24ef040aa6f961" [[package]] name = "cobs" @@ -1169,47 +1159,43 @@ dependencies = [ [[package]] name = "coins-bip32" -version = "0.8.3" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b30a84aab436fcb256a2ab3c80663d8aec686e6bae12827bb05fef3e1e439c9f" +checksum = "3b6be4a5df2098cd811f3194f64ddb96c267606bffd9689ac7b0160097b01ad3" dependencies = [ - "bincode", "bs58", "coins-core", "digest 0.10.7", - "getrandom 0.2.10", "hmac", "k256", - "lazy_static", "serde", - "sha2 0.10.7", + "sha2", "thiserror", ] [[package]] name = "coins-bip39" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84f4d04ee18e58356accd644896aeb2094ddeafb6a713e056cef0c0a8e468c15" +checksum = "3db8fba409ce3dc04f7d804074039eb68b960b0829161f8e06c95fea3f122528" dependencies = [ - "bitvec 0.17.4", + "bitvec", "coins-bip32", - "getrandom 0.2.10", "hmac", "once_cell", "pbkdf2 0.12.2", "rand 0.8.5", - "sha2 0.10.7", + "sha2", "thiserror", ] [[package]] name = "coins-core" -version = "0.8.3" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b949a1c63fb7eb591eb7ba438746326aedf0ae843e51ec92ba6bec5bb382c4f" +checksum = "5286a0843c21f8367f7be734f89df9b822e0321d8bcce8d6e735aadff7d74979" dependencies = [ - "base64 0.21.2", + "base64 0.21.3", "bech32", 
"bs58", "digest 0.10.7", @@ -1218,7 +1204,7 @@ dependencies = [ "ripemd", "serde", "serde_derive", - "sha2 0.10.7", + "sha2", "sha3", "thiserror", ] @@ -1635,9 +1621,9 @@ dependencies = [ [[package]] name = "dashmap" -version = "5.5.0" +version = "5.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6943ae99c34386c84a470c499d3414f66502a41340aa895406e0d2e4a207b91d" +checksum = "edd72493923899c6f10c641bdbdeddc7183d6396641d99c1a0d1597f37f92e28" dependencies = [ "cfg-if", "hashbrown 0.14.0", @@ -1689,9 +1675,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7684a49fb1af197853ef7b2ee694bc1f5b4179556f1e5710e1760c5db6f5e929" +checksum = "f2696e8a945f658fd14dc3b87242e6b80cd0f36ff04ea560fa39082368847946" dependencies = [ "serde", ] @@ -1854,7 +1840,7 @@ dependencies = [ [[package]] name = "discv5" version = "0.3.1" -source = "git+https://github.com/sigp/discv5#1439decd4e7d7c9de78ef61b5d67be3fee688510" +source = "git+https://github.com/sigp/discv5#d2e30e04ee62418b9e57278cee907c02b99d5bd1" dependencies = [ "aes 0.7.5", "aes-gcm", @@ -1938,9 +1924,9 @@ dependencies = [ [[package]] name = "ed25519" -version = "2.2.1" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fb04eee5d9d907f29e80ee6b0e78f7e2c82342c63e3580d8c4f69d9d5aad963" +checksum = "60f6d271ca33075c88028be6f04d502853d63a5ece419d269c15315d4fc1cf1d" dependencies = [ "pkcs8", "signature", @@ -1956,7 +1942,7 @@ dependencies = [ "ed25519", "rand_core 0.6.4", "serde", - "sha2 0.10.7", + "sha2", "zeroize", ] @@ -2023,9 +2009,9 @@ checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" [[package]] name = "encoding_rs" -version = "0.8.32" +version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" +checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" dependencies = [ "cfg-if", ] @@ -2042,7 +2028,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0be7b2ac146c1f99fe245c02d16af0696450d8e06c135db75e10eeb9e642c20d" dependencies = [ - "base64 0.21.2", + "base64 0.21.3", "bytes", "ed25519-dalek", "hex", @@ -2159,7 +2145,7 @@ dependencies = [ "scrypt", "serde", "serde_json", - "sha2 0.10.7", + "sha2", "sha3", "thiserror", "uuid 0.8.2", @@ -2350,7 +2336,7 @@ checksum = "00c84664b294e47fc2860d6db0db0246f79c4c724e552549631bb9505b834bee" dependencies = [ "async-trait", "auto_impl", - "base64 0.21.2", + "base64 0.21.3", "bytes", "const-hex", "enr", @@ -2394,7 +2380,7 @@ dependencies = [ "eth-keystore", "ethers-core", "rand 0.8.5", - "sha2 0.10.7", + "sha2", "thiserror", "tracing", ] @@ -2526,9 +2512,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.26" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" +checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010" dependencies = [ "crc32fast", "miniz_oxide", @@ -2717,10 +2703,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if", - "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", - "wasm-bindgen", ] [[package]] @@ -2735,9 +2719,9 @@ dependencies = [ 
[[package]] name = "gimli" -version = "0.27.3" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" +checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" [[package]] name = "glob" @@ -2804,9 +2788,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.20" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97ec8491ebaf99c8eaa73058b045fe58073cd6be7f596ac993ced0b0a0c01049" +checksum = "91fc23aa11be92976ef4729127f1a74adf36d8436f7816b185d18df956790833" dependencies = [ "bytes", "fnv", @@ -3029,9 +3013,9 @@ checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "human_bytes" @@ -3126,7 +3110,7 @@ dependencies = [ "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows", + "windows 0.48.0", ] [[package]] @@ -3356,12 +3340,12 @@ dependencies = [ [[package]] name = "inferno" -version = "0.11.15" +version = "0.11.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fb7c1b80a1dfa604bb4a649a5c5aeef3d913f7c520cb42b40e534e8a61bcdfc" +checksum = "73c0fefcb6d409a6587c07515951495d482006f89a21daa0f2f783aa4fd5e027" dependencies = [ "ahash 0.8.3", - "indexmap 1.9.3", + "indexmap 2.0.0", "is-terminal", "itoa", "log", @@ -3437,7 +3421,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi", - "rustix 0.38.8", + "rustix 0.38.9", "windows-sys 0.48.0", ] @@ -3680,7 +3664,7 @@ version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.21.2", + "base64 0.21.3", "pem", "ring", "serde", @@ -3698,7 +3682,7 @@ dependencies = [ "ecdsa", "elliptic-curve", "once_cell", - "sha2 0.10.7", + "sha2", "signature", ] @@ -3921,7 +3905,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a4964177ddfdab1e3a2b37aec7cf320e14169abb0ed73999f558136409178d5" dependencies = [ - "base64 0.21.2", + "base64 0.21.3", "hyper", "indexmap 1.9.3", "ipnet", @@ -3946,9 +3930,9 @@ dependencies = [ [[package]] name = "metrics-process" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "006271a8019ad7a9a28cfac2cc40e3ee104d54be763c4a0901e228a63f49d706" +checksum = "1c93f6ad342d3f7bc14724147e2dbc6eb6fdbe5a832ace16ea23b73618e8cc17" dependencies = [ "libproc", "mach2", @@ -3956,7 +3940,7 @@ dependencies = [ "once_cell", "procfs", "rlimit", - "windows", + "windows 0.51.1", ] [[package]] @@ -4144,9 +4128,9 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" dependencies = [ "autocfg", "num-integer", @@ -4156,9 +4140,9 @@ dependencies = [ [[package]] name = "num-complex" -version = "0.4.3" 
+version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e0d21255c828d6f128a1e41534206671e8c3ea0c62f32291e808dc82cff17d" +checksum = "1ba157ca0885411de85d6ca030ba7e2a83a28636056c7c699b07c8b6f7383214" dependencies = [ "num-traits", ] @@ -4258,9 +4242,9 @@ dependencies = [ [[package]] name = "object" -version = "0.31.1" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1" +checksum = "77ac5bbd07aea88c60a577a1ce218075ffd59208b2d7ca97adf9bfc5aeb21ebe" dependencies = [ "memchr", ] @@ -4326,9 +4310,9 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "ordered-float" -version = "3.9.0" +version = "3.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "126d3e6f3926bfb0fb24495b4f4da50626f547e54956594748e3d8882a0320b4" +checksum = "2a54938017eacd63036332b4ae5c8a49fc8c0c1d6d629893057e4f13609edd06" dependencies = [ "num-traits", ] @@ -4356,7 +4340,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd8e946cc0cc711189c0b0249fb8b599cbeeab9784d83c415719368bb8d4ac64" dependencies = [ "arrayvec", - "bitvec 1.0.1", + "bitvec", "byte-slice-cast", "bytes", "impl-trait-for-tuples", @@ -4568,9 +4552,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12cc1b0bf1727a77a54b6654e7b5f1af8604923edc8b81885f8ec92f9e3f0a05" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" [[package]] name = "pin-utils" @@ -4657,9 +4641,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.4.2" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f32154ba0af3a075eefa1eda8bb414ee928f62303a54ea85b8d6638ff1a6ee9e" +checksum = "31114a898e107c51bb1609ffaf55a0e011cf6a4d7f1170d0015a165082c0338b" [[package]] name = "postcard" @@ -4930,12 +4914,6 @@ dependencies = [ "proc-macro2 1.0.66", ] -[[package]] -name = "radium" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "def50a86306165861203e7f84ecffbbdfdea79f0e51039b33de1e952358c47ac" - [[package]] name = "radium" version = "0.7.0" @@ -5094,14 +5072,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.3" +version = "1.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" +checksum = "12de2eff854e5fa4b1295edd650e227e9d8fb0c9e90b12e7f36d6a6811791a29" dependencies = [ - "aho-corasick 1.0.3", + "aho-corasick 1.0.4", "memchr", - "regex-automata 0.3.6", - "regex-syntax 0.7.4", + "regex-automata 0.3.7", + "regex-syntax 0.7.5", ] [[package]] @@ -5115,13 +5093,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" +checksum = "49530408a136e16e5b486e883fbb6ba058e8e4e8ae6621a77b048b314336e629" dependencies = [ - "aho-corasick 1.0.3", + "aho-corasick 1.0.4", "memchr", - "regex-syntax 0.7.4", + "regex-syntax 0.7.5", ] [[package]] @@ -5132,9 +5110,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.4" +version = "0.7.5" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" +checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" [[package]] name = "regress" @@ -5152,7 +5130,7 @@ version = "0.11.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" dependencies = [ - "base64 0.21.2", + "base64 0.21.3", "bytes", "encoding_rs", "futures-core", @@ -5528,7 +5506,7 @@ dependencies = [ "reth-primitives", "reth-rlp", "secp256k1", - "sha2 0.10.7", + "sha2", "sha3", "thiserror", "tokio", @@ -5774,7 +5752,7 @@ dependencies = [ "reth-rlp", "reth-rpc-types", "revm-primitives", - "sha2 0.10.7", + "sha2", "thiserror", "tokio", "tokio-stream", @@ -5817,7 +5795,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "sha2 0.10.7", + "sha2", "strum 0.25.0", "sucds", "tempfile", @@ -6236,7 +6214,7 @@ dependencies = [ "revm-primitives", "ripemd", "secp256k1", - "sha2 0.10.7", + "sha2", "sha3", "substrate-bn", ] @@ -6248,7 +6226,7 @@ source = "git+https://github.com/bluealloy/revm/?branch=release/v25#88337924f4d1 dependencies = [ "arbitrary", "auto_impl", - "bitvec 1.0.1", + "bitvec", "bytes", "derive_more", "enumn", @@ -6310,9 +6288,9 @@ dependencies = [ [[package]] name = "rlimit" -version = "0.9.1" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8a29d87a652dc4d43c586328706bb5cdff211f3f39a530f240b53f7221dab8e" +checksum = "3560f70f30a0f16d11d01ed078a07740fe6b489667abc7c7b029155d9f21c3d8" dependencies = [ "libc", ] @@ -6427,9 +6405,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.8" +version = "0.38.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ed4fa021d81c8392ce04db050a3da9a60299050b7ae1cf482d862b54a7218f" +checksum = "9bfe0f2582b4931a45d1fa608f8a8722e8b3c7ac54dd6d5f3b3212791fedef49" dependencies = [ "bitflags 2.4.0", "errno 0.3.2", @@ -6468,7 +6446,7 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ - "base64 0.21.2", + "base64 0.21.3", ] [[package]] @@ -6588,7 +6566,7 @@ dependencies = [ "hmac", "pbkdf2 0.11.0", "salsa20", - "sha2 0.10.7", + "sha2", ] [[package]] @@ -6699,9 +6677,9 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "serde" -version = "1.0.183" +version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32ac8da02677876d532745a130fc9d8e6edfa81a269b107c5b00829b91d8eb3c" +checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" dependencies = [ "serde_derive", ] @@ -6719,9 +6697,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.183" +version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aafe972d60b0b9bee71a91b92fee2d4fb3c9d7e8f6b179aa99f27203d99a4816" +checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", @@ -6873,19 +6851,6 @@ dependencies = [ "digest 0.10.7", ] -[[package]] -name = "sha2" -version = "0.9.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if", - "cpufeatures", 
- "digest 0.9.0", - "opaque-debug", -] - [[package]] name = "sha2" version = "0.10.7" @@ -6983,9 +6948,9 @@ dependencies = [ [[package]] name = "similar-asserts" -version = "1.4.2" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbf644ad016b75129f01a34a355dcb8d66a5bc803e417c7a77cc5d5ee9fa0f18" +checksum = "e041bb827d1bfca18f213411d51b665309f1afb37a04a5d1464530e13779fc0f" dependencies = [ "console", "similar", @@ -7005,9 +6970,9 @@ dependencies = [ [[package]] name = "siphasher" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de" +checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" [[package]] name = "sketches-ddsketch" @@ -7017,9 +6982,9 @@ checksum = "68a406c1882ed7f29cd5e248c9848a80e7cb6ae0fea82346d2746f2f941c07e1" [[package]] name = "slab" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ "autocfg", ] @@ -7325,7 +7290,7 @@ dependencies = [ "cfg-if", "fastrand 2.0.0", "redox_syscall 0.3.5", - "rustix 0.38.8", + "rustix 0.38.9", "windows-sys 0.48.0", ] @@ -7448,9 +7413,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.25" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fdd63d58b18d663fbdf70e049f00a22c8e42be082203be7f26589213cd75ea" +checksum = "0bb39ee79a6d8de55f48f2293a830e040392f1c5f16e336bdd1788cd0aadce07" dependencies = [ "deranged", "itoa", @@ -7469,9 +7434,9 @@ checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" -version = "0.2.11" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb71511c991639bb078fd5bf97757e03914361c48100d52878b8e52b46fb92cd" +checksum = "733d258752e9303d392b94b75230d07b0b9c489350c69b851fc6c065fde3e8f9" dependencies = [ "time-core", ] @@ -7684,7 +7649,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55ae70283aba8d2a8b411c695c437fe25b8b5e44e23e780662002fc72fb47a82" dependencies = [ "async-compression", - "base64 0.21.2", + "base64 0.21.3", "bitflags 2.4.0", "bytes", "futures-core", @@ -8001,9 +7966,9 @@ checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" [[package]] name = "unicase" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" dependencies = [ "version_check", ] @@ -8331,6 +8296,25 @@ dependencies = [ "windows-targets 0.48.5", ] +[[package]] +name = "windows" +version = "0.51.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca229916c5ee38c2f2bc1e9d8f04df975b4bd93f9955dc69fabb5d91270045c9" +dependencies = [ + "windows-core", + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-core" +version = "0.51.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" +dependencies = [ + "windows-targets 0.48.5", +] + [[package]] name = "windows-sys" version = "0.45.0" @@ -8465,9 
+8449,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "winnow" -version = "0.5.14" +version = "0.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d09770118a7eb1ccaf4a594a221334119a44a814fcb0d31c5b85e83e97227a97" +checksum = "7c2e3184b9c4e92ad5167ca73039d0c42476302ab603e2fec4487511f38ccefc" dependencies = [ "memchr", ] From b2a0548e4733820f2a8a0049524a124da3174536 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Sun, 27 Aug 2023 22:29:55 +0200 Subject: [PATCH 534/722] feat(net): rm TODO for `DEFAULT_DISCOVERY_PORT` (#4366) --- crates/net/eth-wire/src/builder.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/net/eth-wire/src/builder.rs b/crates/net/eth-wire/src/builder.rs index 3b5866bdeff0..6b271f72df1d 100644 --- a/crates/net/eth-wire/src/builder.rs +++ b/crates/net/eth-wire/src/builder.rs @@ -100,7 +100,6 @@ impl HelloBuilder { // TODO: proper client versioning client_version: "Ethereum/1.0.0".to_string(), capabilities: vec![EthVersion::Eth68.into()], - // TODO: default port config port: DEFAULT_DISCOVERY_PORT, id: pubkey, }, From 72b211ed4f90a27097cee351adfc209e027659c0 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 28 Aug 2023 11:00:23 +0200 Subject: [PATCH 535/722] feat(reth): remove `History` variant in `StageEnum` (#4365) --- bin/reth/src/args/stage_args.rs | 2 -- bin/reth/src/stage/drop.rs | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/bin/reth/src/args/stage_args.rs b/bin/reth/src/args/stage_args.rs index f955a5781024..997f0f687c94 100644 --- a/bin/reth/src/args/stage_args.rs +++ b/bin/reth/src/args/stage_args.rs @@ -13,8 +13,6 @@ pub enum StageEnum { Hashing, Merkle, TxLookup, - // TODO: Combine or remove `History` variant. - History, AccountHistory, StorageHistory, TotalDifficulty, diff --git a/bin/reth/src/stage/drop.rs b/bin/reth/src/stage/drop.rs index 33298d287fd5..073c14c2a087 100644 --- a/bin/reth/src/stage/drop.rs +++ b/bin/reth/src/stage/drop.rs @@ -135,7 +135,7 @@ impl Command { None, )?; } - StageEnum::History => { + StageEnum::AccountHistory | StageEnum::StorageHistory => { tx.clear::()?; tx.clear::()?; tx.put::( From 5339502f3f41102b729b261008cc09d3f3c7b5d1 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 28 Aug 2023 14:14:49 +0100 Subject: [PATCH 536/722] chore(stages): document index history stages pruning (#4379) --- crates/stages/src/stages/index_account_history.rs | 4 ++++ crates/stages/src/stages/index_storage_history.rs | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/crates/stages/src/stages/index_account_history.rs b/crates/stages/src/stages/index_account_history.rs index 14943d38a2c0..c13123d08688 100644 --- a/crates/stages/src/stages/index_account_history.rs +++ b/crates/stages/src/stages/index_account_history.rs @@ -7,6 +7,10 @@ use std::fmt::Debug; /// Stage is indexing history the account changesets generated in /// [`ExecutionStage`][crate::stages::ExecutionStage]. For more information /// on index sharding take a look at [`reth_db::tables::AccountHistory`] +/// +/// Pruning: we don't need to store and act on [`reth_primitives::PruneModes`], +/// because this stage indexes the already pruned account changesets generated by +/// [`crate::stages::ExecutionStage`]. 
#[derive(Debug)] pub struct IndexAccountHistoryStage { /// Number of blocks after which the control diff --git a/crates/stages/src/stages/index_storage_history.rs b/crates/stages/src/stages/index_storage_history.rs index 4d746817e2ac..0f077fac5874 100644 --- a/crates/stages/src/stages/index_storage_history.rs +++ b/crates/stages/src/stages/index_storage_history.rs @@ -7,6 +7,10 @@ use std::fmt::Debug; /// Stage is indexing history the account changesets generated in /// [`ExecutionStage`][crate::stages::ExecutionStage]. For more information /// on index sharding take a look at [`reth_db::tables::StorageHistory`]. +/// +/// Pruning: we don't need to store and act on [`reth_primitives::PruneModes`], +/// because this stage indexes the already pruned storage changesets generated by +/// [`crate::stages::ExecutionStage`]. #[derive(Debug)] pub struct IndexStorageHistoryStage { /// Number of blocks after which the control From 7095733449a0b21bd8c4f27ab6068c64980febcf Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 28 Aug 2023 15:42:34 +0200 Subject: [PATCH 537/722] feat(book): add documentation for ports (#4370) --- book/SUMMARY.md | 1 + book/run/ports.md | 38 ++++++++++++++++++++++++++++++++++++++ book/run/run-a-node.md | 1 + 3 files changed, 40 insertions(+) create mode 100644 book/run/ports.md diff --git a/book/SUMMARY.md b/book/SUMMARY.md index 9a8b903153a8..0bab86a8bb71 100644 --- a/book/SUMMARY.md +++ b/book/SUMMARY.md @@ -14,6 +14,7 @@ 1. [Configuring Reth](./run/config.md) 1. [Transaction types](./run/transactions.md) 1. [Pruning](./run/pruning.md) + 1. [Ports](./run/ports.md) 1. [Troubleshooting](./run/troubleshooting.md) 1. [Interacting with Reth over JSON-RPC](./jsonrpc/intro.md) 1. [eth](./jsonrpc/eth.md) diff --git a/book/run/ports.md b/book/run/ports.md new file mode 100644 index 000000000000..5239a5262c48 --- /dev/null +++ b/book/run/ports.md @@ -0,0 +1,38 @@ +# Ports + +This section provides essential information about the ports used by the system, their primary purposes, and recommendations for exposure settings. + +## Peering Ports + +- **Port:** 30303 +- **Protocol:** TCP and UDP +- **Purpose:** Peering with other nodes for synchronization of blockchain data. Nodes communicate through this port to maintain network consensus and share updated information. +- **Exposure Recommendation:** This port should be exposed to enable seamless interaction and synchronization with other nodes in the network. + +## Metrics Port + +- **Port:** 9001 +- **Protocol:** TCP +- **Purpose:** This port is designated for serving metrics related to the system's performance and operation. It allows internal monitoring and data collection for analysis. +- **Exposure Recommendation:** By default, this port should not be exposed to the public. It is intended for internal monitoring and analysis purposes. + +## HTTP RPC Port + +- **Port:** 8545 +- **Protocol:** TCP +- **Purpose:** Port 8545 provides an HTTP-based Remote Procedure Call (RPC) interface. It enables external applications to interact with the blockchain by sending requests over HTTP. +- **Exposure Recommendation:** Similar to the metrics port, exposing this port to the public is not recommended by default due to security considerations. + +## WS RPC Port + +- **Port:** 8546 +- **Protocol:** TCP +- **Purpose:** Port 8546 offers a WebSocket-based Remote Procedure Call (RPC) interface. It allows real-time communication between external applications and the blockchain. 
+- **Exposure Recommendation:** As with the HTTP RPC port, the WS RPC port should not be exposed by default for security reasons.
+
+## Engine API Port
+
+- **Port:** 8551
+- **Protocol:** TCP
+- **Purpose:** Port 8551 facilitates communication between the execution client (reth) and the consensus layer client (CL) via the authenticated Engine API, carrying fork choice updates and payload exchange.
+- **Exposure Recommendation:** This port should not be exposed to the public. It should be reserved for communication between the execution and consensus clients.
diff --git a/book/run/run-a-node.md b/book/run/run-a-node.md
index 54ffe873fa64..06a3f8f2d64f 100644
--- a/book/run/run-a-node.md
+++ b/book/run/run-a-node.md
@@ -8,6 +8,7 @@ In this chapter we'll go through a few different topics you'll encounter when ru
 1. [Configuring reth.toml](./config.md)
 1. [Transaction types](./transactions.md)
 1. [Pruning](./pruning.md)
+1. [Ports](./ports.md)
 1. [Troubleshooting](./troubleshooting.md)
 
 In the future, we also intend to support the [OP Stack](https://stack.optimism.io/docs/understand/explainer/), which will allow you to run Reth as a Layer 2 client. More there soon!

From 47683fd6e8bb7770489a183ebf4678e9b9d76956 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=A9r=C3=A9mie=20H?=
Date: Mon, 28 Aug 2023 15:52:30 +0200
Subject: [PATCH 538/722] feat(grafana): sort stages (#4373)

---
 etc/grafana/dashboards/overview.json | 31 +++++++++++++++++++++++++++-
 1 file changed, 30 insertions(+), 1 deletion(-)

diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json
index c230db758587..18d59926db54 100644
--- a/etc/grafana/dashboards/overview.json
+++ b/etc/grafana/dashboards/overview.json
@@ -249,6 +249,35 @@
       }
     ],
     "title": "Stage checkpoints",
+    "transformations": [
+      {
+        "id": "joinByField",
+        "options": {
+          "mode": "outer"
+        }
+      },
+      {
+        "id": "organize",
+        "options": {
+          "excludeByName": {},
+          "indexByName": {
+            "AccountHashing": 6,
+            "Bodies": 2,
+            "Execution": 4,
+            "Finish": 12,
+            "Headers": 0,
+            "IndexAccountHistory": 11,
+            "IndexStorageHistory": 10,
+            "MerkleExecute": 8,
+            "MerkleUnwind": 5,
+            
index 9498ea627b4b..f689ee1da816 100644 --- a/crates/primitives/src/prune/batch_sizes.rs +++ b/crates/primitives/src/prune/batch_sizes.rs @@ -57,7 +57,7 @@ impl PruneBatchSizes { Self { receipts: 250, transaction_lookup: 250, - transaction_senders: 250, + transaction_senders: 1000, account_history: 1000, storage_history: 1000, } @@ -69,7 +69,7 @@ impl PruneBatchSizes { Self { receipts: 100, transaction_lookup: 100, - transaction_senders: 100, + transaction_senders: 500, account_history: 500, storage_history: 500, } From 0f14ec4007f2cc7d5838d3a04cb531d8c8f72dc7 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 28 Aug 2023 18:38:07 +0100 Subject: [PATCH 541/722] release: v0.1.0-alpha.8 (#4386) --- Cargo.lock | 94 +++++++++++++++++++++++++++--------------------------- Cargo.toml | 2 +- 2 files changed, 48 insertions(+), 48 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2c58d61bdd99..777feca5044f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1147,7 +1147,7 @@ checksum = "67ba02a97a2bd10f4b59b25c7973101c79642302776489e030cd13cdab09ed15" [[package]] name = "codecs-derive" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "convert_case 0.6.0", "parity-scale-codec", @@ -1960,7 +1960,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "reth-db", "reth-interfaces", @@ -5170,7 +5170,7 @@ dependencies = [ [[package]] name = "reth" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "backon", "boyer-moore-magiclen", @@ -5242,7 +5242,7 @@ dependencies = [ [[package]] name = "reth-auto-seal-consensus" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "futures-util", "reth-beacon-consensus", @@ -5259,7 +5259,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "futures-core", "futures-util", @@ -5279,7 +5279,7 @@ dependencies = [ [[package]] name = "reth-beacon-consensus" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "assert_matches", "futures", @@ -5308,7 +5308,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "aquamarine", "assert_matches", @@ -5328,7 +5328,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "arbitrary", "bytes", @@ -5343,7 +5343,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "confy", "reth-discv4", @@ -5360,7 +5360,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "assert_matches", "mockall", @@ -5371,7 +5371,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "arbitrary", "assert_matches", @@ -5413,7 +5413,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "discv5", "enr", @@ -5437,7 +5437,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "async-trait", "data-encoding", @@ -5461,7 +5461,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "assert_matches", "futures", @@ -5487,7 +5487,7 @@ dependencies = [ 
[[package]] name = "reth-ecies" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "aes 0.8.3", "block-padding", @@ -5518,7 +5518,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "arbitrary", "async-trait", @@ -5553,7 +5553,7 @@ dependencies = [ [[package]] name = "reth-interfaces" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "arbitrary", "async-trait", @@ -5581,7 +5581,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "async-trait", "bytes", @@ -5600,7 +5600,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "bitflags 2.4.0", "byteorder", @@ -5620,7 +5620,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "bindgen 0.65.1", "cc", @@ -5629,7 +5629,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "futures", "metrics", @@ -5639,7 +5639,7 @@ dependencies = [ [[package]] name = "reth-metrics-derive" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "metrics", "once_cell", @@ -5653,7 +5653,7 @@ dependencies = [ [[package]] name = "reth-net-common" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "pin-project", "reth-primitives", @@ -5662,7 +5662,7 @@ dependencies = [ [[package]] name = "reth-net-nat" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "igd", "pin-project-lite", @@ -5676,7 +5676,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "aquamarine", "async-trait", @@ -5727,7 +5727,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "async-trait", "reth-discv4", @@ -5741,7 +5741,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "futures-util", "metrics", @@ -5761,7 +5761,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "arbitrary", "assert_matches", @@ -5813,7 +5813,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "auto_impl", "derive_more", @@ -5834,7 +5834,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "assert_matches", "itertools 0.11.0", @@ -5852,7 +5852,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "once_cell", "reth-consensus-common", @@ -5868,7 +5868,7 @@ dependencies = [ [[package]] name = "reth-revm-inspectors" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "boa_engine", "boa_gc", @@ -5884,7 +5884,7 @@ dependencies = [ [[package]] name = "reth-revm-primitives" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "reth-primitives", "revm", @@ -5892,7 +5892,7 @@ dependencies = [ [[package]] name = "reth-rlp" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "arrayvec", "auto_impl", @@ -5911,7 +5911,7 @@ dependencies = [ [[package]] name = "reth-rlp-derive" -version = "0.1.0-alpha.7" +version 
= "0.1.0-alpha.8" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", @@ -5920,7 +5920,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "assert_matches", "async-trait", @@ -5970,7 +5970,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "jsonrpsee", "reth-primitives", @@ -5980,7 +5980,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "async-trait", "futures", @@ -5994,7 +5994,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "hyper", "jsonrpsee", @@ -6026,7 +6026,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "assert_matches", "async-trait", @@ -6048,7 +6048,7 @@ dependencies = [ [[package]] name = "reth-rpc-types" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "itertools 0.11.0", "jsonrpsee-types", @@ -6063,7 +6063,7 @@ dependencies = [ [[package]] name = "reth-rpc-types-compat" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "reth-primitives", "reth-rlp", @@ -6072,7 +6072,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "aquamarine", "assert_matches", @@ -6109,7 +6109,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "dyn-clone", "futures-util", @@ -6123,7 +6123,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "tracing", "tracing-appender", @@ -6133,7 +6133,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "aquamarine", "assert_matches", @@ -6163,7 +6163,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" dependencies = [ "criterion", "derive_more", diff --git a/Cargo.toml b/Cargo.toml index f9da92926d0b..98c77b92ac39 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -56,7 +56,7 @@ default-members = ["bin/reth"] resolver = "2" [workspace.package] -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" edition = "2021" rust-version = "1.70" # Remember to update .clippy.toml and README.md license = "MIT OR Apache-2.0" From 863cc166a3ac2580e83b32b00fe58ce48b80a27f Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 28 Aug 2023 18:41:13 +0100 Subject: [PATCH 542/722] feat(grafana): blockchain tree canonical commit time (#4385) --- etc/grafana/dashboards/overview.json | 130 ++++++++++++++++++++------- 1 file changed, 98 insertions(+), 32 deletions(-) diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 18d59926db54..628f53d529c4 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -249,35 +249,6 @@ } ], "title": "Stage checkpoints", - "transformations": [ - { - "id": "joinByField", - "options": { - "mode": "outer" - } - }, - { - "id": "organize", - "options": { - "excludeByName": {}, - "indexByName": { - "AccountHashing": 6, - "Bodies": 2, - "Execution": 4, - "Finish": 12, - "Headers": 0, - "IndexAccountHistory": 11, - "IndexStorageHistory": 10, - "MerkleExecute": 8, - "MerkleUnwind": 5, - 
"SenderRecovery": 3, - "StorageHashing": 7, - "TotalDifficulty": 1, - "TransactionLookup": 9 - } - } - } - ], "transparent": true, "type": "bargauge" }, @@ -3961,6 +3932,101 @@ "title": "Sidechains", "type": "timeseries" }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 144 + }, + "id": 114, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "rate(reth_consensus_engine_beacon_make_canonical_committed_latency_sum{instance=~\"$instance\"}[$__rate_interval]) / rate(reth_consensus_engine_beacon_make_canonical_committed_latency_count{instance=~\"$instance\"}[$__rate_interval])", + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Canonical Commit Latency time", + "type": "timeseries" + }, { "collapsed": false, "gridPos": { @@ -5500,7 +5566,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "sum(increase(reth_rpc_server_request_latency{instance=~\"$instance\"}[$__interval])) by (quantile)", + "expr": "sum(increase(reth_rpc_server_request_latency{instance=~\"$instance\"}[$__rate_interval])) by (quantile)", "format": "time_series", "instant": false, "legendFormat": "{{quantile}}", @@ -5589,7 +5655,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "sum(increase(reth_rpc_server_call_latency{instance=~\"$instance\"}[$__interval])) by (quantile)", + "expr": "sum(increase(reth_rpc_server_call_latency{instance=~\"$instance\"}[$__rate_interval])) by (quantile)", "format": "time_series", "instant": false, "legendFormat": "{{quantile}}", @@ -5640,6 +5706,6 @@ "timezone": "", "title": "reth", "uid": "2k8BXz24x", - "version": 7, + "version": 8, "weekStart": "" } \ No newline at end of file From 55ec82bb53d998c4737e5da69d75e6350b777df4 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 29 Aug 2023 04:12:19 +0100 Subject: [PATCH 543/722] feat(book): note on pruning (#4387) --- book/run/pruning.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/book/run/pruning.md b/book/run/pruning.md index 259110c32ace..e9aa69ec5e68 100644 --- a/book/run/pruning.md +++ b/book/run/pruning.md @@ -1,7 +1,8 @@ # Pruning -> WARNING: pruning and full node are experimental features of Reth, -> and available only on `main` branch of the main repository now. 
+> Pruning and full node are new features of Reth, +> and we will be happy to hear about your experience using them either +> on [GitHub](https://github.com/paradigmxyz/reth/issues) or in the [Telegram group](https://t.me/paradigm_reth). By default, Reth runs as an archive node. Such nodes have all historical blocks and the state at each of these blocks available for querying and tracing. From b7541943b83ca850e53ed52d8b578a781088286a Mon Sep 17 00:00:00 2001 From: Bjerg Date: Tue, 29 Aug 2023 05:12:57 +0200 Subject: [PATCH 544/722] docs: clean up payload builder docs (#4380) --- crates/payload/basic/src/lib.rs | 4 +-- crates/payload/builder/src/database.rs | 2 +- crates/payload/builder/src/lib.rs | 29 +++++++++++--------- crates/payload/builder/src/service.rs | 8 +++--- crates/payload/builder/src/traits.rs | 37 +++++++++++++++----------- 5 files changed, 44 insertions(+), 36 deletions(-) diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 16be06110baa..461325c6f3ab 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -11,7 +11,7 @@ attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) ))] -//! reth basic payload job generator +//! A basic payload generator for reth. use crate::metrics::PayloadBuilderMetrics; use futures_core::ready; @@ -60,7 +60,7 @@ use tracing::{debug, trace}; mod metrics; -/// The [PayloadJobGenerator] that creates [BasicPayloadJob]s. +/// The [`PayloadJobGenerator`] that creates [`BasicPayloadJob`]s. pub struct BasicPayloadJobGenerator { /// The client that can interact with the chain. client: Client, diff --git a/crates/payload/builder/src/database.rs b/crates/payload/builder/src/database.rs index 78a25a1e07d5..c69f7bafaae6 100644 --- a/crates/payload/builder/src/database.rs +++ b/crates/payload/builder/src/database.rs @@ -10,7 +10,7 @@ use std::{ collections::{hash_map::Entry, HashMap}, }; -/// A container type that caches all [DatabaseRef] reads from an underlying [DatabaseRef]. +/// A container type that caches reads from an underlying [DatabaseRef]. /// /// This is intended to be used in conjunction with [CacheDB](reth_revm_primitives::db::CacheDB) /// during payload building which repeatedly accesses the same data. diff --git a/crates/payload/builder/src/lib.rs b/crates/payload/builder/src/lib.rs index fdc57c913b97..b1b6e8989d68 100644 --- a/crates/payload/builder/src/lib.rs +++ b/crates/payload/builder/src/lib.rs @@ -16,27 +16,30 @@ attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) ))] -//! This crate defines the abstractions to create and update payloads: -//! - [PayloadJobGenerator]: a type that knows how to create new jobs for creating payloads based -//! on [PayloadAttributes](reth_rpc_types::engine::PayloadAttributes). -//! - [PayloadJob]: a type that can yields (better) payloads over time. +//! This crate defines abstractions to create and update payloads (blocks): +//! - [`PayloadJobGenerator`]: a type that knows how to create new jobs for creating payloads based +//! on [`PayloadAttributes`](reth_rpc_types::engine::PayloadAttributes). +//! - [`PayloadJob`]: a type that yields (better) payloads over time. //! -//! This crate comes with the generic [PayloadBuilderService] responsible for managing payload jobs. +//! This crate comes with the generic [`PayloadBuilderService`] responsible for managing payload +//! jobs. //! //! ## Node integration //! -//! 
In a standard node the [PayloadBuilderService] sits downstream of the engine API or rather the -//! component that handles requests from the Beacon Node like `engine_forkchoiceUpdatedV1`. +//! In a standard node the [`PayloadBuilderService`] sits downstream of the engine API, or rather +//! the component that handles requests from the consensus layer like `engine_forkchoiceUpdatedV1`. +//! //! Payload building is enabled if the forkchoice update request contains payload attributes. -//! See also -//! If the forkchoice update request is VALID and contains payload attributes the -//! [PayloadBuilderService] will create a new [PayloadJob] via the [PayloadJobGenerator] and start -//! polling it until the payload is requested by the CL and the payload job is resolved: -//! [PayloadJob::resolve] +//! +//! See also [the engine API docs](https://github.com/ethereum/execution-apis/blob/6709c2a795b707202e93c4f2867fa0bf2640a84f/src/engine/shanghai.md#engine_forkchoiceupdatedv2) +//! If the forkchoice update request is `VALID` and contains payload attributes the +//! [`PayloadBuilderService`] will create a new [`PayloadJob`] via the given [`PayloadJobGenerator`] +//! and start polling it until the payload is requested by the CL and the payload job is resolved +//! (see [`PayloadJob::resolve`]). //! //! ## Example //! -//! A simple example of a [PayloadJobGenerator] that creates empty blocks: +//! A simple example of a [`PayloadJobGenerator`] that creates empty blocks: //! //! ``` //! use std::future::Future; diff --git a/crates/payload/builder/src/service.rs b/crates/payload/builder/src/service.rs index 67d6bbbaf733..278beef6e9c4 100644 --- a/crates/payload/builder/src/service.rs +++ b/crates/payload/builder/src/service.rs @@ -124,10 +124,10 @@ impl PayloadBuilderHandle { /// /// This type is an endless future that manages the building of payloads. /// -/// It tracks active payloads and their build jobs that run in the worker pool. +/// It tracks active payloads and their build jobs that run in a worker pool. /// -/// By design, this type relies entirely on the [PayloadJobGenerator] to create new payloads and -/// does know nothing about how to build them, itt just drives the payload jobs. +/// By design, this type relies entirely on the [`PayloadJobGenerator`] to create new payloads and +/// does know nothing about how to build them, it just drives their jobs to completion. #[must_use = "futures do nothing unless you `.await` or poll them"] pub struct PayloadBuilderService where @@ -141,7 +141,7 @@ where _service_tx: mpsc::UnboundedSender, /// Receiver half of the command channel. command_rx: UnboundedReceiverStream, - /// metrics for the payload builder service + /// Metrics for the payload builder service metrics: PayloadBuilderServiceMetrics, } diff --git a/crates/payload/builder/src/traits.rs b/crates/payload/builder/src/traits.rs index e855a20c8d84..4fddbebecb93 100644 --- a/crates/payload/builder/src/traits.rs +++ b/crates/payload/builder/src/traits.rs @@ -5,14 +5,15 @@ use std::{future::Future, sync::Arc}; /// A type that can build a payload. /// -/// This type is a Future that resolves when the job is done (e.g. timed out) or it failed. It's not -/// supposed to return the best payload built when it resolves instead [PayloadJob::best_payload] -/// should be used for that. +/// This type is a [`Future`] that resolves when the job is done (e.g. complete, timed out) or it +/// failed. 
It's not supposed to return the best payload built when it resolves, instead
+/// [`PayloadJob::best_payload`] should be used for that.
 ///
-/// A PayloadJob must always be prepared to return the best payload built so far to make there's a
-/// valid payload to deliver to the CL, so it does not miss a slot, even if the payload is empty.
+/// A `PayloadJob` must always be prepared to return the best payload built so far to ensure there
+/// is a valid payload to deliver to the CL, so it does not miss a slot, even if the payload is
+/// empty.
 ///
-/// Note: A PayloadJob need to be cancel safe because it might be dropped after the CL has requested the payload via `engine_getPayloadV1`, See also 
+/// Note: A `PayloadJob` needs to be cancel safe because it might be dropped after the CL has requested the payload via `engine_getPayloadV1` (see also [engine API docs](https://github.com/ethereum/execution-apis/blob/6709c2a795b707202e93c4f2867fa0bf2640a84f/src/engine/paris.md#engine_getpayloadv1))
 pub trait PayloadJob: Future> + Send + Sync {
     /// Represents the future that resolves the block that's returned to the CL.
     type ResolvePayloadFuture: Future, PayloadBuilderError>>
@@ -29,9 +30,11 @@ pub trait PayloadJob: Future> + Send +
     ///
     /// This is invoked on [`engine_getPayloadV2`](https://github.com/ethereum/execution-apis/blob/main/src/engine/shanghai.md#engine_getpayloadv2) and [`engine_getPayloadV1`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#engine_getpayloadv1).
     ///
-    /// The timeout for returning the payload to the CL is 1s.
-    /// Ideally, future returned by this method must resolve in under 1s. Ideally this is the best
-    /// payload built so far or an empty block without transactions if nothing has been built yet.
+    /// The timeout for returning the payload to the CL is 1s, thus the future returned should
+    /// resolve in under 1 second.
+    ///
+    /// Ideally this is the best payload built so far, or an empty block without transactions, if
+    /// nothing has been built yet.
     ///
     /// According to the spec:
     /// > Client software MAY stop the corresponding build process after serving this call.
@@ -39,9 +42,9 @@
     /// It is at the discretion of the implementer whether the build job should be kept alive or
     /// terminated.
     ///
-    /// If this returns [KeepPayloadJobAlive::Yes] then the future the [PayloadJob] will be polled
-    /// once more, if this returns [KeepPayloadJobAlive::No] then the [PayloadJob] will be dropped
-    /// after this call
+    /// If this returns [`KeepPayloadJobAlive::Yes`], then the [`PayloadJob`] will be polled
+    /// once more. If this returns [`KeepPayloadJobAlive::No`], then the [`PayloadJob`] will be
+    /// dropped after this call.
     fn resolve(&mut self) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive);
 }
@@ -59,15 +62,17 @@ pub enum KeepPayloadJobAlive {
 pub trait PayloadJobGenerator: Send + Sync {
     /// The type that manages the lifecycle of a payload.
     ///
-    /// This type is a Stream that yields better payloads.
+    /// This type is a future that yields better payloads.
     type Job: PayloadJob;
 
-    /// Creates the initial payload and a new [PayloadJob] that yields better payloads.
+    /// Creates the initial payload and a new [`PayloadJob`] that yields better payloads over time.
     ///
     /// This is called when the CL requests a new payload job via a fork choice update.
    ///
-    /// Note: this is expected to build a new (empty) payload without transactions, so it can be
-    /// returned directly. 
when asked for + /// # Note + /// + /// This is expected to initially build a new (empty) payload without transactions, so it can be + /// returned directly. fn new_payload_job( &self, attr: PayloadBuilderAttributes, From 03afe376b85cb424270a48eb3e5a97db8c5f2af4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Gonz=C3=A1lez?= Date: Tue, 29 Aug 2023 05:16:10 +0200 Subject: [PATCH 545/722] fix(txpool): respect propagate setting in the full tx stream (#4362) Co-authored-by: Matthias Seitz --- crates/transaction-pool/src/lib.rs | 16 ++--- crates/transaction-pool/src/noop.rs | 11 ++- crates/transaction-pool/src/pool/mod.rs | 72 +++++++++++-------- crates/transaction-pool/src/traits.rs | 32 +++++---- crates/transaction-pool/tests/it/listeners.rs | 29 +++++++- 5 files changed, 106 insertions(+), 54 deletions(-) diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 42f19157a822..73db0fd23858 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -181,8 +181,8 @@ pub use crate::{ traits::{ AllPoolTransactions, BestTransactions, BlockInfo, CanonicalStateUpdate, ChangedAccount, EthBlobTransactionSidecar, EthPoolTransaction, EthPooledTransaction, - GetPooledTransactionLimit, NewTransactionEvent, PendingTransactionListenerKind, PoolSize, - PoolTransaction, PropagateKind, PropagatedTransactions, TransactionOrigin, TransactionPool, + GetPooledTransactionLimit, NewTransactionEvent, PoolSize, PoolTransaction, PropagateKind, + PropagatedTransactions, TransactionListenerKind, TransactionOrigin, TransactionPool, TransactionPoolExt, }, validate::{ @@ -374,15 +374,15 @@ where self.pool.add_all_transactions_event_listener() } - fn pending_transactions_listener_for( - &self, - kind: PendingTransactionListenerKind, - ) -> Receiver { + fn pending_transactions_listener_for(&self, kind: TransactionListenerKind) -> Receiver { self.pool.add_pending_listener(kind) } - fn new_transactions_listener(&self) -> Receiver> { - self.pool.add_new_transaction_listener() + fn new_transactions_listener_for( + &self, + kind: TransactionListenerKind, + ) -> Receiver> { + self.pool.add_new_transaction_listener(kind) } fn pooled_transaction_hashes(&self) -> Vec { diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index fe59bd93ac70..abbbddf8572b 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -6,7 +6,7 @@ use crate::{ blobstore::BlobStoreError, error::PoolError, - traits::{GetPooledTransactionLimit, PendingTransactionListenerKind}, + traits::{GetPooledTransactionLimit, TransactionListenerKind}, validate::ValidTransaction, AllPoolTransactions, AllTransactionsEvents, BestTransactions, BlockInfo, EthPooledTransaction, NewTransactionEvent, PoolResult, PoolSize, PoolTransaction, PropagatedTransactions, @@ -83,7 +83,7 @@ impl TransactionPool for NoopTransactionPool { fn pending_transactions_listener_for( &self, - _kind: PendingTransactionListenerKind, + _kind: TransactionListenerKind, ) -> Receiver { mpsc::channel(1).1 } @@ -92,6 +92,13 @@ impl TransactionPool for NoopTransactionPool { mpsc::channel(1).1 } + fn new_transactions_listener_for( + &self, + _kind: TransactionListenerKind, + ) -> Receiver> { + mpsc::channel(1).1 + } + fn pooled_transaction_hashes(&self) -> Vec { vec![] } diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 4efb0234f066..8403449945c7 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ 
b/crates/transaction-pool/src/pool/mod.rs
@@ -1,6 +1,6 @@
 //! Transaction Pool internals.
 //!
-//! Incoming transactions validated are before they enter the pool first. The validation outcome can
+//! Incoming transactions are validated before they enter the pool. The validation outcome can
 //! have 3 states:
 //!
 //! 1. Transaction can _never_ be valid
@@ -103,7 +103,7 @@ use crate::{
     blobstore::BlobStore,
     metrics::BlobStoreMetrics,
     pool::txpool::UpdateOutcome,
-    traits::{GetPooledTransactionLimit, PendingTransactionListenerKind},
+    traits::{GetPooledTransactionLimit, TransactionListenerKind},
     validate::ValidTransaction,
 };
 pub use listener::{AllTransactionsEvents, TransactionEvents};
@@ -137,7 +137,7 @@ where
     /// Listeners for new pending transactions.
     pending_transaction_listener: Mutex>,
     /// Listeners for new transactions added to the pool.
-    transaction_listener: Mutex>>>,
+    transaction_listener: Mutex>>,
     /// Metrics for the blob store
     blob_store_metrics: BlobStoreMetrics,
 }
@@ -222,10 +222,7 @@ where
     /// Adds a new transaction listener to the pool that gets notified about every new _pending_
     /// transaction inserted into the pool
-    pub fn add_pending_listener(
-        &self,
-        kind: PendingTransactionListenerKind,
-    ) -> mpsc::Receiver {
+    pub fn add_pending_listener(&self, kind: TransactionListenerKind) -> mpsc::Receiver {
         const TX_LISTENER_BUFFER_SIZE: usize = 2048;
         let (sender, rx) = mpsc::channel(TX_LISTENER_BUFFER_SIZE);
         let listener = PendingTransactionListener { sender, kind };
@@ -236,10 +233,12 @@ where
     /// Adds a new transaction listener to the pool that gets notified about every new transaction.
     pub fn add_new_transaction_listener(
         &self,
+        kind: TransactionListenerKind,
     ) -> mpsc::Receiver> {
         const TX_LISTENER_BUFFER_SIZE: usize = 1024;
-        let (tx, rx) = mpsc::channel(TX_LISTENER_BUFFER_SIZE);
-        self.transaction_listener.lock().push(tx);
+        let (sender, rx) = mpsc::channel(TX_LISTENER_BUFFER_SIZE);
+        let listener = TransactionListener { sender, kind };
+        self.transaction_listener.lock().push(listener);
         rx
     }
@@ -517,18 +516,25 @@ where
     /// Notify all listeners about a newly inserted pending transaction.
     fn on_new_transaction(&self, event: NewTransactionEvent) {
         let mut transaction_listeners = self.transaction_listener.lock();
+        transaction_listeners.retain_mut(|listener| {
+            if listener.kind.is_propagate_only() && !event.transaction.propagate {
+                // skip this transaction for propagate-only listeners (e.g. the network),
+                // but keep the listener as long as its channel is still open
+                return !listener.sender.is_closed()
+            }
-        transaction_listeners.retain_mut(|listener| match listener.try_send(event.clone()) {
-            Ok(()) => true,
-            Err(err) => {
-                if matches!(err, mpsc::error::TrySendError::Full(_)) {
-                    debug!(
-                        target: "txpool",
-                        "skipping transaction on full transaction listener",
-                    );
-                    true
-                } else {
-                    false
+            match listener.sender.try_send(event.clone()) {
+                Ok(()) => true,
+                Err(err) => {
+                    if matches!(err, mpsc::error::TrySendError::Full(_)) {
+                        debug!(
+                            target: "txpool",
+                            "skipping transaction on full transaction listener",
+                        );
+                        true
+                    } else {
+                        false
+                    }
                 }
             }
         });
@@ -742,7 +748,15 @@ impl fmt::Debug for PoolInner {
 struct PendingTransactionListener {
     sender: mpsc::Sender,
     /// Whether to include transactions that should not be propagated over the network.
-    kind: PendingTransactionListenerKind,
+    kind: TransactionListenerKind,
+}
+
+/// An active listener for new transactions.
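An aside on the notification loop in the `on_new_transaction` hunk above: `retain_mut` folds delivery and cleanup into one pass. A full channel drops the event but keeps the listener, a closed channel removes the listener, and propagate-only listeners skip non-propagatable transactions while staying registered. A minimal, self-contained sketch of that pattern, with a hypothetical `Event` type standing in for `NewTransactionEvent` and tokio channels assumed:

```rust
use tokio::sync::mpsc;

// Hypothetical stand-in for the pool's `NewTransactionEvent`.
#[derive(Clone)]
struct Event {
    propagate: bool,
}

struct Listener {
    sender: mpsc::Sender<Event>,
    propagate_only: bool,
}

/// Notifies all listeners about an event, pruning the ones whose receiver was dropped.
fn notify(listeners: &mut Vec<Listener>, event: &Event) {
    listeners.retain_mut(|listener| {
        // Propagate-only listeners never see non-propagatable events, but they
        // stay registered as long as their receiver is still alive.
        if listener.propagate_only && !event.propagate {
            return !listener.sender.is_closed()
        }
        match listener.sender.try_send(event.clone()) {
            Ok(()) => true,
            // A full buffer drops this event for that listener but keeps it.
            Err(mpsc::error::TrySendError::Full(_)) => true,
            // A closed channel means the receiver is gone; remove the listener.
            Err(mpsc::error::TrySendError::Closed(_)) => false,
        }
    });
}
```

Using `try_send` keeps the pool's hot path non-blocking: a slow consumer loses events rather than stalling transaction insertion.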
+#[derive(Debug)] +struct TransactionListener { + sender: mpsc::Sender>, + /// Whether to include transactions that should not be propagated over the network. + kind: TransactionListenerKind, } /// Tracks an added transaction and all graph changes caused by adding it. @@ -754,19 +768,19 @@ pub struct AddedPendingTransaction { replaced: Option>>, /// transactions promoted to the pending queue promoted: Vec>>, - /// transaction that failed and became discarded + /// transactions that failed and became discarded discarded: Vec>>, } impl AddedPendingTransaction { /// Returns all transactions that were promoted to the pending pool and adhere to the given - /// [PendingTransactionListenerKind]. + /// [TransactionListenerKind]. /// - /// If the kind is [PendingTransactionListenerKind::PropagateOnly], then only transactions that + /// If the kind is [TransactionListenerKind::PropagateOnly], then only transactions that /// are allowed to be propagated are returned. pub(crate) fn pending_transactions( &self, - kind: PendingTransactionListenerKind, + kind: TransactionListenerKind, ) -> impl Iterator + '_ { let iter = std::iter::once(&self.transaction).chain(self.promoted.iter()); PendingTransactionIter { kind, iter } @@ -779,7 +793,7 @@ impl AddedPendingTransaction { } pub(crate) struct PendingTransactionIter { - kind: PendingTransactionListenerKind, + kind: TransactionListenerKind, iter: Iter, } @@ -876,13 +890,13 @@ pub(crate) struct OnNewCanonicalStateOutcome { impl OnNewCanonicalStateOutcome { /// Returns all transactions that were promoted to the pending pool and adhere to the given - /// [PendingTransactionListenerKind]. + /// [TransactionListenerKind]. /// - /// If the kind is [PendingTransactionListenerKind::PropagateOnly], then only transactions that + /// If the kind is [TransactionListenerKind::PropagateOnly], then only transactions that /// are allowed to be propagated are returned. pub(crate) fn pending_transactions( &self, - kind: PendingTransactionListenerKind, + kind: TransactionListenerKind, ) -> impl Iterator + '_ { let iter = self.promoted.iter(); PendingTransactionIter { kind, iter } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 7ad245b7fef4..53c7d9f711c1 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -118,18 +118,24 @@ pub trait TransactionPool: Send + Sync + Clone { /// /// Consumer: RPC/P2P fn pending_transactions_listener(&self) -> Receiver { - self.pending_transactions_listener_for(PendingTransactionListenerKind::PropagateOnly) + self.pending_transactions_listener_for(TransactionListenerKind::PropagateOnly) } /// Returns a new Stream that yields transactions hashes for new __pending__ transactions - /// inserted into the pool depending on the given [PendingTransactionListenerKind] argument. - fn pending_transactions_listener_for( - &self, - kind: PendingTransactionListenerKind, - ) -> Receiver; + /// inserted into the pool depending on the given [TransactionListenerKind] argument. + fn pending_transactions_listener_for(&self, kind: TransactionListenerKind) -> Receiver; /// Returns a new stream that yields new valid transactions added to the pool. - fn new_transactions_listener(&self) -> Receiver>; + fn new_transactions_listener(&self) -> Receiver> { + self.new_transactions_listener_for(TransactionListenerKind::PropagateOnly) + } + + /// Returns a new stream that yields new valid transactions added to the pool + /// depending on the given [TransactionListenerKind] argument. 
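The default methods added just above keep the trait backwards compatible: the parameterless listener constructor delegates to the new `*_for` variant with `PropagateOnly`, so existing callers keep their behavior and only implementors need to add the new method. A reduced sketch of that shape (hypothetical trait, `std` channels for brevity):

```rust
use std::sync::mpsc;

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum TransactionListenerKind {
    All,
    PropagateOnly,
}

// Hypothetical, reduced pool trait: only the `*_for` method is required,
// and the old entry point keeps its propagate-only behavior as a default.
trait PoolListeners {
    fn new_transactions_listener_for(
        &self,
        kind: TransactionListenerKind,
    ) -> mpsc::Receiver<u64>;

    fn new_transactions_listener(&self) -> mpsc::Receiver<u64> {
        self.new_transactions_listener_for(TransactionListenerKind::PropagateOnly)
    }
}
```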
+ fn new_transactions_listener_for( + &self, + kind: TransactionListenerKind, + ) -> Receiver>; /// Returns a new Stream that yields new transactions added to the pending sub-pool. /// @@ -138,7 +144,10 @@ pub trait TransactionPool: Send + Sync + Clone { fn new_pending_pool_transactions_listener( &self, ) -> NewSubpoolTransactionStream { - NewSubpoolTransactionStream::new(self.new_transactions_listener(), SubPool::Pending) + NewSubpoolTransactionStream::new( + self.new_transactions_listener_for(TransactionListenerKind::PropagateOnly), + SubPool::Pending, + ) } /// Returns a new Stream that yields new transactions added to the basefee sub-pool. @@ -326,12 +335,11 @@ pub trait TransactionPoolExt: TransactionPool { fn delete_blobs(&self, txs: Vec); } -/// Determines what kind of new pending transactions should be emitted by a stream of pending -/// transactions. +/// Determines what kind of new transactions should be emitted by a stream of transactions. /// /// This gives control whether to include transactions that are allowed to be propagated. #[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum PendingTransactionListenerKind { +pub enum TransactionListenerKind { /// Any new pending transactions All, /// Only transactions that are allowed to be propagated. @@ -340,7 +348,7 @@ pub enum PendingTransactionListenerKind { PropagateOnly, } -impl PendingTransactionListenerKind { +impl TransactionListenerKind { /// Returns true if we're only interested in transactions that are allowed to be propagated. #[inline] pub fn is_propagate_only(&self) -> bool { diff --git a/crates/transaction-pool/tests/it/listeners.rs b/crates/transaction-pool/tests/it/listeners.rs index 6503dedc2bf3..e4a2c068e87a 100644 --- a/crates/transaction-pool/tests/it/listeners.rs +++ b/crates/transaction-pool/tests/it/listeners.rs @@ -2,7 +2,7 @@ use assert_matches::assert_matches; use reth_transaction_pool::{ noop::MockTransactionValidator, test_utils::{testing_pool, testing_pool_with_validator, MockTransactionFactory}, - FullTransactionEvent, PendingTransactionListenerKind, TransactionEvent, TransactionOrigin, + FullTransactionEvent, TransactionEvent, TransactionListenerKind, TransactionOrigin, TransactionPool, }; use std::{future::poll_fn, task::Poll}; @@ -48,8 +48,7 @@ async fn txpool_listener_propagate_only() { let transaction = mock_tx_factory.create_eip1559(); let expected = *transaction.hash(); let mut listener_network = txpool.pending_transactions_listener(); - let mut listener_all = - txpool.pending_transactions_listener_for(PendingTransactionListenerKind::All); + let mut listener_all = txpool.pending_transactions_listener_for(TransactionListenerKind::All); let result = txpool.add_transaction(TransactionOrigin::Local, transaction.transaction.clone()).await; assert!(result.is_ok()); @@ -64,3 +63,27 @@ async fn txpool_listener_propagate_only() { }) .await; } + +#[tokio::test(flavor = "multi_thread")] +async fn txpool_listener_new_propagate_only() { + let txpool = testing_pool_with_validator(MockTransactionValidator::no_propagate_local()); + let mut mock_tx_factory = MockTransactionFactory::default(); + let transaction = mock_tx_factory.create_eip1559(); + let expected = *transaction.hash(); + let mut listener_network = txpool.new_transactions_listener(); + let mut listener_all = txpool.new_transactions_listener_for(TransactionListenerKind::All); + let result = + txpool.add_transaction(TransactionOrigin::Local, transaction.transaction.clone()).await; + assert!(result.is_ok()); + + let inserted = 
listener_all.recv().await.unwrap();
+    let actual = *inserted.transaction.hash();
+    assert_eq!(actual, expected);
+
+    poll_fn(|cx| {
+        // no propagation
+        assert!(listener_network.poll_recv(cx).is_pending());
+        Poll::Ready(())
+    })
+    .await;
+}

From 2fc574f32999fbfd966ad137c9b3cfc190e48e48 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Tue, 29 Aug 2023 09:25:34 -0700
Subject: [PATCH 546/722] feat: validate blobs (#4388)

---
 crates/primitives/src/transaction/mod.rs    | 32 +++++++++++++++++++
 crates/rpc/rpc/src/eth/error.rs             | 10 ++++++-
 crates/transaction-pool/src/error.rs        |  9 +++++-
 crates/transaction-pool/src/traits.rs       | 11 +++++--
 crates/transaction-pool/src/validate/eth.rs | 24 ++++++++++----
 5 files changed, 77 insertions(+), 9 deletions(-)

diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs
index bce71de9a0f0..ae420d4f7bce 100644
--- a/crates/primitives/src/transaction/mod.rs
+++ b/crates/primitives/src/transaction/mod.rs
@@ -414,6 +414,38 @@ impl Transaction {
     pub fn is_eip4844(&self) -> bool {
         matches!(self, Transaction::Eip4844(_))
     }
+
+    /// Returns the [TxLegacy] variant if the transaction is a legacy transaction.
+    pub fn as_legacy(&self) -> Option<&TxLegacy> {
+        match self {
+            Transaction::Legacy(tx) => Some(tx),
+            _ => None,
+        }
+    }
+
+    /// Returns the [TxEip2930] variant if the transaction is an EIP-2930 transaction.
+    pub fn as_eip2930(&self) -> Option<&TxEip2930> {
+        match self {
+            Transaction::Eip2930(tx) => Some(tx),
+            _ => None,
+        }
+    }
+
+    /// Returns the [TxEip1559] variant if the transaction is an EIP-1559 transaction.
+    pub fn as_eip1559(&self) -> Option<&TxEip1559> {
+        match self {
+            Transaction::Eip1559(tx) => Some(tx),
+            _ => None,
+        }
+    }
+
+    /// Returns the [TxEip4844] variant if the transaction is an EIP-4844 transaction.
+    pub fn as_eip4844(&self) -> Option<&TxEip4844> {
+        match self {
+            Transaction::Eip4844(tx) => Some(tx),
+            _ => None,
+        }
+    }
 }

 impl Compact for Transaction {
diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs
index 044f5621266c..ea281c431432 100644
--- a/crates/rpc/rpc/src/eth/error.rs
+++ b/crates/rpc/rpc/src/eth/error.rs
@@ -5,7 +5,9 @@ use jsonrpsee::{
     core::Error as RpcError,
     types::{error::CALL_EXECUTION_FAILED_CODE, ErrorObject},
 };
-use reth_primitives::{abi::decode_revert_reason, Address, Bytes, U256};
+use reth_primitives::{
+    abi::decode_revert_reason, Address, BlobTransactionValidationError, Bytes, U256,
+};
 use reth_revm::tracing::js::JsInspectorError;
 use reth_rpc_types::{error::EthRpcErrorCode, BlockError, CallInputError};
 use reth_transaction_pool::error::{InvalidPoolTransactionError, PoolError, PoolTransactionError};
@@ -473,6 +475,9 @@ pub enum RpcPoolError {
     /// Unable to find the blob for an EIP4844 transaction
     #[error("blob not found for EIP4844 transaction")]
     MissingEip4844Blob,
+    /// Thrown if validating the blob sidecar for the transaction failed.
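The `as_*` accessors introduced above all share one match-to-`Option` shape, letting callers test for and borrow a specific variant without panicking. A compact sketch of the pattern on stand-in types:

```rust
// Stand-ins for the real transaction variant types.
struct TxLegacy {
    gas_price: u128,
}
struct TxEip1559 {
    max_fee_per_gas: u128,
}

enum Transaction {
    Legacy(TxLegacy),
    Eip1559(TxEip1559),
}

impl Transaction {
    /// Returns the legacy variant, if this is a legacy transaction.
    fn as_legacy(&self) -> Option<&TxLegacy> {
        match self {
            Transaction::Legacy(tx) => Some(tx),
            _ => None,
        }
    }

    /// Returns the EIP-1559 variant, if this is an EIP-1559 transaction.
    fn as_eip1559(&self) -> Option<&TxEip1559> {
        match self {
            Transaction::Eip1559(tx) => Some(tx),
            _ => None,
        }
    }
}

fn main() {
    let tx = Transaction::Eip1559(TxEip1559 { max_fee_per_gas: 42 });
    // Non-matching accessors return None instead of panicking.
    assert!(tx.as_legacy().is_none());
    assert_eq!(tx.as_eip1559().map(|t| t.max_fee_per_gas), Some(42));
}
```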
+ #[error(transparent)] + InvalidEip4844Blob(BlobTransactionValidationError), #[error(transparent)] Other(Box), } @@ -512,6 +517,9 @@ impl From for RpcPoolError { InvalidPoolTransactionError::Underpriced => RpcPoolError::Underpriced, InvalidPoolTransactionError::Other(err) => RpcPoolError::PoolTransactionError(err), InvalidPoolTransactionError::MissingEip4844Blob => RpcPoolError::MissingEip4844Blob, + InvalidPoolTransactionError::InvalidEip4844Blob(err) => { + RpcPoolError::InvalidEip4844Blob(err) + } } } } diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs index fd2ca0bf7e46..658f10d324b0 100644 --- a/crates/transaction-pool/src/error.rs +++ b/crates/transaction-pool/src/error.rs @@ -1,6 +1,6 @@ //! Transaction pool errors -use reth_primitives::{Address, InvalidTransactionError, TxHash}; +use reth_primitives::{Address, BlobTransactionValidationError, InvalidTransactionError, TxHash}; /// Transaction pool result type. pub type PoolResult = Result; @@ -141,6 +141,9 @@ pub enum InvalidPoolTransactionError { /// Thrown if we're unable to find the blob for a transaction that was previously extracted #[error("blob not found for EIP4844 transaction")] MissingEip4844Blob, + /// Thrown if validating the blob sidecar for the transaction failed. + #[error(transparent)] + InvalidEip4844Blob(BlobTransactionValidationError), /// Any other error that occurred while inserting/validating that is transaction specific #[error("{0:?}")] Other(Box), @@ -203,6 +206,10 @@ impl InvalidPoolTransactionError { // find the previously extracted blob false } + InvalidPoolTransactionError::InvalidEip4844Blob(_) => { + // This is only reachable when the blob is invalid + true + } } } } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 53c7d9f711c1..7b94d354db84 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -9,8 +9,8 @@ use reth_primitives::{ Address, BlobTransactionSidecar, BlobTransactionValidationError, FromRecoveredPooledTransaction, FromRecoveredTransaction, IntoRecoveredTransaction, PeerId, PooledTransactionsElement, PooledTransactionsElementEcRecovered, SealedBlock, Transaction, - TransactionKind, TransactionSignedEcRecovered, TxHash, EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, - H256, U256, + TransactionKind, TransactionSignedEcRecovered, TxEip4844, TxHash, EIP1559_TX_TYPE_ID, + EIP4844_TX_TYPE_ID, H256, U256, }; use reth_rlp::Encodable; use std::{ @@ -656,6 +656,9 @@ pub trait EthPoolTransaction: PoolTransaction { /// Extracts the blob sidecar from the transaction. fn take_blob(&mut self) -> EthBlobTransactionSidecar; + /// Returns the transaction as EIP-4844 transaction if it is one. + fn as_eip4844(&self) -> Option<&TxEip4844>; + /// Validates the blob sidecar of the transaction with the given settings. 
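For orientation, the sidecar handling that the validator change later in this patch implements reduces to the flow below. All types here are simplified stand-ins and no real KZG verification is performed:

```rust
// Simplified stand-ins; no real KZG verification happens here.
struct Sidecar;
struct TxEip4844;

enum SidecarState {
    None,             // not a blob transaction
    Missing,          // blob transaction, but the sidecar could not be found
    Present(Sidecar), // blob transaction with its sidecar attached
}

enum Outcome {
    Valid { sidecar: Option<Sidecar> },
    Invalid(&'static str),
}

fn validate_blob(_tx: &TxEip4844, _sidecar: &Sidecar) -> Result<(), &'static str> {
    Ok(()) // the real code verifies KZG commitments and proofs here
}

fn validate(tx_4844: Option<&TxEip4844>, sidecar: SidecarState) -> Outcome {
    let mut maybe_sidecar = None;
    match sidecar {
        SidecarState::None => {}
        SidecarState::Missing => return Outcome::Invalid("blob sidecar missing"),
        SidecarState::Present(blob) => match tx_4844 {
            Some(tx) => match validate_blob(tx, &blob) {
                // keep the extracted sidecar only if validation passes
                Ok(()) => maybe_sidecar = Some(blob),
                Err(err) => return Outcome::Invalid(err),
            },
            // a sidecar on a non-EIP-4844 transaction should not happen
            None => return Outcome::Invalid("unexpected sidecar"),
        },
    }
    Outcome::Valid { sidecar: maybe_sidecar }
}
```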
fn validate_blob( &self, @@ -845,6 +848,10 @@ impl EthPoolTransaction for EthPooledTransaction { } } + fn as_eip4844(&self) -> Option<&TxEip4844> { + self.transaction.as_eip4844() + } + fn validate_blob( &self, sidecar: &BlobTransactionSidecar, diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index a61fec1ec7f5..785fd0f60298 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -74,7 +74,6 @@ pub(crate) struct EthTransactionValidatorInner { /// Toggle to determine if a local transaction should be propagated propagate_local_transactions: bool, /// Stores the setup and parameters needed for validating KZG proofs. - #[allow(unused)] kzg_settings: Arc, /// Marker for the transaction type _marker: PhantomData, @@ -198,7 +197,7 @@ where } } - let mut blob_sidecar = None; + let mut maybe_blob_sidecar = None; // blob tx checks if transaction.is_eip4844() { @@ -230,8 +229,23 @@ where } } EthBlobTransactionSidecar::Present(blob) => { - //TODO(mattsse): verify the blob - blob_sidecar = Some(blob); + if let Some(eip4844) = transaction.as_eip4844() { + // validate the blob + if let Err(err) = eip4844.validate_blob(&blob, &self.kzg_settings) { + return TransactionValidationOutcome::Invalid( + transaction, + InvalidPoolTransactionError::InvalidEip4844Blob(err), + ) + } + // store the extracted blob + maybe_blob_sidecar = Some(blob); + } else { + // this should not happen + return TransactionValidationOutcome::Invalid( + transaction, + InvalidTransactionError::TxTypeNotSupported.into(), + ) + } } } } @@ -281,7 +295,7 @@ where TransactionValidationOutcome::Valid { balance: account.balance, state_nonce: account.nonce, - transaction: ValidTransaction::new(transaction, blob_sidecar), + transaction: ValidTransaction::new(transaction, maybe_blob_sidecar), // by this point assume all external transactions should be propagated propagate: match origin { TransactionOrigin::External => true, From 80ee49b77708d227be818309a0ac1b99ffb2f57f Mon Sep 17 00:00:00 2001 From: Alessandro Mazza <121622391+alessandromazza98@users.noreply.github.com> Date: Tue, 29 Aug 2023 18:48:18 +0200 Subject: [PATCH 547/722] fix: complete vmTrace memory delta recording (#4227) Co-authored-by: Matthias Seitz --- .../src/tracing/builder/parity.rs | 87 +++++++++++++++++-- 1 file changed, 81 insertions(+), 6 deletions(-) diff --git a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs index edc9d2b6463e..014aca08e9e6 100644 --- a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs +++ b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs @@ -7,7 +7,7 @@ use reth_primitives::{Address, U64}; use reth_rpc_types::{trace::parity::*, TransactionInfo}; use revm::{ db::DatabaseRef, - interpreter::opcode::spec_opcode_gas, + interpreter::opcode::{self, spec_opcode_gas}, primitives::{AccountInfo, ExecutionResult, ResultAndState, SpecId, KECCAK_EMPTY}, }; use std::collections::{HashSet, VecDeque}; @@ -363,16 +363,91 @@ impl ParityTraceBuilder { val: storage_change.value, }); - let maybe_memory = match step.memory.len() { - 0 => None, - _ => { - Some(MemoryDelta { off: step.memory_size, data: step.memory.data().clone().into() }) + let maybe_memory = if step.memory.is_empty() { + None + } else { + Some(MemoryDelta { off: step.memory_size, data: step.memory.data().clone().into() }) + }; + + // Calculate the stack items at this step + let push_stack = { + let step_op = 
step.op.u8(); + let show_stack: usize; + if (opcode::PUSH0..=opcode::PUSH32).contains(&step_op) { + show_stack = 1; + } else if (opcode::SWAP1..=opcode::SWAP16).contains(&step_op) { + show_stack = (step_op - opcode::SWAP1) as usize + 2; + } else if (opcode::DUP1..=opcode::DUP16).contains(&step_op) { + show_stack = (step_op - opcode::DUP1) as usize + 2; + } else { + show_stack = match step_op { + opcode::CALLDATALOAD | + opcode::SLOAD | + opcode::MLOAD | + opcode::CALLDATASIZE | + opcode::LT | + opcode::GT | + opcode::DIV | + opcode::SDIV | + opcode::SAR | + opcode::AND | + opcode::EQ | + opcode::CALLVALUE | + opcode::ISZERO | + opcode::ADD | + opcode::EXP | + opcode::CALLER | + opcode::SHA3 | + opcode::SUB | + opcode::ADDRESS | + opcode::GAS | + opcode::MUL | + opcode::RETURNDATASIZE | + opcode::NOT | + opcode::SHR | + opcode::SHL | + opcode::EXTCODESIZE | + opcode::SLT | + opcode::OR | + opcode::NUMBER | + opcode::PC | + opcode::TIMESTAMP | + opcode::BALANCE | + opcode::SELFBALANCE | + opcode::MULMOD | + opcode::ADDMOD | + opcode::BASEFEE | + opcode::BLOCKHASH | + opcode::BYTE | + opcode::XOR | + opcode::ORIGIN | + opcode::CODESIZE | + opcode::MOD | + opcode::SIGNEXTEND | + opcode::GASLIMIT | + opcode::DIFFICULTY | + opcode::SGT | + opcode::GASPRICE | + opcode::MSIZE | + opcode::EXTCODEHASH | + opcode::SMOD | + opcode::CHAINID | + opcode::COINBASE => 1, + _ => 0, + } + }; + let mut push_stack = step.push_stack.clone().unwrap_or_default(); + for idx in (0..show_stack).rev() { + if step.stack.len() > idx { + push_stack.push(step.stack.peek(idx).unwrap_or_default()) + } } + push_stack }; let maybe_execution = Some(VmExecutedOperation { used: step.gas_remaining, - push: step.push_stack.clone().unwrap_or_default(), + push: push_stack, mem: maybe_memory, store: maybe_storage, }); From 405795301ed3b123bb52ca41f92926fad4cbaa15 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 29 Aug 2023 18:17:37 +0100 Subject: [PATCH 548/722] chore: pin Grafana version in docker-compose.yml (#4395) --- etc/docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/etc/docker-compose.yml b/etc/docker-compose.yml index 0261711eb227..02caac274d7b 100644 --- a/etc/docker-compose.yml +++ b/etc/docker-compose.yml @@ -41,7 +41,7 @@ services: grafana: restart: unless-stopped - image: grafana/grafana + image: grafana/grafana:10.1.0 depends_on: - reth - prometheus From 5556505e3573302177b64b3606405640c7711d9e Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 29 Aug 2023 18:17:45 +0100 Subject: [PATCH 549/722] chore: mention Sepolia changes in Docker Compose files (#4391) --- etc/docker-compose.yml | 9 +++++++-- etc/lighthouse.yml | 5 ++++- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/etc/docker-compose.yml b/etc/docker-compose.yml index 02caac274d7b..b97559e34cbb 100644 --- a/etc/docker-compose.yml +++ b/etc/docker-compose.yml @@ -11,11 +11,14 @@ services: - '8545:8545' # rpc - '8551:8551' # engine volumes: - - rethdata:/root/.local/share/reth/mainnet/db + - rethdata_mainnet:/root/.local/share/reth/mainnet/db + - rethdata_sepolia:/root/.local/share/reth/sepolia/db - rethlogs:/root/rethlogs - ./jwttoken:/root/jwt:ro + # For Sepolia, replace `--chain mainnet` with `--chain sepolia` command: > node + --chain mainnet --metrics 0.0.0.0:9001 --log.persistent --log.directory /root/rethlogs @@ -62,7 +65,9 @@ services: /run.sh" volumes: - rethdata: + rethdata_mainnet: + driver: local + rethdata_sepolia: driver: local rethlogs: driver: local diff --git 
a/etc/lighthouse.yml b/etc/lighthouse.yml index 339ac173f2d6..fc8f00f15d91 100644 --- a/etc/lighthouse.yml +++ b/etc/lighthouse.yml @@ -2,7 +2,6 @@ version: '3.9' name: reth services: - lighthouse: restart: unless-stopped image: sigp/lighthouse @@ -17,8 +16,12 @@ services: volumes: - lighthousedata:/root/.lighthouse - ./jwttoken:/root/jwt:ro + # For Sepolia: + # - Replace `--network mainnet` with `--network sepolia` + # - Use different checkpoint sync URL: `--checkpoint-sync-url https://sepolia.checkpoint-sync.ethpandaops.io` command: > lighthouse bn + --network mainnet --http --http-address 0.0.0.0 --execution-endpoint http://reth:8551 --metrics --metrics-address 0.0.0.0 From 3a1eeee812d3acf82f70f4c4cce65be56db21787 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 29 Aug 2023 10:40:19 -0700 Subject: [PATCH 550/722] perf: no need to keep track of replaced txs (#4394) --- crates/transaction-pool/src/blobstore/mem.rs | 29 ++++++++------------ 1 file changed, 11 insertions(+), 18 deletions(-) diff --git a/crates/transaction-pool/src/blobstore/mem.rs b/crates/transaction-pool/src/blobstore/mem.rs index 187b9026f0b3..352ecae61520 100644 --- a/crates/transaction-pool/src/blobstore/mem.rs +++ b/crates/transaction-pool/src/blobstore/mem.rs @@ -21,22 +21,16 @@ struct InMemoryBlobStoreInner { } impl InMemoryBlobStoreInner { + #[inline] fn add_size(&self, add: usize) { self.data_size.fetch_add(add, std::sync::atomic::Ordering::Relaxed); } + #[inline] fn sub_size(&self, sub: usize) { self.data_size.fetch_sub(sub, std::sync::atomic::Ordering::Relaxed); } - fn update_size(&self, add: usize, sub: usize) { - if add > sub { - self.add_size(add - sub); - } else { - self.sub_size(sub - add); - } - } - fn update_len(&self, len: usize) { self.num_blobs.store(len, std::sync::atomic::Ordering::Relaxed); } @@ -45,8 +39,7 @@ impl InMemoryBlobStoreInner { impl BlobStore for InMemoryBlobStore { fn insert(&self, tx: H256, data: BlobTransactionSidecar) -> Result<(), BlobStoreError> { let mut store = self.inner.store.write(); - let (add, sub) = insert_size(&mut store, tx, data); - self.inner.update_size(add, sub); + self.inner.add_size(insert_size(&mut store, tx, data)); self.inner.update_len(store.len()); Ok(()) } @@ -57,13 +50,11 @@ impl BlobStore for InMemoryBlobStore { } let mut store = self.inner.store.write(); let mut total_add = 0; - let mut total_sub = 0; for (tx, data) in txs { - let (add, sub) = insert_size(&mut store, tx, data); + let add = insert_size(&mut store, tx, data); total_add += add; - total_sub += sub; } - self.inner.update_size(total_add, total_sub); + self.inner.add_size(total_add); self.inner.update_len(store.len()); Ok(()) } @@ -126,14 +117,16 @@ fn remove_size(store: &mut HashMap, tx: &H256) -> store.remove(tx).map(|rem| rem.size()).unwrap_or_default() } -/// Inserts the given blob into the store and returns the size of the blob that was (added,removed) +/// Inserts the given blob into the store and returns the size of the blob that was added +/// +/// We don't need to handle the size updates for replacements because transactions are unique. 
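An aside on the counters in the blob store hunk above: `data_size` is only read back for metrics, so the relaxed atomic ordering used there is sufficient. A minimal sketch of that accounting, under the same assumption:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// The counter is only read back for metrics, so relaxed ordering suffices.
#[derive(Default)]
struct SizeTracker {
    data_size: AtomicUsize,
}

impl SizeTracker {
    fn add(&self, bytes: usize) {
        self.data_size.fetch_add(bytes, Ordering::Relaxed);
    }

    fn sub(&self, bytes: usize) {
        self.data_size.fetch_sub(bytes, Ordering::Relaxed);
    }

    fn get(&self) -> usize {
        self.data_size.load(Ordering::Relaxed)
    }
}

fn main() {
    let tracker = SizeTracker::default();
    tracker.add(1024);
    tracker.sub(256);
    assert_eq!(tracker.get(), 768);
}
```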
#[inline]
fn insert_size(
    store: &mut HashMap,
    tx: H256,
    blob: BlobTransactionSidecar,
-) -> (usize, usize) {
+) -> usize {
    let add = blob.size();
-    let sub = store.insert(tx, blob).map(|rem| rem.size()).unwrap_or_default();
-    (add, sub)
+    store.insert(tx, blob);
+    add
 }

From cb82498526c6d9271edb0662eb15700c23f7eb1c Mon Sep 17 00:00:00 2001
From: rakita
Date: Tue, 29 Aug 2023 19:47:41 +0200
Subject: [PATCH 551/722] bump(revm): with fix (#4389)

---
 Cargo.lock | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 777feca5044f..e9dbd9603733 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6185,7 +6185,7 @@ dependencies = [
 [[package]]
 name = "revm"
 version = "3.3.0"
-source = "git+https://github.com/bluealloy/revm/?branch=release/v25#88337924f4d16ed1f5e4cde12a03d0cb755cd658"
+source = "git+https://github.com/bluealloy/revm/?branch=release/v25#6084e0fa2d457931cd8c9d29934bca0812b5b8d6"
 dependencies = [
  "auto_impl",
  "revm-interpreter",
@@ -6195,7 +6195,7 @@
 [[package]]
 name = "revm-interpreter"
 version = "1.1.2"
-source = "git+https://github.com/bluealloy/revm/?branch=release/v25#88337924f4d16ed1f5e4cde12a03d0cb755cd658"
+source = "git+https://github.com/bluealloy/revm/?branch=release/v25#6084e0fa2d457931cd8c9d29934bca0812b5b8d6"
 dependencies = [
  "derive_more",
  "enumn",
@@ -6206,7 +6206,7 @@
 [[package]]
 name = "revm-precompile"
 version = "2.0.3"
-source = "git+https://github.com/bluealloy/revm/?branch=release/v25#88337924f4d16ed1f5e4cde12a03d0cb755cd658"
+source = "git+https://github.com/bluealloy/revm/?branch=release/v25#6084e0fa2d457931cd8c9d29934bca0812b5b8d6"
 dependencies = [
  "k256",
  "num",
@@ -6222,7 +6222,7 @@
 [[package]]
 name = "revm-primitives"
 version = "1.1.2"
-source = "git+https://github.com/bluealloy/revm/?branch=release/v25#88337924f4d16ed1f5e4cde12a03d0cb755cd658"
+source = "git+https://github.com/bluealloy/revm/?branch=release/v25#6084e0fa2d457931cd8c9d29934bca0812b5b8d6"
 dependencies = [
  "arbitrary",
  "auto_impl",

From 4a2c1691fc55cabf8debda2e47a56ff6970a4b96 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Tue, 29 Aug 2023 10:54:15 -0700
Subject: [PATCH 552/722] chore: rename price bump arg (#4399)

---
 bin/reth/src/args/txpool_args.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/bin/reth/src/args/txpool_args.rs b/bin/reth/src/args/txpool_args.rs
index 2edfb20af698..51d102a0a6d8 100644
--- a/bin/reth/src/args/txpool_args.rs
+++ b/bin/reth/src/args/txpool_args.rs
@@ -35,7 +35,7 @@ pub struct TxPoolArgs {
     pub max_account_slots: usize,

     /// Price bump (in %) for the transaction pool underpriced check.
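The rename in the hunk below is a one-line change because clap decouples the CLI-visible flag from the Rust field name. A hedged sketch of the pattern, assuming clap 4 with the `derive` feature and a 10% default standing in for `DEFAULT_PRICE_BUMP`:

```rust
use clap::Parser;

#[derive(Debug, Parser)]
struct TxPoolArgs {
    /// Price bump (in %) for the transaction pool underpriced check.
    #[arg(long = "txpool.pricebump", default_value_t = 10)]
    price_bump: u128,
}

fn main() {
    // `--txpool.pricebump 15` overrides the default; the field stays `price_bump`.
    let args = TxPoolArgs::parse();
    println!("price bump: {}%", args.price_bump);
}
```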
- #[arg(long = "txpool.price_bump", help_heading = "TxPool", default_value_t = DEFAULT_PRICE_BUMP)] + #[arg(long = "txpool.pricebump", help_heading = "TxPool", default_value_t = DEFAULT_PRICE_BUMP)] pub price_bump: u128, } From 0c7a93717aa516f34e79ac6bafd8be088be8b7cc Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 29 Aug 2023 10:55:13 -0700 Subject: [PATCH 553/722] feat: add EIP-4788 parent_beacon_block_root to Header (#4299) --- crates/consensus/auto-seal/src/lib.rs | 1 + crates/consensus/beacon/src/engine/handle.rs | 8 +- crates/consensus/beacon/src/engine/message.rs | 3 + crates/consensus/beacon/src/engine/mod.rs | 30 +++--- .../consensus/beacon/src/engine/test_utils.rs | 6 +- crates/consensus/common/src/validation.rs | 8 ++ crates/interfaces/src/consensus.rs | 4 + crates/net/eth-wire/src/types/blocks.rs | 4 + crates/payload/basic/src/lib.rs | 2 + crates/primitives/src/header.rs | 98 +++++++++++++------ crates/rpc/rpc-engine-api/src/engine_api.rs | 17 +++- crates/rpc/rpc-engine-api/tests/it/payload.rs | 14 +-- crates/rpc/rpc-types/src/eth/block.rs | 7 ++ .../rpc/rpc-types/src/eth/engine/payload.rs | 77 +++++++-------- crates/rpc/rpc/src/eth/api/pending_block.rs | 1 + testing/ef-tests/src/models.rs | 3 + 16 files changed, 189 insertions(+), 94 deletions(-) diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index cadfb5593b7c..f68d1271a7b9 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -277,6 +277,7 @@ impl StorageInner { blob_gas_used: None, excess_blob_gas: None, extra_data: Default::default(), + parent_beacon_block_root: None, }; header.transactions_root = if transactions.is_empty() { diff --git a/crates/consensus/beacon/src/engine/handle.rs b/crates/consensus/beacon/src/engine/handle.rs index d2b8661b6080..176a8cbbf9de 100644 --- a/crates/consensus/beacon/src/engine/handle.rs +++ b/crates/consensus/beacon/src/engine/handle.rs @@ -5,6 +5,7 @@ use crate::{ BeaconForkChoiceUpdateError, BeaconOnNewPayloadError, }; use futures::TryFutureExt; +use reth_primitives::H256; use reth_rpc_types::engine::{ ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadAttributes, PayloadStatus, }; @@ -34,9 +35,14 @@ impl BeaconConsensusEngineHandle { pub async fn new_payload( &self, payload: ExecutionPayload, + parent_beacon_block_root: Option, ) -> Result { let (tx, rx) = oneshot::channel(); - let _ = self.to_engine.send(BeaconEngineMessage::NewPayload { payload, tx }); + let _ = self.to_engine.send(BeaconEngineMessage::NewPayload { + payload, + parent_beacon_block_root, + tx, + }); rx.await.map_err(|_| BeaconOnNewPayloadError::EngineUnavailable)? } diff --git a/crates/consensus/beacon/src/engine/message.rs b/crates/consensus/beacon/src/engine/message.rs index f894603ab4b4..9e29a07d54bd 100644 --- a/crates/consensus/beacon/src/engine/message.rs +++ b/crates/consensus/beacon/src/engine/message.rs @@ -5,6 +5,7 @@ use crate::{ use futures::{future::Either, FutureExt}; use reth_interfaces::consensus::ForkchoiceState; use reth_payload_builder::error::PayloadBuilderError; +use reth_primitives::H256; use reth_rpc_types::engine::{ ExecutionPayload, ForkChoiceUpdateResult, ForkchoiceUpdateError, ForkchoiceUpdated, PayloadAttributes, PayloadId, PayloadStatus, PayloadStatusEnum, @@ -146,6 +147,8 @@ pub enum BeaconEngineMessage { NewPayload { /// The execution payload received by Engine API. payload: ExecutionPayload, + /// The parent beacon block root, if any. 
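Threading the new `parent_beacon_block_root` argument through the engine is mechanical because of the message-passing shape used here: the handle packs the arguments plus a oneshot responder into a message, sends it to the engine task, and awaits the reply. A reduced sketch with stand-in types (tokio assumed):

```rust
use tokio::sync::{mpsc, oneshot};

// Stand-ins for the real payload and hash types.
type Payload = String;
type BlockRoot = [u8; 32];

enum EngineMessage {
    NewPayload {
        payload: Payload,
        parent_beacon_block_root: Option<BlockRoot>,
        // The engine task answers the caller through this oneshot channel.
        tx: oneshot::Sender<Result<(), String>>,
    },
}

async fn new_payload(
    to_engine: &mpsc::UnboundedSender<EngineMessage>,
    payload: Payload,
    parent_beacon_block_root: Option<BlockRoot>,
) -> Result<(), String> {
    let (tx, rx) = oneshot::channel();
    let _ = to_engine.send(EngineMessage::NewPayload { payload, parent_beacon_block_root, tx });
    // A dropped sender means the engine task is gone.
    rx.await.map_err(|_| "engine unavailable".to_string())?
}
```

The trailing `?` first surfaces a dropped engine task as an error, then hands the engine's own verdict back to the caller.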
+ parent_beacon_block_root: Option, /// The sender for returning payload status result. tx: oneshot::Sender>, }, diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 6f6dfb35dd50..7665d575ce1c 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1049,12 +1049,13 @@ where /// /// This returns a [`PayloadStatus`] that represents the outcome of a processed new payload and /// returns an error if an internal error occurred. - #[instrument(level = "trace", skip(self, payload), fields(block_hash= ?payload.block_hash, block_number = %payload.block_number.as_u64(), is_pipeline_idle = %self.sync.is_pipeline_idle()), target = "consensus::engine")] + #[instrument(level = "trace", skip(self, payload, parent_beacon_block_root), fields(block_hash= ?payload.block_hash, block_number = %payload.block_number.as_u64(), is_pipeline_idle = %self.sync.is_pipeline_idle()), target = "consensus::engine")] fn on_new_payload( &mut self, payload: ExecutionPayload, + parent_beacon_block_root: Option, ) -> Result { - let block = match self.ensure_well_formed_payload(payload) { + let block = match self.ensure_well_formed_payload(payload, parent_beacon_block_root) { Ok(block) => block, Err(status) => return Ok(status), }; @@ -1118,9 +1119,10 @@ where fn ensure_well_formed_payload( &self, payload: ExecutionPayload, + parent_beacon_block_root: Option, ) -> Result { let parent_hash = payload.parent_hash; - let block = match SealedBlock::try_from(payload) { + let block = match payload.try_into_sealed_block(parent_beacon_block_root) { Ok(block) => block, Err(error) => { error!(target: "consensus::engine", ?error, "Invalid payload"); @@ -1725,9 +1727,9 @@ where } } } - BeaconEngineMessage::NewPayload { payload, tx } => { + BeaconEngineMessage::NewPayload { payload, parent_beacon_block_root, tx } => { this.metrics.new_payload_messages.increment(1); - let res = this.on_new_payload(payload); + let res = this.on_new_payload(payload, parent_beacon_block_root); let _ = tx.send(res); } BeaconEngineMessage::TransitionConfigurationExchanged => { @@ -1865,7 +1867,7 @@ mod tests { assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); // consensus engine is still idle because no FCUs were received - let _ = env.send_new_payload(SealedBlock::default().into()).await; + let _ = env.send_new_payload(SealedBlock::default().into(), None).await; assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); // consensus engine is still idle because pruning is running @@ -2279,14 +2281,16 @@ mod tests { let mut engine_rx = spawn_consensus_engine(consensus_engine); // Send new payload - let res = - env.send_new_payload(random_block(&mut rng, 0, None, None, Some(0)).into()).await; + let res = env + .send_new_payload(random_block(&mut rng, 0, None, None, Some(0)).into(), None) + .await; // Invalid, because this is a genesis block assert_matches!(res, Ok(result) => assert_matches!(result.status, PayloadStatusEnum::Invalid { .. 
})); // Send new payload - let res = - env.send_new_payload(random_block(&mut rng, 1, None, None, Some(0)).into()).await; + let res = env + .send_new_payload(random_block(&mut rng, 1, None, None, Some(0)).into(), None) + .await; let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Syncing); assert_matches!(res, Ok(result) => assert_eq!(result, expected_result)); @@ -2336,7 +2340,7 @@ mod tests { // Send new payload let result = - env.send_new_payload_retry_on_syncing(block2.clone().into()).await.unwrap(); + env.send_new_payload_retry_on_syncing(block2.clone().into(), None).await.unwrap(); let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Valid) .with_latest_valid_hash(block2.hash); assert_eq!(result, expected_result); @@ -2434,7 +2438,7 @@ mod tests { // Send new payload let block = random_block(&mut rng, 2, Some(H256::random()), None, Some(0)); - let res = env.send_new_payload(block.into()).await; + let res = env.send_new_payload(block.into(), None).await; let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Syncing); assert_matches!(res, Ok(result) => assert_eq!(result, expected_result)); @@ -2497,7 +2501,7 @@ mod tests { // Send new payload let result = - env.send_new_payload_retry_on_syncing(block2.clone().into()).await.unwrap(); + env.send_new_payload_retry_on_syncing(block2.clone().into(), None).await.unwrap(); let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Invalid { validation_error: BlockValidationError::BlockPreMerge { hash: block2.hash } diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 2aafd028da3f..287cd0f41ceb 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -69,8 +69,9 @@ impl TestEnv { pub async fn send_new_payload( &self, payload: ExecutionPayload, + parent_beacon_block_root: Option, ) -> Result { - self.engine_handle.new_payload(payload).await + self.engine_handle.new_payload(payload, parent_beacon_block_root).await } /// Sends the `ExecutionPayload` message to the consensus engine and retries if the engine @@ -78,9 +79,10 @@ impl TestEnv { pub async fn send_new_payload_retry_on_syncing( &self, payload: ExecutionPayload, + parent_beacon_block_root: Option, ) -> Result { loop { - let result = self.send_new_payload(payload.clone()).await?; + let result = self.send_new_payload(payload.clone(), parent_beacon_block_root).await?; if !result.is_syncing() { return Ok(result) } diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 2c2bde80fa40..d7feec42c972 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -50,6 +50,8 @@ pub fn validate_header_standalone( return Err(ConsensusError::BlobGasUsedUnexpected) } else if header.excess_blob_gas.is_some() { return Err(ConsensusError::ExcessBlobGasUnexpected) + } else if header.parent_beacon_block_root.is_some() { + return Err(ConsensusError::ParentBeaconBlockRootUnexpected) } Ok(()) @@ -451,6 +453,7 @@ pub fn validate_4844_header_with_parent( /// /// * `blob_gas_used` exists as a header field /// * `excess_blob_gas` exists as a header field +/// * `parent_beacon_block_root` exists as a header field /// * `blob_gas_used` is less than or equal to `MAX_DATA_GAS_PER_BLOCK` /// * `blob_gas_used` is a multiple of `DATA_GAS_PER_BLOB` pub fn validate_4844_header_standalone(header: &SealedHeader) -> Result<(), ConsensusError> { @@ -460,6 
+463,10 @@ pub fn validate_4844_header_standalone(header: &SealedHeader) -> Result<(), Cons return Err(ConsensusError::ExcessBlobGasMissing) } + if header.parent_beacon_block_root.is_none() { + return Err(ConsensusError::ParentBeaconBlockRootMissing) + } + if blob_gas_used > MAX_DATA_GAS_PER_BLOCK { return Err(ConsensusError::BlobGasUsedExceedsMaxBlobGasPerBlock { blob_gas_used, @@ -633,6 +640,7 @@ mod tests { withdrawals_root: None, blob_gas_used: None, excess_blob_gas: None, + parent_beacon_block_root: None, }; // size: 0x9b5 diff --git a/crates/interfaces/src/consensus.rs b/crates/interfaces/src/consensus.rs index 9176bd06d7f2..9274cdcbe52f 100644 --- a/crates/interfaces/src/consensus.rs +++ b/crates/interfaces/src/consensus.rs @@ -143,6 +143,10 @@ pub enum ConsensusError { ExcessBlobGasMissing, #[error("Unexpected excess blob gas")] ExcessBlobGasUnexpected, + #[error("Missing parent beacon block root")] + ParentBeaconBlockRootMissing, + #[error("Unexpected parent beacon block root")] + ParentBeaconBlockRootUnexpected, #[error("Blob gas used {blob_gas_used} exceeds maximum allowance {max_blob_gas_per_block}")] BlobGasUsedExceedsMaxBlobGasPerBlock { blob_gas_used: u64, max_blob_gas_per_block: u64 }, #[error( diff --git a/crates/net/eth-wire/src/types/blocks.rs b/crates/net/eth-wire/src/types/blocks.rs index 808c8f4a4609..2b5bfd0207f3 100644 --- a/crates/net/eth-wire/src/types/blocks.rs +++ b/crates/net/eth-wire/src/types/blocks.rs @@ -260,6 +260,7 @@ mod test { withdrawals_root: None, blob_gas_used: None, excess_blob_gas: None, + parent_beacon_block_root: None, }, ]), }.encode(&mut data); @@ -293,6 +294,7 @@ mod test { withdrawals_root: None, blob_gas_used: None, excess_blob_gas: None, + parent_beacon_block_root: None, }, ]), }; @@ -407,6 +409,7 @@ mod test { withdrawals_root: None, blob_gas_used: None, excess_blob_gas: None, + parent_beacon_block_root: None, }, ], withdrawals: None, @@ -493,6 +496,7 @@ mod test { withdrawals_root: None, blob_gas_used: None, excess_blob_gas: None, + parent_beacon_block_root: None, }, ], withdrawals: None, diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 461325c6f3ab..864a6443d272 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -785,6 +785,7 @@ where extra_data: extra_data.into(), blob_gas_used: None, excess_blob_gas: None, + parent_beacon_block_root: None, }; // seal the block @@ -856,6 +857,7 @@ where blob_gas_used: None, excess_blob_gas: None, extra_data: extra_data.into(), + parent_beacon_block_root: None, }; let block = Block { header, body: vec![], ommers: vec![], withdrawals }; diff --git a/crates/primitives/src/header.rs b/crates/primitives/src/header.rs index bc6d75194a2b..12f4e3fca82a 100644 --- a/crates/primitives/src/header.rs +++ b/crates/primitives/src/header.rs @@ -100,6 +100,9 @@ pub struct Header { /// with above-target blob gas consumption increase this value, blocks with below-target blob /// gas consumption decrease it (bounded at 0). This was added in EIP-4844. pub excess_blob_gas: Option, + /// TODO: Docs + /// This was added in EIP-4788. + pub parent_beacon_block_root: Option, /// An arbitrary byte array containing data relevant to this block. This must be 32 bytes or /// fewer; formally Hx. 
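The standalone header checks extended in the hunk above come down to presence and bounds checks on the new fields. A condensed sketch, using a stand-in error type and the EIP-4844 constants as of this patch (one blob costs 2^17 data gas, at most six blobs per block):

```rust
const DATA_GAS_PER_BLOB: u64 = 131_072; // 2^17, per EIP-4844
const MAX_DATA_GAS_PER_BLOCK: u64 = 786_432; // 6 blobs * 2^17

#[derive(Debug)]
enum HeaderError {
    BlobGasUsedMissing,
    ExcessBlobGasMissing,
    ParentBeaconBlockRootMissing,
    BlobGasExceedsMax { blob_gas_used: u64 },
    BlobGasNotMultipleOfBlobSize { blob_gas_used: u64 },
}

fn validate_4844_fields(
    blob_gas_used: Option<u64>,
    excess_blob_gas: Option<u64>,
    parent_beacon_block_root: Option<[u8; 32]>,
) -> Result<(), HeaderError> {
    // All three fields must be present post-Cancun.
    let blob_gas_used = blob_gas_used.ok_or(HeaderError::BlobGasUsedMissing)?;
    if excess_blob_gas.is_none() {
        return Err(HeaderError::ExcessBlobGasMissing)
    }
    if parent_beacon_block_root.is_none() {
        return Err(HeaderError::ParentBeaconBlockRootMissing)
    }
    // Bounds: within the per-block maximum and a whole number of blobs.
    if blob_gas_used > MAX_DATA_GAS_PER_BLOCK {
        return Err(HeaderError::BlobGasExceedsMax { blob_gas_used })
    }
    if blob_gas_used % DATA_GAS_PER_BLOB != 0 {
        return Err(HeaderError::BlobGasNotMultipleOfBlobSize { blob_gas_used })
    }
    Ok(())
}
```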
pub extra_data: Bytes, @@ -127,6 +130,7 @@ impl Default for Header { withdrawals_root: None, blob_gas_used: None, excess_blob_gas: None, + parent_beacon_block_root: None, } } } @@ -227,6 +231,7 @@ impl Header { mem::size_of::>() + // base fee per gas mem::size_of::>() + // blob gas used mem::size_of::>() + // excess blob gas + mem::size_of::>() + // parent beacon block root self.extra_data.len() // extra data } @@ -252,32 +257,42 @@ impl Header { length += U256::from(base_fee).length(); } else if self.withdrawals_root.is_some() || self.blob_gas_used.is_some() || - self.excess_blob_gas.is_some() + self.excess_blob_gas.is_some() || + self.parent_beacon_block_root.is_some() { - length += 1; // EMPTY STRING CODE + length += 1; // EMPTY LIST CODE } if let Some(root) = self.withdrawals_root { length += root.length(); - } else if self.blob_gas_used.is_some() || self.excess_blob_gas.is_some() { + } else if self.blob_gas_used.is_some() || + self.excess_blob_gas.is_some() || + self.parent_beacon_block_root.is_some() + { length += 1; // EMPTY STRING CODE } if let Some(blob_gas_used) = self.blob_gas_used { length += U256::from(blob_gas_used).length(); - } else if self.excess_blob_gas.is_some() { - length += 1; // EMPTY STRING CODE + } else if self.excess_blob_gas.is_some() || self.parent_beacon_block_root.is_some() { + length += 1; // EMPTY LIST CODE } - // Encode excess blob gas length. If new fields are added, the above pattern will need to - // be repeated and placeholder length added. Otherwise, it's impossible to tell _which_ - // fields are missing. This is mainly relevant for contrived cases where a header is - // created at random, for example: + if let Some(excess_blob_gas) = self.excess_blob_gas { + length += U256::from(excess_blob_gas).length(); + } else if self.parent_beacon_block_root.is_some() { + length += 1; // EMPTY LIST CODE + } + + // Encode parent beacon block root length. If new fields are added, the above pattern will + // need to be repeated and placeholder length added. Otherwise, it's impossible to + // tell _which_ fields are missing. This is mainly relevant for contrived cases + // where a header is created at random, for example: // * A header is created with a withdrawals root, but no base fee. Shanghai blocks are // post-London, so this is technically not valid. However, a tool like proptest would // generate a block like this. - if let Some(excess_blob_gas) = self.excess_blob_gas { - length += U256::from(excess_blob_gas).length(); + if let Some(parent_beacon_block_root) = self.parent_beacon_block_root { + length += parent_beacon_block_root.length(); } length @@ -305,42 +320,54 @@ impl Encodable for Header { self.mix_hash.encode(out); H64::from_low_u64_be(self.nonce).encode(out); - // Encode base fee. Put empty string if base fee is missing, + // Encode base fee. Put empty list if base fee is missing, // but withdrawals root is present. if let Some(ref base_fee) = self.base_fee_per_gas { U256::from(*base_fee).encode(out); } else if self.withdrawals_root.is_some() || self.blob_gas_used.is_some() || - self.excess_blob_gas.is_some() + self.excess_blob_gas.is_some() || + self.parent_beacon_block_root.is_some() { - out.put_u8(EMPTY_STRING_CODE); + out.put_u8(EMPTY_LIST_CODE); } // Encode withdrawals root. Put empty string if withdrawals root is missing, // but blob gas used is present. 
if let Some(ref root) = self.withdrawals_root { root.encode(out); - } else if self.blob_gas_used.is_some() || self.excess_blob_gas.is_some() { + } else if self.blob_gas_used.is_some() || + self.excess_blob_gas.is_some() || + self.parent_beacon_block_root.is_some() + { out.put_u8(EMPTY_STRING_CODE); } - // Encode blob gas used. Put empty string if blob gas used is missing, + // Encode blob gas used. Put empty list if blob gas used is missing, // but excess blob gas is present. if let Some(ref blob_gas_used) = self.blob_gas_used { U256::from(*blob_gas_used).encode(out); - } else if self.excess_blob_gas.is_some() { + } else if self.excess_blob_gas.is_some() || self.parent_beacon_block_root.is_some() { + out.put_u8(EMPTY_LIST_CODE); + } + + // Encode excess blob gas. Put empty list if excess blob gas is missing, + // but parent beacon block root is present. + if let Some(ref excess_blob_gas) = self.excess_blob_gas { + U256::from(*excess_blob_gas).encode(out); + } else if self.parent_beacon_block_root.is_some() { out.put_u8(EMPTY_LIST_CODE); } - // Encode excess blob gas. If new fields are added, the above pattern will need to be - // repeated and placeholders added. Otherwise, it's impossible to tell _which_ fields - // are missing. This is mainly relevant for contrived cases where a header is created - // at random, for example: + // Encode parent beacon block root. If new fields are added, the above pattern will need to + // be repeated and placeholders added. Otherwise, it's impossible to tell _which_ + // fields are missing. This is mainly relevant for contrived cases where a header is + // created at random, for example: // * A header is created with a withdrawals root, but no base fee. Shanghai blocks are // post-London, so this is technically not valid. However, a tool like proptest would // generate a block like this. - if let Some(ref excess_blob_gas) = self.excess_blob_gas { - U256::from(*excess_blob_gas).encode(out); + if let Some(ref parent_beacon_block_root) = self.parent_beacon_block_root { + parent_beacon_block_root.encode(out); } } @@ -379,10 +406,11 @@ impl Decodable for Header { withdrawals_root: None, blob_gas_used: None, excess_blob_gas: None, + parent_beacon_block_root: None, }; if started_len - buf.len() < rlp_head.payload_length { - if buf.first().map(|b| *b == EMPTY_STRING_CODE).unwrap_or_default() { + if buf.first().map(|b| *b == EMPTY_LIST_CODE).unwrap_or_default() { buf.advance(1) } else { this.base_fee_per_gas = Some(U256::decode(buf)?.to::()); @@ -407,15 +435,23 @@ impl Decodable for Header { } } - // Decode excess blob gas. If new fields are added, the above pattern will need to be - // repeated and placeholders decoded. Otherwise, it's impossible to tell _which_ fields are - // missing. This is mainly relevant for contrived cases where a header is created at - // random, for example: + if started_len - buf.len() < rlp_head.payload_length { + if buf.first().map(|b| *b == EMPTY_LIST_CODE).unwrap_or_default() { + buf.advance(1) + } else { + this.excess_blob_gas = Some(U256::decode(buf)?.to::()); + } + } + + // Decode parent beacon block root. If new fields are added, the above pattern will need to + // be repeated and placeholders decoded. Otherwise, it's impossible to tell _which_ + // fields are missing. This is mainly relevant for contrived cases where a header is + // created at random, for example: // * A header is created with a withdrawals root, but no base fee. Shanghai blocks are // post-London, so this is technically not valid. 
However, a tool like proptest would // generate a block like this. if started_len - buf.len() < rlp_head.payload_length { - this.excess_blob_gas = Some(U256::decode(buf)?.to::()); + this.parent_beacon_block_root = Some(H256::decode(buf)?); } let consumed = started_len - buf.len(); @@ -642,6 +678,7 @@ mod ethers_compat { logs_bloom: block.logs_bloom.unwrap_or_default().0.into(), blob_gas_used: None, excess_blob_gas: None, + parent_beacon_block_root: None, } } } @@ -713,6 +750,7 @@ mod tests { withdrawals_root: None, blob_gas_used: None, excess_blob_gas: None, + parent_beacon_block_root: None, }; assert_eq!(header.hash_slow(), expected_hash); } @@ -836,6 +874,7 @@ mod tests { ), blob_gas_used: Some(0x020000), excess_blob_gas: Some(0), + parent_beacon_block_root: None, }; let header = Header::decode(&mut data.as_slice()).unwrap(); @@ -892,6 +931,7 @@ mod tests { H256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") .unwrap(), ), + parent_beacon_block_root: None, blob_gas_used: Some(0), excess_blob_gas: Some(0x1600000), }; diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 057b3954ee25..1a6758b86e8d 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -75,7 +75,7 @@ where EngineApiMessageVersion::V1, PayloadOrAttributes::from_execution_payload(&payload, None), )?; - Ok(self.inner.beacon_consensus.new_payload(payload).await?) + Ok(self.inner.beacon_consensus.new_payload(payload, None).await?) } /// See also @@ -87,7 +87,7 @@ where EngineApiMessageVersion::V2, PayloadOrAttributes::from_execution_payload(&payload, None), )?; - Ok(self.inner.beacon_consensus.new_payload(payload).await?) + Ok(self.inner.beacon_consensus.new_payload(payload, None).await?) } /// See also @@ -103,7 +103,7 @@ where )?; // TODO: validate versioned hashes and figure out what to do with parent_beacon_block_root - Ok(self.inner.beacon_consensus.new_payload(payload).await?) + Ok(self.inner.beacon_consensus.new_payload(payload, Some(parent_beacon_block_root)).await?) } /// Sends a message to the beacon consensus engine to update the fork choice _without_ @@ -443,6 +443,8 @@ where Ok(EngineApi::new_payload_v2(self, payload).await?) } + /// Handler for `engine_newPayloadV3` + /// See also async fn new_payload_v3( &self, _payload: ExecutionPayload, @@ -517,6 +519,15 @@ where Ok(EngineApi::get_payload_v2(self, payload_id).await?) } + /// Handler for `engine_getPayloadV3` + /// + /// Returns the most recent version of the payload that is available in the corresponding + /// payload build process at the time of receiving this call. + /// + /// See also + /// + /// Note: + /// > Provider software MAY stop the corresponding build process after serving this call. 
async fn get_payload_v3(&self, _payload_id: PayloadId) -> RpcResult { Err(jsonrpsee_types::error::ErrorCode::MethodNotFound.into()) } diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs index bc9f1f4249ae..a39b59eea5a7 100644 --- a/crates/rpc/rpc-engine-api/tests/it/payload.rs +++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ -57,7 +57,7 @@ fn payload_validation() { b.header.extra_data = BytesMut::zeroed(32).freeze().into(); b }); - assert_matches!(TryInto::::try_into(block_with_valid_extra_data), Ok(_)); + assert_matches!(block_with_valid_extra_data.try_into_sealed_block(None), Ok(_)); // Invalid extra data let block_with_invalid_extra_data: Bytes = BytesMut::zeroed(33).freeze(); @@ -66,7 +66,7 @@ fn payload_validation() { b }); assert_matches!( - TryInto::::try_into(invalid_extra_data_block), + invalid_extra_data_block.try_into_sealed_block(None), Err(PayloadError::ExtraData(data)) if data == block_with_invalid_extra_data ); @@ -76,7 +76,7 @@ fn payload_validation() { b }); assert_matches!( - TryInto::::try_into(block_with_zero_base_fee), + block_with_zero_base_fee.try_into_sealed_block(None), Err(PayloadError::BaseFee(val)) if val == U256::ZERO ); @@ -86,7 +86,7 @@ fn payload_validation() { *tx = Bytes::new().into(); }); assert_matches!( - TryInto::::try_into(payload_with_invalid_txs), + payload_with_invalid_txs.try_into_sealed_block(None), Err(PayloadError::Decode(DecodeError::InputTooShort)) ); @@ -96,7 +96,7 @@ fn payload_validation() { b }); assert_matches!( - TryInto::::try_into(block_with_ommers.clone()), + block_with_ommers.clone().try_into_sealed_block(None), Err(PayloadError::BlockHash { consensus, .. }) if consensus == block_with_ommers.block_hash ); @@ -107,7 +107,7 @@ fn payload_validation() { b }); assert_matches!( - TryInto::::try_into(block_with_difficulty.clone()), + block_with_difficulty.clone().try_into_sealed_block(None), Err(PayloadError::BlockHash { consensus, .. }) if consensus == block_with_difficulty.block_hash ); @@ -117,7 +117,7 @@ fn payload_validation() { b }); assert_matches!( - TryInto::::try_into(block_with_nonce.clone()), + block_with_nonce.clone().try_into_sealed_block(None), Err(PayloadError::BlockHash { consensus, .. 
}) if consensus == block_with_nonce.block_hash ); diff --git a/crates/rpc/rpc-types/src/eth/block.rs b/crates/rpc/rpc-types/src/eth/block.rs index 5588d55611f6..6db49cb7dee8 100644 --- a/crates/rpc/rpc-types/src/eth/block.rs +++ b/crates/rpc/rpc-types/src/eth/block.rs @@ -131,6 +131,9 @@ pub struct Header { /// Excess blob gas #[serde(rename = "excessBlobGas", skip_serializing_if = "Option::is_none")] pub excess_blob_gas: Option, + /// Parent beacon block root + #[serde(rename = "parentBeaconBlockRoot", skip_serializing_if = "Option::is_none")] + pub parent_beacon_block_root: Option, } // === impl Header === @@ -162,6 +165,7 @@ impl Header { withdrawals_root, blob_gas_used, excess_blob_gas, + parent_beacon_block_root, }, hash, } = primitive_header; @@ -187,6 +191,7 @@ impl Header { base_fee_per_gas: base_fee_per_gas.map(U256::from), blob_gas_used: blob_gas_used.map(U64::from), excess_blob_gas: excess_blob_gas.map(U64::from), + parent_beacon_block_root, } } } @@ -318,6 +323,7 @@ mod tests { base_fee_per_gas: Some(U256::from(20)), blob_gas_used: None, excess_blob_gas: None, + parent_beacon_block_root: None, }, total_difficulty: Some(U256::from(100000)), uncles: vec![H256::from_low_u64_be(17)], @@ -358,6 +364,7 @@ mod tests { base_fee_per_gas: Some(U256::from(20)), blob_gas_used: None, excess_blob_gas: None, + parent_beacon_block_root: None, }, total_difficulty: Some(U256::from(100000)), uncles: vec![H256::from_low_u64_be(17)], diff --git a/crates/rpc/rpc-types/src/eth/engine/payload.rs b/crates/rpc/rpc-types/src/eth/engine/payload.rs index 84751e7f7aa2..38d330ff94ef 100644 --- a/crates/rpc/rpc-types/src/eth/engine/payload.rs +++ b/crates/rpc/rpc-types/src/eth/engine/payload.rs @@ -135,27 +135,28 @@ impl From for ExecutionPayload { } } -/// Try to construct a block from given payload. Perform addition validation of `extra_data` and -/// `base_fee_per_gas` fields. -/// -/// NOTE: The log bloom is assumed to be validated during serialization. -/// NOTE: Empty ommers, nonce and difficulty values are validated upon computing block hash and -/// comparing the value with `payload.block_hash`. -/// -/// See -impl TryFrom for SealedBlock { - type Error = PayloadError; - - fn try_from(payload: ExecutionPayload) -> Result { - if payload.extra_data.len() > MAXIMUM_EXTRA_DATA_SIZE { - return Err(PayloadError::ExtraData(payload.extra_data)) +impl ExecutionPayload { + /// Tries to create a new block from the given payload and optional parent beacon block root. + /// Perform additional validation of `extra_data` and `base_fee_per_gas` fields. + /// + /// NOTE: The log bloom is assumed to be validated during serialization. + /// NOTE: Empty ommers, nonce and difficulty values are validated upon computing block hash and + /// comparing the value with `payload.block_hash`. 
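The `skip_serializing_if = "Option::is_none"` attribute used for the new field keeps pre-Cancun headers byte-identical on the wire: the key is omitted entirely rather than emitted as `null`. A minimal sketch with a cut-down stand-in for the RPC `Header`; it requires the `serde` (with derive) and `serde_json` crates:

use serde::Serialize;

#[derive(Serialize)]
struct Header {
    #[serde(rename = "excessBlobGas", skip_serializing_if = "Option::is_none")]
    excess_blob_gas: Option<u64>,
    #[serde(rename = "parentBeaconBlockRoot", skip_serializing_if = "Option::is_none")]
    parent_beacon_block_root: Option<String>,
}

fn main() {
    // Pre-Cancun: neither key appears at all.
    let pre_cancun = Header { excess_blob_gas: None, parent_beacon_block_root: None };
    assert_eq!(serde_json::to_string(&pre_cancun).unwrap(), "{}");

    // Post-Cancun: both keys appear, camel-cased, only when set.
    let post_cancun = Header {
        excess_blob_gas: Some(0),
        parent_beacon_block_root: Some("0xdead".into()),
    };
    println!("{}", serde_json::to_string(&post_cancun).unwrap());
}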
+ /// + /// See + pub fn try_into_sealed_block( + self, + parent_beacon_block_root: Option, + ) -> Result { + if self.extra_data.len() > MAXIMUM_EXTRA_DATA_SIZE { + return Err(PayloadError::ExtraData(self.extra_data)) } - if payload.base_fee_per_gas < MIN_PROTOCOL_BASE_FEE_U256 { - return Err(PayloadError::BaseFee(payload.base_fee_per_gas)) + if self.base_fee_per_gas < MIN_PROTOCOL_BASE_FEE_U256 { + return Err(PayloadError::BaseFee(self.base_fee_per_gas)) } - let transactions = payload + let transactions = self .transactions .iter() .map(|tx| TransactionSigned::decode(&mut tx.as_ref())) @@ -163,32 +164,30 @@ impl TryFrom for SealedBlock { let transactions_root = proofs::calculate_transaction_root(&transactions); let withdrawals_root = - payload.withdrawals.as_ref().map(|w| proofs::calculate_withdrawals_root(w)); + self.withdrawals.as_ref().map(|w| proofs::calculate_withdrawals_root(w)); let header = Header { - parent_hash: payload.parent_hash, - beneficiary: payload.fee_recipient, - state_root: payload.state_root, + parent_hash: self.parent_hash, + beneficiary: self.fee_recipient, + state_root: self.state_root, transactions_root, - receipts_root: payload.receipts_root, + receipts_root: self.receipts_root, withdrawals_root, - logs_bloom: payload.logs_bloom, - number: payload.block_number.as_u64(), - gas_limit: payload.gas_limit.as_u64(), - gas_used: payload.gas_used.as_u64(), - timestamp: payload.timestamp.as_u64(), - mix_hash: payload.prev_randao, + parent_beacon_block_root, + logs_bloom: self.logs_bloom, + number: self.block_number.as_u64(), + gas_limit: self.gas_limit.as_u64(), + gas_used: self.gas_used.as_u64(), + timestamp: self.timestamp.as_u64(), + mix_hash: self.prev_randao, base_fee_per_gas: Some( - payload - .base_fee_per_gas + self.base_fee_per_gas .uint_try_to() - .map_err(|_| PayloadError::BaseFee(payload.base_fee_per_gas))?, + .map_err(|_| PayloadError::BaseFee(self.base_fee_per_gas))?, ), - blob_gas_used: payload.blob_gas_used.map(|blob_gas_used| blob_gas_used.as_u64()), - excess_blob_gas: payload - .excess_blob_gas - .map(|excess_blob_gas| excess_blob_gas.as_u64()), - extra_data: payload.extra_data, + blob_gas_used: self.blob_gas_used.map(|blob_gas_used| blob_gas_used.as_u64()), + excess_blob_gas: self.excess_blob_gas.map(|excess_blob_gas| excess_blob_gas.as_u64()), + extra_data: self.extra_data, // Defaults ommers_hash: EMPTY_LIST_HASH, difficulty: Default::default(), @@ -196,17 +195,17 @@ impl TryFrom for SealedBlock { } .seal_slow(); - if payload.block_hash != header.hash() { + if self.block_hash != header.hash() { return Err(PayloadError::BlockHash { execution: header.hash(), - consensus: payload.block_hash, + consensus: self.block_hash, }) } Ok(SealedBlock { header, body: transactions, - withdrawals: payload.withdrawals, + withdrawals: self.withdrawals, ommers: Default::default(), }) } diff --git a/crates/rpc/rpc/src/eth/api/pending_block.rs b/crates/rpc/rpc/src/eth/api/pending_block.rs index 97285309e6e4..3700de63d6f0 100644 --- a/crates/rpc/rpc/src/eth/api/pending_block.rs +++ b/crates/rpc/rpc/src/eth/api/pending_block.rs @@ -145,6 +145,7 @@ impl PendingBlockEnv { blob_gas_used: None, excess_blob_gas: None, extra_data: Default::default(), + parent_beacon_block_root: None, }; // seal the block diff --git a/testing/ef-tests/src/models.rs b/testing/ef-tests/src/models.rs index 7276174b4099..5171f4fe9922 100644 --- a/testing/ef-tests/src/models.rs +++ b/testing/ef-tests/src/models.rs @@ -82,6 +82,8 @@ pub struct Header { pub blob_gas_used: Option, /// Excess blob gas. 
pub excess_blob_gas: Option, + /// Parent beacon block root. + pub parent_beacon_block_root: Option<H256>, } impl From<Header> for SealedHeader { @@ -106,6 +108,7 @@ impl From<Header>
for SealedHeader { withdrawals_root: value.withdrawals_root, blob_gas_used: value.blob_gas_used.map(|v| v.0.to::()), excess_blob_gas: value.excess_blob_gas.map(|v| v.0.to::()), + parent_beacon_block_root: value.parent_beacon_block_root, }; header.seal(value.hash) } From 505be4555997790713ca682dcbc08d3b3c509012 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 29 Aug 2023 11:22:28 -0700 Subject: [PATCH 554/722] feat: integrate price bump (#4398) --- bin/reth/src/args/txpool_args.rs | 14 ++++-- crates/primitives/src/transaction/mod.rs | 15 +++---- crates/transaction-pool/src/config.rs | 21 +++++++-- crates/transaction-pool/src/pool/txpool.rs | 52 +++++++++++++--------- 4 files changed, 67 insertions(+), 35 deletions(-) diff --git a/bin/reth/src/args/txpool_args.rs b/bin/reth/src/args/txpool_args.rs index 51d102a0a6d8..0f01f5c42284 100644 --- a/bin/reth/src/args/txpool_args.rs +++ b/bin/reth/src/args/txpool_args.rs @@ -2,8 +2,9 @@ use clap::Args; use reth_transaction_pool::{ - PoolConfig, SubPoolLimit, DEFAULT_PRICE_BUMP, TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, - TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, + PoolConfig, PriceBumpConfig, SubPoolLimit, DEFAULT_PRICE_BUMP, REPLACE_BLOB_PRICE_BUMP, + TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, + TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, }; /// Parameters for debugging purposes @@ -37,6 +38,10 @@ pub struct TxPoolArgs { /// Price bump (in %) for the transaction pool underpriced check. #[arg(long = "txpool.pricebump", help_heading = "TxPool", default_value_t = DEFAULT_PRICE_BUMP)] pub price_bump: u128, + + /// Price bump percentage to replace an already existing blob transaction + #[arg(long = "blobpool.pricebump", help_heading = "TxPool", default_value_t = REPLACE_BLOB_PRICE_BUMP)] + pub blob_transaction_price_bump: u128, } impl TxPoolArgs { @@ -56,7 +61,10 @@ impl TxPoolArgs { max_size: self.queued_max_size * 1024 * 1024, }, max_account_slots: self.max_account_slots, - price_bump: self.price_bump, + price_bumps: PriceBumpConfig { + default_price_bump: self.price_bump, + replace_blob_tx_price_bump: self.blob_transaction_price_bump, + }, } } } diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index ae420d4f7bce..a47471e96e65 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -5,9 +5,16 @@ use crate::{ pub use access_list::{AccessList, AccessListItem, AccessListWithGasUsed}; use bytes::{Buf, BytesMut}; use derive_more::{AsRef, Deref}; +pub use eip1559::TxEip1559; +pub use eip2930::TxEip2930; +pub use eip4844::{ + BlobTransaction, BlobTransactionSidecar, BlobTransactionValidationError, TxEip4844, +}; pub use error::InvalidTransactionError; +pub use legacy::TxLegacy; pub use meta::TransactionMeta; use once_cell::sync::Lazy; +pub use pooled::{PooledTransactionsElement, PooledTransactionsElementEcRecovered}; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; use reth_codecs::{add_arbitrary_tests, derive_arbitrary, Compact}; use reth_rlp::{Decodable, DecodeError, Encodable, Header, EMPTY_LIST_CODE, EMPTY_STRING_CODE}; @@ -18,14 +25,6 @@ pub use tx_type::{ TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, }; -pub use eip1559::TxEip1559; -pub use eip2930::TxEip2930; -pub use eip4844::{ - BlobTransaction, BlobTransactionSidecar, BlobTransactionValidationError, TxEip4844, -}; -pub use legacy::TxLegacy; -pub use pooled::{PooledTransactionsElement, 
PooledTransactionsElementEcRecovered}; - mod access_list; mod eip1559; mod eip2930; diff --git a/crates/transaction-pool/src/config.rs b/crates/transaction-pool/src/config.rs index aa6896305b40..2e561e35fbe3 100644 --- a/crates/transaction-pool/src/config.rs +++ b/crates/transaction-pool/src/config.rs @@ -1,3 +1,5 @@ +use reth_primitives::EIP4844_TX_TYPE_ID; + /// Guarantees max transactions for one sender, compatible with geth/erigon pub const TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER: usize = 16; @@ -11,6 +13,8 @@ pub const TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT: usize = 20; pub const DEFAULT_PRICE_BUMP: u128 = 10; /// Replace blob price bump (in %) for the transaction pool underpriced check. +/// +/// This enforces that a blob transaction requires a 100% price bump to be replaced pub const REPLACE_BLOB_PRICE_BUMP: u128 = 100; /// Configuration options for the Transaction pool. @@ -25,7 +29,7 @@ pub struct PoolConfig { /// Max number of executable transaction slots guaranteed per account pub max_account_slots: usize, /// Price bump (in %) for the transaction pool underpriced check. - pub price_bump: u128, + pub price_bumps: PriceBumpConfig, } impl Default for PoolConfig { @@ -35,7 +39,7 @@ impl Default for PoolConfig { basefee_limit: Default::default(), queued_limit: Default::default(), max_account_slots: TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, - price_bump: PriceBumpConfig::default().default_price_bump, + price_bumps: Default::default(), } } } @@ -68,7 +72,7 @@ impl Default for SubPoolLimit { } /// Price bump config (in %) for the transaction pool underpriced check. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct PriceBumpConfig { /// Default price bump (in %) for the transaction pool underpriced check. pub default_price_bump: u128, @@ -76,6 +80,17 @@ pub struct PriceBumpConfig { pub replace_blob_tx_price_bump: u128, } +impl PriceBumpConfig { + /// Returns the price bump required to replace the given transaction type. + #[inline] + pub(crate) fn price_bump(&self, tx_type: u8) -> u128 { + if tx_type == EIP4844_TX_TYPE_ID { + return self.replace_blob_tx_price_bump + } + self.default_price_bump + } +} + impl Default for PriceBumpConfig { fn default() -> Self { Self { diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 29a6fa9e9e6c..1b49ea5d7de1 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -13,7 +13,8 @@ use crate::{ AddedPendingTransaction, AddedTransaction, OnNewCanonicalStateOutcome, }, traits::{BlockInfo, PoolSize}, - PoolConfig, PoolResult, PoolTransaction, TransactionOrdering, ValidPoolTransaction, U256, + PoolConfig, PoolResult, PoolTransaction, PriceBumpConfig, TransactionOrdering, + ValidPoolTransaction, U256, }; use fnv::FnvHashMap; use reth_primitives::{ @@ -99,7 +100,7 @@ impl TxPool { pending_pool: PendingPool::new(ordering), queued_pool: Default::default(), basefee_pool: Default::default(), - all_transactions: AllTransactions::new(config.max_account_slots), + all_transactions: AllTransactions::new(&config), config, metrics: Default::default(), } @@ -682,12 +683,18 @@ pub(crate) struct AllTransactions { last_seen_block_hash: H256, /// Expected base fee for the pending block. 
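Concretely, `price_bump` selects the 10% default for ordinary replacements and the 100% bump for blob transactions, and the pool's underpriced check compares against `existing * (100 + price_bump) / 100`. A minimal sketch of the resulting minimum replacement fee; `min_replacement_fee` is a hypothetical helper, and the `+ 1` reflects the `<=` in the pool's check:

const EIP4844_TX_TYPE_ID: u8 = 3;

#[derive(Clone, Copy)]
struct PriceBumpConfig {
    default_price_bump: u128,
    replace_blob_tx_price_bump: u128,
}

impl PriceBumpConfig {
    fn price_bump(&self, tx_type: u8) -> u128 {
        if tx_type == EIP4844_TX_TYPE_ID {
            return self.replace_blob_tx_price_bump;
        }
        self.default_price_bump
    }
}

/// Smallest max_fee_per_gas that is *not* underpriced against `existing_fee`.
fn min_replacement_fee(existing_fee: u128, bumps: &PriceBumpConfig, tx_type: u8) -> u128 {
    existing_fee * (100 + bumps.price_bump(tx_type)) / 100 + 1
}

fn main() {
    let bumps = PriceBumpConfig { default_price_bump: 10, replace_blob_tx_price_bump: 100 };
    // A regular tx paying 100 gwei needs a replacement above 110 gwei...
    assert_eq!(min_replacement_fee(100, &bumps, 2), 111);
    // ...while a blob tx must at least double its fee to be replaced.
    assert_eq!(min_replacement_fee(100, &bumps, EIP4844_TX_TYPE_ID), 201);
}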
pending_basefee: u64, + /// Configured price bump settings for replacements + price_bumps: PriceBumpConfig, } impl AllTransactions { /// Create a new instance - fn new(max_account_slots: usize) -> Self { - Self { max_account_slots, ..Default::default() } + fn new(config: &PoolConfig) -> Self { + Self { + max_account_slots: config.max_account_slots, + price_bumps: config.price_bumps, + ..Default::default() + } } /// Returns an iterator over all _unique_ hashes in the pool @@ -1031,23 +1038,26 @@ impl AllTransactions { Ok(transaction) } - /// Returns true if `transaction_a` is underpriced compared to `transaction_B`. + /// Returns true if the replacement candidate is underpriced and can't replace the existing + /// transaction. fn is_underpriced( - transaction_a: &ValidPoolTransaction, - transaction_b: &ValidPoolTransaction, - price_bump: u128, + existing_transaction: &ValidPoolTransaction, + maybe_replacement: &ValidPoolTransaction, + price_bumps: &PriceBumpConfig, ) -> bool { - let tx_a_max_priority_fee_per_gas = - transaction_a.transaction.max_priority_fee_per_gas().unwrap_or(0); - let tx_b_max_priority_fee_per_gas = - transaction_b.transaction.max_priority_fee_per_gas().unwrap_or(0); + let price_bump = price_bumps.price_bump(existing_transaction.tx_type()); + + let existing_max_priority_fee_per_gas = + maybe_replacement.transaction.max_priority_fee_per_gas().unwrap_or(0); + let replacement_max_priority_fee_per_gas = + existing_transaction.transaction.max_priority_fee_per_gas().unwrap_or(0); - transaction_a.max_fee_per_gas() <= - transaction_b.max_fee_per_gas() * (100 + price_bump) / 100 || - (tx_a_max_priority_fee_per_gas <= - tx_b_max_priority_fee_per_gas * (100 + price_bump) / 100 && - tx_a_max_priority_fee_per_gas != 0 && - tx_b_max_priority_fee_per_gas != 0) + maybe_replacement.max_fee_per_gas() <= + existing_transaction.max_fee_per_gas() * (100 + price_bump) / 100 || + (existing_max_priority_fee_per_gas <= + replacement_max_priority_fee_per_gas * (100 + price_bump) / 100 && + existing_max_priority_fee_per_gas != 0 && + replacement_max_priority_fee_per_gas != 0) } /// Inserts a new transaction into the pool. 
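One readability note on the hunk above: the two `*_max_priority_fee_per_gas` bindings each read from the opposite transaction to the one they are named after, which happens to cancel out to the intended rule. A minimal restatement with unambiguous operand names, using plain `u128` fees as stand-ins for the pool's transaction types:

/// A candidate is underpriced unless its max fee clears the bump threshold
/// and, when both sides set one, its priority fee does too.
fn is_underpriced(
    existing_max_fee: u128,
    existing_prio_fee: u128,
    replacement_max_fee: u128,
    replacement_prio_fee: u128,
    price_bump: u128,
) -> bool {
    let threshold = |fee: u128| fee * (100 + price_bump) / 100;
    replacement_max_fee <= threshold(existing_max_fee)
        || (replacement_prio_fee <= threshold(existing_prio_fee)
            && existing_prio_fee != 0
            && replacement_prio_fee != 0)
}

fn main() {
    // 10% bump: raising only the max fee is not enough if the tip stagnates.
    assert!(is_underpriced(100, 10, 120, 10, 10));
    // Raising both fees past the threshold clears the check.
    assert!(!is_underpriced(100, 10, 120, 12, 10));
}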
@@ -1117,11 +1127,10 @@ impl AllTransactions { Entry::Occupied(mut entry) => { // Transaction already exists // Ensure the new transaction is not underpriced - if Self::is_underpriced( - transaction.as_ref(), entry.get().transaction.as_ref(), - PoolConfig::default().price_bump, + transaction.as_ref(), + &self.price_bumps, ) { return Err(InsertErr::Underpriced { transaction: pool_tx.transaction, @@ -1257,6 +1266,7 @@ impl Default for AllTransactions { last_seen_block_number: 0, last_seen_block_hash: Default::default(), pending_basefee: Default::default(), + price_bumps: Default::default(), } } } From 82fb0eedb392f6324c316210b524f2bf7fb4c864 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 29 Aug 2023 11:33:51 -0700 Subject: [PATCH 555/722] feat: integrate blobs into the payload builder (#4305) --- Cargo.lock | 1 + crates/payload/basic/src/lib.rs | 61 +++++++++++++++++-- crates/payload/builder/Cargo.toml | 1 + crates/payload/builder/src/error.rs | 4 ++ crates/payload/builder/src/payload.rs | 39 ++++++++++-- crates/rpc/rpc-engine-api/src/engine_api.rs | 22 ++++++- .../rpc/rpc-types/src/eth/engine/payload.rs | 33 +++++++--- crates/transaction-pool/src/lib.rs | 2 +- 8 files changed, 142 insertions(+), 21 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e9dbd9603733..9ed01683261d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5751,6 +5751,7 @@ dependencies = [ "reth-revm-primitives", "reth-rlp", "reth-rpc-types", + "reth-transaction-pool", "revm-primitives", "sha2", "thiserror", diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 864a6443d272..1ae9d0ef896a 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -22,7 +22,9 @@ use reth_payload_builder::{ }; use reth_primitives::{ bytes::{Bytes, BytesMut}, + calculate_excess_blob_gas, constants::{ + eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, BEACON_NONCE, EMPTY_RECEIPTS, EMPTY_TRANSACTIONS, EMPTY_WITHDRAWALS, ETHEREUM_BLOCK_GAS_LIMIT, RETH_CLIENT_VERSION, SLOT_DURATION, }, @@ -651,6 +653,7 @@ where let mut post_state = PostState::default(); let mut cumulative_gas_used = 0; + let mut sum_blob_gas_used = 0; let block_gas_limit: u64 = initialized_block_env.gas_limit.try_into().unwrap_or(u64::MAX); let base_fee = initialized_block_env.basefee.to::(); @@ -679,6 +682,20 @@ where // convert tx to a signed transaction let tx = pool_tx.to_recovered_transaction(); + if let Some(blob_tx) = tx.transaction.as_eip4844() { + let tx_blob_gas = blob_tx.blob_versioned_hashes.len() as u64 * DATA_GAS_PER_BLOB; + if sum_blob_gas_used + tx_blob_gas > MAX_DATA_GAS_PER_BLOCK { + // we can't fit this _blob_ transaction into the block, so we mark it as invalid, + // which removes its dependent transactions from the iterator. This is similar to + // the gas limit condition for regular transactions above. + best_txs.mark_invalid(&pool_tx); + continue + } else { + // add to the data gas if we're going to execute the transaction + sum_blob_gas_used += tx_blob_gas; + } + } + // Configure the environment for the block. let env = Env { cfg: initialized_cfg.clone(), @@ -765,6 +782,34 @@ where // create the block header let transactions_root = proofs::calculate_transaction_root(&executed_txs); + // initialize empty blob sidecars at first. 
If cancun is active then this will + let mut blob_sidecars = Vec::new(); + let mut excess_blob_gas = None; + let mut blob_gas_used = None; + + // only determine cancun fields when active + if chain_spec.is_cancun_activated_at_timestamp(attributes.timestamp) { + // grab the blob sidecars from the executed txs + let blobs = pool.get_all_blobs( + executed_txs.iter().filter(|tx| tx.is_eip4844()).map(|tx| tx.hash).collect(), + )?; + + // map to just the sidecars + blob_sidecars = blobs.into_iter().map(|(_, sidecars)| sidecars).collect(); + + excess_blob_gas = if chain_spec.is_cancun_activated_at_timestamp(parent_block.timestamp) { + let parent_excess_blob_gas = parent_block.excess_blob_gas.unwrap_or_default(); + let parent_blob_gas_used = parent_block.blob_gas_used.unwrap_or_default(); + Some(calculate_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used)) + } else { + // for the first post-fork block, both parent.blob_gas_used and parent.excess_blob_gas + // are evaluated as 0 + Some(calculate_excess_blob_gas(0, 0)) + }; + + blob_gas_used = Some(sum_blob_gas_used); + } + let header = Header { parent_hash: parent_block.hash, ommers_hash: EMPTY_OMMER_ROOT, @@ -783,19 +828,23 @@ where difficulty: U256::ZERO, gas_used: cumulative_gas_used, extra_data: extra_data.into(), - blob_gas_used: None, - excess_blob_gas: None, parent_beacon_block_root: None, + blob_gas_used, + excess_blob_gas, }; // seal the block let block = Block { header, body: executed_txs, ommers: vec![], withdrawals }; let sealed_block = block.seal_slow(); - Ok(BuildOutcome::Better { - payload: BuiltPayload::new(attributes.id, sealed_block, total_fees), - cached_reads, - }) + let mut payload = BuiltPayload::new(attributes.id, sealed_block, total_fees); + + if !blob_sidecars.is_empty() { + // extend the payload with the blob sidecars from the executed txs + payload.extend_sidecars(blob_sidecars); + } + + Ok(BuildOutcome::Better { payload, cached_reads }) } /// Builds an empty payload without any transactions. diff --git a/crates/payload/builder/Cargo.toml b/crates/payload/builder/Cargo.toml index 7fe3b992feaa..48a8a3ec6b44 100644 --- a/crates/payload/builder/Cargo.toml +++ b/crates/payload/builder/Cargo.toml @@ -13,6 +13,7 @@ description = "reth payload builder" reth-primitives.workspace = true reth-rpc-types.workspace = true reth-rlp.workspace = true +reth-transaction-pool.workspace = true reth-interfaces.workspace = true reth-revm-primitives = { path = "../../revm/revm-primitives" } diff --git a/crates/payload/builder/src/error.rs b/crates/payload/builder/src/error.rs index c357662a1ec6..87c183d424f1 100644 --- a/crates/payload/builder/src/error.rs +++ b/crates/payload/builder/src/error.rs @@ -1,6 +1,7 @@ //! Error types emitted by types or implementations of this crate. use reth_primitives::H256; +use reth_transaction_pool::BlobStoreError; use revm_primitives::EVMError; use tokio::sync::oneshot; @@ -13,6 +14,9 @@ pub enum PayloadBuilderError { /// An oneshot channels has been closed. #[error("sender has been dropped")] ChannelClosed, + /// Error occurring in the blob store. + #[error(transparent)] + BlobStore(#[from] BlobStoreError), /// Other internal error #[error(transparent)] Internal(#[from] reth_interfaces::Error), diff --git a/crates/payload/builder/src/payload.rs b/crates/payload/builder/src/payload.rs index 0deb6980875b..cb17dc9b8fec 100644 --- a/crates/payload/builder/src/payload.rs +++ b/crates/payload/builder/src/payload.rs @@ -1,6 +1,8 @@ //! Contains types required for building a payload. 
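The `calculate_excess_blob_gas` call above implements the EIP-4844 update rule: the parent's carried excess plus its blob gas used, minus the per-block target, floored at zero. A minimal sketch, assuming the Cancun target of three blobs per block:

const DATA_GAS_PER_BLOB: u64 = 131_072; // 2^17, per EIP-4844
const TARGET_DATA_GAS_PER_BLOCK: u64 = 3 * DATA_GAS_PER_BLOB;

fn calculate_excess_blob_gas(parent_excess_blob_gas: u64, parent_blob_gas_used: u64) -> u64 {
    (parent_excess_blob_gas + parent_blob_gas_used).saturating_sub(TARGET_DATA_GAS_PER_BLOCK)
}

fn main() {
    // First post-Cancun block: both parent values are treated as zero.
    assert_eq!(calculate_excess_blob_gas(0, 0), 0);
    // A full parent block (6 blobs) pushes excess above target by 3 blobs' worth.
    assert_eq!(
        calculate_excess_blob_gas(0, 6 * DATA_GAS_PER_BLOB),
        3 * DATA_GAS_PER_BLOB
    );
}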
-use reth_primitives::{Address, ChainSpec, Header, SealedBlock, Withdrawal, H256, U256}; +use reth_primitives::{ + Address, BlobTransactionSidecar, ChainSpec, Header, SealedBlock, Withdrawal, H256, U256, +}; use reth_revm_primitives::config::revm_spec_by_timestamp_after_merge; use reth_rlp::Encodable; use reth_rpc_types::engine::{ @@ -21,6 +23,9 @@ pub struct BuiltPayload { pub(crate) block: SealedBlock, /// The fees of the block pub(crate) fees: U256, + /// The blobs, proofs, and commitments in the block. If the block is pre-cancun, this will be + /// empty. + pub(crate) sidecars: Vec, } // === impl BuiltPayload === @@ -28,7 +33,7 @@ pub struct BuiltPayload { impl BuiltPayload { /// Initializes the payload with the given initial block. pub fn new(id: PayloadId, block: SealedBlock, fees: U256) -> Self { - Self { id, block, fees } + Self { id, block, fees, sidecars: Vec::new() } } /// Returns the identifier of the payload. @@ -46,6 +51,11 @@ impl BuiltPayload { self.fees } + /// Adds sidecars to the payload. + pub fn extend_sidecars(&mut self, sidecars: Vec) { + self.sidecars.extend(sidecars) + } + /// Converts the type into the response expected by `engine_getPayloadV1` pub fn into_v1_payload(self) -> ExecutionPayload { self.into() @@ -53,6 +63,14 @@ impl BuiltPayload { /// Converts the type into the response expected by `engine_getPayloadV2` pub fn into_v2_payload(self) -> ExecutionPayloadEnvelope { + let mut envelope: ExecutionPayloadEnvelope = self.into(); + envelope.blobs_bundle = None; + envelope.should_override_builder = None; + envelope + } + + /// Converts the type into the response expected by `engine_getPayloadV2` + pub fn into_v3_payload(self) -> ExecutionPayloadEnvelope { self.into() } } @@ -65,14 +83,27 @@ impl From for ExecutionPayload { } // V2 engine_getPayloadV2 response +// TODO(rjected): we could improve this by wrapping envelope / payload types by version, so we can +// have explicitly versioned return types for getPayload. Then BuiltPayload could essentially be a +// builder for those types, and it would not be possible to e.g. return cancun fields for a +// pre-cancun endpoint. impl From for ExecutionPayloadEnvelope { fn from(value: BuiltPayload) -> Self { - let BuiltPayload { block, fees, .. } = value; + let BuiltPayload { block, fees, sidecars, .. } = value; ExecutionPayloadEnvelope { block_value: fees, payload: block.into(), - should_override_builder: None, + // From the engine API spec: + // + // > Client software **MAY** use any heuristics to decide whether to set + // `shouldOverrideBuilder` flag or not. If client software does not implement any + // heuristic this flag **SHOULD** be set to `false`. + // + // Spec: + // + should_override_builder: Some(false), + blobs_bundle: Some(sidecars.into()), } } } diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 1a6758b86e8d..6a9afef4362f 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -180,7 +180,7 @@ where /// /// Note: /// > Provider software MAY stop the corresponding build process after serving this call. - async fn get_payload_v2( + pub async fn get_payload_v2( &self, payload_id: PayloadId, ) -> EngineApiResult { @@ -193,6 +193,26 @@ where .map(|payload| (*payload).clone().into_v2_payload())?) } + /// Returns the most recent version of the payload that is available in the corresponding + /// payload build process at the time of receiving this call. 
+ /// + /// See also + /// + /// Note: + /// > Provider software MAY stop the corresponding build process after serving this call. + pub async fn get_payload_v3( + &self, + payload_id: PayloadId, + ) -> EngineApiResult { + Ok(self + .inner + .payload_store + .resolve(payload_id) + .await + .ok_or(EngineApiError::UnknownPayload)? + .map(|payload| (*payload).clone().into_v3_payload())?) + } + /// Returns the execution payload bodies by the range starting at `start`, containing `count` /// blocks. /// diff --git a/crates/rpc/rpc-types/src/eth/engine/payload.rs b/crates/rpc/rpc-types/src/eth/engine/payload.rs index 38d330ff94ef..fa8cae18422e 100644 --- a/crates/rpc/rpc-types/src/eth/engine/payload.rs +++ b/crates/rpc/rpc-types/src/eth/engine/payload.rs @@ -1,8 +1,9 @@ use reth_primitives::{ constants::{MAXIMUM_EXTRA_DATA_SIZE, MIN_PROTOCOL_BASE_FEE_U256}, + kzg::{Blob, Bytes48}, proofs::{self, EMPTY_LIST_HASH}, - Address, Block, Bloom, Bytes, Header, SealedBlock, TransactionSigned, UintTryTo, Withdrawal, - H256, H64, U256, U64, + Address, BlobTransactionSidecar, Block, Bloom, Bytes, Header, SealedBlock, TransactionSigned, + UintTryTo, Withdrawal, H256, H64, U256, U64, }; use reth_rlp::Decodable; use serde::{ser::SerializeMap, Deserialize, Serialize, Serializer}; @@ -49,10 +50,9 @@ pub struct ExecutionPayloadEnvelope { /// The expected value to be received by the feeRecipient in wei #[serde(rename = "blockValue")] pub block_value: U256, - // - // // TODO(mattsse): for V3 - // #[serde(rename = "blobsBundle", skip_serializing_if = "Option::is_none")] - // pub blobs_bundle: Option, + /// The blobs, commitments, and proofs associated with the executed payload. + #[serde(rename = "blobsBundle", skip_serializing_if = "Option::is_none")] + pub blobs_bundle: Option, /// Introduced in V3, this represents a suggestion from the execution layer if the payload /// should be used instead of an externally provided one. #[serde(rename = "shouldOverrideBuilder", skip_serializing_if = "Option::is_none")] @@ -214,9 +214,24 @@ impl ExecutionPayload { /// This includes all bundled blob related data of an executed payload. #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct BlobsBundleV1 { - pub commitments: Vec, - pub proofs: Vec, - pub blobs: Vec, + pub commitments: Vec, + pub proofs: Vec, + pub blobs: Vec, +} + +impl From> for BlobsBundleV1 { + fn from(sidecars: Vec) -> Self { + let (commitments, proofs, blobs) = sidecars.into_iter().fold( + (Vec::new(), Vec::new(), Vec::new()), + |(mut commitments, mut proofs, mut blobs), sidecar| { + commitments.extend(sidecar.commitments); + proofs.extend(sidecar.proofs); + blobs.extend(sidecar.blobs); + (commitments, proofs, blobs) + }, + ); + Self { commitments, proofs, blobs } + } } /// Error that can occur when handling payloads. 
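The fold above flattens per-transaction sidecars into one bundle while preserving block order, so index i lines up across `commitments`, `proofs`, and `blobs`. A minimal sketch with `Vec<u8>` standing in for the KZG `Blob`/`Bytes48` types:

#[derive(Default)]
struct Sidecar {
    blobs: Vec<Vec<u8>>,
    commitments: Vec<Vec<u8>>,
    proofs: Vec<Vec<u8>>,
}

#[derive(Default)]
struct BlobsBundle {
    commitments: Vec<Vec<u8>>,
    proofs: Vec<Vec<u8>>,
    blobs: Vec<Vec<u8>>,
}

impl From<Vec<Sidecar>> for BlobsBundle {
    fn from(sidecars: Vec<Sidecar>) -> Self {
        // Each sidecar's items are appended in transaction order, keeping the
        // three parallel vectors aligned by blob index.
        sidecars.into_iter().fold(BlobsBundle::default(), |mut bundle, sidecar| {
            bundle.commitments.extend(sidecar.commitments);
            bundle.proofs.extend(sidecar.proofs);
            bundle.blobs.extend(sidecar.blobs);
            bundle
        })
    }
}

fn main() {
    let tx1 = Sidecar { blobs: vec![vec![1]], commitments: vec![vec![10]], proofs: vec![vec![20]] };
    let tx2 = Sidecar {
        blobs: vec![vec![2], vec![3]],
        commitments: vec![vec![11], vec![12]],
        proofs: vec![vec![21], vec![22]],
    };
    let bundle = BlobsBundle::from(vec![tx1, tx2]);
    assert_eq!(bundle.blobs.len(), 3);
    assert_eq!(bundle.commitments[2], vec![12]); // the third blob's commitment
}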
diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 73db0fd23858..cdf928024fdf 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -165,8 +165,8 @@ use std::{ use tokio::sync::mpsc::Receiver; use tracing::{instrument, trace}; -use crate::blobstore::{BlobStore, BlobStoreError}; pub use crate::{ + blobstore::{BlobStore, BlobStoreError}, config::{ PoolConfig, PriceBumpConfig, SubPoolLimit, DEFAULT_PRICE_BUMP, REPLACE_BLOB_PRICE_BUMP, TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, From 3ffcae360e043d0585112e7dd02a5f914db4ed71 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Gonz=C3=A1lez?= Date: Wed, 30 Aug 2023 02:41:29 +0200 Subject: [PATCH 556/722] feat(cli): allow overriding kzg trusted setup (#4335) --- bin/reth/src/cli/mod.rs | 9 +++++ bin/reth/src/node/mod.rs | 43 ++++++++++++++------- crates/primitives/src/constants/eip4844.rs | 4 +- crates/transaction-pool/src/validate/eth.rs | 4 +- 4 files changed, 42 insertions(+), 18 deletions(-) diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index ea2624f2ef64..d63ed726366c 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -301,4 +301,13 @@ mod tests { let log_dir = reth.logs.log_directory; assert!(log_dir.as_ref().ends_with("reth/logs/sepolia"), "{:?}", log_dir); } + + #[test] + fn override_trusted_setup_file() { + // We already have a test that asserts that this has been initialized, + // so we cheat a little bit and check that loading a random file errors. + let reth = Cli::<()>::try_parse_from(["reth", "node", "--trusted-setup-file", "README.md"]) + .unwrap(); + assert!(reth.run().is_err()); + } } diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index bfdd6a711cbb..36d9c01fd1a7 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -44,8 +44,10 @@ use reth_interfaces::{ use reth_network::{error::NetworkError, NetworkConfig, NetworkHandle, NetworkManager}; use reth_network_api::NetworkInfo; use reth_primitives::{ - stage::StageId, BlockHashOrNumber, BlockNumber, ChainSpec, DisplayHardforks, Head, - SealedHeader, H256, + constants::eip4844::{LoadKzgSettingsError, MAINNET_KZG_TRUSTED_SETUP}, + kzg::KzgSettings, + stage::StageId, + BlockHashOrNumber, BlockNumber, ChainSpec, DisplayHardforks, Head, SealedHeader, H256, }; use reth_provider::{ providers::BlockchainProvider, BlockHashReader, BlockReader, CanonStateSubscriptions, @@ -122,6 +124,10 @@ pub struct NodeCommand { #[arg(long, value_name = "SOCKET", value_parser = parse_socket_address, help_heading = "Metrics")] pub metrics: Option, + /// Overrides the KZG trusted setup by reading from the supplied file. 
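The new flag feeds a load-or-fallback helper (`kzg_settings`, shown a few hunks below): an explicit file always wins and must parse, otherwise the baked-in mainnet setup is used. A minimal sketch of that control flow, with a stand-in `Settings` type instead of c-kzg's `KzgSettings` and a hypothetical loader that fails on unreadable files:

use std::path::PathBuf;
use std::sync::Arc;

struct Settings; // stand-in for reth_primitives::kzg::KzgSettings

fn load_trusted_setup_file(path: &PathBuf) -> Result<Settings, String> {
    // Stand-in for the real loader: a file that is not a trusted setup must
    // fail loudly rather than yield bad KZG parameters.
    std::fs::read_to_string(path).map(|_| Settings).map_err(|e| e.to_string())
}

fn kzg_settings(trusted_setup_file: Option<&PathBuf>) -> Result<Arc<Settings>, String> {
    match trusted_setup_file {
        // An explicit --trusted-setup-file always wins, and a bad file is an error...
        Some(path) => Ok(Arc::new(load_trusted_setup_file(path)?)),
        // ...otherwise fall back to the bundled mainnet setup.
        None => Ok(Arc::new(Settings)),
    }
}

fn main() {
    assert!(kzg_settings(None).is_ok());
    assert!(kzg_settings(Some(&PathBuf::from("/definitely/missing"))).is_err());
}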
+ #[arg(long, value_name = "PATH")] + trusted_setup_file: Option, + /// All networking related arguments #[clap(flatten)] pub network: NetworkArgs, @@ -167,6 +173,7 @@ impl NodeCommand { config, chain, metrics, + trusted_setup_file, network, rpc, txpool, @@ -182,6 +189,7 @@ impl NodeCommand { config, chain, metrics, + trusted_setup_file, network, rpc, txpool, @@ -262,19 +270,14 @@ impl NodeCommand { // setup the blockchain provider let factory = ProviderFactory::new(Arc::clone(&db), Arc::clone(&self.chain)); let blockchain_db = BlockchainProvider::new(factory, blockchain_tree.clone())?; - let blob_store = InMemoryBlobStore::default(); - let transaction_pool = reth_transaction_pool::Pool::eth_pool( - TransactionValidationTaskExecutor::eth_with_additional_tasks( - blockchain_db.clone(), - Arc::clone(&self.chain), - blob_store.clone(), - ctx.task_executor.clone(), - 1, - ), - blob_store, - self.txpool.pool_config(), - ); + let validator = TransactionValidationTaskExecutor::eth_builder(Arc::clone(&self.chain)) + .kzg_settings(self.kzg_settings()?) + .with_additional_tasks(1) + .build_with_tasks(blockchain_db.clone(), ctx.task_executor.clone(), blob_store.clone()); + + let transaction_pool = + reth_transaction_pool::Pool::eth_pool(validator, blob_store, self.txpool.pool_config()); info!(target: "reth::cli", "Transaction pool initialized"); // spawn txpool maintenance task @@ -566,6 +569,18 @@ impl NodeCommand { .wrap_err_with(|| format!("Could not load config file {:?}", config_path)) } + /// Loads the trusted setup params from a given file path or falls back to + /// `MAINNET_KZG_TRUSTED_SETUP`. + fn kzg_settings(&self) -> eyre::Result> { + if let Some(ref trusted_setup_file) = self.trusted_setup_file { + let trusted_setup = KzgSettings::load_trusted_setup_file(trusted_setup_file.into()) + .map_err(LoadKzgSettingsError::KzgError)?; + Ok(Arc::new(trusted_setup)) + } else { + Ok(Arc::clone(&MAINNET_KZG_TRUSTED_SETUP)) + } + } + fn init_trusted_nodes(&self, config: &mut Config) { config.peers.connect_trusted_nodes_only = self.network.trusted_only; diff --git a/crates/primitives/src/constants/eip4844.rs b/crates/primitives/src/constants/eip4844.rs index 69f6cbce76bf..2cde1da77520 100644 --- a/crates/primitives/src/constants/eip4844.rs +++ b/crates/primitives/src/constants/eip4844.rs @@ -35,7 +35,7 @@ pub const VERSIONED_HASH_VERSION_KZG: u8 = 0x01; const TRUSTED_SETUP_RAW: &[u8] = include_bytes!("../../res/eip4844/trusted_setup.txt"); /// KZG trusted setup -pub static KZG_TRUSTED_SETUP: Lazy> = Lazy::new(|| { +pub static MAINNET_KZG_TRUSTED_SETUP: Lazy> = Lazy::new(|| { Arc::new( load_trusted_setup_from_bytes(TRUSTED_SETUP_RAW).expect("Failed to load trusted setup"), ) @@ -69,6 +69,6 @@ mod tests { #[test] fn ensure_load_kzg_settings() { - let _settings = Arc::clone(&KZG_TRUSTED_SETUP); + let _settings = Arc::clone(&MAINNET_KZG_TRUSTED_SETUP); } } diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 785fd0f60298..223149b07329 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -9,7 +9,7 @@ use crate::{ TransactionValidationTaskExecutor, TransactionValidator, }; use reth_primitives::{ - constants::{eip4844::KZG_TRUSTED_SETUP, ETHEREUM_BLOCK_GAS_LIMIT}, + constants::{eip4844::MAINNET_KZG_TRUSTED_SETUP, ETHEREUM_BLOCK_GAS_LIMIT}, kzg::KzgSettings, ChainSpec, InvalidTransactionError, SealedBlock, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, @@ -356,7 
+356,7 @@ impl EthTransactionValidatorBuilder { additional_tasks: 1, // default to true, can potentially take this as a param in the future propagate_local_transactions: true, - kzg_settings: Arc::clone(&KZG_TRUSTED_SETUP), + kzg_settings: Arc::clone(&MAINNET_KZG_TRUSTED_SETUP), // by default all transaction types are allowed eip2718: true, From d66eff1f7627f1c1270c54d752b64c30e24a90b9 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 29 Aug 2023 18:53:24 -0700 Subject: [PATCH 557/722] feat: add holesky bootnodes (#4404) --- crates/primitives/src/lib.rs | 4 ++-- crates/primitives/src/net.rs | 11 +++++++++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index ffbc27a1718c..a1cfdfa274cb 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -76,8 +76,8 @@ pub use hex_bytes::Bytes; pub use integer_list::IntegerList; pub use log::Log; pub use net::{ - goerli_nodes, mainnet_nodes, sepolia_nodes, NodeRecord, GOERLI_BOOTNODES, MAINNET_BOOTNODES, - SEPOLIA_BOOTNODES, + goerli_nodes, holesky_nodes, mainnet_nodes, sepolia_nodes, NodeRecord, GOERLI_BOOTNODES, + HOLESKY_BOOTNODES, MAINNET_BOOTNODES, SEPOLIA_BOOTNODES, }; pub use peer::{PeerId, WithPeerId}; pub use prune::{ diff --git a/crates/primitives/src/net.rs b/crates/primitives/src/net.rs index 327544713537..b6b3d4f37f98 100644 --- a/crates/primitives/src/net.rs +++ b/crates/primitives/src/net.rs @@ -150,6 +150,12 @@ pub static GOERLI_BOOTNODES : [&str; 7] = [ "enode://d2b720352e8216c9efc470091aa91ddafc53e222b32780f505c817ceef69e01d5b0b0797b69db254c586f493872352f5a022b4d8479a00fc92ec55f9ad46a27e@88.99.70.182:30303", ]; +/// Ethereum Foundation Holesky BOOTNODES +pub static HOLESKY_BOOTNODES : [&str; 2] = [ + "enode://ac906289e4b7f12df423d654c5a962b6ebe5b3a74cc9e06292a85221f9a64a6f1cfdd6b714ed6dacef51578f92b34c60ee91e9ede9c7f8fadc4d347326d95e2b@146.190.13.128:30303", + "enode://a3435a0155a3e837c02f5e7f5662a2f1fbc25b48e4dc232016e1c51b544cb5b4510ef633ea3278c0e970fa8ad8141e2d4d0f9f95456c537ff05fdf9b31c15072@178.128.136.233:30303", +]; + /// Returns parsed mainnet nodes pub fn mainnet_nodes() -> Vec { parse_nodes(&MAINNET_BOOTNODES[..]) @@ -165,6 +171,11 @@ pub fn sepolia_nodes() -> Vec { parse_nodes(&SEPOLIA_BOOTNODES[..]) } +/// Returns parsed holesky nodes +pub fn holesky_nodes() -> Vec { + parse_nodes(&HOLESKY_BOOTNODES[..]) +} + /// Parses all the nodes fn parse_nodes(nodes: impl IntoIterator>) -> Vec { nodes.into_iter().map(|s| s.as_ref().parse().unwrap()).collect() From e576c007e3d8f7e462ffb65781ade8be382a016b Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 29 Aug 2023 19:28:58 -0700 Subject: [PATCH 558/722] chore: introduce versioned `ExecutionPayload` (#4400) --- crates/consensus/beacon/src/engine/mod.rs | 34 +- .../consensus/beacon/src/engine/test_utils.rs | 11 +- crates/payload/builder/src/payload.rs | 34 +- crates/rpc/rpc-api/src/engine.rs | 17 +- crates/rpc/rpc-engine-api/src/engine_api.rs | 63 ++- crates/rpc/rpc-engine-api/src/payload.rs | 8 +- crates/rpc/rpc-engine-api/tests/it/payload.rs | 15 +- .../rpc/rpc-types/src/eth/engine/payload.rs | 498 ++++++++++++++---- 8 files changed, 512 insertions(+), 168 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 7665d575ce1c..c6f7cadfe5e7 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1049,7 +1049,7 @@ where /// /// 
This returns a [`PayloadStatus`] that represents the outcome of a processed new payload and /// returns an error if an internal error occurred. - #[instrument(level = "trace", skip(self, payload, parent_beacon_block_root), fields(block_hash= ?payload.block_hash, block_number = %payload.block_number.as_u64(), is_pipeline_idle = %self.sync.is_pipeline_idle()), target = "consensus::engine")] + #[instrument(level = "trace", skip(self, payload, parent_beacon_block_root), fields(block_hash= ?payload.block_hash(), block_number = %payload.block_number(), is_pipeline_idle = %self.sync.is_pipeline_idle()), target = "consensus::engine")] fn on_new_payload( &mut self, payload: ExecutionPayload, @@ -1121,7 +1121,7 @@ where payload: ExecutionPayload, parent_beacon_block_root: Option, ) -> Result { - let parent_hash = payload.parent_hash; + let parent_hash = payload.parent_hash(); let block = match payload.try_into_sealed_block(parent_beacon_block_root) { Ok(block) => block, Err(error) => { @@ -1807,7 +1807,9 @@ mod tests { use assert_matches::assert_matches; use reth_primitives::{stage::StageCheckpoint, ChainSpec, ChainSpecBuilder, H256, MAINNET}; use reth_provider::{BlockWriter, ProviderFactory}; - use reth_rpc_types::engine::{ForkchoiceState, ForkchoiceUpdated, PayloadStatus}; + use reth_rpc_types::engine::{ + ExecutionPayloadV1, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, + }; use reth_stages::{ExecOutput, PipelineError, StageError}; use std::{collections::VecDeque, sync::Arc, time::Duration}; use tokio::sync::oneshot::error::TryRecvError; @@ -1867,7 +1869,7 @@ mod tests { assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); // consensus engine is still idle because no FCUs were received - let _ = env.send_new_payload(SealedBlock::default().into(), None).await; + let _ = env.send_new_payload(ExecutionPayloadV1::from(SealedBlock::default()), None).await; assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); // consensus engine is still idle because pruning is running @@ -2282,14 +2284,20 @@ mod tests { // Send new payload let res = env - .send_new_payload(random_block(&mut rng, 0, None, None, Some(0)).into(), None) + .send_new_payload( + ExecutionPayloadV1::from(random_block(&mut rng, 0, None, None, Some(0))), + None, + ) .await; // Invalid, because this is a genesis block assert_matches!(res, Ok(result) => assert_matches!(result.status, PayloadStatusEnum::Invalid { .. })); // Send new payload let res = env - .send_new_payload(random_block(&mut rng, 1, None, None, Some(0)).into(), None) + .send_new_payload( + ExecutionPayloadV1::from(random_block(&mut rng, 1, None, None, Some(0))), + None, + ) .await; let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Syncing); assert_matches!(res, Ok(result) => assert_eq!(result, expected_result)); @@ -2339,8 +2347,10 @@ mod tests { assert_matches!(res, Ok(ForkchoiceUpdated { payload_status, .. 
}) => assert_eq!(payload_status, expected_result)); // Send new payload - let result = - env.send_new_payload_retry_on_syncing(block2.clone().into(), None).await.unwrap(); + let result = env + .send_new_payload_retry_on_syncing(ExecutionPayloadV1::from(block2.clone()), None) + .await + .unwrap(); let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Valid) .with_latest_valid_hash(block2.hash); assert_eq!(result, expected_result); @@ -2438,7 +2448,7 @@ mod tests { // Send new payload let block = random_block(&mut rng, 2, Some(H256::random()), None, Some(0)); - let res = env.send_new_payload(block.into(), None).await; + let res = env.send_new_payload(ExecutionPayloadV1::from(block), None).await; let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Syncing); assert_matches!(res, Ok(result) => assert_eq!(result, expected_result)); @@ -2500,8 +2510,10 @@ mod tests { assert_matches!(res, Ok(ForkchoiceUpdated { payload_status, .. }) => assert_eq!(payload_status, expected_result)); // Send new payload - let result = - env.send_new_payload_retry_on_syncing(block2.clone().into(), None).await.unwrap(); + let result = env + .send_new_payload_retry_on_syncing(ExecutionPayloadV1::from(block2.clone()), None) + .await + .unwrap(); let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Invalid { validation_error: BlockValidationError::BlockPreMerge { hash: block2.hash } diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 287cd0f41ceb..c9c76a786f39 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -66,21 +66,22 @@ impl TestEnv { Self { db, tip_rx, engine_handle } } - pub async fn send_new_payload( + pub async fn send_new_payload>( &self, - payload: ExecutionPayload, + payload: T, parent_beacon_block_root: Option, ) -> Result { - self.engine_handle.new_payload(payload, parent_beacon_block_root).await + self.engine_handle.new_payload(payload.into(), parent_beacon_block_root).await } /// Sends the `ExecutionPayload` message to the consensus engine and retries if the engine /// is syncing. 
- pub async fn send_new_payload_retry_on_syncing( + pub async fn send_new_payload_retry_on_syncing>( &self, - payload: ExecutionPayload, + payload: T, parent_beacon_block_root: Option, ) -> Result { + let payload: ExecutionPayload = payload.into(); loop { let result = self.send_new_payload(payload.clone(), parent_beacon_block_root).await?; if !result.is_syncing() { diff --git a/crates/payload/builder/src/payload.rs b/crates/payload/builder/src/payload.rs index cb17dc9b8fec..c5e199ef718e 100644 --- a/crates/payload/builder/src/payload.rs +++ b/crates/payload/builder/src/payload.rs @@ -6,7 +6,8 @@ use reth_primitives::{ use reth_revm_primitives::config::revm_spec_by_timestamp_after_merge; use reth_rlp::Encodable; use reth_rpc_types::engine::{ - ExecutionPayload, ExecutionPayloadEnvelope, PayloadAttributes, PayloadId, + ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadV1, PayloadAttributes, + PayloadId, }; use revm_primitives::{BlockEnv, CfgEnv}; @@ -57,26 +58,23 @@ impl BuiltPayload { } /// Converts the type into the response expected by `engine_getPayloadV1` - pub fn into_v1_payload(self) -> ExecutionPayload { + pub fn into_v1_payload(self) -> ExecutionPayloadV1 { self.into() } /// Converts the type into the response expected by `engine_getPayloadV2` - pub fn into_v2_payload(self) -> ExecutionPayloadEnvelope { - let mut envelope: ExecutionPayloadEnvelope = self.into(); - envelope.blobs_bundle = None; - envelope.should_override_builder = None; - envelope + pub fn into_v2_payload(self) -> ExecutionPayloadEnvelopeV2 { + self.into() } /// Converts the type into the response expected by `engine_getPayloadV2` - pub fn into_v3_payload(self) -> ExecutionPayloadEnvelope { + pub fn into_v3_payload(self) -> ExecutionPayloadEnvelopeV3 { self.into() } } // V1 engine_getPayloadV1 response -impl From for ExecutionPayload { +impl From for ExecutionPayloadV1 { fn from(value: BuiltPayload) -> Self { value.block.into() } @@ -87,13 +85,21 @@ impl From for ExecutionPayload { // have explicitly versioned return types for getPayload. Then BuiltPayload could essentially be a // builder for those types, and it would not be possible to e.g. return cancun fields for a // pre-cancun endpoint. -impl From for ExecutionPayloadEnvelope { +impl From for ExecutionPayloadEnvelopeV2 { + fn from(value: BuiltPayload) -> Self { + let BuiltPayload { block, fees, .. } = value; + + ExecutionPayloadEnvelopeV2 { block_value: fees, execution_payload: block.into() } + } +} + +impl From for ExecutionPayloadEnvelopeV3 { fn from(value: BuiltPayload) -> Self { let BuiltPayload { block, fees, sidecars, .. 
} = value; - ExecutionPayloadEnvelope { + ExecutionPayloadEnvelopeV3 { + payload_inner: block.into(), block_value: fees, - payload: block.into(), // From the engine API spec: // // > Client software **MAY** use any heuristics to decide whether to set @@ -102,8 +108,8 @@ impl From for ExecutionPayloadEnvelope { // // Spec: // - should_override_builder: Some(false), - blobs_bundle: Some(sidecars.into()), + should_override_builder: false, + blobs_bundle: sidecars.into(), } } } diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index 9905957e91da..0fc169e62658 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -2,8 +2,9 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use reth_primitives::{Address, BlockHash, BlockId, BlockNumberOrTag, Bytes, H256, U256, U64}; use reth_rpc_types::{ engine::{ - ExecutionPayload, ExecutionPayloadBodiesV1, ExecutionPayloadEnvelope, ForkchoiceState, - ForkchoiceUpdated, PayloadAttributes, PayloadId, PayloadStatus, TransitionConfiguration, + ExecutionPayloadBodiesV1, ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, + ExecutionPayloadV1, ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, + PayloadAttributes, PayloadId, PayloadStatus, TransitionConfiguration, }, state::StateOverride, BlockOverrides, CallRequest, Filter, Log, RichBlock, SyncStatus, @@ -15,11 +16,11 @@ pub trait EngineApi { /// See also /// Caution: This should not accept the `withdrawals` field #[method(name = "newPayloadV1")] - async fn new_payload_v1(&self, payload: ExecutionPayload) -> RpcResult; + async fn new_payload_v1(&self, payload: ExecutionPayloadV1) -> RpcResult; /// See also #[method(name = "newPayloadV2")] - async fn new_payload_v2(&self, payload: ExecutionPayload) -> RpcResult; + async fn new_payload_v2(&self, payload: ExecutionPayloadV1) -> RpcResult; /// Post Cancun payload handler /// @@ -27,7 +28,7 @@ pub trait EngineApi { #[method(name = "newPayloadV3")] async fn new_payload_v3( &self, - payload: ExecutionPayload, + payload: ExecutionPayloadV3, versioned_hashes: Vec, parent_beacon_block_root: H256, ) -> RpcResult; @@ -70,7 +71,7 @@ pub trait EngineApi { /// Note: /// > Provider software MAY stop the corresponding build process after serving this call. #[method(name = "getPayloadV1")] - async fn get_payload_v1(&self, payload_id: PayloadId) -> RpcResult; + async fn get_payload_v1(&self, payload_id: PayloadId) -> RpcResult; /// See also /// @@ -78,7 +79,7 @@ pub trait EngineApi { /// payload build process at the time of receiving this call. Note: /// > Provider software MAY stop the corresponding build process after serving this call. #[method(name = "getPayloadV2")] - async fn get_payload_v2(&self, payload_id: PayloadId) -> RpcResult; + async fn get_payload_v2(&self, payload_id: PayloadId) -> RpcResult; /// Post Cancun payload handler which also returns a blobs bundle. /// @@ -88,7 +89,7 @@ pub trait EngineApi { /// payload build process at the time of receiving this call. Note: /// > Provider software MAY stop the corresponding build process after serving this call. 
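Because `payload_inner` is marked `#[serde(flatten)]`, the V3 envelope serializes the payload's fields alongside `blockValue` and `shouldOverrideBuilder` rather than nesting them under a key. A minimal sketch of that wire shape with trimmed stand-in structs; it requires the `serde` (with derive) and `serde_json` crates:

use serde::Serialize;

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct PayloadV3 {
    block_hash: String,
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct EnvelopeV3 {
    // Flattened: the payload's keys appear at the top level of the envelope.
    #[serde(flatten)]
    payload_inner: PayloadV3,
    block_value: u64,
    should_override_builder: bool,
}

fn main() {
    let envelope = EnvelopeV3 {
        payload_inner: PayloadV3 { block_hash: "0xabc".into() },
        block_value: 42,
        should_override_builder: false,
    };
    assert_eq!(
        serde_json::to_string(&envelope).unwrap(),
        r#"{"blockHash":"0xabc","blockValue":42,"shouldOverrideBuilder":false}"#
    );
}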
#[method(name = "getPayloadV3")] - async fn get_payload_v3(&self, payload_id: PayloadId) -> RpcResult; + async fn get_payload_v3(&self, payload_id: PayloadId) -> RpcResult; /// See also #[method(name = "getPayloadBodiesByHashV1")] diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 6a9afef4362f..cea1003ed0ee 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -10,7 +10,8 @@ use reth_primitives::{BlockHash, BlockHashOrNumber, BlockNumber, ChainSpec, Hard use reth_provider::{BlockReader, EvmEnvProvider, HeaderProvider, StateProviderFactory}; use reth_rpc_api::EngineApiServer; use reth_rpc_types::engine::{ - ExecutionPayload, ExecutionPayloadBodiesV1, ExecutionPayloadEnvelope, ForkchoiceUpdated, + ExecutionPayload, ExecutionPayloadBodiesV1, ExecutionPayloadEnvelopeV2, + ExecutionPayloadEnvelopeV3, ExecutionPayloadV1, ExecutionPayloadV3, ForkchoiceUpdated, PayloadAttributes, PayloadId, PayloadStatus, TransitionConfiguration, CAPABILITIES, }; use reth_tasks::TaskSpawner; @@ -69,38 +70,36 @@ where /// Caution: This should not accept the `withdrawals` field pub async fn new_payload_v1( &self, - payload: ExecutionPayload, + payload: ExecutionPayloadV1, ) -> EngineApiResult { - self.validate_version_specific_fields( - EngineApiMessageVersion::V1, - PayloadOrAttributes::from_execution_payload(&payload, None), - )?; + let payload = ExecutionPayload::from(payload); + let payload_or_attrs = PayloadOrAttributes::from_execution_payload(&payload, None); + self.validate_version_specific_fields(EngineApiMessageVersion::V1, &payload_or_attrs)?; Ok(self.inner.beacon_consensus.new_payload(payload, None).await?) } /// See also pub async fn new_payload_v2( &self, - payload: ExecutionPayload, + payload: ExecutionPayloadV1, ) -> EngineApiResult { - self.validate_version_specific_fields( - EngineApiMessageVersion::V2, - PayloadOrAttributes::from_execution_payload(&payload, None), - )?; + let payload = ExecutionPayload::from(payload); + let payload_or_attrs = PayloadOrAttributes::from_execution_payload(&payload, None); + self.validate_version_specific_fields(EngineApiMessageVersion::V2, &payload_or_attrs)?; Ok(self.inner.beacon_consensus.new_payload(payload, None).await?) } /// See also pub async fn new_payload_v3( &self, - payload: ExecutionPayload, + payload: ExecutionPayloadV1, _versioned_hashes: Vec, parent_beacon_block_root: H256, ) -> EngineApiResult { - self.validate_version_specific_fields( - EngineApiMessageVersion::V3, - PayloadOrAttributes::from_execution_payload(&payload, Some(parent_beacon_block_root)), - )?; + let payload = ExecutionPayload::from(payload); + let payload_or_attrs = + PayloadOrAttributes::from_execution_payload(&payload, Some(parent_beacon_block_root)); + self.validate_version_specific_fields(EngineApiMessageVersion::V3, &payload_or_attrs)?; // TODO: validate versioned hashes and figure out what to do with parent_beacon_block_root Ok(self.inner.beacon_consensus.new_payload(payload, Some(parent_beacon_block_root)).await?) @@ -118,7 +117,7 @@ where payload_attrs: Option, ) -> EngineApiResult { if let Some(ref attrs) = payload_attrs { - self.validate_version_specific_fields(EngineApiMessageVersion::V1, attrs.into())?; + self.validate_version_specific_fields(EngineApiMessageVersion::V1, &attrs.into())?; } Ok(self.inner.beacon_consensus.fork_choice_updated(state, payload_attrs).await?) 
} @@ -133,7 +132,7 @@ where payload_attrs: Option, ) -> EngineApiResult { if let Some(ref attrs) = payload_attrs { - self.validate_version_specific_fields(EngineApiMessageVersion::V2, attrs.into())?; + self.validate_version_specific_fields(EngineApiMessageVersion::V2, &attrs.into())?; } Ok(self.inner.beacon_consensus.fork_choice_updated(state, payload_attrs).await?) } @@ -148,7 +147,7 @@ where payload_attrs: Option, ) -> EngineApiResult { if let Some(ref attrs) = payload_attrs { - self.validate_version_specific_fields(EngineApiMessageVersion::V3, attrs.into())?; + self.validate_version_specific_fields(EngineApiMessageVersion::V3, &attrs.into())?; } Ok(self.inner.beacon_consensus.fork_choice_updated(state, payload_attrs).await?) @@ -163,7 +162,10 @@ where /// /// Note: /// > Provider software MAY stop the corresponding build process after serving this call. - pub async fn get_payload_v1(&self, payload_id: PayloadId) -> EngineApiResult { + pub async fn get_payload_v1( + &self, + payload_id: PayloadId, + ) -> EngineApiResult { Ok(self .inner .payload_store @@ -183,7 +185,7 @@ where pub async fn get_payload_v2( &self, payload_id: PayloadId, - ) -> EngineApiResult { + ) -> EngineApiResult { Ok(self .inner .payload_store @@ -203,7 +205,7 @@ where pub async fn get_payload_v3( &self, payload_id: PayloadId, - ) -> EngineApiResult { + ) -> EngineApiResult { Ok(self .inner .payload_store @@ -428,7 +430,7 @@ where fn validate_version_specific_fields( &self, version: EngineApiMessageVersion, - payload_or_attrs: PayloadOrAttributes<'_>, + payload_or_attrs: &PayloadOrAttributes<'_>, ) -> EngineApiResult<()> { self.validate_withdrawals_presence( version, @@ -451,14 +453,14 @@ where /// Handler for `engine_newPayloadV1` /// See also /// Caution: This should not accept the `withdrawals` field - async fn new_payload_v1(&self, payload: ExecutionPayload) -> RpcResult { + async fn new_payload_v1(&self, payload: ExecutionPayloadV1) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_newPayloadV1"); Ok(EngineApi::new_payload_v1(self, payload).await?) } /// Handler for `engine_newPayloadV2` /// See also - async fn new_payload_v2(&self, payload: ExecutionPayload) -> RpcResult { + async fn new_payload_v2(&self, payload: ExecutionPayloadV1) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_newPayloadV2"); Ok(EngineApi::new_payload_v2(self, payload).await?) } @@ -467,7 +469,7 @@ where /// See also async fn new_payload_v3( &self, - _payload: ExecutionPayload, + _payload: ExecutionPayloadV3, _versioned_hashes: Vec, _parent_beacon_block_root: H256, ) -> RpcResult { @@ -520,7 +522,7 @@ where /// /// Note: /// > Provider software MAY stop the corresponding build process after serving this call. - async fn get_payload_v1(&self, payload_id: PayloadId) -> RpcResult { + async fn get_payload_v1(&self, payload_id: PayloadId) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_getPayloadV1"); Ok(EngineApi::get_payload_v1(self, payload_id).await?) } @@ -534,7 +536,7 @@ where /// /// Note: /// > Provider software MAY stop the corresponding build process after serving this call. - async fn get_payload_v2(&self, payload_id: PayloadId) -> RpcResult { + async fn get_payload_v2(&self, payload_id: PayloadId) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_getPayloadV2"); Ok(EngineApi::get_payload_v2(self, payload_id).await?) } @@ -548,7 +550,10 @@ where /// /// Note: /// > Provider software MAY stop the corresponding build process after serving this call. 
- async fn get_payload_v3(&self, _payload_id: PayloadId) -> RpcResult { + async fn get_payload_v3( + &self, + _payload_id: PayloadId, + ) -> RpcResult { Err(jsonrpsee_types::error::ErrorCode::MethodNotFound.into()) } diff --git a/crates/rpc/rpc-engine-api/src/payload.rs b/crates/rpc/rpc-engine-api/src/payload.rs index 95db05a3f7a8..f738f6ef2695 100644 --- a/crates/rpc/rpc-engine-api/src/payload.rs +++ b/crates/rpc/rpc-engine-api/src/payload.rs @@ -25,17 +25,17 @@ impl<'a> PayloadOrAttributes<'a> { } /// Return the withdrawals for the payload or attributes. - pub(crate) fn withdrawals(&self) -> &Option> { + pub(crate) fn withdrawals(&self) -> Option<&Vec> { match self { - Self::ExecutionPayload { payload, .. } => &payload.withdrawals, - Self::PayloadAttributes(attributes) => &attributes.withdrawals, + Self::ExecutionPayload { payload, .. } => payload.withdrawals(), + Self::PayloadAttributes(attributes) => attributes.withdrawals.as_ref(), } } /// Return the timestamp for the payload or attributes. pub(crate) fn timestamp(&self) -> u64 { match self { - Self::ExecutionPayload { payload, .. } => payload.timestamp.as_u64(), + Self::ExecutionPayload { payload, .. } => payload.timestamp(), Self::PayloadAttributes(attributes) => attributes.timestamp.as_u64(), } } diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs index a39b59eea5a7..1dcac070253b 100644 --- a/crates/rpc/rpc-engine-api/tests/it/payload.rs +++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ -10,7 +10,9 @@ use reth_primitives::{ Block, SealedBlock, TransactionSigned, H256, U256, }; use reth_rlp::{Decodable, DecodeError}; -use reth_rpc_types::engine::{ExecutionPayload, ExecutionPayloadBodyV1, PayloadError}; +use reth_rpc_types::engine::{ + ExecutionPayload, ExecutionPayloadBodyV1, ExecutionPayloadV1, PayloadError, +}; fn transform_block Block>(src: SealedBlock, f: F) -> ExecutionPayload { let unsealed = src.unseal(); @@ -81,12 +83,13 @@ fn payload_validation() { ); // Invalid encoded transactions - let mut payload_with_invalid_txs: ExecutionPayload = block.clone().into(); + let mut payload_with_invalid_txs: ExecutionPayloadV1 = block.clone().into(); payload_with_invalid_txs.transactions.iter_mut().for_each(|tx| { *tx = Bytes::new().into(); }); + let payload_with_invalid_txs = Block::try_from(payload_with_invalid_txs); assert_matches!( - payload_with_invalid_txs.try_into_sealed_block(None), + payload_with_invalid_txs, Err(PayloadError::Decode(DecodeError::InputTooShort)) ); @@ -98,7 +101,7 @@ fn payload_validation() { assert_matches!( block_with_ommers.clone().try_into_sealed_block(None), Err(PayloadError::BlockHash { consensus, .. }) - if consensus == block_with_ommers.block_hash + if consensus == block_with_ommers.block_hash() ); // None zero difficulty @@ -108,7 +111,7 @@ fn payload_validation() { }); assert_matches!( block_with_difficulty.clone().try_into_sealed_block(None), - Err(PayloadError::BlockHash { consensus, .. }) if consensus == block_with_difficulty.block_hash + Err(PayloadError::BlockHash { consensus, .. }) if consensus == block_with_difficulty.block_hash() ); // None zero nonce @@ -118,7 +121,7 @@ fn payload_validation() { }); assert_matches!( block_with_nonce.clone().try_into_sealed_block(None), - Err(PayloadError::BlockHash { consensus, .. }) if consensus == block_with_nonce.block_hash + Err(PayloadError::BlockHash { consensus, .. 
}) if consensus == block_with_nonce.block_hash() ); // Valid block diff --git a/crates/rpc/rpc-types/src/eth/engine/payload.rs b/crates/rpc/rpc-types/src/eth/engine/payload.rs index fa8cae18422e..7749ce41b40e 100644 --- a/crates/rpc/rpc-types/src/eth/engine/payload.rs +++ b/crates/rpc/rpc-types/src/eth/engine/payload.rs @@ -30,14 +30,55 @@ impl std::fmt::Display for PayloadId { } } +/// This represents the `executionPayload` field in the return value of `engine_getPayloadV2`, +/// specified as: +/// +/// - `executionPayload`: `ExecutionPayloadV1` | `ExecutionPayloadV2` where: +/// - `ExecutionPayloadV1` **MUST** be returned if the payload `timestamp` is lower than the +/// Shanghai timestamp +/// - `ExecutionPayloadV2` **MUST** be returned if the payload `timestamp` is greater or equal +/// to the Shanghai timestamp +/// +/// See: +/// +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ExecutionPayloadFieldV2 { + /// V1 payload + V1(ExecutionPayloadV1), + /// V2 payload + V2(ExecutionPayloadV2), +} + +impl ExecutionPayloadFieldV2 { + /// Returns the inner [ExecutionPayloadV1] + pub fn into_v1_payload(self) -> ExecutionPayloadV1 { + match self { + ExecutionPayloadFieldV2::V1(payload) => payload, + ExecutionPayloadFieldV2::V2(payload) => payload.payload_inner, + } + } +} + +impl From for ExecutionPayloadFieldV2 { + fn from(value: SealedBlock) -> Self { + // if there are withdrawals, return V2 + if value.withdrawals.is_some() { + ExecutionPayloadFieldV2::V2(value.into()) + } else { + ExecutionPayloadFieldV2::V1(value.into()) + } + } +} + /// This structure maps for the return value of `engine_getPayload` of the beacon chain spec, for -/// both V2 and V3. +/// V2. /// /// See also: /// -/// #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct ExecutionPayloadEnvelope { +#[serde(rename_all = "camelCase")] +pub struct ExecutionPayloadEnvelopeV2 { /// Execution payload, which could be either V1 or V2 /// /// V1 (_NO_ withdrawals) MUST be returned if the payload timestamp is lower than the Shanghai @@ -45,35 +86,44 @@ pub struct ExecutionPayloadEnvelope { /// /// V2 (_WITH_ withdrawals) MUST be returned if the payload timestamp is greater or equal to /// the Shanghai timestamp - #[serde(rename = "executionPayload")] - pub payload: ExecutionPayload, + pub execution_payload: ExecutionPayloadFieldV2, /// The expected value to be received by the feeRecipient in wei - #[serde(rename = "blockValue")] pub block_value: U256, - /// The blobs, commitments, and proofs associated with the executed payload. - #[serde(rename = "blobsBundle", skip_serializing_if = "Option::is_none")] - pub blobs_bundle: Option, - /// Introduced in V3, this represents a suggestion from the execution layer if the payload - /// should be used instead of an externally provided one. - #[serde(rename = "shouldOverrideBuilder", skip_serializing_if = "Option::is_none")] - pub should_override_builder: Option, } -impl ExecutionPayloadEnvelope { +impl ExecutionPayloadEnvelopeV2 { /// Returns the [ExecutionPayload] for the `engine_getPayloadV1` endpoint - pub fn into_v1_payload(mut self) -> ExecutionPayload { - // ensure withdrawals are removed - self.payload.withdrawals.take(); - self.payload + pub fn into_v1_payload(self) -> ExecutionPayloadV1 { + self.execution_payload.into_v1_payload() } } +/// This structure maps for the return value of `engine_getPayload` of the beacon chain spec, for +/// V3. 
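`ExecutionPayloadFieldV2` above relies on `#[serde(untagged)]`: serde buffers the input and tries each variant in declaration order, keeping the first that deserializes. A self-contained sketch with toy structs, assuming the `serde` and `serde_json` crates; because serde ignores unknown fields by default, this standalone version lists the richer variant first so the selection is unambiguous:

```rust
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
struct V1 {
    block_number: u64,
}

#[derive(Debug, Serialize, Deserialize)]
struct V2 {
    block_number: u64,
    withdrawals: Vec<u64>,
}

// `untagged` means there is no discriminant on the wire: the shape of the
// JSON object alone decides which variant is produced.
#[derive(Debug, Serialize, Deserialize)]
#[serde(untagged)]
enum Field {
    V2(V2),
    V1(V1),
}

fn main() {
    // a `withdrawals` key selects the V2 variant
    let v2: Field = serde_json::from_str(r#"{"block_number":1,"withdrawals":[]}"#).unwrap();
    assert!(matches!(v2, Field::V2(_)));
    // without it, only V1 matches
    let v1: Field = serde_json::from_str(r#"{"block_number":1}"#).unwrap();
    assert!(matches!(v1, Field::V1(_)));
}
```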
+/// +/// See also: +/// +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ExecutionPayloadEnvelopeV3 { + /// Execution payload V3 + #[serde(flatten)] + pub payload_inner: ExecutionPayloadV3, + /// The expected value to be received by the feeRecipient in wei + pub block_value: U256, + /// The blobs, commitments, and proofs associated with the executed payload. + pub blobs_bundle: BlobsBundleV1, + /// Introduced in V3, this represents a suggestion from the execution layer if the payload + /// should be used instead of an externally provided one. + pub should_override_builder: bool, +} + /// This structure maps on the ExecutionPayload structure of the beacon chain spec. /// /// See also: #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] -pub struct ExecutionPayload { +pub struct ExecutionPayloadV1 { pub parent_hash: H256, pub fee_recipient: Address, pub state_root: H256, @@ -88,21 +138,9 @@ pub struct ExecutionPayload { pub base_fee_per_gas: U256, pub block_hash: H256, pub transactions: Vec, - /// Array of [`Withdrawal`] enabled with V2 - /// See - #[serde(default, skip_serializing_if = "Option::is_none")] - pub withdrawals: Option>, - /// Array of [`U64`] representing blob gas used, enabled with V3 - /// See - #[serde(default, skip_serializing_if = "Option::is_none")] - pub blob_gas_used: Option, - /// Array of [`U64`] representing excess blob gas, enabled with V3 - /// See - #[serde(default, skip_serializing_if = "Option::is_none")] - pub excess_blob_gas: Option, } -impl From for ExecutionPayload { +impl From for ExecutionPayloadV1 { fn from(value: SealedBlock) -> Self { let transactions = value .body @@ -113,7 +151,7 @@ impl From for ExecutionPayload { encoded.into() }) .collect(); - ExecutionPayload { + ExecutionPayloadV1 { parent_hash: value.parent_hash, fee_recipient: value.beneficiary, state_root: value.state_root, @@ -126,88 +164,226 @@ impl From for ExecutionPayload { timestamp: value.timestamp.into(), extra_data: value.extra_data.clone(), base_fee_per_gas: U256::from(value.base_fee_per_gas.unwrap_or_default()), - blob_gas_used: value.blob_gas_used.map(U64::from), - excess_blob_gas: value.excess_blob_gas.map(U64::from), block_hash: value.hash(), transactions, - withdrawals: value.withdrawals, } } } -impl ExecutionPayload { - /// Tries to create a new block from the given payload and optional parent beacon block root. - /// Perform additional validation of `extra_data` and `base_fee_per_gas` fields. - /// - /// NOTE: The log bloom is assumed to be validated during serialization. - /// NOTE: Empty ommers, nonce and difficulty values are validated upon computing block hash and - /// comparing the value with `payload.block_hash`. - /// - /// See - pub fn try_into_sealed_block( - self, - parent_beacon_block_root: Option, - ) -> Result { - if self.extra_data.len() > MAXIMUM_EXTRA_DATA_SIZE { - return Err(PayloadError::ExtraData(self.extra_data)) +/// Try to construct a block from given payload. Perform addition validation of `extra_data` and +/// `base_fee_per_gas` fields. +/// +/// NOTE: The log bloom is assumed to be validated during serialization. +/// NOTE: Empty ommers, nonce and difficulty values are validated upon computing block hash and +/// comparing the value with `payload.block_hash`. 
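A condensed sketch of the two cheap guards that open the `TryFrom` impl directly below; the constants and the error shape are illustrative stand-ins, and the real conversion goes on to decode every transaction and rebuild the header:

```rust
// Illustrative stand-ins for the real constants.
const MAXIMUM_EXTRA_DATA_SIZE: usize = 32;
const MIN_PROTOCOL_BASE_FEE: u64 = 7;

#[derive(Debug)]
enum PayloadError {
    ExtraData,
    BaseFee,
}

fn validate(extra_data: &[u8], base_fee_per_gas: u64) -> Result<(), PayloadError> {
    // extra data is capped at 32 bytes
    if extra_data.len() > MAXIMUM_EXTRA_DATA_SIZE {
        return Err(PayloadError::ExtraData)
    }
    // the base fee must not fall below the protocol minimum
    if base_fee_per_gas < MIN_PROTOCOL_BASE_FEE {
        return Err(PayloadError::BaseFee)
    }
    Ok(())
}

fn main() {
    assert!(validate(&[0u8; 32], 7).is_ok());
    assert!(validate(&[0u8; 33], 7).is_err());
    assert!(validate(&[], 6).is_err());
}
```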
+/// +/// See +impl TryFrom for Block { + type Error = PayloadError; + + fn try_from(payload: ExecutionPayloadV1) -> Result { + if payload.extra_data.len() > MAXIMUM_EXTRA_DATA_SIZE { + return Err(PayloadError::ExtraData(payload.extra_data)) } - if self.base_fee_per_gas < MIN_PROTOCOL_BASE_FEE_U256 { - return Err(PayloadError::BaseFee(self.base_fee_per_gas)) + if payload.base_fee_per_gas < MIN_PROTOCOL_BASE_FEE_U256 { + return Err(PayloadError::BaseFee(payload.base_fee_per_gas)) } - let transactions = self + let transactions = payload .transactions .iter() .map(|tx| TransactionSigned::decode(&mut tx.as_ref())) .collect::, _>>()?; let transactions_root = proofs::calculate_transaction_root(&transactions); - let withdrawals_root = - self.withdrawals.as_ref().map(|w| proofs::calculate_withdrawals_root(w)); - let header = Header { - parent_hash: self.parent_hash, - beneficiary: self.fee_recipient, - state_root: self.state_root, + parent_hash: payload.parent_hash, + beneficiary: payload.fee_recipient, + state_root: payload.state_root, transactions_root, - receipts_root: self.receipts_root, - withdrawals_root, - parent_beacon_block_root, - logs_bloom: self.logs_bloom, - number: self.block_number.as_u64(), - gas_limit: self.gas_limit.as_u64(), - gas_used: self.gas_used.as_u64(), - timestamp: self.timestamp.as_u64(), - mix_hash: self.prev_randao, + receipts_root: payload.receipts_root, + withdrawals_root: None, + logs_bloom: payload.logs_bloom, + number: payload.block_number.as_u64(), + gas_limit: payload.gas_limit.as_u64(), + gas_used: payload.gas_used.as_u64(), + timestamp: payload.timestamp.as_u64(), + mix_hash: payload.prev_randao, base_fee_per_gas: Some( - self.base_fee_per_gas + payload + .base_fee_per_gas .uint_try_to() - .map_err(|_| PayloadError::BaseFee(self.base_fee_per_gas))?, + .map_err(|_| PayloadError::BaseFee(payload.base_fee_per_gas))?, ), - blob_gas_used: self.blob_gas_used.map(|blob_gas_used| blob_gas_used.as_u64()), - excess_blob_gas: self.excess_blob_gas.map(|excess_blob_gas| excess_blob_gas.as_u64()), - extra_data: self.extra_data, + blob_gas_used: None, + excess_blob_gas: None, + parent_beacon_block_root: None, + extra_data: payload.extra_data, // Defaults ommers_hash: EMPTY_LIST_HASH, difficulty: Default::default(), nonce: Default::default(), + }; + + Ok(Block { header, body: transactions, withdrawals: None, ommers: Default::default() }) + } +} + +/// This structure maps on the ExecutionPayloadV2 structure of the beacon chain spec. +/// +/// See also: +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ExecutionPayloadV2 { + /// Inner V1 payload + #[serde(flatten)] + pub payload_inner: ExecutionPayloadV1, + + /// Array of [`Withdrawal`] enabled with V2 + /// See + pub withdrawals: Vec, +} + +impl ExecutionPayloadV2 { + /// Returns the timestamp for the execution payload. 
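`ExecutionPayloadV2` nests the V1 payload behind `#[serde(flatten)]`, so the versioned structs compose in Rust while the wire format stays one flat JSON object. A minimal sketch of that effect, assuming `serde` and `serde_json`:

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct Inner {
    block_number: u64,
}

#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct Outer {
    // the inner fields are hoisted to the top level of the JSON object
    #[serde(flatten)]
    payload_inner: Inner,
    withdrawals: Vec<u64>,
}

fn main() {
    let outer = Outer { payload_inner: Inner { block_number: 7 }, withdrawals: vec![] };
    assert_eq!(
        serde_json::to_string(&outer).unwrap(),
        r#"{"blockNumber":7,"withdrawals":[]}"#
    );
}
```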
+ pub fn timestamp(&self) -> u64 { + self.payload_inner.timestamp.as_u64() + } +} + +impl From for ExecutionPayloadV2 { + fn from(value: SealedBlock) -> Self { + let transactions = value + .body + .iter() + .map(|tx| { + let mut encoded = Vec::new(); + tx.encode_enveloped(&mut encoded); + encoded.into() + }) + .collect(); + + ExecutionPayloadV2 { + payload_inner: ExecutionPayloadV1 { + parent_hash: value.parent_hash, + fee_recipient: value.beneficiary, + state_root: value.state_root, + receipts_root: value.receipts_root, + logs_bloom: value.logs_bloom, + prev_randao: value.mix_hash, + block_number: value.number.into(), + gas_limit: value.gas_limit.into(), + gas_used: value.gas_used.into(), + timestamp: value.timestamp.into(), + extra_data: value.extra_data.clone(), + base_fee_per_gas: U256::from(value.base_fee_per_gas.unwrap_or_default()), + block_hash: value.hash(), + transactions, + }, + withdrawals: value.withdrawals.unwrap_or_default(), } - .seal_slow(); + } +} + +impl TryFrom for Block { + type Error = PayloadError; + + fn try_from(payload: ExecutionPayloadV2) -> Result { + // this performs the same conversion as the underlying V1 payload, but calculates the + // withdrawals root and adds withdrawals + let mut base_sealed_block = Block::try_from(payload.payload_inner)?; + + let withdrawals_root = proofs::calculate_withdrawals_root(&payload.withdrawals); + base_sealed_block.withdrawals = Some(payload.withdrawals); + base_sealed_block.header.withdrawals_root = Some(withdrawals_root); + Ok(base_sealed_block) + } +} + +/// This structure maps on the ExecutionPayloadV3 structure of the beacon chain spec. +/// +/// See also: +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ExecutionPayloadV3 { + /// Inner V2 payload + #[serde(flatten)] + pub payload_inner: ExecutionPayloadV2, - if self.block_hash != header.hash() { - return Err(PayloadError::BlockHash { - execution: header.hash(), - consensus: self.block_hash, + /// Array of [`U64`] representing blob gas used, enabled with V3 + /// See + pub blob_gas_used: U64, + /// Array of [`U64`] representing excess blob gas, enabled with V3 + /// See + pub excess_blob_gas: U64, +} + +impl ExecutionPayloadV3 { + /// Returns the withdrawals for the payload. + pub fn withdrawals(&self) -> &Vec { + &self.payload_inner.withdrawals + } + + /// Returns the timestamp for the payload. 
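The `TryFrom` impl for the V2 payload above delegates to the V1 conversion and then fills in only what V2 adds. A reduced sketch of that layering with stand-in types, where `len()` stands in for the real withdrawals-root computation:

```rust
struct Block {
    withdrawals_root: Option<u64>,
    withdrawals: Option<Vec<u64>>,
}

struct PayloadV1;

struct PayloadV2 {
    inner: PayloadV1,
    withdrawals: Vec<u64>,
}

impl TryFrom<PayloadV1> for Block {
    type Error = ();
    fn try_from(_payload: PayloadV1) -> Result<Self, ()> {
        Ok(Block { withdrawals_root: None, withdrawals: None })
    }
}

impl TryFrom<PayloadV2> for Block {
    type Error = ();
    fn try_from(payload: PayloadV2) -> Result<Self, ()> {
        // reuse all of the V1 rules first
        let mut block = Block::try_from(payload.inner)?;
        // then layer on the V2-only fields
        block.withdrawals_root = Some(payload.withdrawals.len() as u64);
        block.withdrawals = Some(payload.withdrawals);
        Ok(block)
    }
}

fn main() {
    let block = Block::try_from(PayloadV2 { inner: PayloadV1, withdrawals: vec![1, 2] }).unwrap();
    assert_eq!(block.withdrawals_root, Some(2));
    assert_eq!(block.withdrawals, Some(vec![1, 2]));
}
```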
+ pub fn timestamp(&self) -> u64 { + self.payload_inner.payload_inner.timestamp.as_u64() + } +} + +impl From for ExecutionPayloadV3 { + fn from(mut value: SealedBlock) -> Self { + let transactions = value + .body + .iter() + .map(|tx| { + let mut encoded = Vec::new(); + tx.encode_enveloped(&mut encoded); + encoded.into() }) + .collect(); + + let withdrawals = value.withdrawals.take().unwrap_or_default(); + + ExecutionPayloadV3 { + payload_inner: ExecutionPayloadV2 { + payload_inner: ExecutionPayloadV1 { + parent_hash: value.parent_hash, + fee_recipient: value.beneficiary, + state_root: value.state_root, + receipts_root: value.receipts_root, + logs_bloom: value.logs_bloom, + prev_randao: value.mix_hash, + block_number: value.number.into(), + gas_limit: value.gas_limit.into(), + gas_used: value.gas_used.into(), + timestamp: value.timestamp.into(), + extra_data: value.extra_data.clone(), + base_fee_per_gas: U256::from(value.base_fee_per_gas.unwrap_or_default()), + block_hash: value.hash(), + transactions, + }, + withdrawals, + }, + + blob_gas_used: value.blob_gas_used.unwrap_or_default().into(), + excess_blob_gas: value.excess_blob_gas.unwrap_or_default().into(), } + } +} + +impl TryFrom for Block { + type Error = PayloadError; + + fn try_from(payload: ExecutionPayloadV3) -> Result { + // this performs the same conversion as the underlying V2 payload, but inserts the blob gas + // used and excess blob gas + let mut base_block = Block::try_from(payload.payload_inner)?; + + base_block.header.blob_gas_used = Some(payload.blob_gas_used.as_u64()); + base_block.header.excess_blob_gas = Some(payload.excess_blob_gas.as_u64()); - Ok(SealedBlock { - header, - body: transactions, - withdrawals: self.withdrawals, - ommers: Default::default(), - }) + Ok(base_block) } } @@ -234,6 +410,130 @@ impl From> for BlobsBundleV1 { } } +/// An execution payload, which can be either [ExecutionPayloadV1], [ExecutionPayloadV2], or +/// [ExecutionPayloadV3]. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum ExecutionPayload { + /// V1 payload + V1(ExecutionPayloadV1), + /// V2 payload + V2(ExecutionPayloadV2), + /// V3 payload + V3(ExecutionPayloadV3), +} + +impl ExecutionPayload { + /// Returns the withdrawals for the payload. + pub fn withdrawals(&self) -> Option<&Vec> { + match self { + ExecutionPayload::V1(_) => None, + ExecutionPayload::V2(payload) => Some(&payload.withdrawals), + ExecutionPayload::V3(payload) => Some(payload.withdrawals()), + } + } + + /// Returns the timestamp for the payload. + pub fn timestamp(&self) -> u64 { + match self { + ExecutionPayload::V1(payload) => payload.timestamp.as_u64(), + ExecutionPayload::V2(payload) => payload.timestamp(), + ExecutionPayload::V3(payload) => payload.timestamp(), + } + } + + /// Returns the parent hash for the payload. + pub fn parent_hash(&self) -> H256 { + match self { + ExecutionPayload::V1(payload) => payload.parent_hash, + ExecutionPayload::V2(payload) => payload.payload_inner.parent_hash, + ExecutionPayload::V3(payload) => payload.payload_inner.payload_inner.parent_hash, + } + } + + /// Returns the block hash for the payload. + pub fn block_hash(&self) -> H256 { + match self { + ExecutionPayload::V1(payload) => payload.block_hash, + ExecutionPayload::V2(payload) => payload.payload_inner.block_hash, + ExecutionPayload::V3(payload) => payload.payload_inner.payload_inner.block_hash, + } + } + + /// Returns the block number for this payload. 
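Every getter on the `ExecutionPayload` enum above follows the same shape: match once on the version, then reach through the nested `payload_inner` layers, so callers never care which version they hold. A condensed sketch with toy types:

```rust
struct V1 {
    block_hash: u64,
}

struct V2 {
    payload_inner: V1,
}

struct V3 {
    payload_inner: V2,
}

enum Payload {
    V1(V1),
    V2(V2),
    V3(V3),
}

impl Payload {
    // one match per accessor hides the nesting from callers
    fn block_hash(&self) -> u64 {
        match self {
            Payload::V1(p) => p.block_hash,
            Payload::V2(p) => p.payload_inner.block_hash,
            Payload::V3(p) => p.payload_inner.payload_inner.block_hash,
        }
    }
}

fn main() {
    let payload = Payload::V3(V3 { payload_inner: V2 { payload_inner: V1 { block_hash: 42 } } });
    assert_eq!(payload.block_hash(), 42);
}
```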
+ pub fn block_number(&self) -> u64 { + match self { + ExecutionPayload::V1(payload) => payload.block_number.as_u64(), + ExecutionPayload::V2(payload) => payload.payload_inner.block_number.as_u64(), + ExecutionPayload::V3(payload) => { + payload.payload_inner.payload_inner.block_number.as_u64() + } + } + } + + /// Tries to create a new block from the given payload and optional parent beacon block root. + /// Perform additional validation of `extra_data` and `base_fee_per_gas` fields. + /// + /// NOTE: The log bloom is assumed to be validated during serialization. + /// NOTE: Empty ommers, nonce and difficulty values are validated upon computing block hash and + /// comparing the value with `payload.block_hash`. + /// + /// See + pub fn try_into_sealed_block( + self, + parent_beacon_block_root: Option, + ) -> Result { + let block_hash = self.block_hash(); + let mut base_payload = match self { + ExecutionPayload::V1(payload) => Block::try_from(payload)?, + ExecutionPayload::V2(payload) => Block::try_from(payload)?, + ExecutionPayload::V3(payload) => Block::try_from(payload)?, + }; + + base_payload.header.parent_beacon_block_root = parent_beacon_block_root; + + let payload = base_payload.seal_slow(); + + if block_hash != payload.hash() { + return Err(PayloadError::BlockHash { execution: payload.hash(), consensus: block_hash }) + } + + Ok(payload) + } +} + +impl From for ExecutionPayload { + fn from(payload: ExecutionPayloadV1) -> Self { + Self::V1(payload) + } +} + +impl From for ExecutionPayload { + fn from(payload: ExecutionPayloadV2) -> Self { + Self::V2(payload) + } +} + +impl From for ExecutionPayload { + fn from(payload: ExecutionPayloadV3) -> Self { + Self::V3(payload) + } +} + +impl From for ExecutionPayload { + fn from(block: SealedBlock) -> Self { + if block.header.parent_beacon_block_root.is_some() { + // block with parent beacon block root: V3 + Self::V3(block.into()) + } else if block.withdrawals.is_some() { + // block with withdrawals: V2 + Self::V2(block.into()) + } else { + // otherwise V1 + Self::V1(block.into()) + } + } +} + /// Error that can occur when handling payloads. 
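The heart of `try_into_sealed_block` above is a compute-then-compare step: rebuild the block from the payload fields, seal (hash) it, and reject the payload if the computed hash disagrees with the one the consensus layer sent. A minimal sketch in which an additive `seal` stands in for the real keccak header hash:

```rust
#[derive(Debug, PartialEq)]
enum PayloadError {
    BlockHash { execution: u64, consensus: u64 },
}

// stand-in for the real header hashing done by `seal_slow`
fn seal(fields: &[u64]) -> u64 {
    fields.iter().sum()
}

fn try_into_sealed(fields: Vec<u64>, consensus_hash: u64) -> Result<u64, PayloadError> {
    let execution = seal(&fields);
    if execution != consensus_hash {
        // the block the payload describes is not the block it claims to be
        return Err(PayloadError::BlockHash { execution, consensus: consensus_hash })
    }
    Ok(execution)
}

fn main() {
    assert!(try_into_sealed(vec![1, 2, 3], 6).is_ok());
    assert_eq!(
        try_into_sealed(vec![1, 2, 3], 7),
        Err(PayloadError::BlockHash { execution: 6, consensus: 7 })
    );
}
```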
#[derive(thiserror::Error, Debug)] pub enum PayloadError { @@ -555,18 +855,34 @@ mod tests { } #[test] - fn serde_roundtrip_legacy_txs_payload() { + fn serde_roundtrip_legacy_txs_payload_v1() { + // pulled from hive tests + let s = r#"{"parentHash":"0x67ead97eb79b47a1638659942384143f36ed44275d4182799875ab5a87324055","feeRecipient":"0x0000000000000000000000000000000000000000","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x4e3c608a9f2e129fccb91a1dae7472e78013b8e654bccc8d224ce3d63ae17006","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prevRandao":"0x44bb4b98c59dbb726f96ffceb5ee028dcbe35b9bba4f9ffd56aeebf8d1e4db62","blockNumber":"0x1","gasLimit":"0x2fefd8","gasUsed":"0xa860","timestamp":"0x1235","extraData":"0x8b726574682f76302e312e30","baseFeePerGas":"0x342770c0","blockHash":"0x5655011482546f16b2312ef18e9fad03d6a52b1be95401aea884b222477f9e64","transactions":["0xf865808506fc23ac00830124f8940000000000000000000000000000000000000316018032a044b25a8b9b247d01586b3d59c71728ff49c9b84928d9e7fa3377ead3b5570b5da03ceac696601ff7ee6f5fe8864e2998db9babdf5eeba1a0cd5b4d44b3fcbd181b"]}"#; + let payload: ExecutionPayloadV1 = serde_json::from_str(s).unwrap(); + assert_eq!(serde_json::to_string(&payload).unwrap(), s); + } + + #[test] + fn serde_roundtrip_legacy_txs_payload_v3() { // pulled from hive tests - modified with 4844 fields - let s = r#"{"parentHash":"0x67ead97eb79b47a1638659942384143f36ed44275d4182799875ab5a87324055","feeRecipient":"0x0000000000000000000000000000000000000000","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x4e3c608a9f2e129fccb91a1dae7472e78013b8e654bccc8d224ce3d63ae17006","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prevRandao":"0x44bb4b98c59dbb726f96ffceb5ee028dcbe35b9bba4f9ffd56aeebf8d1e4db62","blockNumber":"0x1","gasLimit":"0x2fefd8","gasUsed":"0xa860","timestamp":"0x1235","extraData":"0x8b726574682f76302e312e30","baseFeePerGas":"0x342770c0","blockHash":"0x5655011482546f16b2312ef18e9fad03d6a52b1be95401aea884b222477f9e64","transactions":["0xf865808506fc23ac00830124f8940000000000000000000000000000000000000316018032a044b25a8b9b247d01586b3d59c71728ff49c9b84928d9e7fa3377ead3b5570b5da03ceac696601ff7ee6f5fe8864e2998db9babdf5eeba1a0cd5b4d44b3fcbd181b"],"blobGasUsed":"0xb10b","excessBlobGas":"0xb10b"}"#; - let payload: ExecutionPayload = serde_json::from_str(s).unwrap(); + let s = 
r#"{"parentHash":"0x67ead97eb79b47a1638659942384143f36ed44275d4182799875ab5a87324055","feeRecipient":"0x0000000000000000000000000000000000000000","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x4e3c608a9f2e129fccb91a1dae7472e78013b8e654bccc8d224ce3d63ae17006","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prevRandao":"0x44bb4b98c59dbb726f96ffceb5ee028dcbe35b9bba4f9ffd56aeebf8d1e4db62","blockNumber":"0x1","gasLimit":"0x2fefd8","gasUsed":"0xa860","timestamp":"0x1235","extraData":"0x8b726574682f76302e312e30","baseFeePerGas":"0x342770c0","blockHash":"0x5655011482546f16b2312ef18e9fad03d6a52b1be95401aea884b222477f9e64","transactions":["0xf865808506fc23ac00830124f8940000000000000000000000000000000000000316018032a044b25a8b9b247d01586b3d59c71728ff49c9b84928d9e7fa3377ead3b5570b5da03ceac696601ff7ee6f5fe8864e2998db9babdf5eeba1a0cd5b4d44b3fcbd181b"],"withdrawals":[],"blobGasUsed":"0xb10b","excessBlobGas":"0xb10b"}"#; + let payload: ExecutionPayloadV3 = serde_json::from_str(s).unwrap(); + assert_eq!(serde_json::to_string(&payload).unwrap(), s); + } + + #[test] + fn serde_roundtrip_enveloped_txs_payload_v1() { + // pulled from hive tests + let s = r#"{"parentHash":"0x67ead97eb79b47a1638659942384143f36ed44275d4182799875ab5a87324055","feeRecipient":"0x0000000000000000000000000000000000000000","stateRoot":"0x76a03cbcb7adce07fd284c61e4fa31e5e786175cefac54a29e46ec8efa28ea41","receiptsRoot":"0x4e3c608a9f2e129fccb91a1dae7472e78013b8e654bccc8d224ce3d63ae17006","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prevRandao":"0x028111cb7d25918386a69656b3d17b2febe95fd0f11572c1a55c14f99fdfe3df","blockNumber":"0x1","gasLimit":"0x2fefd8","gasUsed":"0xa860","timestamp":"0x1235","extraData":"0x8b726574682f76302e312e30","baseFeePerGas":"0x342770c0","blockHash":"0xa6f40ed042e61e88e76125dede8fff8026751ea14454b68fb534cea99f2b2a77","transactions":["0xf865808506fc23ac00830124f8940000000000000000000000000000000000000316018032a044b25a8b9b247d01586b3d59c71728ff49c9b84928d9e7fa3377ead3b5570b5da03ceac696601ff7ee6f5fe8864e2998db9babdf5eeba1a0cd5b4d44b3fcbd181b"]}"#; + let payload: ExecutionPayloadV1 = serde_json::from_str(s).unwrap(); assert_eq!(serde_json::to_string(&payload).unwrap(), s); } #[test] - fn serde_roundtrip_enveloped_txs_payload() { + fn serde_roundtrip_enveloped_txs_payload_v3() { // pulled from hive tests - modified with 4844 fields - let s = 
r#"{"parentHash":"0x67ead97eb79b47a1638659942384143f36ed44275d4182799875ab5a87324055","feeRecipient":"0x0000000000000000000000000000000000000000","stateRoot":"0x76a03cbcb7adce07fd284c61e4fa31e5e786175cefac54a29e46ec8efa28ea41","receiptsRoot":"0x4e3c608a9f2e129fccb91a1dae7472e78013b8e654bccc8d224ce3d63ae17006","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prevRandao":"0x028111cb7d25918386a69656b3d17b2febe95fd0f11572c1a55c14f99fdfe3df","blockNumber":"0x1","gasLimit":"0x2fefd8","gasUsed":"0xa860","timestamp":"0x1235","extraData":"0x8b726574682f76302e312e30","baseFeePerGas":"0x342770c0","blockHash":"0xa6f40ed042e61e88e76125dede8fff8026751ea14454b68fb534cea99f2b2a77","transactions":["0xf865808506fc23ac00830124f8940000000000000000000000000000000000000316018032a044b25a8b9b247d01586b3d59c71728ff49c9b84928d9e7fa3377ead3b5570b5da03ceac696601ff7ee6f5fe8864e2998db9babdf5eeba1a0cd5b4d44b3fcbd181b"],"blobGasUsed":"0xb10b","excessBlobGas":"0xb10b"}"#; - let payload: ExecutionPayload = serde_json::from_str(s).unwrap(); + let s = r#"{"parentHash":"0x67ead97eb79b47a1638659942384143f36ed44275d4182799875ab5a87324055","feeRecipient":"0x0000000000000000000000000000000000000000","stateRoot":"0x76a03cbcb7adce07fd284c61e4fa31e5e786175cefac54a29e46ec8efa28ea41","receiptsRoot":"0x4e3c608a9f2e129fccb91a1dae7472e78013b8e654bccc8d224ce3d63ae17006","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prevRandao":"0x028111cb7d25918386a69656b3d17b2febe95fd0f11572c1a55c14f99fdfe3df","blockNumber":"0x1","gasLimit":"0x2fefd8","gasUsed":"0xa860","timestamp":"0x1235","extraData":"0x8b726574682f76302e312e30","baseFeePerGas":"0x342770c0","blockHash":"0xa6f40ed042e61e88e76125dede8fff8026751ea14454b68fb534cea99f2b2a77","transactions":["0xf865808506fc23ac00830124f8940000000000000000000000000000000000000316018032a044b25a8b9b247d01586b3d59c71728ff49c9b84928d9e7fa3377ead3b5570b5da03ceac696601ff7ee6f5fe8864e2998db9babdf5eeba1a0cd5b4d44b3fcbd181b"],"withdrawals":[],"blobGasUsed":"0xb10b","excessBlobGas":"0xb10b"}"#; + let payload: ExecutionPayloadV3 = serde_json::from_str(s).unwrap(); assert_eq!(serde_json::to_string(&payload).unwrap(), s); } } From 1c837407200d00d502b8075682f5ab8c33c4c837 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 30 Aug 2023 09:28:53 -0700 Subject: [PATCH 559/722] feat: add CancunPayloadFields for engine_newPayloadV3 (#4407) --- crates/consensus/beacon/src/engine/handle.rs | 12 ++++-------- crates/consensus/beacon/src/engine/message.rs | 9 ++++----- crates/consensus/beacon/src/engine/mod.rs | 16 ++++++++++------ 
.../consensus/beacon/src/engine/test_utils.rs | 12 +++++++----- crates/rpc/rpc-engine-api/src/engine_api.rs | 10 ++++++---- crates/rpc/rpc-types/src/eth/engine/cancun.rs | 17 +++++++++++++++++ crates/rpc/rpc-types/src/eth/engine/mod.rs | 3 ++- 7 files changed, 50 insertions(+), 29 deletions(-) create mode 100644 crates/rpc/rpc-types/src/eth/engine/cancun.rs diff --git a/crates/consensus/beacon/src/engine/handle.rs b/crates/consensus/beacon/src/engine/handle.rs index 176a8cbbf9de..f7b70929bff7 100644 --- a/crates/consensus/beacon/src/engine/handle.rs +++ b/crates/consensus/beacon/src/engine/handle.rs @@ -5,9 +5,9 @@ use crate::{ BeaconForkChoiceUpdateError, BeaconOnNewPayloadError, }; use futures::TryFutureExt; -use reth_primitives::H256; use reth_rpc_types::engine::{ - ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadAttributes, PayloadStatus, + CancunPayloadFields, ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadAttributes, + PayloadStatus, }; use tokio::sync::{mpsc, mpsc::UnboundedSender, oneshot}; use tokio_stream::wrappers::UnboundedReceiverStream; @@ -35,14 +35,10 @@ impl BeaconConsensusEngineHandle { pub async fn new_payload( &self, payload: ExecutionPayload, - parent_beacon_block_root: Option, + cancun_fields: Option, ) -> Result { let (tx, rx) = oneshot::channel(); - let _ = self.to_engine.send(BeaconEngineMessage::NewPayload { - payload, - parent_beacon_block_root, - tx, - }); + let _ = self.to_engine.send(BeaconEngineMessage::NewPayload { payload, cancun_fields, tx }); rx.await.map_err(|_| BeaconOnNewPayloadError::EngineUnavailable)? } diff --git a/crates/consensus/beacon/src/engine/message.rs b/crates/consensus/beacon/src/engine/message.rs index 9e29a07d54bd..b3b3c86c8166 100644 --- a/crates/consensus/beacon/src/engine/message.rs +++ b/crates/consensus/beacon/src/engine/message.rs @@ -5,10 +5,9 @@ use crate::{ use futures::{future::Either, FutureExt}; use reth_interfaces::consensus::ForkchoiceState; use reth_payload_builder::error::PayloadBuilderError; -use reth_primitives::H256; use reth_rpc_types::engine::{ - ExecutionPayload, ForkChoiceUpdateResult, ForkchoiceUpdateError, ForkchoiceUpdated, - PayloadAttributes, PayloadId, PayloadStatus, PayloadStatusEnum, + CancunPayloadFields, ExecutionPayload, ForkChoiceUpdateResult, ForkchoiceUpdateError, + ForkchoiceUpdated, PayloadAttributes, PayloadId, PayloadStatus, PayloadStatusEnum, }; use std::{ future::Future, @@ -147,8 +146,8 @@ pub enum BeaconEngineMessage { NewPayload { /// The execution payload received by Engine API. payload: ExecutionPayload, - /// The parent beacon block root, if any. - parent_beacon_block_root: Option, + /// The cancun-related newPayload fields, if any. + cancun_fields: Option, /// The sender for returning payload status result. 
tx: oneshot::Sender>, }, diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index c6f7cadfe5e7..71bc15da3f73 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -31,7 +31,8 @@ use reth_provider::{ }; use reth_prune::Pruner; use reth_rpc_types::engine::{ - ExecutionPayload, PayloadAttributes, PayloadStatus, PayloadStatusEnum, PayloadValidationError, + CancunPayloadFields, ExecutionPayload, PayloadAttributes, PayloadStatus, PayloadStatusEnum, + PayloadValidationError, }; use reth_stages::{ControlFlow, Pipeline, PipelineError}; use reth_tasks::TaskSpawner; @@ -1049,13 +1050,16 @@ where /// /// This returns a [`PayloadStatus`] that represents the outcome of a processed new payload and /// returns an error if an internal error occurred. - #[instrument(level = "trace", skip(self, payload, parent_beacon_block_root), fields(block_hash= ?payload.block_hash(), block_number = %payload.block_number(), is_pipeline_idle = %self.sync.is_pipeline_idle()), target = "consensus::engine")] + #[instrument(level = "trace", skip(self, payload, cancun_fields), fields(block_hash= ?payload.block_hash(), block_number = %payload.block_number(), is_pipeline_idle = %self.sync.is_pipeline_idle()), target = "consensus::engine")] fn on_new_payload( &mut self, payload: ExecutionPayload, - parent_beacon_block_root: Option, + cancun_fields: Option, ) -> Result { - let block = match self.ensure_well_formed_payload(payload, parent_beacon_block_root) { + let block = match self.ensure_well_formed_payload( + payload, + cancun_fields.map(|fields| fields.parent_beacon_block_root), + ) { Ok(block) => block, Err(status) => return Ok(status), }; @@ -1727,9 +1731,9 @@ where } } } - BeaconEngineMessage::NewPayload { payload, parent_beacon_block_root, tx } => { + BeaconEngineMessage::NewPayload { payload, cancun_fields, tx } => { this.metrics.new_payload_messages.increment(1); - let res = this.on_new_payload(payload, parent_beacon_block_root); + let res = this.on_new_payload(payload, cancun_fields); let _ = tx.send(res); } BeaconEngineMessage::TransitionConfigurationExchanged => { diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index c9c76a786f39..0fdda91da98b 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -27,7 +27,9 @@ use reth_provider::{ }; use reth_prune::Pruner; use reth_revm::Factory; -use reth_rpc_types::engine::{ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus}; +use reth_rpc_types::engine::{ + CancunPayloadFields, ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, +}; use reth_stages::{ sets::DefaultStages, stages::HeaderSyncMode, test_utils::TestStages, ExecOutput, Pipeline, StageError, @@ -69,9 +71,9 @@ impl TestEnv { pub async fn send_new_payload>( &self, payload: T, - parent_beacon_block_root: Option, + cancun_fields: Option, ) -> Result { - self.engine_handle.new_payload(payload.into(), parent_beacon_block_root).await + self.engine_handle.new_payload(payload.into(), cancun_fields).await } /// Sends the `ExecutionPayload` message to the consensus engine and retries if the engine @@ -79,11 +81,11 @@ impl TestEnv { pub async fn send_new_payload_retry_on_syncing>( &self, payload: T, - parent_beacon_block_root: Option, + cancun_fields: Option, ) -> Result { let payload: ExecutionPayload = payload.into(); loop { - let result = 
self.send_new_payload(payload.clone(), parent_beacon_block_root).await?; + let result = self.send_new_payload(payload.clone(), cancun_fields.clone()).await?; if !result.is_syncing() { return Ok(result) } diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index cea1003ed0ee..c61731d5c569 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -10,7 +10,7 @@ use reth_primitives::{BlockHash, BlockHashOrNumber, BlockNumber, ChainSpec, Hard use reth_provider::{BlockReader, EvmEnvProvider, HeaderProvider, StateProviderFactory}; use reth_rpc_api::EngineApiServer; use reth_rpc_types::engine::{ - ExecutionPayload, ExecutionPayloadBodiesV1, ExecutionPayloadEnvelopeV2, + CancunPayloadFields, ExecutionPayload, ExecutionPayloadBodiesV1, ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadV1, ExecutionPayloadV3, ForkchoiceUpdated, PayloadAttributes, PayloadId, PayloadStatus, TransitionConfiguration, CAPABILITIES, }; @@ -92,8 +92,8 @@ where /// See also pub async fn new_payload_v3( &self, - payload: ExecutionPayloadV1, - _versioned_hashes: Vec, + payload: ExecutionPayloadV3, + versioned_hashes: Vec, parent_beacon_block_root: H256, ) -> EngineApiResult { let payload = ExecutionPayload::from(payload); @@ -101,8 +101,10 @@ where PayloadOrAttributes::from_execution_payload(&payload, Some(parent_beacon_block_root)); self.validate_version_specific_fields(EngineApiMessageVersion::V3, &payload_or_attrs)?; + let cancun_fields = CancunPayloadFields { versioned_hashes, parent_beacon_block_root }; + // TODO: validate versioned hashes and figure out what to do with parent_beacon_block_root - Ok(self.inner.beacon_consensus.new_payload(payload, Some(parent_beacon_block_root)).await?) + Ok(self.inner.beacon_consensus.new_payload(payload, Some(cancun_fields)).await?) } /// Sends a message to the beacon consensus engine to update the fork choice _without_ diff --git a/crates/rpc/rpc-types/src/eth/engine/cancun.rs b/crates/rpc/rpc-types/src/eth/engine/cancun.rs new file mode 100644 index 000000000000..92dea607420e --- /dev/null +++ b/crates/rpc/rpc-types/src/eth/engine/cancun.rs @@ -0,0 +1,17 @@ +//! Contains types related to the Cancun hardfork that will be used by RPC to communicate with the +//! beacon consensus engine. +use reth_primitives::H256; + +/// Fields introduced in `engine_newPayloadV3` that are not present in the `ExecutionPayload` RPC +/// object. +/// +/// See also: +/// +#[derive(Debug, Clone, PartialEq, Eq, Hash, Default)] +pub struct CancunPayloadFields { + /// The parent beacon block root. + pub parent_beacon_block_root: H256, + + /// The expected blob versioned hashes. 
+ pub versioned_hashes: Vec, +} diff --git a/crates/rpc/rpc-types/src/eth/engine/mod.rs b/crates/rpc/rpc-types/src/eth/engine/mod.rs index 2a814374fb26..54cf8ccf0c20 100644 --- a/crates/rpc/rpc-types/src/eth/engine/mod.rs +++ b/crates/rpc/rpc-types/src/eth/engine/mod.rs @@ -2,11 +2,12 @@ #![allow(missing_docs)] +mod cancun; mod forkchoice; mod payload; mod transition; -pub use self::{forkchoice::*, payload::*, transition::*}; +pub use self::{cancun::*, forkchoice::*, payload::*, transition::*}; /// The list of supported Engine capabilities pub const CAPABILITIES: [&str; 9] = [ From 90a1900da8cf7cc30fe1ec4cf599aa25a28a73b8 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Wed, 30 Aug 2023 23:38:45 +0200 Subject: [PATCH 560/722] feat(book): document rpc limitations during pipeline sync (#4411) --- book/jsonrpc/rpc.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/book/jsonrpc/rpc.md b/book/jsonrpc/rpc.md index 30608cd3ae9e..0a4739718be5 100644 --- a/book/jsonrpc/rpc.md +++ b/book/jsonrpc/rpc.md @@ -15,4 +15,12 @@ Lists the enabled RPC namespaces and the versions of each. ```js // > {"jsonrpc":"2.0","id":1,"method":"rpc_modules","params":[]} {"jsonrpc":"2.0","id":1,"result":{"txpool":"1.0","eth":"1.0","rpc":"1.0"}} -``` \ No newline at end of file +``` + +## Handling Responses During Syncing + +When interacting with the RPC server while it is still syncing, some RPC requests may return an empty or null response, while others return the expected results. This behavior can be observed due to the asynchronous nature of the syncing process and the availability of required data. Notably, endpoints that rely on specific stages of the syncing process, such as the execution stage, might not be available until those stages are complete. + +It's important to understand that during pipeline sync, some endpoints may not be accessible until the necessary data is fully synchronized. For instance, the `eth_getBlockReceipts` endpoint is only expected to return valid data after the execution stage, where receipts are generated, has completed. As a result, certain RPC requests may return empty or null responses until the respective stages are finished. + +This behavior is intrinsic to how the syncing mechanism works and is not indicative of an issue or bug. If you encounter such responses while the node is still syncing, it's recommended to wait until the sync process is complete to ensure accurate and expected RPC responses. From 5d9b3d8ab732769863f9478868172cc6e8f99b58 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 30 Aug 2023 14:40:14 -0700 Subject: [PATCH 561/722] feat: add max blob fee per gas underpriced check (#4406) --- crates/transaction-pool/src/pool/txpool.rs | 43 +++++++++++++++---- .../transaction-pool/src/test_utils/mock.rs | 4 ++ crates/transaction-pool/src/traits.rs | 9 ++++ 3 files changed, 47 insertions(+), 9 deletions(-) diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 1b49ea5d7de1..7a609f3eec2a 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -1040,24 +1040,49 @@ impl AllTransactions { /// Returns true if the replacement candidate is underpriced and can't replace the existing /// transaction. 
+ #[inline] fn is_underpriced( existing_transaction: &ValidPoolTransaction, maybe_replacement: &ValidPoolTransaction, price_bumps: &PriceBumpConfig, ) -> bool { let price_bump = price_bumps.price_bump(existing_transaction.tx_type()); + let price_bump_multiplier = (100 + price_bump) / 100; + + if maybe_replacement.max_fee_per_gas() <= + existing_transaction.max_fee_per_gas() * price_bump_multiplier + { + return true + } let existing_max_priority_fee_per_gas = - maybe_replacement.transaction.max_priority_fee_per_gas().unwrap_or(0); - let replacement_max_priority_fee_per_gas = existing_transaction.transaction.max_priority_fee_per_gas().unwrap_or(0); + let replacement_max_priority_fee_per_gas = + maybe_replacement.transaction.max_priority_fee_per_gas().unwrap_or(0); + + if replacement_max_priority_fee_per_gas <= + existing_max_priority_fee_per_gas * price_bump_multiplier && + existing_max_priority_fee_per_gas != 0 && + replacement_max_priority_fee_per_gas != 0 + { + return true + } + + // check max blob fee per gas + if let Some(existing_max_blob_fee_per_gas) = + existing_transaction.transaction.max_fee_per_blob_gas() + { + // this enforces that blob txs can only be replaced by blob txs + let replacement_max_blob_fee_per_gas = + maybe_replacement.transaction.max_fee_per_blob_gas().unwrap_or(0); + if replacement_max_blob_fee_per_gas <= + existing_max_blob_fee_per_gas * price_bump_multiplier + { + return true + } + } - maybe_replacement.max_fee_per_gas() <= - existing_transaction.max_fee_per_gas() * (100 + price_bump) / 100 || - (existing_max_priority_fee_per_gas <= - replacement_max_priority_fee_per_gas * (100 + price_bump) / 100 && - existing_max_priority_fee_per_gas != 0 && - replacement_max_priority_fee_per_gas != 0) + false } /// Inserts a new transaction into the pool. @@ -1455,7 +1480,7 @@ mod tests { // insert the same tx again let res = pool.insert_tx(valid_tx, on_chain_balance, on_chain_nonce); - assert!(res.is_err()); + res.unwrap_err(); assert_eq!(pool.len(), 1); let valid_tx = f.validated(tx.next()); diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index b9acbfe7a411..ee3b65d64536 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -399,6 +399,10 @@ impl PoolTransaction for MockTransaction { } } + fn max_fee_per_blob_gas(&self) -> Option { + None + } + fn effective_tip_per_gas(&self, base_fee: u64) -> Option { let base_fee = base_fee as u128; let max_fee_per_gas = self.max_fee_per_gas(); diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 7b94d354db84..93f17aa45bfa 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -613,6 +613,11 @@ pub trait PoolTransaction: /// This will return `None` for non-EIP1559 transactions fn max_priority_fee_per_gas(&self) -> Option; + /// Returns the EIP-4844 max fee per data gas + /// + /// This will return `None` for non-EIP4844 transactions + fn max_fee_per_blob_gas(&self) -> Option; + /// Returns the effective tip for this transaction. /// /// For EIP-1559 transactions: `min(max_fee_per_gas - base_fee, max_priority_fee_per_gas)`. @@ -798,6 +803,10 @@ impl PoolTransaction for EthPooledTransaction { } } + fn max_fee_per_blob_gas(&self) -> Option { + self.transaction.max_fee_per_blob_gas() + } + /// Returns the effective tip for this transaction. /// /// For EIP-1559 transactions: `min(max_fee_per_gas - base_fee, max_priority_fee_per_gas)`. 
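One arithmetic detail in the new `is_underpriced` above is worth flagging: fees are integers, so the hoisted `price_bump_multiplier = (100 + price_bump) / 100` truncates to `1` for any bump below 100%, which quietly weakens the check, whereas the expression it replaced multiplied before dividing. A worked sketch of the intended rule and of the truncation effect:

```rust
// The intended rule: multiply first, divide last, so a 10% bump really
// requires a more-than-10%-higher fee.
fn is_underpriced(existing_fee: u128, replacement_fee: u128, price_bump: u128) -> bool {
    replacement_fee <= existing_fee * (100 + price_bump) / 100
}

fn main() {
    // with a 10% bump, an existing 100 wei fee is only displaced by >110 wei
    assert!(is_underpriced(100, 110, 10));
    assert!(!is_underpriced(100, 111, 10));

    // the hoisted form collapses to a multiplier of 1 under integer division,
    // so even a 101 wei replacement would no longer count as underpriced
    let truncated_multiplier = (100u128 + 10) / 100;
    assert_eq!(truncated_multiplier, 1);
    assert!(!(101 <= 100 * truncated_multiplier));
}
```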
From 05d38dd4767233a4a14687180aaa897912aff11f Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Thu, 31 Aug 2023 00:02:53 +0200 Subject: [PATCH 562/722] feat(rpc): add documentation for `calculate_reward_percentiles` (#4413) --- crates/rpc/rpc/src/eth/api/fees.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/crates/rpc/rpc/src/eth/api/fees.rs b/crates/rpc/rpc/src/eth/api/fees.rs index 5d8cbc4d1ff8..08822b58403b 100644 --- a/crates/rpc/rpc/src/eth/api/fees.rs +++ b/crates/rpc/rpc/src/eth/api/fees.rs @@ -132,7 +132,11 @@ where }) } - // todo: docs + /// Calculates reward percentiles for transactions in a block header. + /// Given a list of percentiles and a sealed block header, this function computes + /// the corresponding rewards for the transactions at each percentile. + /// + /// The results are returned as a vector of U256 values. async fn calculate_reward_percentiles( &self, percentiles: &[f64], From 2e332b5c8dc906fa08c16b13994099842f2e2f13 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Thu, 31 Aug 2023 00:26:45 +0200 Subject: [PATCH 563/722] feat(primitives): add doc for `parent_beacon_block_root` in `Header` (#4415) --- crates/primitives/src/header.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/crates/primitives/src/header.rs b/crates/primitives/src/header.rs index 12f4e3fca82a..f74707844c77 100644 --- a/crates/primitives/src/header.rs +++ b/crates/primitives/src/header.rs @@ -100,8 +100,13 @@ pub struct Header { /// with above-target blob gas consumption increase this value, blocks with below-target blob /// gas consumption decrease it (bounded at 0). This was added in EIP-4844. pub excess_blob_gas: Option, - /// TODO: Docs - /// This was added in EIP-4788. + /// The hash of the parent beacon block's root is included in execution blocks, as proposed by + /// EIP-4788. + /// + /// This enables trust-minimized access to consensus state, supporting staking pools, bridges, + /// and more. + /// + /// The beacon roots contract handles root storage, enhancing Ethereum's functionalities. pub parent_beacon_block_root: Option, /// An arbitrary byte array containing data relevant to this block. This must be 32 bytes or /// fewer; formally Hx. From 975ff13155708c275043e798df98dc45ae8b12f3 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Thu, 31 Aug 2023 00:31:41 +0200 Subject: [PATCH 564/722] feat(cli): extract more functions from `RpcServerArgs` in `RethRpcConfig` (#4412) --- bin/reth/src/args/rpc_server_args.rs | 98 +++++++++++----------------- bin/reth/src/cli/config.rs | 50 +++++++++++++- bin/reth/src/node/mod.rs | 5 +- 3 files changed, 88 insertions(+), 65 deletions(-) diff --git a/bin/reth/src/args/rpc_server_args.rs b/bin/reth/src/args/rpc_server_args.rs index 3b2c463788c3..e17d8a1a5e90 100644 --- a/bin/reth/src/args/rpc_server_args.rs +++ b/bin/reth/src/args/rpc_server_args.rs @@ -166,57 +166,6 @@ pub struct RpcServerArgs { } impl RpcServerArgs { - /// Returns the max request size in bytes. - pub fn rpc_max_request_size_bytes(&self) -> u32 { - self.rpc_max_request_size * 1024 * 1024 - } - - /// Returns the max response size in bytes. - pub fn rpc_max_response_size_bytes(&self) -> u32 { - self.rpc_max_response_size * 1024 * 1024 - } - - /// Extracts the gas price oracle config from the args. 
- pub fn gas_price_oracle_config(&self) -> GasPriceOracleConfig { - GasPriceOracleConfig::new( - self.gas_price_oracle.blocks, - self.gas_price_oracle.ignore_price, - self.gas_price_oracle.max_price, - self.gas_price_oracle.percentile, - ) - } - - /// The execution layer and consensus layer clients SHOULD accept a configuration parameter: - /// jwt-secret, which designates a file containing the hex-encoded 256 bit secret key to be used - /// for verifying/generating JWT tokens. - /// - /// If such a parameter is given, but the file cannot be read, or does not contain a hex-encoded - /// key of 256 bits, the client SHOULD treat this as an error. - /// - /// If such a parameter is not given, the client SHOULD generate such a token, valid for the - /// duration of the execution, and SHOULD store the hex-encoded secret as a jwt.hex file on - /// the filesystem. This file can then be used to provision the counterpart client. - /// - /// The `default_jwt_path` provided as an argument will be used as the default location for the - /// jwt secret in case the `auth_jwtsecret` argument is not provided. - pub(crate) fn jwt_secret(&self, default_jwt_path: PathBuf) -> Result { - match self.auth_jwtsecret.as_ref() { - Some(fpath) => { - debug!(target: "reth::cli", user_path=?fpath, "Reading JWT auth secret file"); - JwtSecret::from_file(fpath) - } - None => { - if default_jwt_path.exists() { - debug!(target: "reth::cli", ?default_jwt_path, "Reading JWT auth secret file"); - JwtSecret::from_file(&default_jwt_path) - } else { - info!(target: "reth::cli", ?default_jwt_path, "Creating JWT auth secret file"); - JwtSecret::try_create(&default_jwt_path) - } - } - } - } - /// Configures and launches _all_ servers. /// /// Returns the handles for the launched regular RPC server(s) (if any) and the server handle @@ -363,11 +312,44 @@ impl RpcServerArgs { ) .await } +} + +impl RethRpcConfig for RpcServerArgs { + fn rpc_max_request_size_bytes(&self) -> u32 { + self.rpc_max_request_size * 1024 * 1024 + } + + fn rpc_max_response_size_bytes(&self) -> u32 { + self.rpc_max_response_size * 1024 * 1024 + } + + fn gas_price_oracle_config(&self) -> GasPriceOracleConfig { + GasPriceOracleConfig::new( + self.gas_price_oracle.blocks, + self.gas_price_oracle.ignore_price, + self.gas_price_oracle.max_price, + self.gas_price_oracle.percentile, + ) + } + + fn jwt_secret(&self, default_jwt_path: PathBuf) -> Result { + match self.auth_jwtsecret.as_ref() { + Some(fpath) => { + debug!(target: "reth::cli", user_path=?fpath, "Reading JWT auth secret file"); + JwtSecret::from_file(fpath) + } + None => { + if default_jwt_path.exists() { + debug!(target: "reth::cli", ?default_jwt_path, "Reading JWT auth secret file"); + JwtSecret::from_file(&default_jwt_path) + } else { + info!(target: "reth::cli", ?default_jwt_path, "Creating JWT auth secret file"); + JwtSecret::try_create(&default_jwt_path) + } + } + } + } - /// Creates the [TransportRpcModuleConfig] from cli args. - /// - /// This sets all the api modules, and configures additional settings like gas price oracle - /// settings in the [TransportRpcModuleConfig]. 
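The refactor above moves behavior out of inherent methods on `RpcServerArgs` and behind the `RethRpcConfig` trait, so launch code can depend on the trait rather than on the concrete CLI type. A reduced sketch of the pattern, cut down to a single method, with `start_rpc_server` as a hypothetical caller:

```rust
trait RethRpcConfig {
    fn rpc_max_request_size_bytes(&self) -> u32;
}

struct RpcServerArgs {
    // configured in megabytes on the command line
    rpc_max_request_size: u32,
}

impl RethRpcConfig for RpcServerArgs {
    fn rpc_max_request_size_bytes(&self) -> u32 {
        self.rpc_max_request_size * 1024 * 1024
    }
}

// callers name the capability, not the CLI struct
fn start_rpc_server(config: &impl RethRpcConfig) -> u32 {
    config.rpc_max_request_size_bytes()
}

fn main() {
    let args = RpcServerArgs { rpc_max_request_size: 15 };
    assert_eq!(start_rpc_server(&args), 15 * 1024 * 1024);
}
```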
fn transport_rpc_module_config(&self) -> TransportRpcModuleConfig { let mut config = TransportRpcModuleConfig::default() .with_config(RpcModuleConfig::new(self.eth_config())); @@ -395,7 +377,6 @@ impl RpcServerArgs { config } - /// Returns the default server builder for http/ws fn http_ws_server_builder(&self) -> ServerBuilder { ServerBuilder::new() .max_connections(self.rpc_max_connections) @@ -404,7 +385,6 @@ impl RpcServerArgs { .max_subscriptions_per_connection(self.rpc_max_subscriptions_per_connection) } - /// Returns the default ipc server builder fn ipc_server_builder(&self) -> IpcServerBuilder { IpcServerBuilder::default() .max_subscriptions_per_connection(self.rpc_max_subscriptions_per_connection) @@ -413,7 +393,6 @@ impl RpcServerArgs { .max_connections(self.rpc_max_connections) } - /// Creates the [RpcServerConfig] from cli args. fn rpc_server_config(&self) -> RpcServerConfig { let mut config = RpcServerConfig::default(); @@ -439,15 +418,12 @@ impl RpcServerArgs { config } - /// Creates the [AuthServerConfig] from cli args. fn auth_server_config(&self, jwt_secret: JwtSecret) -> Result { let address = SocketAddr::new(self.auth_addr, self.auth_port); Ok(AuthServerConfig::builder(jwt_secret).socket_addr(address).build()) } -} -impl RethRpcConfig for RpcServerArgs { fn is_ipc_enabled(&self) -> bool { // By default IPC is enabled therefor it is enabled if the `ipcdisable` is false. !self.ipcdisable diff --git a/bin/reth/src/cli/config.rs b/bin/reth/src/cli/config.rs index a698b3a5d472..b87ed6be0758 100644 --- a/bin/reth/src/cli/config.rs +++ b/bin/reth/src/cli/config.rs @@ -2,8 +2,12 @@ use reth_revm::primitives::bytes::BytesMut; use reth_rlp::Encodable; -use reth_rpc_builder::EthConfig; -use std::{borrow::Cow, time::Duration}; +use reth_rpc::{eth::gas_oracle::GasPriceOracleConfig, JwtError, JwtSecret}; +use reth_rpc_builder::{ + auth::AuthServerConfig, error::RpcError, EthConfig, IpcServerBuilder, RpcServerConfig, + ServerBuilder, TransportRpcModuleConfig, +}; +use std::{borrow::Cow, path::PathBuf, time::Duration}; /// A trait that provides configured RPC server. /// @@ -16,7 +20,47 @@ pub trait RethRpcConfig { /// The configured ethereum RPC settings. fn eth_config(&self) -> EthConfig; - // TODO extract more functions from RpcServerArgs + /// Returns the max request size in bytes. + fn rpc_max_request_size_bytes(&self) -> u32; + + /// Returns the max response size in bytes. + fn rpc_max_response_size_bytes(&self) -> u32; + + /// Extracts the gas price oracle config from the args. + fn gas_price_oracle_config(&self) -> GasPriceOracleConfig; + + /// Creates the [TransportRpcModuleConfig] from cli args. + /// + /// This sets all the api modules, and configures additional settings like gas price oracle + /// settings in the [TransportRpcModuleConfig]. + fn transport_rpc_module_config(&self) -> TransportRpcModuleConfig; + + /// Returns the default server builder for http/ws + fn http_ws_server_builder(&self) -> ServerBuilder; + + /// Returns the default ipc server builder + fn ipc_server_builder(&self) -> IpcServerBuilder; + + /// Creates the [RpcServerConfig] from cli args. + fn rpc_server_config(&self) -> RpcServerConfig; + + /// Creates the [AuthServerConfig] from cli args. + fn auth_server_config(&self, jwt_secret: JwtSecret) -> Result; + + /// The execution layer and consensus layer clients SHOULD accept a configuration parameter: + /// jwt-secret, which designates a file containing the hex-encoded 256 bit secret key to be used + /// for verifying/generating JWT tokens. 
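A minimal sketch of the read-or-create flow that the `jwt_secret` documentation spells out over the following lines, assuming a plain `Secret` newtype in place of `JwtSecret` and a fixed stand-in value where the real code draws 32 random bytes:

```rust
use std::{env, fs, io, path::Path};

struct Secret(String);

fn load_or_create(path: &Path) -> io::Result<Secret> {
    if path.exists() {
        // reuse the previously provisioned secret
        Ok(Secret(fs::read_to_string(path)?.trim().to_owned()))
    } else {
        // generate and persist a new hex-encoded secret so the counterpart
        // client can be provisioned from the same file
        let secret = "0a".repeat(32); // stand-in for 32 random bytes
        fs::write(path, &secret)?;
        Ok(Secret(secret))
    }
}

fn main() -> io::Result<()> {
    let path = env::temp_dir().join("jwt.hex");
    let first = load_or_create(&path)?; // creates the file on first use
    let second = load_or_create(&path)?; // subsequent calls read it back
    assert_eq!(first.0, second.0);
    Ok(())
}
```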
+ /// + /// If such a parameter is given, but the file cannot be read, or does not contain a hex-encoded + /// key of 256 bits, the client SHOULD treat this as an error. + /// + /// If such a parameter is not given, the client SHOULD generate such a token, valid for the + /// duration of the execution, and SHOULD store the hex-encoded secret as a jwt.hex file on + /// the filesystem. This file can then be used to provision the counterpart client. + /// + /// The `default_jwt_path` provided as an argument will be used as the default location for the + /// jwt secret in case the `auth_jwtsecret` argument is not provided. + fn jwt_secret(&self, default_jwt_path: PathBuf) -> Result; } /// A trait that provides payload builder settings. diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 36d9c01fd1a7..84e04524898f 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -8,7 +8,10 @@ use crate::{ DatabaseArgs, DebugArgs, DevArgs, NetworkArgs, PayloadBuilderArgs, PruningArgs, RpcServerArgs, TxPoolArgs, }, - cli::ext::{RethCliExt, RethNodeCommandConfig}, + cli::{ + config::RethRpcConfig, + ext::{RethCliExt, RethNodeCommandConfig}, + }, dirs::{DataDirPath, MaybePlatformPath}, init::init_genesis, node::cl_events::ConsensusLayerHealthEvents, From 3088104a6dde18837d3b0961a7bc7079de8d9659 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 30 Aug 2023 16:13:21 -0700 Subject: [PATCH 565/722] chore: add max fee per blob gas to mock transaction (#4416) --- .../transaction-pool/src/test_utils/mock.rs | 70 ++++++++++++++----- 1 file changed, 52 insertions(+), 18 deletions(-) diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index ee3b65d64536..d42e45cdb49c 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -12,10 +12,11 @@ use rand::{ prelude::Distribution, }; use reth_primitives::{ - constants::MIN_PROTOCOL_BASE_FEE, hex, Address, FromRecoveredPooledTransaction, - FromRecoveredTransaction, IntoRecoveredTransaction, PooledTransactionsElementEcRecovered, - Signature, Transaction, TransactionKind, TransactionSigned, TransactionSignedEcRecovered, - TxEip1559, TxEip2930, TxEip4844, TxHash, TxLegacy, TxType, H256, U128, U256, + constants::{eip4844::DATA_GAS_PER_BLOB, MIN_PROTOCOL_BASE_FEE}, + hex, Address, FromRecoveredPooledTransaction, FromRecoveredTransaction, + IntoRecoveredTransaction, PooledTransactionsElementEcRecovered, Signature, Transaction, + TransactionKind, TransactionSigned, TransactionSignedEcRecovered, TxEip1559, TxEip2930, + TxEip4844, TxHash, TxLegacy, TxType, H256, U128, U256, }; use std::{ops::Range, sync::Arc, time::Instant}; @@ -110,6 +111,7 @@ pub enum MockTransaction { nonce: u64, max_fee_per_gas: u128, max_priority_fee_per_gas: u128, + max_fee_per_blob_gas: u128, gas_limit: u64, to: TransactionKind, value: U256, @@ -154,6 +156,36 @@ impl MockTransaction { } } + /// Returns a new EIP-4844 transaction with random address and hash and empty values + pub fn eip4844() -> Self { + MockTransaction::Eip4844 { + hash: H256::random(), + sender: Address::random(), + nonce: 0, + max_fee_per_gas: MIN_PROTOCOL_BASE_FEE as u128, + max_priority_fee_per_gas: MIN_PROTOCOL_BASE_FEE as u128, + max_fee_per_blob_gas: DATA_GAS_PER_BLOB as u128, + gas_limit: 0, + to: TransactionKind::Call(Address::random()), + value: Default::default(), + } + } + + /// Sets the max fee per blob gas for EIP-4844 transactions, + pub fn with_blob_fee(mut self, val: u128)
+        self.set_blob_fee(val);
+        self
+    }
+
+    /// Sets the max fee per blob gas for EIP-4844 transactions.
+    pub fn set_blob_fee(&mut self, val: u128) -> &mut Self {
+        if let MockTransaction::Eip4844 { max_fee_per_blob_gas, .. } = self {
+            *max_fee_per_blob_gas = val;
+        }
+        self
+    }
+
+    /// Sets the priority fee for dynamic fee transactions (EIP-1559 and EIP-4844)
     pub fn set_priority_fee(&mut self, val: u128) -> &mut Self {
         if let (MockTransaction::Eip1559 { max_priority_fee_per_gas, .. } |
         MockTransaction::Eip4844 { max_priority_fee_per_gas, .. }) = self
@@ -164,11 +196,7 @@ impl MockTransaction {
     }
 
     pub fn with_priority_fee(mut self, val: u128) -> Self {
-        if let (MockTransaction::Eip1559 { ref mut max_priority_fee_per_gas, .. } |
-            MockTransaction::Eip4844 { ref mut max_priority_fee_per_gas, .. }) = self
-        {
-            *max_priority_fee_per_gas = val;
-        }
+        self.set_priority_fee(val);
         self
     }
 
@@ -192,11 +220,7 @@ impl MockTransaction {
     }
 
     pub fn with_max_fee(mut self, val: u128) -> Self {
-        if let (MockTransaction::Eip1559 { ref mut max_fee_per_gas, .. } |
-            MockTransaction::Eip4844 { ref mut max_fee_per_gas, .. }) = self
-        {
-            *max_fee_per_gas = val;
-        }
+        self.set_max_fee(val);
         self
     }
 
@@ -334,6 +358,10 @@ impl MockTransaction {
     pub fn is_eip1559(&self) -> bool {
         matches!(self, MockTransaction::Eip1559 { .. })
     }
+
+    pub fn is_eip4844(&self) -> bool {
+        matches!(self, MockTransaction::Eip4844 { .. })
+    }
 }
 
 impl PoolTransaction for MockTransaction {
@@ -400,7 +428,10 @@ impl PoolTransaction for MockTransaction {
     }
 
     fn max_fee_per_blob_gas(&self) -> Option<u128> {
-        None
+        match self {
+            MockTransaction::Eip4844 { max_fee_per_blob_gas, .. } => Some(*max_fee_per_blob_gas),
+            _ => None,
+        }
     }
 
     fn effective_tip_per_gas(&self, base_fee: u64) -> Option<u128> {
@@ -509,13 +540,14 @@ impl FromRecoveredTransaction for MockTransaction {
                 input,
                 access_list,
                 blob_versioned_hashes: _,
-                max_fee_per_blob_gas: _,
+                max_fee_per_blob_gas,
             }) => MockTransaction::Eip4844 {
                 hash,
                 sender,
                 nonce,
                 max_fee_per_gas,
                 max_priority_fee_per_gas,
+                max_fee_per_blob_gas,
                 gas_limit,
                 to,
                 value: U256::from(value),
@@ -560,8 +592,6 @@ impl IntoRecoveredTransaction for MockTransaction {
 
 #[cfg(any(test, feature = "arbitrary"))]
 impl proptest::arbitrary::Arbitrary for MockTransaction {
     type Parameters = ();
-    type Strategy = proptest::strategy::BoxedStrategy;
-
     fn arbitrary_with(_: Self::Parameters) -> Self::Strategy {
         use proptest::prelude::{any, Strategy};
 
@@ -620,6 +650,7 @@ impl proptest::arbitrary::Arbitrary for MockTransaction {
                     to,
                     value,
                     input,
+                    max_fee_per_blob_gas,
                     ..
                }) => MockTransaction::Eip4844 {
                    sender,
@@ -627,6 +658,7 @@ impl proptest::arbitrary::Arbitrary for MockTransaction {
                    nonce: *nonce,
                    max_fee_per_gas: *max_fee_per_gas,
                    max_priority_fee_per_gas: *max_priority_fee_per_gas,
+                    max_fee_per_blob_gas: *max_fee_per_blob_gas,
                    gas_limit: *gas_limit,
                    to: *to,
                    value: U256::from(*value),
@@ -634,6 +666,8 @@ impl proptest::arbitrary::Arbitrary for MockTransaction {
             })
             .boxed()
     }
+
+    type Strategy = proptest::strategy::BoxedStrategy;
 }
 
 #[derive(Default)]

From e33e3c9154b2290a282a14fe630b23c4c90bbdaf Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Wed, 30 Aug 2023 16:13:27 -0700
Subject: [PATCH 566/722] chore: add eip4844 nonce gap error (#4414)

---
 crates/rpc/rpc/src/eth/error.rs            |  3 +++
 crates/transaction-pool/src/error.rs       | 13 +++++++++++++
 crates/transaction-pool/src/pool/txpool.rs | 19 ++++++++++++-------
 3 files changed, 28 insertions(+), 7 deletions(-)

diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs
index ea281c431432..36151b5688f7 100644
--- a/crates/rpc/rpc/src/eth/error.rs
+++ b/crates/rpc/rpc/src/eth/error.rs
@@ -520,6 +520,9 @@ impl From for RpcPoolError {
             InvalidPoolTransactionError::InvalidEip4844Blob(err) => {
                 RpcPoolError::InvalidEip4844Blob(err)
             }
+            InvalidPoolTransactionError::Eip4844NonceGap => {
+                RpcPoolError::Invalid(RpcInvalidTransactionError::NonceTooHigh)
+            }
         }
     }
 }
diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs
index 658f10d324b0..c1e844e09017 100644
--- a/crates/transaction-pool/src/error.rs
+++ b/crates/transaction-pool/src/error.rs
@@ -144,6 +144,14 @@ pub enum InvalidPoolTransactionError {
     /// Thrown if validating the blob sidecar for the transaction failed.
     #[error(transparent)]
     InvalidEip4844Blob(BlobTransactionValidationError),
+    /// EIP-4844 transactions are only accepted if they're gapless, meaning the previous nonce of
+    /// the transaction (`tx.nonce - 1`) must either be in the pool or match the on-chain nonce of
+    /// the sender.
+    ///
+    /// This error is thrown on validation if a valid blob transaction arrives with a nonce that
+    /// would introduce a gap in the nonce sequence.
+    #[error("Nonce too high.")]
+    Eip4844NonceGap,
     /// Any other error that occurred while inserting/validating that is transaction specific
     #[error("{0:?}")]
     Other(Box),
@@ -210,6 +218,11 @@ impl InvalidPoolTransactionError {
                 // This is only reachable when the blob is invalid
                 true
             }
+            InvalidPoolTransactionError::Eip4844NonceGap => {
+                // it is possible that the pool sees `nonce n` before `nonce n-1` and this is only
+                // thrown for valid (good) blob transactions
+                false
+            }
         }
     }
 }
diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs
index 7a609f3eec2a..bd6665ea630a 100644
--- a/crates/transaction-pool/src/pool/txpool.rs
+++ b/crates/transaction-pool/src/pool/txpool.rs
@@ -1085,7 +1085,7 @@ impl AllTransactions {
         false
     }
 
-    /// Inserts a new transaction into the pool.
+    /// Inserts a new _valid_ transaction into the pool.
     ///
     /// If the transaction already exists, it will be replaced if not underpriced.
     /// Returns info to which sub-pool the transaction should be moved.
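
The gapless rule introduced above, as a minimal standalone sketch (not part of the
patch; `pool_has_predecessor` is a hypothetical stand-in for the pool's ancestor
bookkeeping):

    /// A blob (EIP-4844) transaction is only accepted if `tx.nonce - 1` is already
    /// in the pool or `tx.nonce` matches the sender's on-chain nonce.
    fn blob_tx_is_gapless(tx_nonce: u64, on_chain_nonce: u64, pool_has_predecessor: bool) -> bool {
        tx_nonce == on_chain_nonce || pool_has_predecessor
    }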
@@ -1104,13 +1104,16 @@ impl AllTransactions {
         assert!(on_chain_nonce <= transaction.nonce(), "Invalid transaction");
 
         let transaction = Arc::new(self.ensure_valid(transaction)?);
-        let tx_id = *transaction.id();
+        let inserted_tx_id = *transaction.id();
         let mut state = TxState::default();
         let mut cumulative_cost = U256::ZERO;
         let mut updates = Vec::new();
 
-        let ancestor =
-            TransactionId::ancestor(transaction.transaction.nonce(), on_chain_nonce, tx_id.sender);
+        let ancestor = TransactionId::ancestor(
+            transaction.transaction.nonce(),
+            on_chain_nonce,
+            inserted_tx_id.sender,
+        );
 
         // If there's no ancestor tx then this is the next transaction.
         if ancestor.is_none() {
@@ -1133,6 +1136,7 @@ impl AllTransactions {
             state.insert(TxState::NOT_TOO_MUCH_GAS);
         }
 
+        // placeholder for the replaced transaction, if any
         let mut replaced_tx = None;
 
         let pool_tx = PoolInternalTransaction {
@@ -1175,6 +1179,7 @@ impl AllTransactions {
         // The next transaction of this sender
         let on_chain_id = TransactionId::new(transaction.sender_id(), on_chain_nonce);
         {
+            // get all transactions of the sender's account
            let mut descendants = self.descendant_txs_mut(&on_chain_id).peekable();
 
             // Tracks the next nonce we expect if the transactions are gapless
@@ -1189,7 +1194,7 @@ impl AllTransactions {
                 // SAFETY: the transaction was added above so the _inclusive_ descendants iterator
                 // returns at least 1 tx.
                 let (id, tx) = descendants.peek().expect("Includes >= 1; qed.");
-                if id.nonce < tx_id.nonce {
+                if id.nonce < inserted_tx_id.nonce {
                     !tx.state.is_pending()
                 } else {
                     true
@@ -1232,7 +1237,7 @@ impl AllTransactions {
                 // update the pool based on the state
                 tx.subpool = tx.state.into();
 
-                if tx_id.eq(id) {
+                if inserted_tx_id.eq(id) {
                     // if it is the new transaction, track the state
                     state = tx.state;
                 } else {
@@ -1254,7 +1259,7 @@ impl AllTransactions {
 
         // If this wasn't a replacement transaction we need to update the counter.
         if replaced_tx.is_none() {
-            self.tx_inc(tx_id.sender);
+            self.tx_inc(inserted_tx_id.sender);
         }
 
         Ok(InsertOk { transaction, move_to: state.into(), state, replaced_tx, updates })

From b5aa94301cf639a50a03ede556c16cc0695e9248 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Wed, 30 Aug 2023 16:52:51 -0700
Subject: [PATCH 567/722] feat: add blob fields to receipt type (#4420)

---
 crates/primitives/src/transaction/mod.rs            | 10 ++++++++++
 crates/rpc/rpc-types/src/eth/transaction/receipt.rs |  8 ++++++++
 crates/rpc/rpc/src/eth/api/transactions.rs          |  4 ++++
 3 files changed, 22 insertions(+)

diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs
index a47471e96e65..ade66d1d9a75 100644
--- a/crates/primitives/src/transaction/mod.rs
+++ b/crates/primitives/src/transaction/mod.rs
@@ -1,5 +1,6 @@
 use crate::{
     compression::{TRANSACTION_COMPRESSOR, TRANSACTION_DECOMPRESSOR},
+    constants::eip4844::DATA_GAS_PER_BLOB,
     keccak256, Address, Bytes, TxHash, H256,
 };
 pub use access_list::{AccessList, AccessListItem, AccessListWithGasUsed};
@@ -232,6 +233,15 @@ impl Transaction {
         }
     }
 
+    /// Returns the blob gas used for all blobs of the EIP-4844 transaction if it is an EIP-4844
+    /// transaction.
+    ///
+    /// This is the number of blobs times the [DATA_GAS_PER_BLOB] a single blob consumes.
+    pub fn blob_gas_used(&self) -> Option<u128> {
+        let tx = self.as_eip4844()?;
+        Some(tx.blob_versioned_hashes.len() as u128 * DATA_GAS_PER_BLOB as u128)
+    }
+
     /// Return the max priority fee per gas if the transaction is an EIP-1559 transaction, and
     /// otherwise return the gas price.
/// diff --git a/crates/rpc/rpc-types/src/eth/transaction/receipt.rs b/crates/rpc/rpc-types/src/eth/transaction/receipt.rs index e8339a9dbbe5..f4900a619488 100644 --- a/crates/rpc/rpc-types/src/eth/transaction/receipt.rs +++ b/crates/rpc/rpc-types/src/eth/transaction/receipt.rs @@ -22,6 +22,14 @@ pub struct TransactionReceipt { /// fields in 1559-style transactions are maximums (max fee + max priority fee), the amount /// that's actually paid by users can only be determined post-execution pub effective_gas_price: U128, + /// Blob gas used by the eip-4844 transaction + /// + /// This is None for non eip-4844 transactions + #[serde(skip_serializing_if = "Option::is_none")] + pub blob_gas_used: Option, + /// The price paid by the eip-4844 transaction per blob gas. + #[serde(skip_serializing_if = "Option::is_none")] + pub blob_gas_price: Option, /// Address of the sender pub from: Address, /// Address of the receiver. null when its a contract creation transaction. diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 87d72789267a..6f3e1fa7f4f3 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -874,6 +874,10 @@ pub(crate) fn build_transaction_receipt_with_block_receipts( state_root: None, logs_bloom: receipt.bloom_slow(), status_code: if receipt.success { Some(U64::from(1)) } else { Some(U64::from(0)) }, + + // EIP-4844 fields + blob_gas_price: transaction.transaction.max_fee_per_blob_gas().map(U128::from), + blob_gas_used: transaction.transaction.blob_gas_used().map(U128::from), }; match tx.transaction.kind() { From 8fff8fa8e977386d7c599ca212b75d605b5c88e5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 30 Aug 2023 17:01:04 -0700 Subject: [PATCH 568/722] fix: use payload v2 for v2 (#4421) --- crates/rpc/rpc-api/src/engine.rs | 6 +++--- crates/rpc/rpc-engine-api/src/engine_api.rs | 9 +++++---- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index 0fc169e62658..6d22fc89f9ef 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -3,8 +3,8 @@ use reth_primitives::{Address, BlockHash, BlockId, BlockNumberOrTag, Bytes, H256 use reth_rpc_types::{ engine::{ ExecutionPayloadBodiesV1, ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, - ExecutionPayloadV1, ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, - PayloadAttributes, PayloadId, PayloadStatus, TransitionConfiguration, + ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, ForkchoiceState, + ForkchoiceUpdated, PayloadAttributes, PayloadId, PayloadStatus, TransitionConfiguration, }, state::StateOverride, BlockOverrides, CallRequest, Filter, Log, RichBlock, SyncStatus, @@ -20,7 +20,7 @@ pub trait EngineApi { /// See also #[method(name = "newPayloadV2")] - async fn new_payload_v2(&self, payload: ExecutionPayloadV1) -> RpcResult; + async fn new_payload_v2(&self, payload: ExecutionPayloadV2) -> RpcResult; /// Post Cancun payload handler /// diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index c61731d5c569..136e589cd1ee 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -11,8 +11,9 @@ use reth_provider::{BlockReader, EvmEnvProvider, HeaderProvider, StateProviderFa use reth_rpc_api::EngineApiServer; use reth_rpc_types::engine::{ CancunPayloadFields, ExecutionPayload, ExecutionPayloadBodiesV1, 
ExecutionPayloadEnvelopeV2, - ExecutionPayloadEnvelopeV3, ExecutionPayloadV1, ExecutionPayloadV3, ForkchoiceUpdated, - PayloadAttributes, PayloadId, PayloadStatus, TransitionConfiguration, CAPABILITIES, + ExecutionPayloadEnvelopeV3, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, + ForkchoiceUpdated, PayloadAttributes, PayloadId, PayloadStatus, TransitionConfiguration, + CAPABILITIES, }; use reth_tasks::TaskSpawner; use std::sync::Arc; @@ -81,7 +82,7 @@ where /// See also pub async fn new_payload_v2( &self, - payload: ExecutionPayloadV1, + payload: ExecutionPayloadV2, ) -> EngineApiResult { let payload = ExecutionPayload::from(payload); let payload_or_attrs = PayloadOrAttributes::from_execution_payload(&payload, None); @@ -462,7 +463,7 @@ where /// Handler for `engine_newPayloadV2` /// See also - async fn new_payload_v2(&self, payload: ExecutionPayloadV1) -> RpcResult { + async fn new_payload_v2(&self, payload: ExecutionPayloadV2) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_newPayloadV2"); Ok(EngineApi::new_payload_v2(self, payload).await?) } From 893f4cf2a27034f4550d3f4443a0fa7c2cd2444b Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 30 Aug 2023 17:05:46 -0700 Subject: [PATCH 569/722] feat: validate payload versioned hashes (#4417) Co-authored-by: Matthias Seitz --- crates/consensus/beacon/src/engine/mod.rs | 87 +++++++++++++++++-- crates/primitives/src/block.rs | 5 ++ .../rpc/rpc-types/src/eth/engine/payload.rs | 3 + 3 files changed, 87 insertions(+), 8 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 71bc15da3f73..30ca834d0783 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -31,8 +31,8 @@ use reth_provider::{ }; use reth_prune::Pruner; use reth_rpc_types::engine::{ - CancunPayloadFields, ExecutionPayload, PayloadAttributes, PayloadStatus, PayloadStatusEnum, - PayloadValidationError, + CancunPayloadFields, ExecutionPayload, PayloadAttributes, PayloadError, PayloadStatus, + PayloadStatusEnum, PayloadValidationError, }; use reth_stages::{ControlFlow, Pipeline, PipelineError}; use reth_tasks::TaskSpawner; @@ -1056,10 +1056,7 @@ where payload: ExecutionPayload, cancun_fields: Option, ) -> Result { - let block = match self.ensure_well_formed_payload( - payload, - cancun_fields.map(|fields| fields.parent_beacon_block_root), - ) { + let block = match self.ensure_well_formed_payload(payload, cancun_fields) { Ok(block) => block, Err(status) => return Ok(status), }; @@ -1120,13 +1117,18 @@ where /// - missing or invalid base fee /// - invalid extra data /// - invalid transactions + /// - incorrect hash + /// - the versioned hashes passed with the payload do not exactly match transaction + /// versioned hashes fn ensure_well_formed_payload( &self, payload: ExecutionPayload, - parent_beacon_block_root: Option, + cancun_fields: Option, ) -> Result { let parent_hash = payload.parent_hash(); - let block = match payload.try_into_sealed_block(parent_beacon_block_root) { + let block = match payload.try_into_sealed_block( + cancun_fields.as_ref().map(|fields| fields.parent_beacon_block_root), + ) { Ok(block) => block, Err(error) => { error!(target: "consensus::engine", ?error, "Invalid payload"); @@ -1144,9 +1146,78 @@ where } }; + let block_versioned_hashes = block + .blob_transactions() + .iter() + .filter_map(|tx| tx.as_eip4844().map(|blob_tx| &blob_tx.blob_versioned_hashes)) + .flatten() + 
            .collect::<Vec<_>>();
+
+        self.validate_versioned_hashes(parent_hash, block_versioned_hashes, cancun_fields)?;
+
         Ok(block)
     }
 
+    /// Validates that the versioned hashes in the block match the versioned hashes passed in the
+    /// [CancunPayloadFields], if the cancun payload fields are provided. If the payload fields are
+    /// not provided, but versioned hashes exist in the block, this returns a [PayloadStatus] with
+    /// the [PayloadError::InvalidVersionedHashes] error.
+    ///
+    /// This validates versioned hashes according to the Engine API Cancun spec:
+    ///
+    fn validate_versioned_hashes(
+        &self,
+        parent_hash: H256,
+        block_versioned_hashes: Vec<&H256>,
+        cancun_fields: Option<CancunPayloadFields>,
+    ) -> Result<(), PayloadStatus> {
+        // This validates the following engine API rule:
+        //
+        // 3. Given the expected array of blob versioned hashes client software **MUST** run its
+        //    validation by taking the following steps:
+        //
+        //    1. Obtain the actual array by concatenating blob versioned hashes lists
+        //       (`tx.blob_versioned_hashes`) of each [blob
+        //       transaction](https://eips.ethereum.org/EIPS/eip-4844#new-transaction-type) included
+        //       in the payload, respecting the order of inclusion. If the payload has no blob
+        //       transactions the expected array **MUST** be `[]`.
+        //
+        //    2. Return `{status: INVALID, latestValidHash: null, validationError: errorMessage |
+        //       null}` if the expected and the actual arrays don't match.
+        //
+        // This validation **MUST** be instantly run in all cases even during active sync process.
+        if let Some(fields) = cancun_fields {
+            if block_versioned_hashes.len() != fields.versioned_hashes.len() {
+                // if the lengths don't match then we know that the payload is invalid
+                let latest_valid_hash =
+                    self.latest_valid_hash_for_invalid_payload(parent_hash, None);
+                let status = PayloadStatusEnum::from(PayloadError::InvalidVersionedHashes);
+                return Err(PayloadStatus::new(status, latest_valid_hash))
+            }
+
+            // we can use `zip` safely here because we already compared their length
+            let zipped_versioned_hashes =
+                fields.versioned_hashes.iter().zip(block_versioned_hashes);
+            for (payload_versioned_hash, block_versioned_hash) in zipped_versioned_hashes {
+                if payload_versioned_hash != block_versioned_hash {
+                    // One of the hashes does not match - return invalid
+                    let latest_valid_hash =
+                        self.latest_valid_hash_for_invalid_payload(parent_hash, None);
+                    let status = PayloadStatusEnum::from(PayloadError::InvalidVersionedHashes);
+                    return Err(PayloadStatus::new(status, latest_valid_hash))
+                }
+            }
+        } else if !block_versioned_hashes.is_empty() {
+            // there are versioned hashes in the block but no expected versioned hashes were
+            // provided in the new payload call, so the payload is invalid
+            let latest_valid_hash = self.latest_valid_hash_for_invalid_payload(parent_hash, None);
+            let status = PayloadStatusEnum::from(PayloadError::InvalidVersionedHashes);
+            return Err(PayloadStatus::new(status, latest_valid_hash))
+        }
+
+        Ok(())
+    }
+
     /// When the pipeline or the pruner is active, the tree is unable to commit any additional
     /// blocks since the pipeline holds exclusive access to the database.
     ///
diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs
index 5248787c15d6..01a477482bc1 100644
--- a/crates/primitives/src/block.rs
+++ b/crates/primitives/src/block.rs
@@ -161,6 +161,11 @@ impl SealedBlock {
         )
     }
 
+    /// Returns only the blob transactions, if any, from the block body.
+ pub fn blob_transactions(&self) -> Vec<&TransactionSigned> { + self.body.iter().filter(|tx| tx.is_eip4844()).collect() + } + /// Expensive operation that recovers transaction signer. See [SealedBlockWithSenders]. pub fn senders(&self) -> Option> { TransactionSigned::recover_signers(&self.body, self.body.len()) diff --git a/crates/rpc/rpc-types/src/eth/engine/payload.rs b/crates/rpc/rpc-types/src/eth/engine/payload.rs index 7749ce41b40e..3026260d339d 100644 --- a/crates/rpc/rpc-types/src/eth/engine/payload.rs +++ b/crates/rpc/rpc-types/src/eth/engine/payload.rs @@ -557,6 +557,9 @@ pub enum PayloadError { /// The block hash provided with the payload. consensus: H256, }, + /// Expected blob versioned hashes do not match the given transactions. + #[error("Expected blob versioned hashes do not match the given transactions")] + InvalidVersionedHashes, /// Encountered decoding error. #[error(transparent)] Decode(#[from] reth_rlp::DecodeError), From 3d9e968b039022a0e217d5c236b96cb096db7b5b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 30 Aug 2023 17:30:45 -0700 Subject: [PATCH 570/722] feat: add eip4844 fields to rpc transaction (#4422) --- .../rpc-types-compat/src/transaction/mod.rs | 35 ++++++++++++------- .../rpc/rpc-types/src/eth/transaction/mod.rs | 10 ++++++ 2 files changed, 33 insertions(+), 12 deletions(-) diff --git a/crates/rpc/rpc-types-compat/src/transaction/mod.rs b/crates/rpc/rpc-types-compat/src/transaction/mod.rs index 9cda539ef1b2..eb9cc88e9a49 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/mod.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/mod.rs @@ -38,7 +38,7 @@ fn fill( transaction_index: Option, ) -> Transaction { let signer = tx.signer(); - let signed_tx = tx.into_signed(); + let mut signed_tx = tx.into_signed(); let to = match signed_tx.kind() { PrimitiveTransactionKind::Create => None, @@ -62,7 +62,9 @@ fn fill( }; let chain_id = signed_tx.chain_id().map(U64::from); - let access_list = match &signed_tx.transaction { + let mut blob_versioned_hashes = Vec::new(); + + let access_list = match &mut signed_tx.transaction { PrimitiveTransaction::Legacy(_) => None, PrimitiveTransaction::Eip2930(tx) => Some( tx.access_list @@ -84,16 +86,21 @@ fn fill( }) .collect(), ), - PrimitiveTransaction::Eip4844(tx) => Some( - tx.access_list - .0 - .iter() - .map(|item| AccessListItem { - address: item.address.0.into(), - storage_keys: item.storage_keys.iter().map(|key| key.0.into()).collect(), - }) - .collect(), - ), + PrimitiveTransaction::Eip4844(tx) => { + // extract the blob hashes from the transaction + blob_versioned_hashes = std::mem::take(&mut tx.blob_versioned_hashes); + + Some( + tx.access_list + .0 + .iter() + .map(|item| AccessListItem { + address: item.address.0.into(), + storage_keys: item.storage_keys.iter().map(|key| key.0.into()).collect(), + }) + .collect(), + ) + } }; let signature = @@ -119,5 +126,9 @@ fn fill( block_hash, block_number: block_number.map(U256::from), transaction_index, + + // EIP-4844 fields + max_fee_per_blob_gas: signed_tx.max_fee_per_blob_gas().map(U128::from), + blob_versioned_hashes, } } diff --git a/crates/rpc/rpc-types/src/eth/transaction/mod.rs b/crates/rpc/rpc-types/src/eth/transaction/mod.rs index 6b6601af62a5..b2effca69e71 100644 --- a/crates/rpc/rpc-types/src/eth/transaction/mod.rs +++ b/crates/rpc/rpc-types/src/eth/transaction/mod.rs @@ -43,6 +43,9 @@ pub struct Transaction { /// The miner's tip. 
#[serde(skip_serializing_if = "Option::is_none")] pub max_priority_fee_per_gas: Option, + /// Configured max fee per blob gas for eip-4844 transactions + #[serde(skip_serializing_if = "Option::is_none")] + pub max_fee_per_blob_gas: Option, /// Data pub input: Bytes, /// All _flattened_ fields of the transaction signature. @@ -52,6 +55,9 @@ pub struct Transaction { pub signature: Option, /// The chain id of the transaction, if any. pub chain_id: Option, + /// Contains the blob hashes for eip-4844 transactions. + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub blob_versioned_hashes: Vec, /// EIP2930 /// /// Pre-pay to warm storage access. @@ -91,10 +97,12 @@ mod tests { y_parity: None, }), chain_id: Some(U64::from(17)), + blob_versioned_hashes: vec![], access_list: None, transaction_type: Some(U64::from(20)), max_fee_per_gas: Some(U128::from(21)), max_priority_fee_per_gas: Some(U128::from(22)), + max_fee_per_blob_gas: None, }; let serialized = serde_json::to_string(&transaction).unwrap(); assert_eq!( @@ -126,10 +134,12 @@ mod tests { y_parity: Some(Parity(true)), }), chain_id: Some(U64::from(17)), + blob_versioned_hashes: vec![], access_list: None, transaction_type: Some(U64::from(20)), max_fee_per_gas: Some(U128::from(21)), max_priority_fee_per_gas: Some(U128::from(22)), + max_fee_per_blob_gas: None, }; let serialized = serde_json::to_string(&transaction).unwrap(); assert_eq!( From cd71f689cdcd7c2ef99dd5af9d943196cfa3bf3a Mon Sep 17 00:00:00 2001 From: Alessandro Mazza <121622391+alessandromazza98@users.noreply.github.com> Date: Thu, 31 Aug 2023 03:58:02 +0200 Subject: [PATCH 571/722] feat: add a CLI flag for secondary nodes (#4029) Co-authored-by: Matthias Seitz --- bin/reth/src/cli/mod.rs | 18 ++++++++- bin/reth/src/node/mod.rs | 80 ++++++++++++++++++++++++++++++++++++++-- 2 files changed, 93 insertions(+), 5 deletions(-) diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index d63ed726366c..79b0ab00a6a3 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -10,7 +10,7 @@ use crate::{ stage, test_vectors, version::{LONG_VERSION, SHORT_VERSION}, }; -use clap::{ArgAction, Args, Parser, Subcommand, ValueEnum}; +use clap::{value_parser, ArgAction, Args, Parser, Subcommand, ValueEnum}; use reth_primitives::ChainSpec; use reth_tracing::{ tracing::{metadata::LevelFilter, Level, Subscriber}, @@ -50,6 +50,22 @@ pub struct Cli { )] chain: Arc, + /// Add a new instance of a node. + /// + /// Configures the ports of the node to avoid conflicts with the defaults. + /// This is useful for running multiple nodes on the same machine. + /// + /// Max number of instances is 200. It is chosen in a way so that it's not possible to have + /// port numbers that conflict with each other. 
+    ///
+    /// Changes to the following port numbers:
+    /// - DISCOVERY_PORT: default + `instance` - 1
+    /// - AUTH_PORT: default + `instance` * 100 - 100
+    /// - HTTP_RPC_PORT: default - `instance` + 1
+    /// - WS_RPC_PORT: default + `instance` * 2 - 2
+    #[arg(long, value_name = "INSTANCE", global = true, default_value_t = 1, value_parser = value_parser!(u16).range(..=200))]
+    instance: u16,
+
     #[clap(flatten)]
     logs: Logs,
 
diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs
index 84e04524898f..b85ff1c68ad5 100644
--- a/bin/reth/src/node/mod.rs
+++ b/bin/reth/src/node/mod.rs
@@ -20,7 +20,7 @@ use crate::{
     utils::get_single_header,
     version::SHORT_VERSION,
 };
-use clap::Parser;
+use clap::{value_parser, Parser};
 use eyre::Context;
 use fdlimit::raise_fd_limit;
 use futures::{future::Either, pin_mut, stream, stream_select, StreamExt};
@@ -127,9 +127,25 @@ pub struct NodeCommand {
     #[arg(long, value_name = "SOCKET", value_parser = parse_socket_address, help_heading = "Metrics")]
     pub metrics: Option<SocketAddr>,
 
+    /// Add a new instance of a node.
+    ///
+    /// Configures the ports of the node to avoid conflicts with the defaults.
+    /// This is useful for running multiple nodes on the same machine.
+    ///
+    /// Max number of instances is 200. It is chosen in a way so that it's not possible to have
+    /// port numbers that conflict with each other.
+    ///
+    /// Changes to the following port numbers:
+    /// - DISCOVERY_PORT: default + `instance` - 1
+    /// - AUTH_PORT: default + `instance` * 100 - 100
+    /// - HTTP_RPC_PORT: default - `instance` + 1
+    /// - WS_RPC_PORT: default + `instance` * 2 - 2
+    #[arg(long, value_name = "INSTANCE", global = true, default_value_t = 1, value_parser = value_parser!(u16).range(..=200))]
+    pub instance: u16,
+
     /// Overrides the KZG trusted setup by reading from the supplied file.
     #[arg(long, value_name = "PATH")]
-    trusted_setup_file: Option<PathBuf>,
+    pub trusted_setup_file: Option<PathBuf>,
 
     /// All networking related arguments
     #[clap(flatten)]
@@ -177,6 +193,7 @@ impl NodeCommand {
             chain,
             metrics,
             trusted_setup_file,
+            instance,
             network,
             rpc,
             txpool,
@@ -192,6 +209,7 @@ impl NodeCommand {
             config,
             chain,
             metrics,
+            instance,
             trusted_setup_file,
             network,
             rpc,
@@ -487,6 +505,9 @@ impl NodeCommand {
         let default_jwt_path = data_dir.jwt_path();
         let jwt_secret = self.rpc.jwt_secret(default_jwt_path)?;
 
+        // adjust rpc port numbers based on instance number
+        self.adjust_instance_ports();
+
         // Start RPC servers
         let (_rpc_server, _auth_server) = self
             .rpc
@@ -733,11 +754,19 @@ impl NodeCommand {
             .set_head(head)
             .listener_addr(SocketAddr::V4(SocketAddrV4::new(
                 Ipv4Addr::UNSPECIFIED,
-                self.network.port.unwrap_or(DEFAULT_DISCOVERY_PORT),
+                // set listener port based on instance number
+                match self.network.port {
+                    Some(port) => port + self.instance - 1,
+                    None => DEFAULT_DISCOVERY_PORT + self.instance - 1,
+                },
             )))
             .discovery_addr(SocketAddr::V4(SocketAddrV4::new(
                 Ipv4Addr::UNSPECIFIED,
-                self.network.discovery.port.unwrap_or(DEFAULT_DISCOVERY_PORT),
+                // set discovery port based on instance number
+                match self.network.discovery.port {
+                    Some(port) => port + self.instance - 1,
+                    None => DEFAULT_DISCOVERY_PORT + self.instance - 1,
+                },
             )))
             .build(ProviderFactory::new(db, self.chain.clone()))
     }
@@ -845,6 +874,16 @@ impl NodeCommand {
 
         Ok(pipeline)
     }
+
+    /// Change rpc port numbers based on the instance number.
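+    ///
+    /// With the default ports, `--instance 2` yields auth port 8651, http port 8544
+    /// and ws port 8548 (see the `parse_instance` test below).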
+ fn adjust_instance_ports(&mut self) { + // auth port is scaled by a factor of instance * 100 + self.rpc.auth_port += self.instance * 100 - 100; + // http port is scaled by a factor of -instance + self.rpc.http_port -= self.instance - 1; + // ws port is scaled by a factor of instance * 2 + self.rpc.ws_port += self.instance * 2 - 2; + } } /// Drives the [NetworkManager] future until a [Shutdown](reth_tasks::shutdown::Shutdown) signal is @@ -976,4 +1015,37 @@ mod tests { assert!(cmd.dev.dev); } + + #[test] + fn parse_instance() { + let mut cmd = NodeCommand::<()>::parse_from(["reth"]); + cmd.adjust_instance_ports(); + cmd.network.port = Some(DEFAULT_DISCOVERY_PORT + cmd.instance - 1); + // check rpc port numbers + assert_eq!(cmd.rpc.auth_port, 8551); + assert_eq!(cmd.rpc.http_port, 8545); + assert_eq!(cmd.rpc.ws_port, 8546); + // check network listening port number + assert_eq!(cmd.network.port.unwrap(), 30303); + + let mut cmd = NodeCommand::<()>::parse_from(["reth", "--instance", "2"]); + cmd.adjust_instance_ports(); + cmd.network.port = Some(DEFAULT_DISCOVERY_PORT + cmd.instance - 1); + // check rpc port numbers + assert_eq!(cmd.rpc.auth_port, 8651); + assert_eq!(cmd.rpc.http_port, 8544); + assert_eq!(cmd.rpc.ws_port, 8548); + // check network listening port number + assert_eq!(cmd.network.port.unwrap(), 30304); + + let mut cmd = NodeCommand::<()>::parse_from(["reth", "--instance", "3"]); + cmd.adjust_instance_ports(); + cmd.network.port = Some(DEFAULT_DISCOVERY_PORT + cmd.instance - 1); + // check rpc port numbers + assert_eq!(cmd.rpc.auth_port, 8751); + assert_eq!(cmd.rpc.http_port, 8543); + assert_eq!(cmd.rpc.ws_port, 8550); + // check network listening port number + assert_eq!(cmd.network.port.unwrap(), 30305); + } } From edf31806d886c8b0e8bacb572bd19923de274190 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 31 Aug 2023 11:27:07 +0100 Subject: [PATCH 572/722] feat(stages): respect `PruneModes` in Index History stages (#4382) --- bin/reth/src/node/mod.rs | 6 +- .../src/stages/index_account_history.rs | 114 ++++++++++++++++-- .../src/stages/index_storage_history.rs | 113 +++++++++++++++-- crates/stages/src/stages/mod.rs | 41 +++++-- 4 files changed, 235 insertions(+), 39 deletions(-) diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index b85ff1c68ad5..f06baaaf25fb 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -817,6 +817,8 @@ impl NodeCommand { let factory = factory.with_stack_config(stack_config); + let prune_modes = prune_config.map(|prune| prune.parts).unwrap_or_default(); + let header_mode = if continuous { HeaderSyncMode::Continuous } else { HeaderSyncMode::Tip(tip_rx) }; let pipeline = builder @@ -849,7 +851,7 @@ impl NodeCommand { .clean_threshold .max(stage_config.account_hashing.clean_threshold) .max(stage_config.storage_hashing.clean_threshold), - prune_config.map(|prune| prune.parts).unwrap_or_default(), + prune_modes.clone(), ) .with_metrics_tx(metrics_tx), ) @@ -865,9 +867,11 @@ impl NodeCommand { .set(TransactionLookupStage::new(stage_config.transaction_lookup.commit_threshold)) .set(IndexAccountHistoryStage::new( stage_config.index_account_history.commit_threshold, + prune_modes.clone(), )) .set(IndexStorageHistoryStage::new( stage_config.index_storage_history.commit_threshold, + prune_modes, )), ) .build(db, self.chain.clone()); diff --git a/crates/stages/src/stages/index_account_history.rs b/crates/stages/src/stages/index_account_history.rs index c13123d08688..b79af45d1ed2 100644 --- 
a/crates/stages/src/stages/index_account_history.rs +++ b/crates/stages/src/stages/index_account_history.rs @@ -1,33 +1,37 @@ use crate::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use reth_db::database::Database; -use reth_primitives::stage::{StageCheckpoint, StageId}; -use reth_provider::{AccountExtReader, DatabaseProviderRW, HistoryWriter}; +use reth_primitives::{ + stage::{StageCheckpoint, StageId}, + PruneCheckpoint, PruneModes, PrunePart, +}; +use reth_provider::{ + AccountExtReader, DatabaseProviderRW, HistoryWriter, PruneCheckpointReader, + PruneCheckpointWriter, +}; use std::fmt::Debug; /// Stage is indexing history the account changesets generated in /// [`ExecutionStage`][crate::stages::ExecutionStage]. For more information /// on index sharding take a look at [`reth_db::tables::AccountHistory`] -/// -/// Pruning: we don't need to store and act on [`reth_primitives::PruneModes`], -/// because this stage indexes the already pruned account changesets generated by -/// [`crate::stages::ExecutionStage`]. #[derive(Debug)] pub struct IndexAccountHistoryStage { /// Number of blocks after which the control /// flow will be returned to the pipeline for commit. pub commit_threshold: u64, + /// Pruning configuration. + pub prune_modes: PruneModes, } impl IndexAccountHistoryStage { /// Create new instance of [IndexAccountHistoryStage]. - pub fn new(commit_threshold: u64) -> Self { - Self { commit_threshold } + pub fn new(commit_threshold: u64, prune_modes: PruneModes) -> Self { + Self { commit_threshold, prune_modes } } } impl Default for IndexAccountHistoryStage { fn default() -> Self { - Self { commit_threshold: 100_000 } + Self { commit_threshold: 100_000, prune_modes: PruneModes::none() } } } @@ -42,8 +46,29 @@ impl Stage for IndexAccountHistoryStage { async fn execute( &mut self, provider: &DatabaseProviderRW<'_, &DB>, - input: ExecInput, + mut input: ExecInput, ) -> Result { + if let Some((target_prunable_block, prune_mode)) = + self.prune_modes.prune_target_block_account_history(input.target())? + { + if target_prunable_block > input.checkpoint().block_number { + input.checkpoint = Some(StageCheckpoint::new(target_prunable_block)); + + // Save prune checkpoint only if we don't have one already. + // Otherwise, pruner may skip the unpruned range of blocks. 
+ if provider.get_prune_checkpoint(PrunePart::AccountHistory)?.is_none() { + provider.save_prune_checkpoint( + PrunePart::AccountHistory, + PruneCheckpoint { + block_number: Some(target_prunable_block), + tx_number: None, + prune_mode, + }, + )?; + } + } + } + if input.target_reached() { return Ok(ExecOutput::done(input.checkpoint())) } @@ -98,7 +123,7 @@ mod tests { generators, generators::{random_block_range, random_changeset_range, random_contract_account_range}, }; - use reth_primitives::{hex_literal::hex, Address, BlockNumber, H160, H256, MAINNET}; + use reth_primitives::{hex_literal::hex, Address, BlockNumber, PruneMode, H160, H256, MAINNET}; const ADDRESS: H160 = H160(hex!("0000000000000000000000000000000000000001")); @@ -371,16 +396,76 @@ mod tests { ); } + #[tokio::test] + async fn insert_index_with_prune_modes() { + // init + let tx = TestTransaction::default(); + + // setup + tx.commit(|tx| { + // we just need first and last + tx.put::( + 0, + StoredBlockBodyIndices { tx_count: 3, ..Default::default() }, + ) + .unwrap(); + + tx.put::( + 100, + StoredBlockBodyIndices { tx_count: 5, ..Default::default() }, + ) + .unwrap(); + + // setup changeset that are going to be applied to history index + tx.put::(20, acc()).unwrap(); + tx.put::(36, acc()).unwrap(); + tx.put::(100, acc()).unwrap(); + Ok(()) + }) + .unwrap(); + + // run + let input = ExecInput { target: Some(100), ..Default::default() }; + let mut stage = IndexAccountHistoryStage { + prune_modes: PruneModes { + account_history: Some(PruneMode::Before(36)), + ..Default::default() + }, + ..Default::default() + }; + let factory = ProviderFactory::new(tx.tx.as_ref(), MAINNET.clone()); + let provider = factory.provider_rw().unwrap(); + let out = stage.execute(&provider, input).await.unwrap(); + assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(100), done: true }); + provider.commit().unwrap(); + + // verify + let table = cast(tx.table::().unwrap()); + assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![36, 100])])); + + // unwind + unwind(&tx, 100, 0).await; + + // verify initial state + let table = tx.table::().unwrap(); + assert!(table.is_empty()); + } + stage_test_suite_ext!(IndexAccountHistoryTestRunner, index_account_history); struct IndexAccountHistoryTestRunner { pub(crate) tx: TestTransaction, commit_threshold: u64, + prune_modes: PruneModes, } impl Default for IndexAccountHistoryTestRunner { fn default() -> Self { - Self { tx: TestTransaction::default(), commit_threshold: 1000 } + Self { + tx: TestTransaction::default(), + commit_threshold: 1000, + prune_modes: PruneModes::none(), + } } } @@ -392,7 +477,10 @@ mod tests { } fn stage(&self) -> Self::S { - Self::S { commit_threshold: self.commit_threshold } + Self::S { + commit_threshold: self.commit_threshold, + prune_modes: self.prune_modes.clone(), + } } } diff --git a/crates/stages/src/stages/index_storage_history.rs b/crates/stages/src/stages/index_storage_history.rs index 0f077fac5874..8d03bb20d54b 100644 --- a/crates/stages/src/stages/index_storage_history.rs +++ b/crates/stages/src/stages/index_storage_history.rs @@ -1,33 +1,36 @@ use crate::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use reth_db::{database::Database, models::BlockNumberAddress}; -use reth_primitives::stage::{StageCheckpoint, StageId}; -use reth_provider::{DatabaseProviderRW, HistoryWriter, StorageReader}; +use reth_primitives::{ + stage::{StageCheckpoint, StageId}, + PruneCheckpoint, PruneModes, PrunePart, +}; +use reth_provider::{ + DatabaseProviderRW, 
HistoryWriter, PruneCheckpointReader, PruneCheckpointWriter, StorageReader, +}; use std::fmt::Debug; /// Stage is indexing history the account changesets generated in /// [`ExecutionStage`][crate::stages::ExecutionStage]. For more information /// on index sharding take a look at [`reth_db::tables::StorageHistory`]. -/// -/// Pruning: we don't need to store and act on [`reth_primitives::PruneModes`], -/// because this stage indexes the already pruned storage changesets generated by -/// [`crate::stages::ExecutionStage`]. #[derive(Debug)] pub struct IndexStorageHistoryStage { /// Number of blocks after which the control /// flow will be returned to the pipeline for commit. pub commit_threshold: u64, + /// Pruning configuration. + pub prune_modes: PruneModes, } impl IndexStorageHistoryStage { /// Create new instance of [IndexStorageHistoryStage]. - pub fn new(commit_threshold: u64) -> Self { - Self { commit_threshold } + pub fn new(commit_threshold: u64, prune_modes: PruneModes) -> Self { + Self { commit_threshold, prune_modes } } } impl Default for IndexStorageHistoryStage { fn default() -> Self { - Self { commit_threshold: 100_000 } + Self { commit_threshold: 100_000, prune_modes: PruneModes::none() } } } @@ -42,8 +45,29 @@ impl Stage for IndexStorageHistoryStage { async fn execute( &mut self, provider: &DatabaseProviderRW<'_, &DB>, - input: ExecInput, + mut input: ExecInput, ) -> Result { + if let Some((target_prunable_block, prune_mode)) = + self.prune_modes.prune_target_block_storage_history(input.target())? + { + if target_prunable_block > input.checkpoint().block_number { + input.checkpoint = Some(StageCheckpoint::new(target_prunable_block)); + + // Save prune checkpoint only if we don't have one already. + // Otherwise, pruner may skip the unpruned range of blocks. 
+ if provider.get_prune_checkpoint(PrunePart::StorageHistory)?.is_none() { + provider.save_prune_checkpoint( + PrunePart::StorageHistory, + PruneCheckpoint { + block_number: Some(target_prunable_block), + tx_number: None, + prune_mode, + }, + )?; + } + } + } + if input.target_reached() { return Ok(ExecOutput::done(input.checkpoint())) } @@ -98,7 +122,7 @@ mod tests { generators::{random_block_range, random_changeset_range, random_contract_account_range}, }; use reth_primitives::{ - hex_literal::hex, Address, BlockNumber, StorageEntry, H160, H256, MAINNET, U256, + hex_literal::hex, Address, BlockNumber, PruneMode, StorageEntry, H160, H256, MAINNET, U256, }; const ADDRESS: H160 = H160(hex!("0000000000000000000000000000000000000001")); @@ -385,16 +409,76 @@ mod tests { ); } + #[tokio::test] + async fn insert_index_with_prune_modes() { + // init + let tx = TestTransaction::default(); + + // setup + tx.commit(|tx| { + // we just need first and last + tx.put::( + 0, + StoredBlockBodyIndices { tx_count: 3, ..Default::default() }, + ) + .unwrap(); + + tx.put::( + 100, + StoredBlockBodyIndices { tx_count: 5, ..Default::default() }, + ) + .unwrap(); + + // setup changeset that are going to be applied to history index + tx.put::(trns(20), storage(STORAGE_KEY)).unwrap(); + tx.put::(trns(36), storage(STORAGE_KEY)).unwrap(); + tx.put::(trns(100), storage(STORAGE_KEY)).unwrap(); + Ok(()) + }) + .unwrap(); + + // run + let input = ExecInput { target: Some(100), ..Default::default() }; + let mut stage = IndexStorageHistoryStage { + prune_modes: PruneModes { + storage_history: Some(PruneMode::Before(36)), + ..Default::default() + }, + ..Default::default() + }; + let factory = ProviderFactory::new(tx.tx.as_ref(), MAINNET.clone()); + let provider = factory.provider_rw().unwrap(); + let out = stage.execute(&provider, input).await.unwrap(); + assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(100), done: true }); + provider.commit().unwrap(); + + // verify + let table = cast(tx.table::().unwrap()); + assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![36, 100]),])); + + // unwind + unwind(&tx, 100, 0).await; + + // verify initial state + let table = tx.table::().unwrap(); + assert!(table.is_empty()); + } + stage_test_suite_ext!(IndexStorageHistoryTestRunner, index_storage_history); struct IndexStorageHistoryTestRunner { pub(crate) tx: TestTransaction, commit_threshold: u64, + prune_modes: PruneModes, } impl Default for IndexStorageHistoryTestRunner { fn default() -> Self { - Self { tx: TestTransaction::default(), commit_threshold: 1000 } + Self { + tx: TestTransaction::default(), + commit_threshold: 1000, + prune_modes: PruneModes::none(), + } } } @@ -406,7 +490,10 @@ mod tests { } fn stage(&self) -> Self::S { - Self::S { commit_threshold: self.commit_threshold } + Self::S { + commit_threshold: self.commit_threshold, + prune_modes: self.prune_modes.clone(), + } } } diff --git a/crates/stages/src/stages/mod.rs b/crates/stages/src/stages/mod.rs index 1adc72bdf969..87e045aee9eb 100644 --- a/crates/stages/src/stages/mod.rs +++ b/crates/stages/src/stages/mod.rs @@ -135,7 +135,7 @@ mod tests { Factory::new(Arc::new(ChainSpecBuilder::mainnet().berlin_activated().build())), ExecutionStageThresholds { max_blocks: Some(100), max_changes: None }, MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, - prune_modes, + prune_modes.clone(), ); execution_stage.execute(&provider, input).await.unwrap(); @@ -155,19 +155,36 @@ mod tests { ); // Check AccountHistory - let mut acc_indexing_stage = 
IndexAccountHistoryStage::default(); - acc_indexing_stage.execute(&provider, input).await.unwrap(); - let mut account_history: Cursor<'_, RW, AccountHistory> = - provider.tx_ref().cursor_read::().unwrap(); - assert_eq!(account_history.walk(None).unwrap().count(), expect_num_acc_changesets); + let mut acc_indexing_stage = + IndexAccountHistoryStage { prune_modes: prune_modes.clone(), ..Default::default() }; + + if let Some(PruneMode::Full) = prune_modes.account_history { + // Full is not supported + assert!(acc_indexing_stage.execute(&provider, input).await.is_err()); + } else { + acc_indexing_stage.execute(&provider, input).await.unwrap(); + let mut account_history: Cursor<'_, RW, AccountHistory> = + provider.tx_ref().cursor_read::().unwrap(); + assert_eq!(account_history.walk(None).unwrap().count(), expect_num_acc_changesets); + } // Check StorageHistory - let mut storage_indexing_stage = IndexStorageHistoryStage::default(); - storage_indexing_stage.execute(&provider, input).await.unwrap(); - - let mut storage_history = - provider.tx_ref().cursor_read::().unwrap(); - assert_eq!(storage_history.walk(None).unwrap().count(), expect_num_storage_changesets); + let mut storage_indexing_stage = + IndexStorageHistoryStage { prune_modes: prune_modes.clone(), ..Default::default() }; + + if let Some(PruneMode::Full) = prune_modes.storage_history { + // Full is not supported + assert!(acc_indexing_stage.execute(&provider, input).await.is_err()); + } else { + storage_indexing_stage.execute(&provider, input).await.unwrap(); + + let mut storage_history = + provider.tx_ref().cursor_read::().unwrap(); + assert_eq!( + storage_history.walk(None).unwrap().count(), + expect_num_storage_changesets + ); + } }; // In an unpruned configuration there is 1 receipt, 3 changed accounts and 1 changed From 71cbddc2336b58c2f972e4554266252e44aeb390 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 31 Aug 2023 13:34:27 +0100 Subject: [PATCH 573/722] docs(book): pruning & full node RPC breakdown & improvements (#4428) --- book/SUMMARY.md | 2 +- book/run/pruning.md | 172 ++++++++++++++++++++++++++++++++++++----- book/run/run-a-node.md | 2 +- 3 files changed, 155 insertions(+), 21 deletions(-) diff --git a/book/SUMMARY.md b/book/SUMMARY.md index 0bab86a8bb71..c75636428d65 100644 --- a/book/SUMMARY.md +++ b/book/SUMMARY.md @@ -13,7 +13,7 @@ 1. [Metrics](./run/observability.md) 1. [Configuring Reth](./run/config.md) 1. [Transaction types](./run/transactions.md) - 1. [Pruning](./run/pruning.md) + 1. [Pruning & Full Node](./run/pruning.md) 1. [Ports](./run/ports.md) 1. [Troubleshooting](./run/troubleshooting.md) 1. [Interacting with Reth over JSON-RPC](./jsonrpc/intro.md) diff --git a/book/run/pruning.md b/book/run/pruning.md index e9aa69ec5e68..ad47e0a274a7 100644 --- a/book/run/pruning.md +++ b/book/run/pruning.md @@ -1,4 +1,4 @@ -# Pruning +# Pruning & Full Node > Pruning and full node are new features of Reth, > and we will be happy to hear about your experience using them either @@ -26,6 +26,12 @@ the initial sync. Turning Archive into Pruned, or Pruned into Full is not suppor Default mode, follow the steps from the previous chapter on [how to run on mainnet or official testnets](./mainnet.md). +### Pruned Node + +To run Reth as a pruned node configured through a [custom configuration](./config.md#the-prune-section), +modify the `reth.toml` file and run Reth in the same way as archive node by following the steps from +the previous chapter on [how to run on mainnet or official testnets](./mainnet.md). 
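+
+For example, a minimal `[prune]` section might look like this (an illustrative
+sketch; the values are placeholders to adapt, not recommendations):
+
+```toml
+[prune]
+# run the pruner every 5 blocks
+block_interval = 5
+
+[prune.parts]
+# keep only the last 100_000 blocks of account and storage history
+account_history = { distance = 100_000 }
+storage_history = { distance = 100_000 }
+```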
+ ### Full Node To run Reth as a full node, follow the steps from the previous chapter on @@ -38,31 +44,21 @@ RUST_LOG=info reth node \ --authrpc.port 8551 ``` -### Pruned Node - -To run Reth as a pruned node configured through a [custom configuration](./config.md#the-prune-section), -modify the `reth.toml` file and run Reth in the same way as archive node by following the steps from -the previous chapter on [how to run on mainnet or official testnets](./mainnet.md). - ## Size All numbers are as of August 2023 at block number 17.9M for mainnet. -### Archive +### Archive Node Archive node occupies at least 2.1TB. You can track the growth of Reth archive node size with our [public Grafana dashboard](https://reth.paradigm.xyz/d/2k8BXz24k/reth?orgId=1&refresh=30s&viewPanel=52). -### Full - -Full node occupies 1TB at the peak, and slowly goes down to 920GB. - -### Pruned +### Pruned Node Different parts take up different amounts of disk space. -If pruned fully, this is the total freed space you'll get, per part: +If pruned fully, this is the total freed space you'll get, per part: | Part | Size | |--------------------|-------| @@ -72,6 +68,42 @@ If pruned fully, this is the total freed space you'll get, per part: | Account History | 230GB | | Storage History | 680GB | +### Full Node + +Full node occupies at least 950GB. + +Essentially, the full node is the same as following configuration for the pruned node: +```toml +[prune] +block_interval = 5 + +[prune.parts] +sender_recovery = { distance = 128 } +# transaction_lookup is not pruned +receipts = { before = 11052984 } # Beacon Deposit Contract deployment block: https://etherscan.io/tx/0xe75fb554e433e03763a1560646ee22dcb74e5274b34c5ad644e7c0f619a7e1d0 +account_history = { distance = 128 } +storage_history = { distance = 128 } + +[prune.parts.receipts_log_filter] +# Prune all receipts, leaving only those which contain logs from address `0x00000000219ab540356cbb839cbe05303d7705fa`, +# starting from the block 11052984. This leaves receipts with the logs from the Beacon Deposit Contract. +"0x00000000219ab540356cbb839cbe05303d7705fa" = { before = 11052984 } +``` + +Meaning, it prunes: +- Account History and Storage History up to the last 128 blocks +- Sender Recovery up to the last 128 blocks. The caveat is that it's pruned gradually after the initial sync +is completed, so the disk space is reclaimed slowly. +- Receipts up to the last 128 blocks, preserving all receipts with the logs from Beacon Deposit Contract + +Given the aforementioned part sizes, we get the following full node size: +```text +Archive Node - Receipts - AccountHistory - StorageHistory = Full Node +``` +```text +2.1TB - 240GB - 230GB - 680GB = 950GB +``` + ## RPC support As it was mentioned in the [pruning configuration chapter](./config.md#the-prune-section), there are several parts @@ -85,11 +117,113 @@ which can be pruned independently of each other: Pruning of each of these parts disables different RPC methods, because the historical data or lookup indexes become unavailable. +### Full Node + +The following tables describe RPC methods available in the full node. 
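+
+For example, you can check whether a given receipt is still within the retained
+window over JSON-RPC (substitute any transaction hash; receipts outside the
+window typically come back as `null`):
+
+```bash
+curl -s -X POST -H 'Content-Type: application/json' \
+  --data '{"jsonrpc":"2.0","method":"eth_getTransactionReceipt","params":["<tx hash>"],"id":1}' \
+  http://127.0.0.1:8545
+```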
+ + +#### `debug` namespace + +| RPC | Note | +|----------------------------|----------------------------------------------------------| +| `debug_getRawBlock` | | +| `debug_getRawHeader` | | +| `debug_getRawReceipts` | Only for the last 128 blocks and Beacon Deposit Contract | +| `debug_getRawTransaction` | | +| `debug_traceBlock` | Only for the last 128 blocks | +| `debug_traceBlockByHash` | Only for the last 128 blocks | +| `debug_traceBlockByNumber` | Only for the last 128 blocks | +| `debug_traceCall` | Only for the last 128 blocks | +| `debug_traceCallMany` | Only for the last 128 blocks | +| `debug_traceTransaction` | Only for the last 128 blocks | + + +#### `eth` namespace + +| RPC / Part | Note | +|-------------------------------------------|----------------------------------------------------------| +| `eth_accounts` | | +| `eth_blockNumber` | | +| `eth_call` | Only for the last 128 blocks | +| `eth_chainId` | | +| `eth_createAccessList` | Only for the last 128 blocks | +| `eth_estimateGas` | Only for the last 128 blocks | +| `eth_feeHistory` | | +| `eth_gasPrice` | | +| `eth_getBalance` | Only for the last 128 blocks | +| `eth_getBlockByHash` | | +| `eth_getBlockByNumber` | | +| `eth_getBlockReceipts` | Only for the last 128 blocks and Beacon Deposit Contract | +| `eth_getBlockTransactionCountByHash` | | +| `eth_getBlockTransactionCountByNumber` | | +| `eth_getCode` | | +| `eth_getFilterChanges` | | +| `eth_getFilterLogs` | Only for the last 128 blocks and Beacon Deposit Contract | +| `eth_getLogs` | Only for the last 128 blocks and Beacon Deposit Contract | +| `eth_getStorageAt` | Only for the last 128 blocks | +| `eth_getTransactionByBlockHashAndIndex` | | +| `eth_getTransactionByBlockNumberAndIndex` | | +| `eth_getTransactionByHash` | | +| `eth_getTransactionCount` | Only for the last 128 blocks | +| `eth_getTransactionReceipt` | Only for the last 128 blocks and Beacon Deposit Contract | +| `eth_getUncleByBlockHashAndIndex` | | +| `eth_getUncleByBlockNumberAndIndex` | | +| `eth_getUncleCountByBlockHash` | | +| `eth_getUncleCountByBlockNumber` | | +| `eth_maxPriorityFeePerGas` | | +| `eth_mining` | | +| `eth_newBlockFilter` | | +| `eth_newFilter` | | +| `eth_newPendingTransactionFilter` | | +| `eth_protocolVersion` | | +| `eth_sendRawTransaction` | | +| `eth_sendTransaction` | | +| `eth_sign` | | +| `eth_signTransaction` | | +| `eth_signTypedData` | | +| `eth_subscribe` | | +| `eth_syncing` | | +| `eth_uninstallFilter` | | +| `eth_unsubscribe` | | + +#### `net` namespace + +| RPC / Part | +|-----------------| +| `net_listening` | +| `net_peerCount` | +| `net_version` | + +#### `trace` namespace + +| RPC / Part | Note | +|---------------------------------|------------------------------| +| `trace_block` | Only for the last 128 blocks | +| `trace_call` | Only for the last 128 blocks | +| `trace_callMany` | Only for the last 128 blocks | +| `trace_get` | Only for the last 128 blocks | +| `trace_rawTransaction` | Only for the last 128 blocks | +| `trace_replayBlockTransactions` | Only for the last 128 blocks | +| `trace_replayTransaction` | Only for the last 128 blocks | +| `trace_transaction` | Only for the last 128 blocks | + +#### `txpool` namespace + +| RPC / Part | +|----------------------| +| `txpool_content` | +| `txpool_contentFrom` | +| `txpool_inspect` | +| `txpool_status` | + + +### Pruned Node + The following tables describe the requirements for prune parts, per RPC method: - ✅ – if the part is pruned, the RPC method still works - ❌ - if the part is pruned, the RPC 
method doesn't work anymore -### `debug` namespace +#### `debug` namespace | RPC / Part | Sender Recovery | Transaction Lookup | Receipts | Account History | Storage History | |----------------------------|-----------------|--------------------|----------|-----------------|-----------------| @@ -105,7 +239,7 @@ The following tables describe the requirements for prune parts, per RPC method: | `debug_traceTransaction` | ✅ | ✅ | ✅ | ❌ | ❌ | -### `eth` namespace +#### `eth` namespace | RPC / Part | Sender Recovery | Transaction Lookup | Receipts | Account History | Storage History | |-------------------------------------------|-----------------|--------------------|----------|-----------------|-----------------| @@ -153,7 +287,7 @@ The following tables describe the requirements for prune parts, per RPC method: | `eth_uninstallFilter` | ✅ | ✅ | ✅ | ✅ | ✅ | | `eth_unsubscribe` | ✅ | ✅ | ✅ | ✅ | ✅ | -### `net` namespace +#### `net` namespace | RPC / Part | Sender Recovery | Transaction Lookup | Receipts | Account History | Storage History | |-----------------|-----------------|--------------------|----------|-----------------|-----------------| @@ -161,7 +295,7 @@ The following tables describe the requirements for prune parts, per RPC method: | `net_peerCount` | ✅ | ✅ | ✅ | ✅ | ✅ | | `net_version` | ✅ | ✅ | ✅ | ✅ | ✅ | -### `trace` namespace +#### `trace` namespace | RPC / Part | Sender Recovery | Transaction Lookup | Receipts | Account History | Storage History | |---------------------------------|-----------------|--------------------|----------|-----------------|-----------------| @@ -174,7 +308,7 @@ The following tables describe the requirements for prune parts, per RPC method: | `trace_replayTransaction` | ✅ | ❌ | ✅ | ❌ | ❌ | | `trace_transaction` | ✅ | ❌ | ✅ | ❌ | ❌ | -### `txpool` namespace +#### `txpool` namespace | RPC / Part | Sender Recovery | Transaction Lookup | Receipts | Account History | Storage History | |----------------------|-----------------|--------------------|----------|-----------------|-----------------| diff --git a/book/run/run-a-node.md b/book/run/run-a-node.md index 06a3f8f2d64f..67d73614a2f5 100644 --- a/book/run/run-a-node.md +++ b/book/run/run-a-node.md @@ -7,7 +7,7 @@ In this chapter we'll go through a few different topics you'll encounter when ru 1. [Logs and Observability](./observability.md) 1. [Configuring reth.toml](./config.md) 1. [Transaction types](./transactions.md) -1. [Pruning](./pruning.md) +1. [Pruning & Full Node](./pruning.md) 1. [Ports](./ports.md) 1. 
[Troubleshooting](./troubleshooting.md) From d890053084e56872ed6415be7fa64e20c432d8be Mon Sep 17 00:00:00 2001 From: JosepBove Date: Thu, 31 Aug 2023 14:48:20 +0200 Subject: [PATCH 574/722] Add BlobStore metrics to grafana (#4424) Co-authored-by: Alexey Shekhirin --- etc/grafana/dashboards/overview.json | 205 +++++++++++++++++++++++---- 1 file changed, 178 insertions(+), 27 deletions(-) diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 628f53d529c4..abede2b745c8 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -96,6 +96,7 @@ "liveNow": false, "panels": [ { + "collapsed": false, "gridPos": { "h": 1, "w": 24, @@ -3632,13 +3633,163 @@ "title": "Network transaction channel", "type": "timeseries" }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Tracks the entries, byte size, failed inserts and file deletes of the blob store", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 135 + }, + "id": 115, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_blobstore_entries{instance=~\"$instance\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Entries", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_blobstore_byte_size{instance=~\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Bytesize", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_blobstore_failed_inserts{instance=~\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Failed Inserts", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_blobstore_failed_deletes{instance=~\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + 
"includeNullMetadata": true, + "instant": false, + "legendFormat": "Failed Deletes", + "range": true, + "refId": "D", + "useBackend": false + } + ], + "title": "Blob store", + "type": "timeseries" + }, { "collapsed": false, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 135 + "y": 143 }, "id": 79, "panels": [], @@ -3710,7 +3861,7 @@ "h": 8, "w": 12, "x": 0, - "y": 136 + "y": 144 }, "id": 74, "options": { @@ -3805,7 +3956,7 @@ "h": 8, "w": 12, "x": 12, - "y": 136 + "y": 144 }, "id": 80, "options": { @@ -3900,7 +4051,7 @@ "h": 8, "w": 12, "x": 0, - "y": 144 + "y": 152 }, "id": 81, "options": { @@ -3995,7 +4146,7 @@ "h": 8, "w": 12, "x": 12, - "y": 144 + "y": 152 }, "id": 114, "options": { @@ -4033,7 +4184,7 @@ "h": 1, "w": 24, "x": 0, - "y": 152 + "y": 160 }, "id": 87, "panels": [], @@ -4105,7 +4256,7 @@ "h": 8, "w": 12, "x": 0, - "y": 153 + "y": 161 }, "id": 83, "options": { @@ -4199,7 +4350,7 @@ "h": 8, "w": 12, "x": 12, - "y": 153 + "y": 161 }, "id": 84, "options": { @@ -4305,7 +4456,7 @@ "h": 8, "w": 12, "x": 0, - "y": 161 + "y": 169 }, "id": 85, "options": { @@ -4342,7 +4493,7 @@ "h": 1, "w": 24, "x": 0, - "y": 169 + "y": 177 }, "id": 68, "panels": [], @@ -4414,7 +4565,7 @@ "h": 8, "w": 12, "x": 0, - "y": 170 + "y": 178 }, "id": 60, "options": { @@ -4508,7 +4659,7 @@ "h": 8, "w": 12, "x": 12, - "y": 170 + "y": 178 }, "id": 62, "options": { @@ -4602,7 +4753,7 @@ "h": 8, "w": 12, "x": 0, - "y": 178 + "y": 186 }, "id": 64, "options": { @@ -4639,7 +4790,7 @@ "h": 1, "w": 24, "x": 0, - "y": 186 + "y": 194 }, "id": 97, "panels": [], @@ -4709,7 +4860,7 @@ "h": 8, "w": 12, "x": 0, - "y": 187 + "y": 195 }, "id": 98, "options": { @@ -4870,7 +5021,7 @@ "h": 8, "w": 12, "x": 12, - "y": 187 + "y": 195 }, "id": 101, "options": { @@ -4966,7 +5117,7 @@ "h": 8, "w": 12, "x": 0, - "y": 195 + "y": 203 }, "id": 99, "options": { @@ -5062,7 +5213,7 @@ "h": 8, "w": 12, "x": 12, - "y": 195 + "y": 203 }, "id": 100, "options": { @@ -5100,7 +5251,7 @@ "h": 1, "w": 24, "x": 0, - "y": 203 + "y": 211 }, "id": 105, "panels": [], @@ -5171,7 +5322,7 @@ "h": 8, "w": 12, "x": 0, - "y": 204 + "y": 212 }, "id": 106, "options": { @@ -5267,7 +5418,7 @@ "h": 8, "w": 12, "x": 12, - "y": 204 + "y": 212 }, "id": 107, "options": { @@ -5305,7 +5456,7 @@ "h": 1, "w": 24, "x": 0, - "y": 212 + "y": 220 }, "id": 108, "panels": [], @@ -5328,7 +5479,7 @@ "h": 8, "w": 12, "x": 0, - "y": 213 + "y": 221 }, "hiddenSeries": false, "id": 109, @@ -5416,7 +5567,7 @@ "h": 8, "w": 12, "x": 12, - "y": 213 + "y": 221 }, "hiddenSeries": false, "id": 110, @@ -5513,7 +5664,7 @@ "h": 8, "w": 12, "x": 0, - "y": 221 + "y": 229 }, "id": 111, "maxDataPoints": 25, @@ -5602,7 +5753,7 @@ "h": 8, "w": 12, "x": 12, - "y": 221 + "y": 229 }, "id": 112, "maxDataPoints": 25, @@ -5706,6 +5857,6 @@ "timezone": "", "title": "reth", "uid": "2k8BXz24x", - "version": 8, + "version": 9, "weekStart": "" } \ No newline at end of file From d0a5a19b0dcfdb6201af6ef4cda73783ce15a28f Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Thu, 31 Aug 2023 06:10:17 -0700 Subject: [PATCH 575/722] fix(pruning): dont check pruning on every storage change for the same block (#4402) --- crates/storage/provider/src/post_state/mod.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/storage/provider/src/post_state/mod.rs b/crates/storage/provider/src/post_state/mod.rs index fc1d74af8f80..069ad602a401 100644 --- a/crates/storage/provider/src/post_state/mod.rs +++ b/crates/storage/provider/src/post_state/mod.rs @@ -529,11 
+529,11 @@ impl PostState { for (block_number, storage_changes) in std::mem::take(&mut self.storage_changes).inner.into_iter() { - for (address, mut storage) in storage_changes.into_iter() { - if self.prune_modes.should_prune_storage_history(block_number, tip) { - continue - } + if self.prune_modes.should_prune_storage_history(block_number, tip) { + continue + } + for (address, mut storage) in storage_changes.into_iter() { let storage_id = BlockNumberAddress((block_number, address)); // If the account was created and wiped at the same block, skip all storage changes From b839e394a45edbe7b2030fb370420ca771e5b728 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 31 Aug 2023 17:52:05 +0100 Subject: [PATCH 576/722] fix(grafana): peer disconnect reasons chart (#4429) --- etc/grafana/dashboards/overview.json | 87 ++++++++++------------------ 1 file changed, 32 insertions(+), 55 deletions(-) diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index abede2b745c8..326afbc3c049 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -1625,6 +1625,10 @@ "type": "timeseries" }, { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fieldConfig": { "defaults": { "color": { @@ -2354,8 +2358,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2555,8 +2558,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -2670,8 +2672,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2776,8 +2777,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2916,8 +2916,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3035,8 +3034,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3153,8 +3151,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3291,8 +3288,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3386,8 +3382,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3534,8 +3529,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3680,8 +3674,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3845,8 +3838,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3940,8 +3932,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -4035,8 +4026,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -4129,8 +4119,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -4240,8 +4229,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -4334,8 +4322,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -4440,8 +4427,7 @@ "mode": "absolute", "steps": [ 
{ - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -4549,8 +4535,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -4643,8 +4628,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -4737,8 +4721,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -4843,8 +4826,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -5004,8 +4986,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -5100,8 +5081,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -5196,8 +5176,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -5305,8 +5284,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -5401,8 +5379,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -5857,6 +5834,6 @@ "timezone": "", "title": "reth", "uid": "2k8BXz24x", - "version": 9, + "version": 10, "weekStart": "" } \ No newline at end of file From 1a40daef8aa077275c7e03003ed9b8dc6d36cf6e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 31 Aug 2023 12:09:27 -0700 Subject: [PATCH 577/722] docs: add aquamarine as dep to reth (#4433) --- Cargo.lock | 1 + Cargo.toml | 1 + bin/reth/Cargo.toml | 10 ++++++++++ bin/reth/src/lib.rs | 3 +++ crates/blockchain-tree/Cargo.toml | 2 +- crates/net/network/Cargo.toml | 2 +- crates/stages/Cargo.toml | 2 +- crates/transaction-pool/Cargo.toml | 2 +- 8 files changed, 19 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9ed01683261d..3f43893b50be 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5172,6 +5172,7 @@ dependencies = [ name = "reth" version = "0.1.0-alpha.8" dependencies = [ + "aquamarine", "backon", "boyer-moore-magiclen", "clap", diff --git a/Cargo.toml b/Cargo.toml index 98c77b92ac39..e80531040a33 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -112,6 +112,7 @@ ethers-signers = { version = "2.0", default-features = false } ethers-middleware = { version = "2.0", default-features = false } ## misc +aquamarine = "0.3" bytes = "1.4" bitflags = "2.3" tracing = "0.1.0" diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index d09d0b9666ce..cba10e316281 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -6,6 +6,15 @@ rust-version.workspace = true license.workspace = true homepage.workspace = true repository.workspace = true +description = """ +Reth node implementation +""" + +[package.metadata.cargo-udeps.ignore] +normal = [ + # Used for diagrams in docs + "aquamarine", +] [dependencies] # reth @@ -81,6 +90,7 @@ pin-project.workspace = true hyper = "0.14.25" # misc +aquamarine.workspace = true eyre = "0.6.8" clap = { version = "4", features = ["derive"] } tempfile = { version = "3.3.0" } diff --git a/bin/reth/src/lib.rs b/bin/reth/src/lib.rs index dbaa2ce58a60..c9e02bc34216 100644 --- a/bin/reth/src/lib.rs +++ b/bin/reth/src/lib.rs @@ -83,3 +83,6 @@ pub mod rpc { #[cfg(all(feature = "jemalloc", unix))] use jemallocator as _; + +// for rendering diagrams +use aquamarine as _; diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index 
e18e2562eeaf..70e3e1803b30 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -31,7 +31,7 @@ reth-metrics = { workspace = true, features = ["common"] } metrics.workspace = true # misc -aquamarine = "0.3.0" +aquamarine.workspace = true linked_hash_set = "0.1.4" [dev-dependencies] diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index 8ac11a46a167..0f4444a2486f 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -51,7 +51,7 @@ metrics.workspace = true # misc auto_impl = "1" -aquamarine = "0.3.0" +aquamarine.workspace = true tracing.workspace = true fnv = "1.0" thiserror.workspace = true diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index 7ae6f5fca871..01c3b78d7247 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -42,7 +42,7 @@ metrics.workspace = true # misc thiserror.workspace = true -aquamarine = "0.3.0" +aquamarine.workspace = true itertools.workspace = true rayon.workspace = true num-traits = "0.2.15" diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 9354ce2d1a86..a5ef62b83ebe 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -36,7 +36,7 @@ reth-metrics.workspace = true metrics.workspace = true # misc -aquamarine = "0.3.0" +aquamarine.workspace = true thiserror.workspace = true tracing.workspace = true serde = { workspace = true, features = ["derive", "rc"], optional = true } From d8a7ee2eb44b613d7fef23b9b9559148b66f985e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 31 Aug 2023 13:06:24 -0700 Subject: [PATCH 578/722] fix: listen for all transactions (#4436) --- examples/network-txpool.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/network-txpool.rs b/examples/network-txpool.rs index d9c61636cbe1..fbd6dfaee811 100644 --- a/examples/network-txpool.rs +++ b/examples/network-txpool.rs @@ -11,8 +11,8 @@ use reth_network::{config::rng_secret_key, NetworkConfig, NetworkManager}; use reth_provider::test_utils::NoopProvider; use reth_transaction_pool::{ blobstore::InMemoryBlobStore, validate::ValidTransaction, CoinbaseTipOrdering, - EthPooledTransaction, PoolTransaction, TransactionOrigin, TransactionPool, - TransactionValidationOutcome, TransactionValidator, + EthPooledTransaction, PoolTransaction, TransactionListenerKind, TransactionOrigin, + TransactionPool, TransactionValidationOutcome, TransactionValidator, }; #[tokio::main] @@ -46,7 +46,7 @@ async fn main() -> eyre::Result<()> { tokio::task::spawn(txpool); // listen for new transactions - let mut txs = pool.pending_transactions_listener(); + let mut txs = pool.pending_transactions_listener_for(TransactionListenerKind::All); while let Some(tx) = txs.recv().await { println!("Received new transaction: {:?}", tx); From a76da98316dfd8171df339b0405db993135a52c9 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 31 Aug 2023 13:36:50 -0700 Subject: [PATCH 579/722] feat: support admin_peers (#4435) --- crates/net/common/src/bandwidth_meter.rs | 5 ++ crates/net/eth-wire/src/capability.rs | 7 +++ crates/net/eth-wire/src/p2pstream.rs | 10 ++++ crates/net/network-api/src/lib.rs | 59 +++++++++++++++++++++++- crates/net/network-api/src/noop.rs | 7 ++- crates/net/network/src/network.rs | 18 ++++---- crates/net/network/src/session/handle.rs | 35 ++++++++------ crates/net/network/src/session/mod.rs | 59 +++++------------------- crates/rpc/rpc-api/src/admin.rs | 15 ++++-- 
crates/rpc/rpc-types/src/eth/syncing.rs | 9 ++-- crates/rpc/rpc/src/admin.rs | 43 ++++++++++++++--- 11 files changed, 177 insertions(+), 90 deletions(-) diff --git a/crates/net/common/src/bandwidth_meter.rs b/crates/net/common/src/bandwidth_meter.rs index b2c52a4f351c..a40f7432d1e8 100644 --- a/crates/net/common/src/bandwidth_meter.rs +++ b/crates/net/common/src/bandwidth_meter.rs @@ -113,6 +113,11 @@ impl MeteredStream { pub fn get_bandwidth_meter(&self) -> &BandwidthMeter { &self.meter } + + /// Returns the wrapped stream + pub fn inner(&self) -> &S { + &self.inner + } } impl AsyncRead for MeteredStream { diff --git a/crates/net/eth-wire/src/capability.rs b/crates/net/eth-wire/src/capability.rs index 6bdfd85bdce3..fd528d2fe77f 100644 --- a/crates/net/eth-wire/src/capability.rs +++ b/crates/net/eth-wire/src/capability.rs @@ -5,6 +5,7 @@ use reth_codecs::add_arbitrary_tests; use reth_primitives::bytes::{BufMut, Bytes}; use reth_rlp::{Decodable, DecodeError, Encodable, RlpDecodable, RlpEncodable}; use smol_str::SmolStr; +use std::fmt; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -72,6 +73,12 @@ impl Capability { } } +impl fmt::Display for Capability { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}/{}", self.name, self.version) + } +} + #[cfg(any(test, feature = "arbitrary"))] impl<'a> arbitrary::Arbitrary<'a> for Capability { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { diff --git a/crates/net/eth-wire/src/p2pstream.rs b/crates/net/eth-wire/src/p2pstream.rs index b46d3ecd6a35..025f0b347e31 100644 --- a/crates/net/eth-wire/src/p2pstream.rs +++ b/crates/net/eth-wire/src/p2pstream.rs @@ -75,6 +75,11 @@ impl UnauthedP2PStream { pub fn new(inner: S) -> Self { Self { inner } } + + /// Returns a reference to the inner stream. + pub fn inner(&self) -> &S { + &self.inner + } } impl UnauthedP2PStream @@ -242,6 +247,11 @@ impl P2PStream { } } + /// Returns a reference to the inner stream. + pub fn inner(&self) -> &S { + &self.inner + } + /// Sets a custom outgoing message buffer capacity. /// /// # Panics diff --git a/crates/net/network-api/src/lib.rs b/crates/net/network-api/src/lib.rs index f15d3fec4aa6..947af332c7e5 100644 --- a/crates/net/network-api/src/lib.rs +++ b/crates/net/network-api/src/lib.rs @@ -19,13 +19,14 @@ //! //! - `serde` (default): Enable serde support use async_trait::async_trait; -use reth_eth_wire::DisconnectReason; +use reth_eth_wire::{DisconnectReason, EthVersion, Status}; use reth_primitives::{NodeRecord, PeerId}; use reth_rpc_types::NetworkStatus; -use std::net::SocketAddr; +use std::{net::SocketAddr, sync::Arc}; pub use error::NetworkError; pub use reputation::{Reputation, ReputationChangeKind}; +use reth_eth_wire::capability::Capabilities; /// Network Error pub mod error; @@ -81,6 +82,9 @@ pub trait Peers: PeersInfo { /// Adds a peer to the known peer set, with the given kind. fn add_peer_kind(&self, peer: PeerId, kind: PeerKind, addr: SocketAddr); + /// Returns the rpc [PeerInfo] for all connected peers. + async fn get_peers(&self) -> Result, NetworkError>; + /// Removes a peer from the peer set that corresponds to given kind. fn remove_peer(&self, peer: PeerId, kind: PeerKind); @@ -106,3 +110,54 @@ pub enum PeerKind { /// Trusted peer. Trusted, } + +/// Info about an active peer session. 
+#[derive(Debug, Clone)] +pub struct PeerInfo { + /// Announced capabilities of the peer + pub capabilities: Arc, + /// The identifier of the remote peer + pub remote_id: PeerId, + /// The client's name and version + pub client_version: Arc, + /// The peer's address we're connected to + pub remote_addr: SocketAddr, + /// The local address of the connection + pub local_addr: Option, + /// The direction of the session + pub direction: Direction, + /// The negotiated eth version. + pub eth_version: EthVersion, + /// The Status message the peer sent for the `eth` handshake + pub status: Status, +} + +/// The direction of the connection. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum Direction { + /// Incoming connection. + Incoming, + /// Outgoing connection to a specific node. + Outgoing(PeerId), +} + +impl Direction { + /// Returns `true` if this an incoming connection. + pub fn is_incoming(&self) -> bool { + matches!(self, Direction::Incoming) + } + + /// Returns `true` if this an outgoing connection. + pub fn is_outgoing(&self) -> bool { + matches!(self, Direction::Outgoing(_)) + } +} + +impl std::fmt::Display for Direction { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Direction::Incoming => write!(f, "incoming"), + Direction::Outgoing(_) => write!(f, "outgoing"), + } + } +} diff --git a/crates/net/network-api/src/noop.rs b/crates/net/network-api/src/noop.rs index 2b453b4c4e7f..16c9cbe9a7ea 100644 --- a/crates/net/network-api/src/noop.rs +++ b/crates/net/network-api/src/noop.rs @@ -4,7 +4,8 @@ //! generic over it. use crate::{ - NetworkError, NetworkInfo, PeerKind, Peers, PeersInfo, Reputation, ReputationChangeKind, + NetworkError, NetworkInfo, PeerInfo, PeerKind, Peers, PeersInfo, Reputation, + ReputationChangeKind, }; use async_trait::async_trait; use reth_discv4::DEFAULT_DISCOVERY_PORT; @@ -66,6 +67,10 @@ impl PeersInfo for NoopNetwork { impl Peers for NoopNetwork { fn add_peer_kind(&self, _peer: PeerId, _kind: PeerKind, _addr: SocketAddr) {} + async fn get_peers(&self) -> Result, NetworkError> { + Ok(vec![]) + } + fn remove_peer(&self, _peer: PeerId, _kind: PeerKind) {} fn disconnect_peer(&self, _peer: PeerId) {} diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 42fe5b4e5cf0..9199718be288 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -1,6 +1,6 @@ use crate::{ config::NetworkMode, discovery::DiscoveryEvent, manager::NetworkEvent, message::PeerRequest, - peers::PeersHandle, session::PeerInfo, FetchClient, + peers::PeersHandle, FetchClient, }; use async_trait::async_trait; use parking_lot::Mutex; @@ -8,7 +8,8 @@ use reth_eth_wire::{DisconnectReason, NewBlock, NewPooledTransactionHashes, Shar use reth_interfaces::sync::{NetworkSyncUpdater, SyncState, SyncStateProvider}; use reth_net_common::bandwidth_meter::BandwidthMeter; use reth_network_api::{ - NetworkError, NetworkInfo, PeerKind, Peers, PeersInfo, Reputation, ReputationChangeKind, + NetworkError, NetworkInfo, PeerInfo, PeerKind, Peers, PeersInfo, Reputation, + ReputationChangeKind, }; use reth_primitives::{Head, NodeRecord, PeerId, TransactionSigned, H256}; use reth_rpc_types::NetworkStatus; @@ -102,13 +103,6 @@ impl NetworkHandle { rx.await } - /// Returns [`PeerInfo`] for all connected peers - pub async fn get_peers(&self) -> Result, oneshot::error::RecvError> { - let (tx, rx) = oneshot::channel(); - let _ = self.manager().send(NetworkHandleMessage::GetPeerInfo(tx)); - rx.await - } - /// Returns 
[`PeerInfo`] for a given peer. /// /// Returns `None` if there's no active session to the peer. @@ -209,6 +203,12 @@ impl Peers for NetworkHandle { self.send_message(NetworkHandleMessage::AddPeerAddress(peer, kind, addr)); } + async fn get_peers(&self) -> Result, NetworkError> { + let (tx, rx) = oneshot::channel(); + let _ = self.manager().send(NetworkHandleMessage::GetPeerInfo(tx)); + Ok(rx.await?) + } + /// Sends a message to the [`NetworkManager`](crate::NetworkManager) to remove a peer from the /// set corresponding to given kind. fn remove_peer(&self, peer: PeerId, kind: PeerKind) { diff --git a/crates/net/network/src/session/handle.rs b/crates/net/network/src/session/handle.rs index 1b4f893b6eea..2cdc33d321f3 100644 --- a/crates/net/network/src/session/handle.rs +++ b/crates/net/network/src/session/handle.rs @@ -10,6 +10,7 @@ use reth_eth_wire::{ DisconnectReason, EthStream, EthVersion, P2PStream, Status, }; use reth_net_common::bandwidth_meter::MeteredStream; +use reth_network_api::PeerInfo; use reth_primitives::PeerId; use std::{io, net::SocketAddr, sync::Arc, time::Instant}; use tokio::{ @@ -53,7 +54,6 @@ impl PendingSessionHandle { /// Within an active session that supports the `Ethereum Wire Protocol `, three high-level tasks can /// be performed: chain synchronization, block propagation and transaction exchange. #[derive(Debug)] -#[allow(unused)] pub struct ActiveSessionHandle { /// The direction of the session pub(crate) direction: Direction, @@ -73,6 +73,10 @@ pub struct ActiveSessionHandle { pub(crate) client_version: Arc, /// The address we're connected to pub(crate) remote_addr: SocketAddr, + /// The local address of the connection. + pub(crate) local_addr: Option, + /// The Status message the peer sent for the `eth` handshake + pub(crate) status: Status, } // === impl ActiveSessionHandle === @@ -132,21 +136,20 @@ impl ActiveSessionHandle { pub fn remote_addr(&self) -> SocketAddr { self.remote_addr } -} -/// Info about an active peer session. -#[derive(Debug, Clone)] -pub struct PeerInfo { - /// Announced capabilities of the peer - pub capabilities: Arc, - /// The identifier of the remote peer - pub remote_id: PeerId, - /// The client's name and version - pub client_version: Arc, - /// The address we're connected to - pub remote_addr: SocketAddr, - /// The direction of the session - pub direction: Direction, + /// Extracts the [PeerInfo] from the session handle. + pub(crate) fn peer_info(&self) -> PeerInfo { + PeerInfo { + remote_id: self.remote_id, + direction: self.direction, + remote_addr: self.remote_addr, + local_addr: self.local_addr, + capabilities: self.capabilities.clone(), + client_version: self.client_version.clone(), + eth_version: self.version, + status: self.status, + } + } } /// Events a pending session can produce. 
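A minimal sketch of how the relocated `PeerInfo` and the new `Peers::get_peers` method can be consumed by anything holding a network handle, e.g. to back an `admin_peers`-style listing — illustrative only, using just the items visible in the diffs above:

```rust
use reth_network_api::{NetworkError, PeerInfo, Peers};

/// Prints a one-line summary for every active peer session.
async fn dump_peers<N: Peers>(network: &N) -> Result<(), NetworkError> {
    let peers: Vec<PeerInfo> = network.get_peers().await?;
    for peer in peers {
        // `Direction` implements `Display` ("incoming"/"outgoing"); the client
        // version is the remote's advertised identification string.
        println!(
            "{} peer {:?} at {} running {}",
            peer.direction, peer.remote_id, peer.remote_addr, peer.client_version
        );
    }
    Ok(())
}
```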
@@ -162,6 +165,8 @@ pub enum PendingSessionEvent { session_id: SessionId, /// The remote node's socket address remote_addr: SocketAddr, + /// The local address of the connection + local_addr: Option, /// The remote node's public key peer_id: PeerId, /// All capabilities the peer announced diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index df19d144e433..f97a5cbfcbeb 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -4,7 +4,6 @@ use crate::{ metrics::SessionManagerMetrics, session::{active::ActiveSession, config::SessionCounter}, }; -pub use crate::{message::PeerRequestSender, session::handle::PeerInfo}; use fnv::FnvHashMap; use futures::{future::Either, io, FutureExt, StreamExt}; use reth_ecies::{stream::ECIESStream, ECIESError}; @@ -40,11 +39,13 @@ use tracing::{instrument, trace}; mod active; mod config; mod handle; +pub use crate::message::PeerRequestSender; pub use config::{SessionLimits, SessionsConfig}; pub use handle::{ ActiveSessionHandle, ActiveSessionMessage, PendingSessionEvent, PendingSessionHandle, SessionCommand, }; +pub use reth_network_api::{Direction, PeerInfo}; /// Internal identifier for active sessions. #[derive(Debug, Clone, Copy, PartialOrd, PartialEq, Eq, Hash)] @@ -388,6 +389,7 @@ impl SessionManager { PendingSessionEvent::Established { session_id, remote_addr, + local_addr, peer_id, capabilities, conn, @@ -460,6 +462,7 @@ impl SessionManager { let client_version = Arc::new(client_id); let handle = ActiveSessionHandle { + status, direction, session_id, remote_id: peer_id, @@ -469,6 +472,7 @@ impl SessionManager { commands_to_session, client_version: Arc::clone(&client_version), remote_addr, + local_addr, }; self.active_sessions.insert(peer_id, handle); @@ -563,29 +567,14 @@ impl SessionManager { /// Returns [`PeerInfo`] for all connected peers pub fn get_peer_info(&self) -> Vec { - self.active_sessions - .values() - .map(|session| PeerInfo { - remote_id: session.remote_id, - direction: session.direction, - remote_addr: session.remote_addr, - capabilities: session.capabilities.clone(), - client_version: session.client_version.clone(), - }) - .collect() + self.active_sessions.values().map(ActiveSessionHandle::peer_info).collect() } /// Returns [`PeerInfo`] for a given peer. /// /// Returns `None` if there's no active session to the peer. pub fn get_peer_info_by_id(&self, peer_id: PeerId) -> Option { - self.active_sessions.get(&peer_id).map(|session| PeerInfo { - remote_id: session.remote_id, - direction: session.direction, - remote_addr: session.remote_addr, - capabilities: session.capabilities.clone(), - client_version: session.client_version.clone(), - }) + self.active_sessions.get(&peer_id).map(ActiveSessionHandle::peer_info) } } @@ -713,36 +702,6 @@ impl PendingSessionHandshakeError { } } -/// The direction of the connection. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum Direction { - /// Incoming connection. - Incoming, - /// Outgoing connection to a specific node. - Outgoing(PeerId), -} - -impl Direction { - /// Returns `true` if this an incoming connection. - pub fn is_incoming(&self) -> bool { - matches!(self, Direction::Incoming) - } - - /// Returns `true` if this an outgoing connection. 
- pub(crate) fn is_outgoing(&self) -> bool { - matches!(self, Direction::Outgoing(_)) - } -} - -impl std::fmt::Display for Direction { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Direction::Incoming => write!(f, "incoming"), - Direction::Outgoing(_) => write!(f, "outgoing"), - } - } -} - /// The error thrown when the max configured limit has been reached and no more connections are /// accepted. #[derive(Debug, Clone, thiserror::Error)] @@ -837,6 +796,7 @@ async fn authenticate( status: Status, fork_filter: ForkFilter, ) { + let local_addr = stream.inner().local_addr().ok(); let stream = match get_eciess_stream(stream, secret_key, direction).await { Ok(stream) => stream, Err(error) => { @@ -858,6 +818,7 @@ async fn authenticate( unauthed, session_id, remote_addr, + local_addr, direction, hello, status, @@ -905,6 +866,7 @@ async fn authenticate_stream( stream: UnauthedP2PStream>>, session_id: SessionId, remote_addr: SocketAddr, + local_addr: Option, direction: Direction, hello: HelloMessage, status: Status, @@ -942,6 +904,7 @@ async fn authenticate_stream( PendingSessionEvent::Established { session_id, remote_addr, + local_addr, peer_id: their_hello.id, capabilities: Arc::new(Capabilities::from(their_hello.capabilities)), status: their_status, diff --git a/crates/rpc/rpc-api/src/admin.rs b/crates/rpc/rpc-api/src/admin.rs index 1252111218b7..b7656953b0ed 100644 --- a/crates/rpc/rpc-api/src/admin.rs +++ b/crates/rpc/rpc-api/src/admin.rs @@ -1,6 +1,6 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use reth_primitives::NodeRecord; -use reth_rpc_types::NodeInfo; +use reth_rpc_types::{NodeInfo, PeerInfo}; /// Admin namespace rpc interface that gives access to several non-standard RPC methods. #[cfg_attr(not(feature = "client"), rpc(server, namespace = "admin"))] @@ -28,11 +28,18 @@ pub trait AdminApi { #[method(name = "removeTrustedPeer")] fn remove_trusted_peer(&self, record: NodeRecord) -> RpcResult; + /// The peers administrative property can be queried for all the information known about the + /// connected remote nodes at the networking granularity. These include general information + /// about the nodes themselves as participants of the devp2p P2P overlay protocol, as well as + /// specialized information added by each of the running application protocols + #[method(name = "peers")] + async fn peers(&self) -> RpcResult>; + /// Creates an RPC subscription which serves events received from the network. 
#[subscription( - name = "peerEvents", - unsubscribe = "peerEvents_unsubscribe", - item = String + name = "peerEvents", + unsubscribe = "peerEvents_unsubscribe", + item = String )] async fn subscribe_peer_events(&self) -> jsonrpsee::core::SubscriptionResult; diff --git a/crates/rpc/rpc-types/src/eth/syncing.rs b/crates/rpc/rpc-types/src/eth/syncing.rs index e2eddaa74925..c6f2c6e7a936 100644 --- a/crates/rpc/rpc-types/src/eth/syncing.rs +++ b/crates/rpc/rpc-types/src/eth/syncing.rs @@ -42,7 +42,7 @@ pub enum PeerCount { } /// Peer connection information -#[derive(Debug, Clone, Default, Serialize)] +#[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct PeerInfo { /// Public node id pub id: Option, @@ -57,7 +57,7 @@ pub struct PeerInfo { } /// Peer network information -#[derive(Debug, Clone, Default, Serialize)] +#[derive(Debug, Clone, Default, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct PeerNetworkInfo { /// Remote endpoint address @@ -67,11 +67,12 @@ pub struct PeerNetworkInfo { } /// Peer protocols information -#[derive(Debug, Clone, Default, Serialize)] +#[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct PeerProtocolsInfo { /// Ethereum protocol information pub eth: Option, /// PIP protocol information. + #[serde(default, skip_serializing_if = "Option::is_none")] pub pip: Option, } @@ -87,7 +88,7 @@ pub struct PeerEthProtocolInfo { } /// Peer PIP protocol information -#[derive(Debug, Clone, Default, Serialize)] +#[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct PipProtocolInfo { /// Negotiated PIP protocol version pub version: u32, diff --git a/crates/rpc/rpc/src/admin.rs b/crates/rpc/rpc/src/admin.rs index 596fd6030a6d..9a680a6b4d62 100644 --- a/crates/rpc/rpc/src/admin.rs +++ b/crates/rpc/rpc/src/admin.rs @@ -4,7 +4,7 @@ use jsonrpsee::core::RpcResult; use reth_network_api::{NetworkInfo, PeerKind, Peers}; use reth_primitives::NodeRecord; use reth_rpc_api::AdminApiServer; -use reth_rpc_types::NodeInfo; +use reth_rpc_types::{NodeInfo, PeerEthProtocolInfo, PeerInfo, PeerNetworkInfo, PeerProtocolsInfo}; /// `admin` API implementation. 
/// @@ -50,12 +50,33 @@ where Ok(true) } - /// Handler for `admin_peerEvents` - async fn subscribe_peer_events( - &self, - _pending: jsonrpsee::PendingSubscriptionSink, - ) -> jsonrpsee::core::SubscriptionResult { - Err("admin_peerEvents is not implemented yet".into()) + async fn peers(&self) -> RpcResult> { + let peers = self.network.get_peers().await.to_rpc_result()?; + let peers = peers + .into_iter() + .map(|peer| PeerInfo { + id: Some(format!("{:?}", peer.remote_id)), + name: peer.client_version.to_string(), + caps: peer.capabilities.capabilities().iter().map(|cap| cap.to_string()).collect(), + network: PeerNetworkInfo { + remote_address: peer.remote_addr.to_string(), + local_address: peer + .local_addr + .unwrap_or_else(|| self.network.local_addr()) + .to_string(), + }, + protocols: PeerProtocolsInfo { + eth: Some(PeerEthProtocolInfo { + difficulty: Some(peer.status.total_difficulty), + head: format!("{:?}", peer.status.blockhash), + version: peer.status.version as u32, + }), + pip: None, + }, + }) + .collect(); + + Ok(peers) } /// Handler for `admin_nodeInfo` @@ -65,6 +86,14 @@ where Ok(NodeInfo::new(enr, status)) } + + /// Handler for `admin_peerEvents` + async fn subscribe_peer_events( + &self, + _pending: jsonrpsee::PendingSubscriptionSink, + ) -> jsonrpsee::core::SubscriptionResult { + Err("admin_peerEvents is not implemented yet".into()) + } } impl std::fmt::Debug for AdminApi { From 97cf35673e807b82b05d0c90282d00c7efd184a0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 31 Aug 2023 13:47:07 -0700 Subject: [PATCH 580/722] feat: add blob fee calc functions (#4440) --- crates/primitives/src/constants/eip4844.rs | 60 ++++++++++++++++++++-- crates/primitives/src/header.rs | 17 ++++++ 2 files changed, 74 insertions(+), 3 deletions(-) diff --git a/crates/primitives/src/constants/eip4844.rs b/crates/primitives/src/constants/eip4844.rs index 2cde1da77520..cb971c9f18de 100644 --- a/crates/primitives/src/constants/eip4844.rs +++ b/crates/primitives/src/constants/eip4844.rs @@ -1,6 +1,6 @@ -//! [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#parameters) protocol constants for shard Blob Transactions. +//! [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#parameters) protocol constants and utils for shard Blob Transactions. -use crate::kzg::KzgSettings; +use crate::{kzg::KzgSettings, U256}; use once_cell::sync::Lazy; use std::{io::Write, sync::Arc}; @@ -25,9 +25,12 @@ pub const MAX_BLOBS_PER_BLOCK: u64 = MAX_DATA_GAS_PER_BLOCK / DATA_GAS_PER_BLOB; /// Target number of data blobs in a single block. pub const TARGET_BLOBS_PER_BLOCK: u64 = TARGET_DATA_GAS_PER_BLOCK / DATA_GAS_PER_BLOB; // 393216 / 131072 = 3 -/// Used to determine the price for next data blob +/// Determines the maximum rate of change for blob fee pub const BLOB_GASPRICE_UPDATE_FRACTION: u64 = 3_338_477u64; // 3338477 +/// Minimum gas price for a data blob +pub const BLOB_TX_MIN_BLOB_GASPRICE: u64 = 1u64; + /// Commitment version of a KZG commitment pub const VERSIONED_HASH_VERSION_KZG: u8 = 0x01; @@ -63,6 +66,32 @@ pub enum LoadKzgSettingsError { KzgError(c_kzg::Error), } +/// Calculates the blob fee for the given excess blob gas. +pub fn blob_fee(excess_blob_gas: u64) -> U256 { + fake_exponential( + U256::from(BLOB_TX_MIN_BLOB_GASPRICE), + U256::from(excess_blob_gas), + U256::from(BLOB_GASPRICE_UPDATE_FRACTION), + ) +} + +/// Approximates factor * e ** (numerator / denominator) using Taylor expansion. +/// +/// This is used to calculate the blob price. 
+/// +/// See also +pub fn fake_exponential(factor: U256, numerator: U256, denominator: U256) -> U256 { + let mut output = U256::ZERO; + let mut numerator_accum = factor.saturating_mul(denominator); + let mut i = U256::from(1u64); + while numerator_accum > U256::ZERO { + output += numerator_accum; + numerator_accum = numerator_accum * numerator / (denominator * i); + i += U256::from(1u64); + } + output / denominator +} + #[cfg(test)] mod tests { use super::*; @@ -71,4 +100,29 @@ mod tests { fn ensure_load_kzg_settings() { let _settings = Arc::clone(&MAINNET_KZG_TRUSTED_SETUP); } + + #[test] + fn test_fake_exp() { + // + for (factor, num, denom, expected) in &[ + (1u64, 0u64, 1u64, 1u64), + (38493, 0, 1000, 38493), + (0, 1234, 2345, 0), + (1, 2, 1, 6), // approximate 7.389 + (1, 4, 2, 6), + (1, 3, 1, 16), // approximate 20.09 + (1, 6, 2, 18), + (1, 4, 1, 49), // approximate 54.60 + (1, 8, 2, 50), + (10, 8, 2, 542), // approximate 540.598 + (11, 8, 2, 596), // approximate 600.58 + (1, 5, 1, 136), // approximate 148.4 + (1, 5, 2, 11), // approximate 12.18 + (2, 5, 2, 23), // approximate 24.36 + (1, 50000000, 2225652, 5709098764), + ] { + let res = fake_exponential(U256::from(*factor), U256::from(*num), U256::from(*denom)); + assert_eq!(res, U256::from(*expected)); + } + } } diff --git a/crates/primitives/src/header.rs b/crates/primitives/src/header.rs index f74707844c77..69a1516312b8 100644 --- a/crates/primitives/src/header.rs +++ b/crates/primitives/src/header.rs @@ -8,6 +8,7 @@ use crate::{ }; use bytes::{Buf, BufMut, BytesMut}; +use crate::constants::eip4844::blob_fee; use reth_codecs::{add_arbitrary_tests, derive_arbitrary, main_codec, Compact}; use reth_rlp::{length_of_length, Decodable, Encodable, EMPTY_LIST_CODE, EMPTY_STRING_CODE}; use serde::{Deserialize, Serialize}; @@ -183,6 +184,22 @@ impl Header { } } + /// Returns the blob fee for _this_ block according to the EIP-4844 spec. + /// + /// Returns `None` if `excess_blob_gas` is None + pub fn blob_fee(&self) -> Option { + self.excess_blob_gas.map(blob_fee) + } + + /// Returns the blob fee for the next block according to the EIP-4844 spec. + /// + /// Returns `None` if `excess_blob_gas` is None. + /// + /// See also [Self::next_block_excess_blob_gas] + pub fn next_block_blob_fee(&self) -> Option { + self.next_block_excess_blob_gas().map(blob_fee) + } + /// Calculate base fee for next block according to the EIP-1559 spec. /// /// Returns a `None` if no base fee is set, no EIP-1559 support From ad245702c9ce293e7294d80a2c2f5acc996ab974 Mon Sep 17 00:00:00 2001 From: Derek <103802618+leeederek@users.noreply.github.com> Date: Thu, 31 Aug 2023 16:48:09 -0400 Subject: [PATCH 581/722] docs: minor fixes/polish to private network docs (#4441) --- book/run/private-testnet.md | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/book/run/private-testnet.md b/book/run/private-testnet.md index 901a4de68a68..e6bb168970ad 100644 --- a/book/run/private-testnet.md +++ b/book/run/private-testnet.md @@ -6,12 +6,13 @@ This guide uses [Kurtosis' eth2-package](https://github.com/kurtosis-tech/eth2-p * Go [here](https://docs.kurtosis.com/install/) to install Kurtosis * Go [here](https://docs.docker.com/get-docker/) to install Docker -The `eth2-package` is a package that is a general purpose testnet definition for instantiating private testnets at any scale over Docker or Kubernetes. This guide will go through how to spin up a local private testnet with Reth various CL clients. 
Specifically, you will instantiate a 2-node network over Docker with Reth/Lighthouse and Reth/Teku client combinations. +The [`eth2-package`](https://github.com/kurtosis-tech/eth2-package) is a [package](https://docs.kurtosis.com/concepts-reference/packages) for a general purpose Ethereum testnet definition used for instantiating private testnets at any scale over Docker or Kubernetes, locally or in the cloud. This guide will go through how to spin up a local private testnet with Reth various CL clients locally. Specifically, you will instantiate a 2-node network over Docker with Reth/Lighthouse and Reth/Teku client combinations. To see all possible configurations and flags you can use, including metrics and observability tools (e.g. Grafana, Prometheus, etc), go [here](https://github.com/kurtosis-tech/eth2-package#configuration). Genesis data will be generated using this [genesis-generator](https://github.com/ethpandaops/ethereum-genesis-generator) to be used to bootstrap the EL and CL clients for each node. The end result will be a private testnet with nodes deployed as Docker containers in an ephemeral, isolated environment on your machine called an [enclave](https://docs.kurtosis.com/concepts-reference/enclaves/). Read more about how the `eth2-package` works by going [here](https://github.com/kurtosis-tech/eth2-package/). +### Step 1: Define the parameters and shape of your private network First, in your home directory, create a file with the name `network_params.json` with the following contents: ```json { @@ -30,17 +31,17 @@ First, in your home directory, create a file with the name `network_params.json` "cl_client_image": "consensys/teku:latest", "count": 1 } - ] + ], "launch_additional_services": false } ``` +### Step 2: Spin up your network Next, run the following command from your command line: ```bash kurtosis run github.com/kurtosis-tech/eth2-package "$(cat ~/network_params.json)" ``` - -In the end, Kurtosis will print the services running in your enclave that form your private testnet alongside all the container ports and files that were generated & used to start up the private testnet. Here is a sample output: +Kurtosis will spin up an [enclave](https://docs.kurtosis.com/concepts-reference/enclaves) (i.e an ephemeral, isolated environment) and begin to configure and instantiate the nodes in your network. In the end, Kurtosis will print the services running in your enclave that form your private testnet alongside all the container ports and files that were generated & used to start up the private testnet. Here is a sample output: ```console INFO[2023-08-21T18:22:18-04:00] ==================================================== INFO[2023-08-21T18:22:18-04:00] || Created enclave: silky-swamp || @@ -89,5 +90,17 @@ b454497fbec8 el-1-reth-lighthouse engine-rpc: 8551/tcp - 46829c4bd8b0 prelaunch-data-generator-el-genesis-data RUNNING ``` -## Using Kubernetes on remote infrastructure -Kurtosis packages are portable and reproducible, meaning they will work the same way over Docker locally as in the cloud on Kubernetes. Check out these docs [here](https://docs.kurtosis.com/k8s/) to learn how to deploy your private testnet to a Kubernetes cluster. +Great! You now have a private network with 2 full Ethereum nodes on your local machine over Docker - one that is a Reth/Lighthouse pair and another that is Reth/Teku. Check out the [Kurtosis docs](https://docs.kurtosis.com/cli) to learn about the various ways you can interact with and inspect your network. 
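Once the enclave is up, a node's JSON-RPC endpoint can be smoke-tested on its forwarded port. Below is a minimal Rust sketch — illustrative, not part of the guide's tooling; it assumes the `jsonrpsee` and `tokio` crates are available, and `64248` is a placeholder for the forwarded `rpc` port printed in your own Kurtosis output:

```rust
use jsonrpsee::{core::client::ClientT, http_client::HttpClientBuilder, rpc_params};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Replace the port with the forwarded `rpc` port from the Kurtosis output above.
    let client = HttpClientBuilder::default().build("http://127.0.0.1:64248")?;
    // Any standard method works; `eth_blockNumber` confirms the EL node serves RPC.
    let block_number: String = client.request("eth_blockNumber", rpc_params![]).await?;
    println!("node is up, current block: {block_number}");
    Ok(())
}
```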
+
+## Using Kurtosis on Kubernetes
+Kurtosis packages are portable and reproducible, meaning they will work the same way over Docker or Kubernetes, locally or on remote infrastructure. For use cases that require a larger scale, Kurtosis can be deployed on Kubernetes by following these docs [here](https://docs.kurtosis.com/k8s/).
+
+## Running the network with additional services
+The [`eth2-package`](https://github.com/kurtosis-tech/eth2-package) comes with many optional flags and arguments you can enable for your private network. Some include:
+- A Grafana + Prometheus instance
+- A transaction spammer called [`tx-fuzz`](https://github.com/MariusVanDerWijden/tx-fuzz)
+- [A network metrics collector](https://github.com/dapplion/beacon-metrics-gazer)
+- Flashbots' `mev-boost` implementation of PBS (to test/simulate MEV workflows)
+
+### Questions?
+Please reach out to the [Kurtosis discord](https://discord.com/invite/6Jjp9c89z9) should you have any questions about how to use the `eth2-package` for your private testnet needs. Thanks!

From 426865aca9f4a8729e5143403c231af96c202ca0 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Thu, 31 Aug 2023 15:45:05 -0700
Subject: [PATCH 582/722] chore: apply same order

---
 bin/reth/src/args/rpc_server_args.rs | 56 ++++++++++++++--------------
 1 file changed, 28 insertions(+), 28 deletions(-)

diff --git a/bin/reth/src/args/rpc_server_args.rs b/bin/reth/src/args/rpc_server_args.rs
index e17d8a1a5e90..78260e10f97d 100644
--- a/bin/reth/src/args/rpc_server_args.rs
+++ b/bin/reth/src/args/rpc_server_args.rs
@@ -315,6 +315,18 @@ impl RpcServerArgs {
 }
 
 impl RethRpcConfig for RpcServerArgs {
+    fn is_ipc_enabled(&self) -> bool {
+        // By default IPC is enabled, therefore it is enabled if the `ipcdisable` is false.
+        !self.ipcdisable
+    }
+
+    fn eth_config(&self) -> EthConfig {
+        EthConfig::default()
+            .max_tracing_requests(self.rpc_max_tracing_requests)
+            .rpc_gas_cap(self.rpc_gas_cap)
+            .gpo_config(self.gas_price_oracle_config())
+    }
+
     fn rpc_max_request_size_bytes(&self) -> u32 {
         self.rpc_max_request_size * 1024 * 1024
     }
@@ -332,24 +344,6 @@ impl RethRpcConfig for RpcServerArgs {
         )
     }
 
-    fn jwt_secret(&self, default_jwt_path: PathBuf) -> Result {
-        match self.auth_jwtsecret.as_ref() {
-            Some(fpath) => {
-                debug!(target: "reth::cli", user_path=?fpath, "Reading JWT auth secret file");
-                JwtSecret::from_file(fpath)
-            }
-            None => {
-                if default_jwt_path.exists() {
-                    debug!(target: "reth::cli", ?default_jwt_path, "Reading JWT auth secret file");
-                    JwtSecret::from_file(&default_jwt_path)
-                } else {
-                    info!(target: "reth::cli", ?default_jwt_path, "Creating JWT auth secret file");
-                    JwtSecret::try_create(&default_jwt_path)
-                }
-            }
-        }
-    }
-
     fn transport_rpc_module_config(&self) -> TransportRpcModuleConfig {
         let mut config = TransportRpcModuleConfig::default()
            .with_config(RpcModuleConfig::new(self.eth_config()));
@@ -424,16 +418,22 @@ impl RethRpcConfig for RpcServerArgs {
         Ok(AuthServerConfig::builder(jwt_secret).socket_addr(address).build())
     }
 
-    fn is_ipc_enabled(&self) -> bool {
-        // By default IPC is enabled, therefore it is enabled if the `ipcdisable` is false.
- !self.ipcdisable - } - - fn eth_config(&self) -> EthConfig { - EthConfig::default() - .max_tracing_requests(self.rpc_max_tracing_requests) - .rpc_gas_cap(self.rpc_gas_cap) - .gpo_config(self.gas_price_oracle_config()) + fn jwt_secret(&self, default_jwt_path: PathBuf) -> Result { + match self.auth_jwtsecret.as_ref() { + Some(fpath) => { + debug!(target: "reth::cli", user_path=?fpath, "Reading JWT auth secret file"); + JwtSecret::from_file(fpath) + } + None => { + if default_jwt_path.exists() { + debug!(target: "reth::cli", ?default_jwt_path, "Reading JWT auth secret file"); + JwtSecret::from_file(&default_jwt_path) + } else { + info!(target: "reth::cli", ?default_jwt_path, "Creating JWT auth secret file"); + JwtSecret::try_create(&default_jwt_path) + } + } + } } } From 2a771386b9bf9a2e8a5e9727488a5eaee53fa178 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 31 Aug 2023 15:54:34 -0700 Subject: [PATCH 583/722] feat: support pending blob fee (#4443) --- crates/transaction-pool/src/maintain.rs | 12 +++++-- crates/transaction-pool/src/noop.rs | 1 + crates/transaction-pool/src/pool/txpool.rs | 38 +++++++++++++++++++--- crates/transaction-pool/src/traits.rs | 16 +++++++-- 4 files changed, 57 insertions(+), 10 deletions(-) diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index fd3c83a4854c..257c389e69e5 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -92,6 +92,7 @@ pub async fn maintain_transaction_pool( pending_basefee: latest .next_block_base_fee(chain_spec.base_fee_params) .unwrap_or_default(), + pending_blob_fee: latest.next_block_blob_fee().map(|fee| fee.saturating_to()), }; pool.set_block_info(info); } @@ -234,9 +235,11 @@ pub async fn maintain_transaction_pool( let chain_spec = client.chain_spec(); - // base fee for the next block: `new_tip+1` + // fees for the next block: `new_tip+1` let pending_block_base_fee = new_tip.next_block_base_fee(chain_spec.base_fee_params).unwrap_or_default(); + let pending_block_blob_fee = + new_tip.next_block_blob_fee().map(|fee| fee.saturating_to()); // we know all changed account in the new chain let new_changed_accounts: HashSet<_> = @@ -292,6 +295,7 @@ pub async fn maintain_transaction_pool( let update = CanonicalStateUpdate { new_tip: &new_tip.block, pending_block_base_fee, + pending_block_blob_fee, changed_accounts, // all transactions mined in the new chain need to be removed from the pool mined_transactions: new_mined_transactions.into_iter().collect(), @@ -314,9 +318,11 @@ pub async fn maintain_transaction_pool( let tip = blocks.tip(); let chain_spec = client.chain_spec(); - // base fee for the next block: `tip+1` + // fees for the next block: `tip+1` let pending_block_base_fee = tip.next_block_base_fee(chain_spec.base_fee_params).unwrap_or_default(); + let pending_block_blob_fee = + tip.next_block_blob_fee().map(|fee| fee.saturating_to()); let first_block = blocks.first(); trace!( @@ -337,6 +343,7 @@ pub async fn maintain_transaction_pool( last_seen_block_hash: tip.hash, last_seen_block_number: tip.number, pending_basefee: pending_block_base_fee, + pending_blob_fee: pending_block_blob_fee, }; pool.set_block_info(info); @@ -367,6 +374,7 @@ pub async fn maintain_transaction_pool( let update = CanonicalStateUpdate { new_tip: &tip.block, pending_block_base_fee, + pending_block_blob_fee, changed_accounts, mined_transactions, }; diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index abbbddf8572b..d2c8bcd7137f 
100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -38,6 +38,7 @@ impl TransactionPool for NoopTransactionPool { last_seen_block_hash: Default::default(), last_seen_block_number: 0, pending_basefee: 0, + pending_blob_fee: None, } } diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index bd6665ea630a..98e07d39cab1 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -18,7 +18,9 @@ use crate::{ }; use fnv::FnvHashMap; use reth_primitives::{ - constants::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}, + constants::{ + eip4844::BLOB_TX_MIN_BLOB_GASPRICE, ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE, + }, Address, TxHash, H256, }; use std::{ @@ -135,9 +137,15 @@ impl TxPool { last_seen_block_hash: self.all_transactions.last_seen_block_hash, last_seen_block_number: self.all_transactions.last_seen_block_number, pending_basefee: self.all_transactions.pending_basefee, + pending_blob_fee: Some(self.all_transactions.pending_blob_fee), } } + /// Updates the tracked blob fee + fn update_blob_fee(&mut self, _pending_blob_fee: u64) { + // TODO(mattsse): update blob txs + } + /// Updates the tracked basefee /// /// Depending on the change in direction of the basefee, this will promote or demote @@ -182,11 +190,21 @@ impl TxPool { /// /// This will also apply updates to the pool based on the new base fee pub(crate) fn set_block_info(&mut self, info: BlockInfo) { - let BlockInfo { last_seen_block_hash, last_seen_block_number, pending_basefee } = info; + let BlockInfo { + last_seen_block_hash, + last_seen_block_number, + pending_basefee, + pending_blob_fee, + } = info; self.all_transactions.last_seen_block_hash = last_seen_block_hash; self.all_transactions.last_seen_block_number = last_seen_block_number; self.all_transactions.pending_basefee = pending_basefee; - self.update_basefee(pending_basefee) + self.update_basefee(pending_basefee); + + if let Some(blob_fee) = pending_blob_fee { + self.all_transactions.pending_blob_fee = blob_fee; + self.update_blob_fee(pending_basefee) + } } /// Returns an iterator that yields transactions that are ready to be included in the block. @@ -683,6 +701,8 @@ pub(crate) struct AllTransactions { last_seen_block_hash: H256, /// Expected base fee for the pending block. pending_basefee: u64, + /// Expected blob fee for the pending block. + pending_blob_fee: u64, /// Configured price bump settings for replacements price_bumps: PriceBumpConfig, } @@ -741,11 +761,18 @@ impl AllTransactions { /// Updates the block specific info fn set_block_info(&mut self, block_info: BlockInfo) { - let BlockInfo { last_seen_block_hash, last_seen_block_number, pending_basefee } = - block_info; + let BlockInfo { + last_seen_block_hash, + last_seen_block_number, + pending_basefee, + pending_blob_fee, + } = block_info; self.last_seen_block_number = last_seen_block_number; self.last_seen_block_hash = last_seen_block_hash; self.pending_basefee = pending_basefee; + if let Some(pending_blob_fee) = pending_blob_fee { + self.pending_blob_fee = pending_blob_fee; + } } /// Rechecks all transactions in the pool against the changes. 
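A minimal sketch of how a caller derives the pool-facing `BlockInfo` from the latest header, mirroring the `maintain.rs` change above — illustrative only; it assumes `Header` and `BaseFeeParams` from `reth_primitives`, that `BlockInfo` is re-exported by this crate, and it uses `next_block_blob_fee` from the earlier blob-fee patch (which returns `None` for pre-Cancun headers):

```rust
use reth_primitives::{BaseFeeParams, Header};
use reth_transaction_pool::BlockInfo;

/// Builds the pool's view of the fees for the next (pending) block.
fn next_block_info(latest: &Header, base_fee_params: BaseFeeParams) -> BlockInfo {
    BlockInfo {
        last_seen_block_hash: latest.hash_slow(),
        last_seen_block_number: latest.number,
        // EIP-1559 base fee of the block built on top of `latest`
        pending_basefee: latest.next_block_base_fee(base_fee_params).unwrap_or_default(),
        // EIP-4844 blob fee; `None` until headers carry `excess_blob_gas`
        pending_blob_fee: latest.next_block_blob_fee().map(|fee| fee.saturating_to()),
    }
}
```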
@@ -1296,6 +1323,7 @@ impl Default for AllTransactions { last_seen_block_number: 0, last_seen_block_hash: Default::default(), pending_basefee: Default::default(), + pending_blob_fee: BLOB_TX_MIN_BLOB_GASPRICE, price_bumps: Default::default(), } } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 93f17aa45bfa..6ef34cb6f78d 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -484,6 +484,10 @@ pub struct CanonicalStateUpdate<'a> { /// /// The base fee of a block depends on the utilization of the last block and its base fee. pub pending_block_base_fee: u64, + /// EIP-4844 blob fee of the _next_ (pending) block + /// + /// Only after Cancun + pub pending_block_blob_fee: Option, /// A set of changed accounts across a range of blocks. pub changed_accounts: Vec, /// All mined transactions in the block range. @@ -512,14 +516,15 @@ impl<'a> CanonicalStateUpdate<'a> { last_seen_block_hash: self.hash(), last_seen_block_number: self.number(), pending_basefee: self.pending_block_base_fee, + pending_blob_fee: self.pending_block_blob_fee, } } } impl<'a> fmt::Display for CanonicalStateUpdate<'a> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{{ hash: {}, number: {}, pending_block_base_fee: {}, changed_accounts: {}, mined_transactions: {} }}", - self.hash(), self.number(), self.pending_block_base_fee, self.changed_accounts.len(), self.mined_transactions.len()) + write!(f, "{{ hash: {}, number: {}, pending_block_base_fee: {}, pending_block_blob_fee: {:?}, changed_accounts: {}, mined_transactions: {} }}", + self.hash(), self.number(), self.pending_block_base_fee, self.pending_block_blob_fee, self.changed_accounts.len(), self.mined_transactions.len()) } } @@ -921,9 +926,14 @@ pub struct BlockInfo { pub last_seen_block_number: u64, /// Currently enforced base fee: the threshold for the basefee sub-pool. /// - /// Note: this is the derived base fee of the _next_ block that builds on the clock the pool is + /// Note: this is the derived base fee of the _next_ block that builds on the block the pool is /// currently tracking. pub pending_basefee: u64, + /// Currently enforced blob fee: the threshold for eip-4844 blob transactions. + /// + /// Note: this is the derived blob fee of the _next_ block that builds on the block the pool is + /// currently tracking + pub pending_blob_fee: Option, } /// The limit to enforce for [TransactionPool::get_pooled_transaction_elements]. 
From 89bd0246681a52cd2bbec3d3e1d76269ad535463 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 31 Aug 2023 16:59:22 -0700 Subject: [PATCH 584/722] docs: add missing field docs (#4446) --- crates/rpc/rpc-types/src/eth/engine/payload.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crates/rpc/rpc-types/src/eth/engine/payload.rs b/crates/rpc/rpc-types/src/eth/engine/payload.rs index 3026260d339d..3cefc20bf465 100644 --- a/crates/rpc/rpc-types/src/eth/engine/payload.rs +++ b/crates/rpc/rpc-types/src/eth/engine/payload.rs @@ -604,8 +604,11 @@ impl From for ExecutionPayloadBodyV1 { #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct PayloadAttributes { + /// Value for the `timestamp` field of the new payload pub timestamp: U64, + /// Value for the `prevRandao` field of the new payload pub prev_randao: H256, + /// Suggested value for the `feeRecipient` field of the new payload pub suggested_fee_recipient: Address, /// Array of [`Withdrawal`] enabled with V2 /// See From eb4126b78daf6358a9b7ea28212d4f217093a7dc Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 31 Aug 2023 17:54:46 -0700 Subject: [PATCH 585/722] feat: add blob count checks (#4447) Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> --- crates/primitives/src/constants/eip4844.rs | 2 +- crates/rpc/rpc/src/eth/error.rs | 21 ++++++++++++++-- crates/transaction-pool/src/error.rs | 25 ++++++++++++++++--- crates/transaction-pool/src/traits.rs | 5 ++++ crates/transaction-pool/src/validate/eth.rs | 27 +++++++++++++++++++-- 5 files changed, 72 insertions(+), 8 deletions(-) diff --git a/crates/primitives/src/constants/eip4844.rs b/crates/primitives/src/constants/eip4844.rs index cb971c9f18de..751ddb7390c6 100644 --- a/crates/primitives/src/constants/eip4844.rs +++ b/crates/primitives/src/constants/eip4844.rs @@ -20,7 +20,7 @@ pub const MAX_DATA_GAS_PER_BLOCK: u64 = 786_432u64; // 0xC0000 pub const TARGET_DATA_GAS_PER_BLOCK: u64 = 393_216u64; // 0x60000 /// Maximum number of data blobs in a single block. -pub const MAX_BLOBS_PER_BLOCK: u64 = MAX_DATA_GAS_PER_BLOCK / DATA_GAS_PER_BLOB; // 786432 / 131072 = 6 +pub const MAX_BLOBS_PER_BLOCK: usize = (MAX_DATA_GAS_PER_BLOCK / DATA_GAS_PER_BLOB) as usize; // 786432 / 131072 = 6 /// Target number of data blobs in a single block. pub const TARGET_BLOBS_PER_BLOCK: u64 = TARGET_DATA_GAS_PER_BLOCK / DATA_GAS_PER_BLOB; // 393216 / 131072 = 3 diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index 36151b5688f7..eb5897b27d21 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -473,8 +473,19 @@ pub enum RpcPoolError { #[error("{0:?}")] PoolTransactionError(Box), /// Unable to find the blob for an EIP4844 transaction - #[error("blob not found for EIP4844 transaction")] + #[error("blob sidecar not found for EIP4844 transaction")] MissingEip4844Blob, + /// Thrown if an EIP-4844 without any blobs arrives + #[error("blobless blob transaction")] + NoEip4844Blobs, + /// Thrown if an EIP-4844 without any blobs arrives + #[error("too many blobs in transaction: have {have}, permitted {permitted}")] + TooManyEip4844Blobs { + /// Number of blobs the transaction has + have: usize, + /// Number of maximum blobs the transaction can have + permitted: usize, + }, /// Thrown if validating the blob sidecar for the transaction failed. 
     #[error(transparent)]
     InvalidEip4844Blob(BlobTransactionValidationError),
@@ -516,7 +527,13 @@ impl From for RpcPoolError {
         InvalidPoolTransactionError::OversizedData(_, _) => RpcPoolError::OversizedData,
         InvalidPoolTransactionError::Underpriced => RpcPoolError::Underpriced,
         InvalidPoolTransactionError::Other(err) => RpcPoolError::PoolTransactionError(err),
-        InvalidPoolTransactionError::MissingEip4844Blob => RpcPoolError::MissingEip4844Blob,
+        InvalidPoolTransactionError::MissingEip4844BlobSidecar => {
+            RpcPoolError::MissingEip4844Blob
+        }
+        InvalidPoolTransactionError::NoEip4844Blobs => RpcPoolError::NoEip4844Blobs,
+        InvalidPoolTransactionError::TooManyEip4844Blobs { have, permitted } => {
+            RpcPoolError::TooManyEip4844Blobs { have, permitted }
+        }
         InvalidPoolTransactionError::InvalidEip4844Blob(err) => {
             RpcPoolError::InvalidEip4844Blob(err)
         }

diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs
index c1e844e09017..d9685b3a4662 100644
--- a/crates/transaction-pool/src/error.rs
+++ b/crates/transaction-pool/src/error.rs
@@ -139,8 +139,19 @@ pub enum InvalidPoolTransactionError {
     #[error("transaction underpriced")]
     Underpriced,
     /// Thrown if we're unable to find the blob for a transaction that was previously extracted
-    #[error("blob not found for EIP4844 transaction")]
-    MissingEip4844Blob,
+    #[error("blob sidecar not found for EIP4844 transaction")]
+    MissingEip4844BlobSidecar,
+    /// Thrown if an EIP-4844 transaction without any blobs arrives
+    #[error("blobless blob transaction")]
+    NoEip4844Blobs,
+    /// Thrown if an EIP-4844 transaction arrives with too many blobs
+    #[error("too many blobs in transaction: have {have}, permitted {permitted}")]
+    TooManyEip4844Blobs {
+        /// Number of blobs the transaction has
+        have: usize,
+        /// Maximum number of blobs the transaction can have
+        permitted: usize,
+    },
     /// Thrown if validating the blob sidecar for the transaction failed.
     #[error(transparent)]
     InvalidEip4844Blob(BlobTransactionValidationError),
@@ -209,7 +220,7 @@ impl InvalidPoolTransactionError {
                 false
             }
             InvalidPoolTransactionError::Other(err) => err.is_bad_transaction(),
-            InvalidPoolTransactionError::MissingEip4844Blob => {
+            InvalidPoolTransactionError::MissingEip4844BlobSidecar => {
                 // this is only reachable when blob transactions are reinjected and we're unable to
                 // find the previously extracted blob
                 false
@@ -223,6 +234,14 @@ impl InvalidPoolTransactionError {
                 // thrown for valid (good) blob transactions
                 false
             }
+            InvalidPoolTransactionError::NoEip4844Blobs => {
+                // this is a malformed transaction and should not be sent over the network
+                true
+            }
+            InvalidPoolTransactionError::TooManyEip4844Blobs { .. } => {
+                // this is a malformed transaction and should not be sent over the network
+                true
+            }
         }
     }
 }

diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs
index 6ef34cb6f78d..862ce14baab9 100644
--- a/crates/transaction-pool/src/traits.rs
+++ b/crates/transaction-pool/src/traits.rs
@@ -666,6 +666,11 @@ pub trait EthPoolTransaction: PoolTransaction {
     /// Extracts the blob sidecar from the transaction.
     fn take_blob(&mut self) -> EthBlobTransactionSidecar;

+    /// Returns the number of blobs this transaction has.
+    fn blob_count(&self) -> usize {
+        self.as_eip4844().map(|tx| tx.blob_versioned_hashes.len()).unwrap_or_default()
+    }
+
     /// Returns the transaction as EIP-4844 transaction if it is one.
fn as_eip4844(&self) -> Option<&TxEip4844>; diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 223149b07329..814613a5076c 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -9,7 +9,10 @@ use crate::{ TransactionValidationTaskExecutor, TransactionValidator, }; use reth_primitives::{ - constants::{eip4844::MAINNET_KZG_TRUSTED_SETUP, ETHEREUM_BLOCK_GAS_LIMIT}, + constants::{ + eip4844::{MAINNET_KZG_TRUSTED_SETUP, MAX_BLOBS_PER_BLOCK}, + ETHEREUM_BLOCK_GAS_LIMIT, + }, kzg::KzgSettings, ChainSpec, InvalidTransactionError, SealedBlock, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, @@ -209,6 +212,26 @@ where ) } + let blob_count = transaction.blob_count(); + if blob_count == 0 { + // no blobs + return TransactionValidationOutcome::Invalid( + transaction, + InvalidPoolTransactionError::NoEip4844Blobs, + ) + } + + if blob_count > MAX_BLOBS_PER_BLOCK { + // too many blobs + return TransactionValidationOutcome::Invalid( + transaction, + InvalidPoolTransactionError::TooManyEip4844Blobs { + have: blob_count, + permitted: MAX_BLOBS_PER_BLOCK, + }, + ) + } + // extract the blob from the transaction match transaction.take_blob() { EthBlobTransactionSidecar::None => { @@ -224,7 +247,7 @@ where } else { return TransactionValidationOutcome::Invalid( transaction, - InvalidPoolTransactionError::MissingEip4844Blob, + InvalidPoolTransactionError::MissingEip4844BlobSidecar, ) } } From 39a6fa1b5723466d98de3df66a5eae76e3846a28 Mon Sep 17 00:00:00 2001 From: JosepBove Date: Fri, 1 Sep 2023 08:06:24 +0200 Subject: [PATCH 586/722] Remove duplicate in codeowners (#4451) --- CODEOWNERS | 1 - 1 file changed, 1 deletion(-) diff --git a/CODEOWNERS b/CODEOWNERS index 7216063442e7..12a0bcbf7db1 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -2,7 +2,6 @@ bin/ @onbjerg crates/net/ @mattsse @Rjected crates/net/downloaders/ @onbjerg @rkrasiuk -crates/blockchain-tree/ @rakita crates/revm/src/ @rakita crates/revm/ @mattsse crates/stages/ @onbjerg @rkrasiuk @shekhirin From e66e3e3556956864ea5a8d1da41a1e45c8f11e32 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Fri, 1 Sep 2023 13:51:34 +0100 Subject: [PATCH 587/722] feat(storage, tree): respect `Transaction Lookup` pruning in the blockchain tree (#4410) --- Cargo.lock | 1 + Cargo.toml | 1 + bin/reth/src/debug_cmd/in_memory_merkle.rs | 2 +- bin/reth/src/debug_cmd/merkle.rs | 2 +- bin/reth/src/node/mod.rs | 7 ++-- crates/blockchain-tree/Cargo.toml | 2 +- crates/blockchain-tree/src/blockchain_tree.rs | 15 +++++-- crates/consensus/beacon/Cargo.toml | 2 +- crates/consensus/beacon/src/engine/mod.rs | 6 ++- .../consensus/beacon/src/engine/test_utils.rs | 2 +- crates/consensus/common/Cargo.toml | 2 +- crates/net/downloaders/Cargo.toml | 2 +- crates/primitives/Cargo.toml | 2 +- crates/primitives/src/prune/mode.rs | 5 +++ crates/prune/Cargo.toml | 2 +- crates/rpc/rpc-engine-api/Cargo.toml | 2 +- crates/rpc/rpc/Cargo.toml | 2 +- crates/stages/Cargo.toml | 2 +- crates/stages/src/stages/execution.rs | 24 +++++------ crates/stages/src/stages/hashing_account.rs | 2 +- crates/stages/src/stages/mod.rs | 6 +-- crates/storage/db/Cargo.toml | 2 +- crates/storage/provider/Cargo.toml | 1 + .../provider/src/providers/database/mod.rs | 40 ++++++++++++++++++- .../src/providers/database/provider.rs | 21 +++++++--- crates/storage/provider/src/traits/block.rs | 6 ++- crates/transaction-pool/Cargo.toml | 2 +- examples/rpc-db.rs | 1 + 
testing/ef-tests/src/cases/blockchain_test.rs | 3 +- 29 files changed, 118 insertions(+), 49 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3f43893b50be..5c8386b3052e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5817,6 +5817,7 @@ dependencies = [ name = "reth-provider" version = "0.1.0-alpha.8" dependencies = [ + "assert_matches", "auto_impl", "derive_more", "itertools 0.11.0", diff --git a/Cargo.toml b/Cargo.toml index e80531040a33..529715acc475 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -156,3 +156,4 @@ c-kzg = { git = "https://github.com/ethereum/c-kzg-4844" } ### misc-testing proptest = "1.0" arbitrary = "1.1" +assert_matches = "1.5.0" diff --git a/bin/reth/src/debug_cmd/in_memory_merkle.rs b/bin/reth/src/debug_cmd/in_memory_merkle.rs index 701b2196fa57..837c31c752bd 100644 --- a/bin/reth/src/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/debug_cmd/in_memory_merkle.rs @@ -193,7 +193,7 @@ impl Command { let provider_rw = factory.provider_rw()?; // Insert block, state and hashes - provider_rw.insert_block(block.clone(), None)?; + provider_rw.insert_block(block.clone(), None, None)?; block_state.write_to_db(provider_rw.tx_ref(), block.number)?; let storage_lists = provider_rw.changed_storages_with_range(block.number..=block.number)?; let storages = provider_rw.plainstate_storages(storage_lists)?; diff --git a/bin/reth/src/debug_cmd/merkle.rs b/bin/reth/src/debug_cmd/merkle.rs index 47e37c36996e..add25f6cd505 100644 --- a/bin/reth/src/debug_cmd/merkle.rs +++ b/bin/reth/src/debug_cmd/merkle.rs @@ -185,7 +185,7 @@ impl Command { continue } }; - provider_rw.insert_block(sealed_block.block, Some(sealed_block.senders))?; + provider_rw.insert_block(sealed_block.block, Some(sealed_block.senders), None)?; } // Check if any of hashing or merkle stages aren't on the same block number as diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index f06baaaf25fb..58d7bccb8d21 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -267,6 +267,9 @@ impl NodeCommand { let metrics_listener = MetricsListener::new(metrics_rx); ctx.task_executor.spawn_critical("metrics listener task", metrics_listener); + let prune_config = + self.pruning.prune_config(Arc::clone(&self.chain))?.or(config.prune.clone()); + // configure blockchain tree let tree_externals = TreeExternals::new( db.clone(), @@ -284,6 +287,7 @@ impl NodeCommand { tree_externals, canon_state_notification_sender.clone(), tree_config, + prune_config.clone().map(|config| config.parts), )? 
.with_sync_metrics_tx(metrics_tx.clone()), ); @@ -365,9 +369,6 @@ impl NodeCommand { None }; - let prune_config = - self.pruning.prune_config(Arc::clone(&self.chain))?.or(config.prune.clone()); - // Configure the pipeline let (mut pipeline, client) = if self.dev.dev { info!(target: "reth::cli", "Starting Reth in dev mode"); diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index 70e3e1803b30..81a8b81d2e90 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -40,7 +40,7 @@ reth-interfaces = { workspace = true, features = ["test-utils"] } reth-primitives = { workspace = true , features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } parking_lot.workspace = true -assert_matches = "1.5" +assert_matches.workspace = true tokio = { workspace = true, features = ["macros", "sync"] } [features] diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 26508b4f8fd7..ac17cd2f463b 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -16,7 +16,7 @@ use reth_interfaces::{ Error, }; use reth_primitives::{ - BlockHash, BlockNumHash, BlockNumber, ForkBlock, Hardfork, Receipt, SealedBlock, + BlockHash, BlockNumHash, BlockNumber, ForkBlock, Hardfork, PruneModes, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, U256, }; use reth_provider::{ @@ -93,6 +93,7 @@ pub struct BlockchainTree { metrics: TreeMetrics, /// Metrics for sync stages. sync_metrics_tx: Option, + prune_modes: Option, } /// A container that wraps chains and block indices to allow searching for block hashes across all @@ -110,6 +111,7 @@ impl BlockchainTree externals: TreeExternals, canon_state_notification_sender: CanonStateNotificationSender, config: BlockchainTreeConfig, + prune_modes: Option, ) -> Result { let max_reorg_depth = config.max_reorg_depth(); @@ -145,6 +147,7 @@ impl BlockchainTree canon_state_notification_sender, metrics: Default::default(), sync_metrics_tx: None, + prune_modes, }) } @@ -1048,7 +1051,11 @@ impl BlockchainTree let (blocks, state) = chain.into_inner(); provider - .append_blocks_with_post_state(blocks.into_blocks().collect(), state) + .append_blocks_with_post_state( + blocks.into_blocks().collect(), + state, + self.prune_modes.as_ref(), + ) .map_err(|e| BlockExecutionError::CanonicalCommit { inner: e.to_string() })?; provider.commit()?; @@ -1173,7 +1180,7 @@ mod tests { let factory = ProviderFactory::new(&db, MAINNET.clone()); let provider = factory.provider_rw().unwrap(); - provider.insert_block(genesis, None).unwrap(); + provider.insert_block(genesis, None, None).unwrap(); // insert first 10 blocks for i in 0..10 { @@ -1279,7 +1286,7 @@ mod tests { let config = BlockchainTreeConfig::new(1, 2, 3, 2); let (sender, mut canon_notif) = tokio::sync::broadcast::channel(10); let mut tree = - BlockchainTree::new(externals, sender, config).expect("failed to create tree"); + BlockchainTree::new(externals, sender, config, None).expect("failed to create tree"); // genesis block 10 is already canonical assert!(tree.make_canonical(&H256::zero()).is_ok()); diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 09e9ecc5f013..f44de6c555fe 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -46,4 +46,4 @@ reth-tracing = { path = "../../tracing" } reth-revm = { path = "../../revm" } reth-downloaders = { path = 
"../../net/downloaders" } -assert_matches = "1.5" +assert_matches.workspace = true diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 30ca834d0783..118eb82f3850 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1880,7 +1880,9 @@ mod tests { BeaconForkChoiceUpdateError, }; use assert_matches::assert_matches; - use reth_primitives::{stage::StageCheckpoint, ChainSpec, ChainSpecBuilder, H256, MAINNET}; + use reth_primitives::{ + stage::StageCheckpoint, ChainSpec, ChainSpecBuilder, PruneModes, H256, MAINNET, + }; use reth_provider::{BlockWriter, ProviderFactory}; use reth_rpc_types::engine::{ ExecutionPayloadV1, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, @@ -2056,7 +2058,7 @@ mod tests { let factory = ProviderFactory::new(db, chain); let provider = factory.provider_rw().unwrap(); blocks - .try_for_each(|b| provider.insert_block(b.clone(), None).map(|_| ())) + .try_for_each(|b| provider.insert_block(b.clone(), None, None).map(|_| ())) .expect("failed to insert"); provider.commit().unwrap(); } diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 0fdda91da98b..dc963c3bd5c6 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -463,7 +463,7 @@ where let config = BlockchainTreeConfig::new(1, 2, 3, 2); let (canon_state_notification_sender, _) = tokio::sync::broadcast::channel(3); let tree = ShareableBlockchainTree::new( - BlockchainTree::new(externals, canon_state_notification_sender, config) + BlockchainTree::new(externals, canon_state_notification_sender, config, None) .expect("failed to create tree"), ); let shareable_db = ProviderFactory::new(db.clone(), self.base_config.chain_spec.clone()); diff --git a/crates/consensus/common/Cargo.toml b/crates/consensus/common/Cargo.toml index 8611aa2317fd..480b3df3698e 100644 --- a/crates/consensus/common/Cargo.toml +++ b/crates/consensus/common/Cargo.toml @@ -16,5 +16,5 @@ reth-provider.workspace = true [dev-dependencies] reth-interfaces = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } -assert_matches = "1.5.0" +assert_matches.workspace = true mockall = "0.11.3" diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index ccc803c8d469..3c33ab23d844 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -42,7 +42,7 @@ reth-db = { path = "../../storage/db", features = ["test-utils"] } reth-interfaces = { workspace = true, features = ["test-utils"] } reth-tracing = { path = "../../tracing" } -assert_matches = "1.5.0" +assert_matches.workspace = true tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } reth-rlp.workspace = true itertools.workspace = true diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index da697b8b25fc..d450c419d3ce 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -85,7 +85,7 @@ revm-primitives = { workspace = true, features = ["arbitrary"] } arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-derive = "0.3" -assert_matches = "1.5.0" +assert_matches.workspace = true toml = "0.7.4" # necessary so we don't hit a "undeclared 'std'": diff --git a/crates/primitives/src/prune/mode.rs b/crates/primitives/src/prune/mode.rs index 3e39f876d7ce..a8bd046a7690 100644 --- 
a/crates/primitives/src/prune/mode.rs +++ b/crates/primitives/src/prune/mode.rs @@ -49,6 +49,11 @@ impl PruneMode { PruneMode::Before(n) => *n > block, } } + + /// Returns true if the prune mode is [`PruneMode::Full`]. + pub fn is_full(&self) -> bool { + matches!(self, Self::Full) + } } #[cfg(test)] diff --git a/crates/prune/Cargo.toml b/crates/prune/Cargo.toml index be5346d025a5..4582d27f8d76 100644 --- a/crates/prune/Cargo.toml +++ b/crates/prune/Cargo.toml @@ -34,4 +34,4 @@ reth-stages = { path = "../stages", features = ["test-utils"] } # misc -assert_matches = "1.5.0" +assert_matches.workspace = true diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index 8570e610f35b..d08e7c8faa8d 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -34,4 +34,4 @@ reth-rlp.workspace = true reth-interfaces = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-payload-builder = { workspace = true, features = ["test-utils"] } -assert_matches = "1.5.0" +assert_matches.workspace = true diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 44760ecd91a3..33c2fdde670a 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -70,6 +70,6 @@ futures.workspace = true [dev-dependencies] jsonrpsee = { workspace = true, features = ["client"] } -assert_matches = "1.5.0" +assert_matches.workspace = true tempfile = "3.5.0" reth-interfaces = { workspace = true, features = ["test-utils"] } diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index 01c3b78d7247..a1770fa76c32 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -61,7 +61,7 @@ reth-trie = { path = "../trie", features = ["test-utils"] } itertools.workspace = true tokio = { workspace = true, features = ["rt", "sync", "macros"] } -assert_matches = "1.5.0" +assert_matches.workspace = true rand.workspace = true paste = "1.0" diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 6fe07013b90d..cf688f352c10 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -526,8 +526,8 @@ mod tests { let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = 
hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::decode(&mut block_rlp).unwrap(); - provider.insert_block(genesis, None).unwrap(); - provider.insert_block(block.clone(), None).unwrap(); + provider.insert_block(genesis, None, None).unwrap(); + provider.insert_block(block.clone(), None, None).unwrap(); provider.commit().unwrap(); let previous_stage_checkpoint = ExecutionCheckpoint { @@ -562,8 +562,8 @@ mod tests { let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::decode(&mut block_rlp).unwrap(); - provider.insert_block(genesis, None).unwrap(); - provider.insert_block(block.clone(), None).unwrap(); + provider.insert_block(genesis, None, None).unwrap(); + provider.insert_block(block.clone(), None, None).unwrap(); provider.commit().unwrap(); let previous_stage_checkpoint = ExecutionCheckpoint { @@ -598,8 +598,8 @@ mod tests { let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = 
hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::decode(&mut block_rlp).unwrap(); - provider.insert_block(genesis, None).unwrap(); - provider.insert_block(block.clone(), None).unwrap(); + provider.insert_block(genesis, None, None).unwrap(); + provider.insert_block(block.clone(), None, None).unwrap(); provider.commit().unwrap(); let previous_checkpoint = StageCheckpoint { block_number: 1, stage_checkpoint: None }; @@ -632,8 +632,8 @@ mod tests { let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::decode(&mut block_rlp).unwrap(); - provider.insert_block(genesis, None).unwrap(); - provider.insert_block(block.clone(), None).unwrap(); + provider.insert_block(genesis, None, None).unwrap(); + provider.insert_block(block.clone(), None, None).unwrap(); provider.commit().unwrap(); // insert pre state @@ -742,8 +742,8 @@ mod tests { let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = 
hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::decode(&mut block_rlp).unwrap(); - provider.insert_block(genesis, None).unwrap(); - provider.insert_block(block.clone(), None).unwrap(); + provider.insert_block(genesis, None, None).unwrap(); + provider.insert_block(block.clone(), None, None).unwrap(); provider.commit().unwrap(); // variables @@ -820,8 +820,8 @@ mod tests { let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = hex!("f9025ff901f7a0c86e8cc0310ae7c531c758678ddbfd16fc51c8cef8cec650b032de9869e8b94fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa050554882fbbda2c2fd93fdc466db9946ea262a67f7a76cc169e714f105ab583da00967f09ef1dfed20c0eacfaa94d5cd4002eda3242ac47eae68972d07b106d192a0e3c8b47fbfc94667ef4cceb17e5cc21e3b1eebd442cebb27f07562b33836290db90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001830f42408238108203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f862f860800a83061a8094095e7baea6a6c7c4c2dfeb977efac326af552d8780801ba072ed817487b84ba367d15d2f039b5fc5f087d0a8882fbdf73e8cb49357e1ce30a0403d800545b8fc544f92ce8124e2255f8c3c6af93f28243a120585d4c4c6a2a3c0").as_slice(); let block = SealedBlock::decode(&mut block_rlp).unwrap(); - provider.insert_block(genesis, None).unwrap(); - provider.insert_block(block.clone(), None).unwrap(); + provider.insert_block(genesis, None, None).unwrap(); + provider.insert_block(block.clone(), None, None).unwrap(); provider.commit().unwrap(); // variables diff --git a/crates/stages/src/stages/hashing_account.rs b/crates/stages/src/stages/hashing_account.rs index ccb6fb960fd0..5887fb803415 100644 --- a/crates/stages/src/stages/hashing_account.rs +++ b/crates/stages/src/stages/hashing_account.rs @@ -95,7 +95,7 @@ impl AccountHashingStage { let blocks = 
random_block_range(&mut rng, opts.blocks.clone(), H256::zero(), opts.txs); for block in blocks { - provider.insert_block(block, None).unwrap(); + provider.insert_block(block, None, None).unwrap(); } let mut accounts = random_eoa_account_range(&mut rng, opts.accounts); { diff --git a/crates/stages/src/stages/mod.rs b/crates/stages/src/stages/mod.rs index 87e045aee9eb..fbe9ff00eaec 100644 --- a/crates/stages/src/stages/mod.rs +++ b/crates/stages/src/stages/mod.rs @@ -81,8 +81,8 @@ mod tests { let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::decode(&mut block_rlp).unwrap(); - provider.insert_block(genesis, None).unwrap(); - provider.insert_block(block.clone(), None).unwrap(); + provider.insert_block(genesis, None, None).unwrap(); + provider.insert_block(block.clone(), None, None).unwrap(); // Fill with bogus blocks to respect PruneMode distance. 
let mut head = block.hash; @@ -90,7 +90,7 @@ mod tests { for block_number in 2..=tip { let nblock = random_block(&mut rng, block_number, Some(head), Some(0), Some(0)); head = nblock.hash; - provider.insert_block(nblock, None).unwrap(); + provider.insert_block(nblock, None, None).unwrap(); } provider.commit().unwrap(); diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index f4bcd553aa78..ae4d67ca8f49 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -76,7 +76,7 @@ serde_json.workspace = true paste = "1.0" -assert_matches = "1.5.0" +assert_matches.workspace = true [build-dependencies] vergen = { version = "8.0.0", features = ["git", "gitcl"] } diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 3bd4bc57d3a8..bcff000a09d3 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -40,6 +40,7 @@ reth-rlp.workspace = true reth-trie = { path = "../../trie", features = ["test-utils"] } parking_lot.workspace = true tempfile = "3.3" +assert_matches.workspace = true [features] test-utils = ["reth-rlp"] diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index b45417909b95..45529c9f0490 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -393,12 +393,16 @@ impl PruneCheckpointReader for ProviderFactory { #[cfg(test)] mod tests { use super::ProviderFactory; - use crate::{BlockHashReader, BlockNumReader}; + use crate::{BlockHashReader, BlockNumReader, BlockWriter, TransactionsProvider}; + use assert_matches::assert_matches; use reth_db::{ test_utils::{create_test_rw_db, ERROR_TEMPDIR}, DatabaseEnv, }; - use reth_primitives::{ChainSpecBuilder, H256}; + use reth_primitives::{ + hex_literal::hex, ChainSpecBuilder, PruneMode, PruneModes, SealedBlock, H256, + }; + use reth_rlp::Decodable; use std::sync::Arc; #[test] @@ -449,4 +453,36 @@ mod tests { provider_rw.block_hash(0).unwrap(); provider.block_hash(0).unwrap(); } + + #[test] + fn insert_block_with_prune_modes() { + let chain_spec = ChainSpecBuilder::mainnet().build(); + let db = create_test_rw_db(); + let factory = ProviderFactory::new(db, Arc::new(chain_spec)); + + let mut block_rlp = 
hex!("f9025ff901f7a0c86e8cc0310ae7c531c758678ddbfd16fc51c8cef8cec650b032de9869e8b94fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa050554882fbbda2c2fd93fdc466db9946ea262a67f7a76cc169e714f105ab583da00967f09ef1dfed20c0eacfaa94d5cd4002eda3242ac47eae68972d07b106d192a0e3c8b47fbfc94667ef4cceb17e5cc21e3b1eebd442cebb27f07562b33836290db90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001830f42408238108203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f862f860800a83061a8094095e7baea6a6c7c4c2dfeb977efac326af552d8780801ba072ed817487b84ba367d15d2f039b5fc5f087d0a8882fbdf73e8cb49357e1ce30a0403d800545b8fc544f92ce8124e2255f8c3c6af93f28243a120585d4c4c6a2a3c0").as_slice(); + let block = SealedBlock::decode(&mut block_rlp).unwrap(); + + { + let provider = factory.provider_rw().unwrap(); + assert_matches!(provider.insert_block(block.clone(), None, None), Ok(_)); + assert_matches!(provider.transaction_id(block.body[0].hash), Ok(Some(0))); + } + + { + let provider = factory.provider_rw().unwrap(); + assert_matches!( + provider.insert_block( + block.clone(), + None, + Some(&PruneModes { + transaction_lookup: Some(PruneMode::Full), + ..PruneModes::none() + }) + ), + Ok(_) + ); + assert_matches!(provider.transaction_id(block.body[0].hash), Ok(None)); + } + } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 85c497cc4432..f63b256b3a1c 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -31,10 +31,10 @@ use reth_primitives::{ stage::{StageCheckpoint, StageId}, trie::Nibbles, Account, Address, Block, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, - ChainInfo, ChainSpec, Hardfork, Head, Header, PruneCheckpoint, PrunePart, Receipt, SealedBlock, - SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionMeta, TransactionSigned, - TransactionSignedEcRecovered, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, H256, - U256, + ChainInfo, ChainSpec, Hardfork, Head, Header, PruneCheckpoint, PruneModes, PrunePart, Receipt, + SealedBlock, SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionMeta, + TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxHash, TxNumber, + Withdrawal, H256, U256, }; use reth_revm_primitives::{ config::revm_spec, @@ -1871,6 +1871,7 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> BlockWriter for DatabaseProvider<' &self, block: SealedBlock, senders: Option>, + prune_modes: Option<&PruneModes>, ) -> Result { let block_number = block.number; self.tx.put::(block.number, block.hash())?; @@ -1922,7 +1923,14 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> BlockWriter for DatabaseProvider<' let hash = transaction.hash(); self.tx.put::(next_tx_num, sender)?; self.tx.put::(next_tx_num, transaction.into())?; - self.tx.put::(hash, next_tx_num)?; + + if prune_modes + .and_then(|modes| modes.transaction_lookup) + 
.filter(|prune_mode| prune_mode.is_full()) + .is_none() + { + self.tx.put::(hash, next_tx_num)?; + } next_tx_num += 1; } @@ -1949,6 +1957,7 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> BlockWriter for DatabaseProvider<' &self, blocks: Vec, state: PostState, + prune_modes: Option<&PruneModes>, ) -> Result<()> { if blocks.is_empty() { return Ok(()) @@ -1966,7 +1975,7 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> BlockWriter for DatabaseProvider<' // Insert the blocks for block in blocks { let (block, senders) = block.into_components(); - self.insert_block(block, Some(senders))?; + self.insert_block(block, Some(senders), prune_modes)?; } // Write state and changesets to the database. diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index 182d7c57982a..c780701619e8 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -7,7 +7,8 @@ use reth_db::models::StoredBlockBodyIndices; use reth_interfaces::Result; use reth_primitives::{ Address, Block, BlockHashOrNumber, BlockId, BlockNumber, BlockNumberOrTag, BlockWithSenders, - ChainSpec, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, H256, + ChainSpec, Header, PruneModes, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, + H256, }; use std::ops::RangeInclusive; @@ -242,6 +243,7 @@ pub trait BlockWriter: Send + Sync { &self, block: SealedBlock, senders: Option>, + prune_modes: Option<&PruneModes>, ) -> Result; /// Appends a batch of sealed blocks to the blockchain, including sender information, and @@ -254,6 +256,7 @@ pub trait BlockWriter: Send + Sync { /// /// - `blocks`: Vector of `SealedBlockWithSenders` instances to append. /// - `state`: Post-state information to update after appending. + /// - `prune_modes`: Optional pruning configuration. /// /// # Returns /// @@ -263,5 +266,6 @@ pub trait BlockWriter: Send + Sync { &self, blocks: Vec, state: PostState, + prune_modes: Option<&PruneModes>, ) -> Result<()>; } diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index a5ef62b83ebe..0ac9379d2af8 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -55,7 +55,7 @@ paste = "1.0" rand = "0.8" proptest.workspace = true criterion = "0.5" -assert_matches = "1.5" +assert_matches.workspace = true [features] default = ["serde"] diff --git a/examples/rpc-db.rs b/examples/rpc-db.rs index 5ff672f82ced..86f890299a51 100644 --- a/examples/rpc-db.rs +++ b/examples/rpc-db.rs @@ -48,6 +48,7 @@ async fn main() -> eyre::Result<()> { externals, canon_state_notification_sender, tree_config, + None, )?); BlockchainProvider::new(factory, tree)? 
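The `prune_modes` gate added to `insert_block` above skips writing the `TxHashNumber` entry only when transaction lookup pruning is configured as `Full`; distance- or block-based modes still write the entry and rely on the pruner to delete old rows later. Below is a self-contained sketch of that `Option` chain, using simplified stand-in types rather than the real reth definitions:

// Stand-in types; the real ones live in reth_primitives::prune.
#[allow(dead_code)]
#[derive(Clone, Copy)]
enum PruneMode {
    Full,
    Distance(u64),
    Before(u64),
}

impl PruneMode {
    /// Returns true if the prune mode is `Full`, mirroring the helper added in this PR.
    fn is_full(&self) -> bool {
        matches!(self, Self::Full)
    }
}

struct PruneModes {
    transaction_lookup: Option<PruneMode>,
}

/// The gate from `insert_block`: write the `TxHashNumber` entry unless
/// transaction lookup pruning is set to `Full`.
fn should_write_tx_hash_index(prune_modes: Option<&PruneModes>) -> bool {
    prune_modes
        .and_then(|modes| modes.transaction_lookup)
        .filter(|prune_mode| prune_mode.is_full())
        .is_none()
}

fn main() {
    // No prune configuration at all: the lookup entry is written.
    assert!(should_write_tx_hash_index(None));
    // Full pruning: skip the write entirely instead of pruning it afterwards.
    let full = PruneModes { transaction_lookup: Some(PruneMode::Full) };
    assert!(!should_write_tx_hash_index(Some(&full)));
    // Distance-based pruning still writes; the pruner removes old entries later.
    let distance = PruneModes { transaction_lookup: Some(PruneMode::Distance(100_000)) };
    assert!(should_write_tx_hash_index(Some(&distance)));
}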
diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs index d63124581b6b..d240885e724a 100644 --- a/testing/ef-tests/src/cases/blockchain_test.rs +++ b/testing/ef-tests/src/cases/blockchain_test.rs @@ -81,6 +81,7 @@ impl Case for BlockchainTestCase { provider.insert_block( SealedBlock::new(case.genesis_block_header.clone().into(), BlockBody::default()), None, + None, )?; case.pre.write_to_db(provider.tx_ref())?; @@ -88,7 +89,7 @@ impl Case for BlockchainTestCase { for block in case.blocks.iter() { let decoded = SealedBlock::decode(&mut block.rlp.as_ref())?; last_block = Some(decoded.number); - provider.insert_block(decoded, None)?; + provider.insert_block(decoded, None, None)?; } // Call execution stage From 6bb94af5bbf928f9a706fef1c23fb5dcdcfcba65 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Fri, 1 Sep 2023 15:06:49 +0100 Subject: [PATCH 588/722] feat(stages): respect `PruneModes` in `TxLookup` stage (#4390) --- Cargo.lock | 8 +- bin/reth/src/node/mod.rs | 5 +- bin/reth/src/stage/run.rs | 6 +- crates/config/src/config.rs | 2 +- .../consensus/beacon/src/engine/test_utils.rs | 2 +- crates/prune/src/pruner.rs | 2 +- crates/stages/benches/criterion.rs | 4 +- crates/stages/src/stages/execution.rs | 2 +- crates/stages/src/stages/tx_lookup.rs | 131 +++++++++++++++--- 9 files changed, 133 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5c8386b3052e..f2df57294b1b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6188,7 +6188,7 @@ dependencies = [ [[package]] name = "revm" version = "3.3.0" -source = "git+https://github.com/bluealloy/revm/?branch=release/v25#6084e0fa2d457931cd8c9d29934bca0812b5b8d6" +source = "git+https://github.com/bluealloy/revm/?branch=release/v25#88337924f4d16ed1f5e4cde12a03d0cb755cd658" dependencies = [ "auto_impl", "revm-interpreter", @@ -6198,7 +6198,7 @@ dependencies = [ [[package]] name = "revm-interpreter" version = "1.1.2" -source = "git+https://github.com/bluealloy/revm/?branch=release/v25#6084e0fa2d457931cd8c9d29934bca0812b5b8d6" +source = "git+https://github.com/bluealloy/revm/?branch=release/v25#88337924f4d16ed1f5e4cde12a03d0cb755cd658" dependencies = [ "derive_more", "enumn", @@ -6209,7 +6209,7 @@ dependencies = [ [[package]] name = "revm-precompile" version = "2.0.3" -source = "git+https://github.com/bluealloy/revm/?branch=release/v25#6084e0fa2d457931cd8c9d29934bca0812b5b8d6" +source = "git+https://github.com/bluealloy/revm/?branch=release/v25#88337924f4d16ed1f5e4cde12a03d0cb755cd658" dependencies = [ "k256", "num", @@ -6225,7 +6225,7 @@ dependencies = [ [[package]] name = "revm-primitives" version = "1.1.2" -source = "git+https://github.com/bluealloy/revm/?branch=release/v25#6084e0fa2d457931cd8c9d29934bca0812b5b8d6" +source = "git+https://github.com/bluealloy/revm/?branch=release/v25#88337924f4d16ed1f5e4cde12a03d0cb755cd658" dependencies = [ "arbitrary", "auto_impl", diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 58d7bccb8d21..cd42ee81fb6a 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -865,7 +865,10 @@ impl NodeCommand { stage_config.storage_hashing.commit_threshold, )) .set(MerkleStage::new_execution(stage_config.merkle.clean_threshold)) - .set(TransactionLookupStage::new(stage_config.transaction_lookup.commit_threshold)) + .set(TransactionLookupStage::new( + stage_config.transaction_lookup.commit_threshold, + prune_modes.clone(), + )) .set(IndexAccountHistoryStage::new( stage_config.index_account_history.commit_threshold, 
prune_modes.clone(), diff --git a/bin/reth/src/stage/run.rs b/bin/reth/src/stage/run.rs index 0ec5d7d4fccc..079db67837cd 100644 --- a/bin/reth/src/stage/run.rs +++ b/bin/reth/src/stage/run.rs @@ -12,7 +12,7 @@ use reth_beacon_consensus::BeaconConsensus; use reth_config::Config; use reth_db::init_db; use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder; -use reth_primitives::ChainSpec; +use reth_primitives::{ChainSpec, PruneModes}; use reth_provider::{ProviderFactory, StageCheckpointReader}; use reth_stages::{ stages::{ @@ -208,7 +208,9 @@ impl Command { None, ) } - StageEnum::TxLookup => (Box::new(TransactionLookupStage::new(batch_size)), None), + StageEnum::TxLookup => { + (Box::new(TransactionLookupStage::new(batch_size, PruneModes::none())), None) + } StageEnum::AccountHashing => { (Box::new(AccountHashingStage::new(1, batch_size)), None) } diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index 15dcb42f1adc..95f92c9a5c33 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -290,7 +290,7 @@ pub struct PruneConfig { impl Default for PruneConfig { fn default() -> Self { - Self { block_interval: 5, parts: PruneModes::default() } + Self { block_interval: 5, parts: PruneModes::none() } } } diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index dc963c3bd5c6..d3959f4495c1 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -474,7 +474,7 @@ where db.clone(), self.base_config.chain_spec.clone(), 5, - PruneModes::default(), + PruneModes::none(), PruneBatchSizes::default(), ); diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs index df2e8ce633dc..7d91f9535b55 100644 --- a/crates/prune/src/pruner.rs +++ b/crates/prune/src/pruner.rs @@ -922,7 +922,7 @@ mod tests { fn is_pruning_needed() { let db = create_test_rw_db(); let pruner = - Pruner::new(db, MAINNET.clone(), 5, PruneModes::default(), PruneBatchSizes::default()); + Pruner::new(db, MAINNET.clone(), 5, PruneModes::none(), PruneBatchSizes::default()); // No last pruned block number was set before let first_block_number = 1; diff --git a/crates/stages/benches/criterion.rs b/crates/stages/benches/criterion.rs index 8fce2e37035e..aae96c7722fd 100644 --- a/crates/stages/benches/criterion.rs +++ b/crates/stages/benches/criterion.rs @@ -5,7 +5,7 @@ use criterion::{ use pprof::criterion::{Output, PProfProfiler}; use reth_db::DatabaseEnv; use reth_interfaces::test_utils::TestConsensus; -use reth_primitives::{stage::StageCheckpoint, MAINNET}; +use reth_primitives::{stage::StageCheckpoint, PruneModes, MAINNET}; use reth_provider::ProviderFactory; use reth_stages::{ stages::{MerkleStage, SenderRecoveryStage, TotalDifficultyStage, TransactionLookupStage}, @@ -62,7 +62,7 @@ fn transaction_lookup(c: &mut Criterion) { let mut group = c.benchmark_group("Stages"); // don't need to run each stage for that many times group.sample_size(10); - let stage = TransactionLookupStage::new(DEFAULT_NUM_BLOCKS); + let stage = TransactionLookupStage::new(DEFAULT_NUM_BLOCKS, PruneModes::none()); measure_stage( &mut group, diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index cf688f352c10..c8dfed70a930 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -94,7 +94,7 @@ impl ExecutionStage { executor_factory, ExecutionStageThresholds::default(), MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, - 
PruneModes::default(), + PruneModes::none(), ) } diff --git a/crates/stages/src/stages/tx_lookup.rs b/crates/stages/src/stages/tx_lookup.rs index 65f5772b74ee..43700ab3d171 100644 --- a/crates/stages/src/stages/tx_lookup.rs +++ b/crates/stages/src/stages/tx_lookup.rs @@ -8,12 +8,15 @@ use reth_db::{ transaction::{DbTx, DbTxMut}, DatabaseError, }; +use reth_interfaces::provider::ProviderError; use reth_primitives::{ keccak256, stage::{EntitiesCheckpoint, StageCheckpoint, StageId}, - PrunePart, TransactionSignedNoHash, TxNumber, H256, + PruneCheckpoint, PruneModes, PrunePart, TransactionSignedNoHash, TxNumber, H256, +}; +use reth_provider::{ + BlockReader, DatabaseProviderRW, PruneCheckpointReader, PruneCheckpointWriter, }; -use reth_provider::{DatabaseProviderRW, PruneCheckpointReader}; use tokio::sync::mpsc; use tracing::*; @@ -26,18 +29,19 @@ use tracing::*; pub struct TransactionLookupStage { /// The number of lookup entries to commit at once commit_threshold: u64, + prune_modes: PruneModes, } impl Default for TransactionLookupStage { fn default() -> Self { - Self { commit_threshold: 5_000_000 } + Self { commit_threshold: 5_000_000, prune_modes: PruneModes::none() } } } impl TransactionLookupStage { /// Create new instance of [TransactionLookupStage]. - pub fn new(commit_threshold: u64) -> Self { - Self { commit_threshold } + pub fn new(commit_threshold: u64, prune_modes: PruneModes) -> Self { + Self { commit_threshold, prune_modes } } } @@ -52,11 +56,37 @@ impl Stage for TransactionLookupStage { async fn execute( &mut self, provider: &DatabaseProviderRW<'_, &DB>, - input: ExecInput, + mut input: ExecInput, ) -> Result { + if let Some((target_prunable_block, prune_mode)) = + self.prune_modes.prune_target_block_transaction_lookup(input.target())? + { + if target_prunable_block > input.checkpoint().block_number { + input.checkpoint = Some(StageCheckpoint::new(target_prunable_block)); + + // Save prune checkpoint only if we don't have one already. + // Otherwise, pruner may skip the unpruned range of blocks. + if provider.get_prune_checkpoint(PrunePart::TransactionLookup)?.is_none() { + let target_prunable_tx_number = provider + .block_body_indices(target_prunable_block)? + .ok_or(ProviderError::BlockBodyIndicesNotFound(target_prunable_block))? + .last_tx_num(); + + provider.save_prune_checkpoint( + PrunePart::TransactionLookup, + PruneCheckpoint { + block_number: Some(target_prunable_block), + tx_number: Some(target_prunable_tx_number), + prune_mode, + }, + )?; + } + } + } if input.target_reached() { return Ok(ExecOutput::done(input.checkpoint())) } + let (tx_range, block_range, is_final_range) = input.next_block_range_with_transaction_threshold(provider, self.commit_threshold)?; let end_block = *block_range.end(); @@ -187,6 +217,8 @@ fn stage_checkpoint( let pruned_entries = provider .get_prune_checkpoint(PrunePart::TransactionLookup)? .and_then(|checkpoint| checkpoint.tx_number) + // `+1` is needed because `TxNumber` is 0-indexed + .map(|tx_number| tx_number + 1) .unwrap_or_default(); Ok(EntitiesCheckpoint { // If `TxHashNumber` table was pruned, we will have a number of entries in it not matching @@ -216,6 +248,7 @@ mod tests { use reth_provider::{ BlockReader, ProviderError, ProviderFactory, PruneCheckpointWriter, TransactionsProvider, }; + use std::ops::Sub; // Implement stage test suite. 
stage_test_suite_ext!(TransactionLookupTestRunner, transaction_lookup); @@ -253,7 +286,8 @@ mod tests { let result = rx.await.unwrap(); assert_matches!( result, - Ok(ExecOutput {checkpoint: StageCheckpoint { + Ok(ExecOutput { + checkpoint: StageCheckpoint { block_number, stage_checkpoint: Some(StageUnitCheckpoint::Entities(EntitiesCheckpoint { processed, @@ -272,7 +306,7 @@ mod tests { async fn execute_intermediate_commit_transaction_lookup() { let threshold = 50; let mut runner = TransactionLookupTestRunner::default(); - runner.set_threshold(threshold); + runner.set_commit_threshold(threshold); let (stage_progress, previous_stage) = (1000, 1100); // input exceeds threshold let first_input = ExecInput { target: Some(previous_stage), @@ -313,7 +347,7 @@ mod tests { ); // Execute second time to completion - runner.set_threshold(u64::MAX); + runner.set_commit_threshold(u64::MAX); let second_input = ExecInput { target: Some(previous_stage), checkpoint: Some(StageCheckpoint::new(expected_progress)), @@ -333,6 +367,49 @@ mod tests { assert!(runner.validate_execution(first_input, result.ok()).is_ok(), "validation failed"); } + #[tokio::test] + async fn execute_pruned_transaction_lookup() { + let (previous_stage, prune_target, stage_progress) = (500, 400, 100); + let mut rng = generators::rng(); + + // Set up the runner + let mut runner = TransactionLookupTestRunner::default(); + let input = ExecInput { + target: Some(previous_stage), + checkpoint: Some(StageCheckpoint::new(stage_progress)), + }; + + // Seed only once with full input range + let seed = + random_block_range(&mut rng, stage_progress + 1..=previous_stage, H256::zero(), 0..2); + runner.tx.insert_blocks(seed.iter(), None).expect("failed to seed execution"); + + runner.set_prune_modes(PruneModes { + transaction_lookup: Some(PruneMode::Before(prune_target)), + ..Default::default() + }); + + let rx = runner.execute(input); + + // Assert the successful result + let result = rx.await.unwrap(); + assert_matches!( + result, + Ok(ExecOutput { + checkpoint: StageCheckpoint { + block_number, + stage_checkpoint: Some(StageUnitCheckpoint::Entities(EntitiesCheckpoint { + processed, + total + })) + }, done: true }) if block_number == previous_stage && processed == total && + total == runner.tx.table::().unwrap().len() as u64 + ); + + // Validate the stage execution + assert!(runner.validate_execution(input, result.ok()).is_ok(), "execution validation"); + } + #[test] fn stage_checkpoint_pruned() { let tx = TestTransaction::default(); @@ -366,7 +443,8 @@ mod tests { blocks[..=max_pruned_block as usize] .iter() .map(|block| block.body.len() as u64) - .sum::(), + .sum::() + .sub(1), // `TxNumber` is 0-indexed ), prune_mode: PruneMode::Full, }, @@ -392,18 +470,27 @@ mod tests { struct TransactionLookupTestRunner { tx: TestTransaction, - threshold: u64, + commit_threshold: u64, + prune_modes: PruneModes, } impl Default for TransactionLookupTestRunner { fn default() -> Self { - Self { threshold: 1000, tx: TestTransaction::default() } + Self { + tx: TestTransaction::default(), + commit_threshold: 1000, + prune_modes: PruneModes::none(), + } } } impl TransactionLookupTestRunner { - fn set_threshold(&mut self, threshold: u64) { - self.threshold = threshold; + fn set_commit_threshold(&mut self, threshold: u64) { + self.commit_threshold = threshold; + } + + fn set_prune_modes(&mut self, prune_modes: PruneModes) { + self.prune_modes = prune_modes; } /// # Panics @@ -441,7 +528,10 @@ mod tests { } fn stage(&self) -> Self::S { - TransactionLookupStage { 
commit_threshold: self.threshold } + TransactionLookupStage { + commit_threshold: self.commit_threshold, + prune_modes: self.prune_modes.clone(), + } } } @@ -460,13 +550,22 @@ mod tests { fn validate_execution( &self, - input: ExecInput, + mut input: ExecInput, output: Option, ) -> Result<(), TestRunnerError> { match output { Some(output) => { let provider = self.tx.inner(); + if let Some((target_prunable_block, _)) = self + .prune_modes + .prune_target_block_transaction_lookup(input.target()) + .expect("prune target block for transaction lookup") + { + if target_prunable_block > input.checkpoint().block_number { + input.checkpoint = Some(StageCheckpoint::new(target_prunable_block)); + } + } let start_block = input.next_block(); let end_block = output.checkpoint.block_number; From 19e03ee9cdb7c08124734a8cfbe98a32fd38460f Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Sat, 2 Sep 2023 11:41:15 -0700 Subject: [PATCH 589/722] chore: do not warn when the chain is syncing normally (#4458) --- crates/consensus/beacon/src/engine/mod.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 118eb82f3850..5b5bd6413a44 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -914,11 +914,11 @@ where error: Error, ) -> PayloadStatus { debug_assert!(self.sync.is_pipeline_idle(), "pipeline must be idle"); - warn!(target: "consensus::engine", ?error, ?state, "Failed to canonicalize the head hash"); // check if the new head was previously invalidated, if so then we deem this FCU // as invalid if let Some(invalid_ancestor) = self.check_invalid_ancestor(state.head_block_hash) { + warn!(target: "consensus::engine", ?error, ?state, ?invalid_ancestor, head=?state.head_block_hash, "Failed to canonicalize the head hash, head is also considered invalid"); debug!(target: "consensus::engine", head=?state.head_block_hash, current_error=?error, "Head was previously marked as invalid"); return invalid_ancestor } @@ -930,12 +930,19 @@ where .. }), ) => { + warn!(target: "consensus::engine", ?error, ?state, "Failed to canonicalize the head hash"); return PayloadStatus::from_status(PayloadStatusEnum::Invalid { validation_error: error.to_string(), }) .with_latest_valid_hash(H256::zero()) } + Error::Execution(BlockExecutionError::BlockHashNotFoundInChain { .. 
}) => { + // This just means we couldn't find the block when attempting to make it canonical, + // so we should not warn the user, since this will result in us attempting to sync + // to a new target and is considered normal operation during sync + } _ => { + warn!(target: "consensus::engine", ?error, ?state, "Failed to canonicalize the head hash"); // TODO(mattsse) better error handling before attempting to sync (FCU could be // invalid): only trigger sync if we can't determine whether the FCU is invalid } @@ -976,6 +983,7 @@ where self.sync.download_full_block(target); } + debug!(target: "consensus::engine", ?target, "Syncing to new target"); PayloadStatus::from_status(PayloadStatusEnum::Syncing) } From 60e5b43992f79cbe90c2ee009016f236b8effa4f Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Sun, 3 Sep 2023 06:40:26 -0700 Subject: [PATCH 590/722] chore: fix deps sanity check (#4462) --- crates/consensus/beacon/src/engine/mod.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 5b5bd6413a44..5a8279cf547d 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1888,9 +1888,7 @@ mod tests { BeaconForkChoiceUpdateError, }; use assert_matches::assert_matches; - use reth_primitives::{ - stage::StageCheckpoint, ChainSpec, ChainSpecBuilder, PruneModes, H256, MAINNET, - }; + use reth_primitives::{stage::StageCheckpoint, ChainSpec, ChainSpecBuilder, H256, MAINNET}; use reth_provider::{BlockWriter, ProviderFactory}; use reth_rpc_types::engine::{ ExecutionPayloadV1, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, From acedc8cf4caf12c1172241250c133611870aa633 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 3 Sep 2023 14:05:28 +0000 Subject: [PATCH 591/722] chore(deps): weekly `cargo update` (#4464) Co-authored-by: github-merge-queue Co-authored-by: Matthias Seitz --- Cargo.lock | 141 ++++++++++++++++++++++++++--------------------------- 1 file changed, 70 insertions(+), 71 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f2df57294b1b..db688335d5d7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -119,9 +119,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6748e8def348ed4d14996fa801f4122cd763fff530258cdc03f64b25f89d3a5a" +checksum = "0c378d78423fdad8089616f827526ee33c19f2fddbd5de1629152c9593ba4783" dependencies = [ "memchr", ] @@ -403,9 +403,9 @@ checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" [[package]] name = "async-compression" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b74f44609f0f91493e3082d3734d98497e094777144380ea4db9f9905dd5b6" +checksum = "d495b6dc0184693324491a5ac05f559acc97bf937ab31d7a1c33dd0016be6d2b" dependencies = [ "brotli", "flate2", @@ -934,7 +934,7 @@ dependencies = [ [[package]] name = "c-kzg" version = "0.1.0" -source = "git+https://github.com/ethereum/c-kzg-4844#666a9de002035eb7e929bceee3a70dee1b23aa93" +source = "git+https://github.com/ethereum/c-kzg-4844#d35b0f3854ab114b48daa9b504f6ee085c61508a" dependencies = [ "bindgen 0.64.0 (git+https://github.com/rust-lang/rust-bindgen?rev=0de11f0a521611ac8738b7b01d19dddaf3899e66)", "blst", @@ -1030,15 +1030,15 @@ checksum = 
"baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.26" +version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" +checksum = "95ed24df0632f708f5f6d8082675bef2596f7084dee3dd55f632290bf35bfe0f" dependencies = [ "android-tzdata", "iana-time-zone", "num-traits", "serde", - "winapi", + "windows-targets 0.48.5", ] [[package]] @@ -1100,20 +1100,19 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.0" +version = "4.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d5f1946157a96594eb2d2c10eb7ad9a2b27518cb3000209dec700c35df9197d" +checksum = "6a13b88d2c62ff462f88e4a121f17a82c1af05693a2f192b5c38d14de73c19f6" dependencies = [ "clap_builder", "clap_derive", - "once_cell", ] [[package]] name = "clap_builder" -version = "4.4.0" +version = "4.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78116e32a042dd73c2901f0dc30790d20ff3447f3e3472fad359e8c3d282bcd6" +checksum = "2bb9faaa7c2ef94b2743a21f5a29e6f0010dff4caa69ac8e9d6cf8b6fa74da08" dependencies = [ "anstream", "anstyle", @@ -1123,9 +1122,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.4.0" +version = "4.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9fd1a5729c4548118d7d70ff234a44868d00489a4b6597b0b020918a0e91a1a" +checksum = "0862016ff20d69b84ef8247369fabf5c008a7417002411897d40ee1f4532b873" dependencies = [ "heck", "proc-macro2 1.0.66", @@ -1253,9 +1252,9 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.6.2" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca268df6cd88e646b564e6aff1a016834e5f42077c736ef6b6789c31ef9ec5dc" +checksum = "08849ed393c907c90016652a01465a12d86361cd38ad2a7de026c56a520cc259" dependencies = [ "cfg-if", "cpufeatures", @@ -1621,9 +1620,9 @@ dependencies = [ [[package]] name = "dashmap" -version = "5.5.1" +version = "5.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edd72493923899c6f10c641bdbdeddc7183d6396641d99c1a0d1597f37f92e28" +checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" dependencies = [ "cfg-if", "hashbrown 0.14.0", @@ -2082,9 +2081,9 @@ dependencies = [ [[package]] name = "enumn" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b893c4eb2dc092c811165f84dc7447fae16fb66521717968c34c509b39b1a5c5" +checksum = "c2ad8cef1d801a4686bfd8919f0b30eac4c8e48968c437a6405ded4fb5272d2b" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", @@ -2110,9 +2109,9 @@ dependencies = [ [[package]] name = "errno" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f" +checksum = "136526188508e25c6fef639d7927dfb3e0e3084488bf202267829cf7fc23dbdd" dependencies = [ "errno-dragonfly", "libc", @@ -2387,9 +2386,9 @@ dependencies = [ [[package]] name = "ethnum" -version = "1.3.2" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0198b9d0078e0f30dedc7acbb21c974e838fc8fae3ee170128658a98cb2c1c04" +checksum = "6c8ff382b2fa527fb7fb06eeebfc5bbb3f17e3cc6b9d70b006c41daa8824adac" [[package]] name = "event-listener" @@ -3421,7 +3420,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi", - "rustix 0.38.9", + "rustix 0.38.11", "windows-sys 0.48.0", ] @@ -3866,9 +3865,9 @@ checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" [[package]] name = "memchr" -version = "2.5.0" +version = "2.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" [[package]] name = "memmap2" @@ -4070,14 +4069,13 @@ dependencies = [ [[package]] name = "nix" -version = "0.26.2" +version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" +checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" dependencies = [ "bitflags 1.3.2", "cfg-if", "libc", - "static_assertions", ] [[package]] @@ -4335,9 +4333,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.4" +version = "3.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8e946cc0cc711189c0b0249fb8b599cbeeab9784d83c415719368bb8d4ac64" +checksum = "0dec8a8073036902368c2cdc0387e85ff9a37054d7e7c98e592145e0c92cd4fb" dependencies = [ "arrayvec", "bitvec", @@ -4350,9 +4348,9 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.6.4" +version = "3.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a296c3079b5fefbc499e1de58dc26c09b1b9a5952d26694ee89f04a43ebbb3e" +checksum = "312270ee71e1cd70289dacf597cab7b207aa107d2f28191c2ae45b2ece18a260" dependencies = [ "proc-macro-crate", "proc-macro2 1.0.66", @@ -4470,10 +4468,11 @@ checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pest" -version = "2.7.2" +version = "2.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1acb4a4365a13f749a93f1a094a7805e5cfa0955373a9de860d962eaa3a5fe5a" +checksum = "d7a4d085fd991ac8d5b05a147b437791b4260b76326baf0fc60cf7c9c27ecd33" dependencies = [ + "memchr", "thiserror", "ucd-trie", ] @@ -4589,9 +4588,9 @@ dependencies = [ [[package]] name = "platforms" -version = "3.0.2" +version = "3.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3d7ddaed09e0eb771a79ab0fd64609ba0afb0a8366421957936ad14cbd13630" +checksum = "4503fa043bf02cee09a9582e9554b4c6403b2ef55e4612e96561d294419429f8" [[package]] name = "plotters" @@ -4647,9 +4646,9 @@ checksum = "31114a898e107c51bb1609ffaf55a0e011cf6a4d7f1170d0015a165082c0338b" [[package]] name = "postcard" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9ee729232311d3cd113749948b689627618133b1c5012b77342c1950b25eaeb" +checksum = "d534c6e61df1c7166e636ca612d9820d486fe96ddad37f7abc671517b297488e" dependencies = [ "cobs", "heapless", @@ -5072,13 +5071,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.4" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12de2eff854e5fa4b1295edd650e227e9d8fb0c9e90b12e7f36d6a6811791a29" +checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47" dependencies = [ - "aho-corasick 1.0.4", + "aho-corasick 1.0.5", "memchr", - "regex-automata 0.3.7", + "regex-automata 0.3.8", "regex-syntax 0.7.5", ] @@ -5093,11 +5092,11 @@ dependencies = [ 
[[package]] name = "regex-automata" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49530408a136e16e5b486e883fbb6ba058e8e4e8ae6621a77b048b314336e629" +checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" dependencies = [ - "aho-corasick 1.0.4", + "aho-corasick 1.0.5", "memchr", "regex-syntax 0.7.5", ] @@ -6188,7 +6187,7 @@ dependencies = [ [[package]] name = "revm" version = "3.3.0" -source = "git+https://github.com/bluealloy/revm/?branch=release/v25#88337924f4d16ed1f5e4cde12a03d0cb755cd658" +source = "git+https://github.com/bluealloy/revm/?branch=release/v25#6084e0fa2d457931cd8c9d29934bca0812b5b8d6" dependencies = [ "auto_impl", "revm-interpreter", @@ -6198,7 +6197,7 @@ dependencies = [ [[package]] name = "revm-interpreter" version = "1.1.2" -source = "git+https://github.com/bluealloy/revm/?branch=release/v25#88337924f4d16ed1f5e4cde12a03d0cb755cd658" +source = "git+https://github.com/bluealloy/revm/?branch=release/v25#6084e0fa2d457931cd8c9d29934bca0812b5b8d6" dependencies = [ "derive_more", "enumn", @@ -6209,7 +6208,7 @@ dependencies = [ [[package]] name = "revm-precompile" version = "2.0.3" -source = "git+https://github.com/bluealloy/revm/?branch=release/v25#88337924f4d16ed1f5e4cde12a03d0cb755cd658" +source = "git+https://github.com/bluealloy/revm/?branch=release/v25#6084e0fa2d457931cd8c9d29934bca0812b5b8d6" dependencies = [ "k256", "num", @@ -6225,7 +6224,7 @@ dependencies = [ [[package]] name = "revm-primitives" version = "1.1.2" -source = "git+https://github.com/bluealloy/revm/?branch=release/v25#88337924f4d16ed1f5e4cde12a03d0cb755cd658" +source = "git+https://github.com/bluealloy/revm/?branch=release/v25#6084e0fa2d457931cd8c9d29934bca0812b5b8d6" dependencies = [ "arbitrary", "auto_impl", @@ -6399,7 +6398,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c37f1bd5ef1b5422177b7646cba67430579cfe2ace80f284fee876bca52ad941" dependencies = [ "bitflags 1.3.2", - "errno 0.3.2", + "errno 0.3.3", "io-lifetimes", "libc", "linux-raw-sys 0.1.4", @@ -6408,12 +6407,12 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.9" +version = "0.38.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bfe0f2582b4931a45d1fa608f8a8722e8b3c7ac54dd6d5f3b3212791fedef49" +checksum = "c0c3dde1fc030af041adc40e79c0e7fbcf431dd24870053d187d7c66e4b87453" dependencies = [ "bitflags 2.4.0", - "errno 0.3.2", + "errno 0.3.3", "libc", "linux-raw-sys 0.4.5", "windows-sys 0.48.0", @@ -6421,9 +6420,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.6" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d1feddffcfcc0b33f5c6ce9a29e341e4cd59c3f78e7ee45f4a40c038b1d6cbb" +checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" dependencies = [ "log", "ring", @@ -7293,7 +7292,7 @@ dependencies = [ "cfg-if", "fastrand 2.0.0", "redox_syscall 0.3.5", - "rustix 0.38.9", + "rustix 0.38.11", "windows-sys 0.48.0", ] @@ -7377,18 +7376,18 @@ checksum = "aac81b6fd6beb5884b0cf3321b8117e6e5d47ecb6fc89f414cfdcca8b2fe2dd8" [[package]] name = "thiserror" -version = "1.0.47" +version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97a802ec30afc17eee47b2855fc72e0c4cd62be9b4efe6591edde0ec5bd68d8f" +checksum = "9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = 
"1.0.47" +version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bb623b56e39ab7dcd4b1b98bb6c8f8d907ed255b18de254088016b27a8ee19b" +checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", @@ -7416,9 +7415,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bb39ee79a6d8de55f48f2293a830e040392f1c5f16e336bdd1788cd0aadce07" +checksum = "17f6bb557fd245c28e6411aa56b6403c689ad95061f50e4be16c274e70a17e48" dependencies = [ "deranged", "itoa", @@ -7437,9 +7436,9 @@ checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "733d258752e9303d392b94b75230d07b0b9c489350c69b851fc6c065fde3e8f9" +checksum = "1a942f44339478ef67935ab2bbaec2fb0322496cf3cbe84b261e06ac3814c572" dependencies = [ "time-core", ] @@ -7647,9 +7646,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55ae70283aba8d2a8b411c695c437fe25b8b5e44e23e780662002fc72fb47a82" +checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" dependencies = [ "async-compression", "base64 0.21.3", @@ -8039,9 +8038,9 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" +checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" dependencies = [ "form_urlencoded", "idna 0.4.0", From e8f2a56f40b612f38cf842ed8a1f1d71d2c95fce Mon Sep 17 00:00:00 2001 From: swbartosz <34399101+swbartosz@users.noreply.github.com> Date: Sun, 3 Sep 2023 15:16:38 +0100 Subject: [PATCH 592/722] Fix: high load RPC putting node in a broken state: avoid running blocking tasks within blocking tasks (#4461) Co-authored-by: Matthias Seitz --- crates/rpc/rpc/src/eth/api/call.rs | 8 +++-- crates/rpc/rpc/src/eth/api/server.rs | 34 ++++++-------------- crates/rpc/rpc/src/eth/api/transactions.rs | 36 +++++++++++++--------- 3 files changed, 38 insertions(+), 40 deletions(-) diff --git a/crates/rpc/rpc/src/eth/api/call.rs b/crates/rpc/rpc/src/eth/api/call.rs index 6e40b76b6ad0..6bf638cf5a6d 100644 --- a/crates/rpc/rpc/src/eth/api/call.rs +++ b/crates/rpc/rpc/src/eth/api/call.rs @@ -47,8 +47,12 @@ where /// Estimate gas needed for execution of the `request` at the [BlockId]. 
pub async fn estimate_gas_at(&self, request: CallRequest, at: BlockId) -> EthResult { let (cfg, block_env, at) = self.evm_env_at(at).await?; - let state = self.state_at(at)?; - self.estimate_gas_with(cfg, block_env, request, state) + + self.on_blocking_task(|this| async move { + let state = this.state_at(at)?; + this.estimate_gas_with(cfg, block_env, request, state) + }) + .await } /// Executes the call request (`eth_call`) and returns the output diff --git a/crates/rpc/rpc/src/eth/api/server.rs b/crates/rpc/rpc/src/eth/api/server.rs index 0ec7535a29a1..575a1b3a3160 100644 --- a/crates/rpc/rpc/src/eth/api/server.rs +++ b/crates/rpc/rpc/src/eth/api/server.rs @@ -236,14 +236,7 @@ where ) -> Result { trace!(target: "rpc::eth", ?request, ?block_number, ?state_overrides, ?block_overrides, "Serving eth_call"); Ok(self - .on_blocking_task(|this| async move { - this.call( - request, - block_number, - EvmOverrides::new(state_overrides, block_overrides), - ) - .await - }) + .call(request, block_number, EvmOverrides::new(state_overrides, block_overrides)) .await?) } @@ -265,15 +258,11 @@ where block_number: Option, ) -> Result { trace!(target: "rpc::eth", ?request, ?block_number, "Serving eth_createAccessList"); - Ok(self - .on_blocking_task(|this| async move { - let block_id = block_number.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)); - let access_list = this.create_access_list_at(request.clone(), block_number).await?; - request.access_list = Some(access_list.clone()); - let gas_used = this.estimate_gas_at(request, block_id).await?; - Ok(AccessListWithGasUsed { access_list, gas_used }) - }) - .await?) + let block_id = block_number.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)); + let access_list = self.create_access_list_at(request.clone(), block_number).await?; + request.access_list = Some(access_list.clone()); + let gas_used = self.estimate_gas_at(request, block_id).await?; + Ok(AccessListWithGasUsed { access_list, gas_used }) } /// Handler for: `eth_estimateGas` @@ -284,13 +273,10 @@ where ) -> Result { trace!(target: "rpc::eth", ?request, ?block_number, "Serving eth_estimateGas"); Ok(self - .on_blocking_task(|this| async move { - this.estimate_gas_at( - request, - block_number.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)), - ) - .await - }) + .estimate_gas_at( + request, + block_number.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)), + ) .await?) } diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 6f3e1fa7f4f3..c5b07d7ab67d 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -400,20 +400,28 @@ where } async fn transaction_receipt(&self, hash: H256) -> EthResult> { - self.on_blocking_task(|this| async move { - let (tx, meta) = match this.provider().transaction_by_hash_with_meta(hash)? { - Some((tx, meta)) => (tx, meta), - None => return Ok(None), - }; - - let receipt = match this.provider().receipt_by_hash(hash)? { - Some(recpt) => recpt, - None => return Ok(None), - }; - - this.build_transaction_receipt(tx, meta, receipt).await.map(Some) - }) - .await + let result = self + .on_blocking_task(|this| async move { + let (tx, meta) = match this.provider().transaction_by_hash_with_meta(hash)? { + Some((tx, meta)) => (tx, meta), + None => return Ok(None), + }; + + let receipt = match this.provider().receipt_by_hash(hash)? 
{ + Some(recpt) => recpt, + None => return Ok(None), + }; + + Ok(Some((tx, meta, receipt))) + }) + .await?; + + let (tx, meta, receipt) = match result { + Some((tx, meta, receipt)) => (tx, meta, receipt), + None => return Ok(None), + }; + + self.build_transaction_receipt(tx, meta, receipt).await.map(Some) } async fn send_raw_transaction(&self, tx: Bytes) -> EthResult { From e4f26e8bf0325aad3745897e806d34bb4c4ea7e8 Mon Sep 17 00:00:00 2001 From: Alessandro Mazza <121622391+alessandromazza98@users.noreply.github.com> Date: Sun, 3 Sep 2023 18:29:28 +0200 Subject: [PATCH 593/722] consolidate eip4844 pool errors (#4453) --- crates/rpc/rpc/src/eth/error.rs | 40 ++------ crates/transaction-pool/src/error.rs | 101 +++++++++++--------- crates/transaction-pool/src/validate/eth.rs | 24 +++-- 3 files changed, 80 insertions(+), 85 deletions(-) diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index eb5897b27d21..6a79f59e191d 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -5,12 +5,12 @@ use jsonrpsee::{ core::Error as RpcError, types::{error::CALL_EXECUTION_FAILED_CODE, ErrorObject}, }; -use reth_primitives::{ - abi::decode_revert_reason, Address, BlobTransactionValidationError, Bytes, U256, -}; +use reth_primitives::{abi::decode_revert_reason, Address, Bytes, U256}; use reth_revm::tracing::js::JsInspectorError; use reth_rpc_types::{error::EthRpcErrorCode, BlockError, CallInputError}; -use reth_transaction_pool::error::{InvalidPoolTransactionError, PoolError, PoolTransactionError}; +use reth_transaction_pool::error::{ + Eip4844PoolTransactionError, InvalidPoolTransactionError, PoolError, PoolTransactionError, +}; use revm::primitives::{EVMError, ExecutionResult, Halt, OutOfGasError}; use std::time::Duration; @@ -472,23 +472,9 @@ pub enum RpcPoolError { /// Custom pool error #[error("{0:?}")] PoolTransactionError(Box), - /// Unable to find the blob for an EIP4844 transaction - #[error("blob sidecar not found for EIP4844 transaction")] - MissingEip4844Blob, - /// Thrown if an EIP-4844 without any blobs arrives - #[error("blobless blob transaction")] - NoEip4844Blobs, - /// Thrown if an EIP-4844 without any blobs arrives - #[error("too many blobs in transaction: have {have}, permitted {permitted}")] - TooManyEip4844Blobs { - /// Number of blobs the transaction has - have: usize, - /// Number of maximum blobs the transaction can have - permitted: usize, - }, - /// Thrown if validating the blob sidecar for the transaction failed. 
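// The variants removed here move into a dedicated enum, and the outer error
// wraps it via `#[error(transparent)]` + `#[from]`. A self-contained sketch of
// that consolidation pattern with thiserror (hypothetical variant names, not
// the reth types):
use thiserror::Error;

#[derive(Debug, Error)]
enum BlobError {
    #[error("blob sidecar not found")]
    MissingSidecar,
}

#[derive(Debug, Error)]
enum PoolError {
    // Display and `source()` are forwarded to the inner error, so callers
    // still see "blob sidecar not found" instead of a new wrapper message
    #[error(transparent)]
    Blob(#[from] BlobError),
}

fn reject() -> Result<(), PoolError> {
    // the derived `From<BlobError> for PoolError` keeps conversions terse
    Err(BlobError::MissingSidecar.into())
}

fn main() {
    // prints "blob sidecar not found": `transparent` forwards Display to the inner error
    println!("{}", reject().unwrap_err());
}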
+ /// Eip-4844 related error #[error(transparent)] - InvalidEip4844Blob(BlobTransactionValidationError), + Eip4844(#[from] Eip4844PoolTransactionError), #[error(transparent)] Other(Box), } @@ -527,19 +513,7 @@ impl From for RpcPoolError { InvalidPoolTransactionError::OversizedData(_, _) => RpcPoolError::OversizedData, InvalidPoolTransactionError::Underpriced => RpcPoolError::Underpriced, InvalidPoolTransactionError::Other(err) => RpcPoolError::PoolTransactionError(err), - InvalidPoolTransactionError::MissingEip4844BlobSidecar => { - RpcPoolError::MissingEip4844Blob - } - InvalidPoolTransactionError::NoEip4844Blobs => RpcPoolError::NoEip4844Blobs, - InvalidPoolTransactionError::TooManyEip4844Blobs { have, permitted } => { - RpcPoolError::TooManyEip4844Blobs { have, permitted } - } - InvalidPoolTransactionError::InvalidEip4844Blob(err) => { - RpcPoolError::InvalidEip4844Blob(err) - } - InvalidPoolTransactionError::Eip4844NonceGap => { - RpcPoolError::Invalid(RpcInvalidTransactionError::NonceTooHigh) - } + InvalidPoolTransactionError::Eip4844(err) => RpcPoolError::Eip4844(err), } } } diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs index d9685b3a4662..8b57b61164f2 100644 --- a/crates/transaction-pool/src/error.rs +++ b/crates/transaction-pool/src/error.rs @@ -114,30 +114,10 @@ impl PoolError { } } -/// Represents errors that can happen when validating transactions for the pool -/// -/// See [TransactionValidator](crate::TransactionValidator). +/// Represents all errors that can happen when validating transactions for the pool for EIP-4844 +/// transactions #[derive(Debug, thiserror::Error)] -pub enum InvalidPoolTransactionError { - /// Hard consensus errors - #[error(transparent)] - Consensus(#[from] InvalidTransactionError), - /// Thrown when a new transaction is added to the pool, but then immediately discarded to - /// respect the size limits of the pool. - #[error("Transaction's gas limit {0} exceeds block's gas limit {1}.")] - ExceedsGasLimit(u64, u64), - /// Thrown when a new transaction is added to the pool, but then immediately discarded to - /// respect the max_init_code_size. - #[error("Transaction's size {0} exceeds max_init_code_size {1}.")] - ExceedsMaxInitCodeSize(usize, usize), - /// Thrown if the input data of a transaction is greater - /// than some meaningful limit a user might use. This is not a consensus error - /// making the transaction invalid, rather a DOS protection. - #[error("Input data too large")] - OversizedData(usize, usize), - /// Thrown if the transaction's fee is below the minimum fee - #[error("transaction underpriced")] - Underpriced, +pub enum Eip4844PoolTransactionError { /// Thrown if we're unable to find the blob for a transaction that was previously extracted #[error("blob sidecar not found for EIP4844 transaction")] MissingEip4844BlobSidecar, @@ -163,6 +143,35 @@ pub enum InvalidPoolTransactionError { /// would introduce gap in the nonce sequence. #[error("Nonce too high.")] Eip4844NonceGap, +} + +/// Represents errors that can happen when validating transactions for the pool +/// +/// See [TransactionValidator](crate::TransactionValidator). +#[derive(Debug, thiserror::Error)] +pub enum InvalidPoolTransactionError { + /// Hard consensus errors + #[error(transparent)] + Consensus(#[from] InvalidTransactionError), + /// Thrown when a new transaction is added to the pool, but then immediately discarded to + /// respect the size limits of the pool. 
+ #[error("Transaction's gas limit {0} exceeds block's gas limit {1}.")] + ExceedsGasLimit(u64, u64), + /// Thrown when a new transaction is added to the pool, but then immediately discarded to + /// respect the max_init_code_size. + #[error("Transaction's size {0} exceeds max_init_code_size {1}.")] + ExceedsMaxInitCodeSize(usize, usize), + /// Thrown if the input data of a transaction is greater + /// than some meaningful limit a user might use. This is not a consensus error + /// making the transaction invalid, rather a DOS protection. + #[error("Input data too large")] + OversizedData(usize, usize), + /// Thrown if the transaction's fee is below the minimum fee + #[error("transaction underpriced")] + Underpriced, + /// Eip-4844 related errors + #[error(transparent)] + Eip4844(#[from] Eip4844PoolTransactionError), /// Any other error that occurred while inserting/validating that is transaction specific #[error("{0:?}")] Other(Box), @@ -220,27 +229,31 @@ impl InvalidPoolTransactionError { false } InvalidPoolTransactionError::Other(err) => err.is_bad_transaction(), - InvalidPoolTransactionError::MissingEip4844BlobSidecar => { - // this is only reachable when blob transactions are reinjected and we're unable to - // find the previously extracted blob - false - } - InvalidPoolTransactionError::InvalidEip4844Blob(_) => { - // This is only reachable when the blob is invalid - true - } - InvalidPoolTransactionError::Eip4844NonceGap => { - // it is possible that the pool sees `nonce n` before `nonce n-1` and this is only - // thrown for valid(good) blob transactions - false - } - InvalidPoolTransactionError::NoEip4844Blobs => { - // this is a malformed transaction and should not be sent over the network - true - } - InvalidPoolTransactionError::TooManyEip4844Blobs { .. } => { - // this is a malformed transaction and should not be sent over the network - true + InvalidPoolTransactionError::Eip4844(eip4844_err) => { + match eip4844_err { + Eip4844PoolTransactionError::MissingEip4844BlobSidecar => { + // this is only reachable when blob transactions are reinjected and we're + // unable to find the previously extracted blob + false + } + Eip4844PoolTransactionError::InvalidEip4844Blob(_) => { + // This is only reachable when the blob is invalid + true + } + Eip4844PoolTransactionError::Eip4844NonceGap => { + // it is possible that the pool sees `nonce n` before `nonce n-1` and this + // is only thrown for valid(good) blob transactions + false + } + Eip4844PoolTransactionError::NoEip4844Blobs => { + // this is a malformed transaction and should not be sent over the network + true + } + Eip4844PoolTransactionError::TooManyEip4844Blobs { .. 
} => { + // this is a malformed transaction and should not be sent over the network + true + } + } } } } diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 814613a5076c..01f5253b3c52 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -2,7 +2,7 @@ use crate::{ blobstore::BlobStore, - error::InvalidPoolTransactionError, + error::{Eip4844PoolTransactionError, InvalidPoolTransactionError}, traits::TransactionOrigin, validate::{ValidTransaction, ValidationTask, MAX_INIT_CODE_SIZE, TX_MAX_SIZE}, EthBlobTransactionSidecar, EthPoolTransaction, TransactionValidationOutcome, @@ -217,7 +217,9 @@ where // no blobs return TransactionValidationOutcome::Invalid( transaction, - InvalidPoolTransactionError::NoEip4844Blobs, + InvalidPoolTransactionError::Eip4844( + Eip4844PoolTransactionError::NoEip4844Blobs, + ), ) } @@ -225,10 +227,12 @@ where // too many blobs return TransactionValidationOutcome::Invalid( transaction, - InvalidPoolTransactionError::TooManyEip4844Blobs { - have: blob_count, - permitted: MAX_BLOBS_PER_BLOCK, - }, + InvalidPoolTransactionError::Eip4844( + Eip4844PoolTransactionError::TooManyEip4844Blobs { + have: blob_count, + permitted: MAX_BLOBS_PER_BLOCK, + }, + ), ) } @@ -247,7 +251,9 @@ where } else { return TransactionValidationOutcome::Invalid( transaction, - InvalidPoolTransactionError::MissingEip4844BlobSidecar, + InvalidPoolTransactionError::Eip4844( + Eip4844PoolTransactionError::MissingEip4844BlobSidecar, + ), ) } } @@ -257,7 +263,9 @@ where if let Err(err) = eip4844.validate_blob(&blob, &self.kzg_settings) { return TransactionValidationOutcome::Invalid( transaction, - InvalidPoolTransactionError::InvalidEip4844Blob(err), + InvalidPoolTransactionError::Eip4844( + Eip4844PoolTransactionError::InvalidEip4844Blob(err), + ), ) } // store the extracted blob From ce1c0eeb006f21ec4cf2e9184874354473f8a5e7 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Sun, 3 Sep 2023 17:40:54 +0100 Subject: [PATCH 594/722] docs(book): mention full node in `mainnet.md` (#4454) --- book/run/mainnet.md | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/book/run/mainnet.md b/book/run/mainnet.md index e3f47cff0388..e2f1c07e42a6 100644 --- a/book/run/mainnet.md +++ b/book/run/mainnet.md @@ -17,13 +17,20 @@ By running both an execution client like Reth and a consensus client, such as Li First, ensure that you have Reth installed by following the [installation instructions][installation]. -Now, start the node as follows: +Now, to start the archive node, run: ```bash RUST_LOG=info reth node ``` -> Note that this command will not open any HTTP/WS ports by default. You can change this by adding the `--http`, `--ws` flags, respectively and using the `--http.api` and `--ws.api` flags to enable various [JSON-RPC APIs](../jsonrpc/intro.md). For more commands, see the [`reth node` CLI reference](../cli/node.md). +And to start the full node, run: +```bash +RUST_LOG=info reth node --full +``` + +On differences between archive and full nodes, see [Pruning & Full Node](./pruning.md#basic-concepts) section. + +> Note that these commands will not open any HTTP/WS ports by default. You can change this by adding the `--http`, `--ws` flags, respectively and using the `--http.api` and `--ws.api` flags to enable various [JSON-RPC APIs](../jsonrpc/intro.md). For more commands, see the [`reth node` CLI reference](../cli/node.md). 
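For example, a full node that also serves JSON-RPC over HTTP could be started with (the API list here is illustrative; use whichever namespaces `--http.api` should expose):

```bash
RUST_LOG=info reth node --full --http --http.api eth,net,web3
```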
The EL <> CL communication happens over the [Engine API](https://github.com/ethereum/execution-apis/blob/main/src/engine/common.md), which is by default exposed at `http://localhost:8551`. The connection is authenticated over JWT using a JWT secret which is auto-generated by Reth and placed in a file called `jwt.hex` in the data directory, which on Linux by default is `$HOME/.local/share/reth/` (`/Users/<username>/Library/Application Support/reth/mainnet/jwt.hex` in Mac).

From ed6a9b41fe59faef1cbf8773af5799a93c491174 Mon Sep 17 00:00:00 2001
From: Daiz <90135051+0xDaizz@users.noreply.github.com>
Date: Mon, 4 Sep 2023 05:16:04 +0900
Subject: [PATCH 595/722] docs: add WSL2 Disk allocation (Bus error) (#4463)

---
 book/installation/source.md | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/book/installation/source.md b/book/installation/source.md
index a76326ba5f32..89893c9da244 100644
--- a/book/installation/source.md
+++ b/book/installation/source.md
@@ -133,3 +133,11 @@ binary](../installation/binaries.md).
 If compilation fails with `error: linking with cc failed: exit code: 1`, try running `cargo clean`.
 
 _(Thanks to Sigma Prime for this section from [their Lighthouse book](https://lighthouse-book.sigmaprime.io/installation.html)!)_
+
+### Bus error (WSL2)
+
+In WSL 2 on Windows, the default virtual disk size is set to 1TB.
+
+You must increase the allocated disk size for your WSL2 instance before syncing reth.
+
+You can follow the instructions here: [how to expand the size of your WSL2 virtual hard disk.](https://learn.microsoft.com/en-us/windows/wsl/disk-space#how-to-expand-the-size-of-your-wsl-2-virtual-hard-disk)

From ee15bdbb7be2170ddf6006692d0bf36937a60836 Mon Sep 17 00:00:00 2001
From: Alexey Shekhirin
Date: Mon, 4 Sep 2023 11:20:19 +0100
Subject: [PATCH 596/722] chore: fix clippy (#4468)

---
 crates/net/discv4/src/config.rs | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/crates/net/discv4/src/config.rs b/crates/net/discv4/src/config.rs
index 7a3addc39f15..2f56835fc686 100644
--- a/crates/net/discv4/src/config.rs
+++ b/crates/net/discv4/src/config.rs
@@ -116,15 +116,15 @@ impl Default for Discv4Config {
     fn default() -> Self {
         Self {
             enable_packet_filter: false,
-            /// This should be high enough to cover an entire recursive FindNode lookup which is
-            /// includes sending FindNode to nodes it discovered in the rounds using the
-            /// concurrency factor ALPHA
+            // This should be high enough to cover an entire recursive FindNode lookup, which
+            // includes sending FindNode to nodes it discovered in the rounds using the concurrency
+            // factor ALPHA
             udp_egress_message_buffer: 1024,
-            /// Every outgoing request will eventually lead to an incoming response
+            // Every outgoing request will eventually lead to an incoming response
             udp_ingress_message_buffer: 1024,
             max_find_node_failures: 5,
             ping_interval: Duration::from_secs(60 * 10),
-            /// unified expiration and timeout durations, mirrors geth's `expiration` duration
+            // Unified expiration and timeout durations, mirrors geth's `expiration` duration
             ping_expiration: Duration::from_secs(20),
             bond_expiration: Duration::from_secs(60 * 60),
             enr_expiration: Duration::from_secs(20),
@@ -141,7 +141,7 @@ impl Default for Discv4Config {
             enforce_expiration_timestamps: true,
             additional_eip868_rlp_pairs: Default::default(),
             external_ip_resolver: Some(Default::default()),
-            /// By default retry public IP using a 5min interval
+            // By default retry public IP using a 5min interval
             resolve_external_ip_interval:
Some(Duration::from_secs(60 * 5)), } } From 03887a2c4ea6630a8b6a5c949b0a556f85e8ea9b Mon Sep 17 00:00:00 2001 From: Alessandro Mazza <121622391+alessandromazza98@users.noreply.github.com> Date: Mon, 4 Sep 2023 12:51:07 +0200 Subject: [PATCH 597/722] feat(storage): recover senders if not found in database (#4280) Co-authored-by: Alexey Shekhirin --- .../src/providers/database/provider.rs | 26 +++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index f63b256b3a1c..9b30cbafbb72 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -34,7 +34,7 @@ use reth_primitives::{ ChainInfo, ChainSpec, Hardfork, Head, Header, PruneCheckpoint, PruneModes, PrunePart, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxHash, TxNumber, - Withdrawal, H256, U256, + Withdrawal, H160, H256, U256, }; use reth_revm_primitives::{ config::revm_spec, @@ -431,9 +431,31 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> DatabaseProvider<'this, TX> { .map(|(id, tx)| (id, tx.into())) .collect::>(); - let senders = + let mut senders = self.get_or_take::(first_transaction..=last_transaction)?; + // Recover senders manually if not found in db + let senders_len = senders.len(); + let transactions_len = transactions.len(); + let missing_senders = transactions_len - senders_len; + let mut senders_recovered: Vec<(u64, H160)> = (first_transaction.. + first_transaction + missing_senders as u64) + .zip( + TransactionSigned::recover_signers( + transactions.iter().take(missing_senders).map(|(_, tx)| tx).collect::>(), + missing_senders, + ) + .ok_or(BlockExecutionError::Validation( + BlockValidationError::SenderRecoveryError, + ))?, + ) + .collect(); + // It's only possible to have missing senders at the beginning of the range, and not in the + // middle or in the end, so it's safe to do `senders_recovered.extend(senders.iter())` + senders_recovered.extend(senders.iter()); + senders = senders_recovered; + debug_assert_eq!(senders.len(), transactions_len, "missing one or more senders"); + if TAKE { // Remove TxHashNumber let mut tx_hash_cursor = self.tx.cursor_write::()?; From b32562f4eab15bf2fc16a62d8636b3ab8284ce77 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 4 Sep 2023 13:20:19 +0100 Subject: [PATCH 598/722] test(storage): recover senders if not found in database (#4470) --- crates/storage/provider/Cargo.toml | 1 + .../provider/src/providers/database/mod.rs | 31 +++++++++++++++++++ .../src/providers/database/provider.rs | 2 +- 3 files changed, 33 insertions(+), 1 deletion(-) diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index bcff000a09d3..45382391b567 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -38,6 +38,7 @@ reth-db = { path = "../db", features = ["test-utils"] } reth-primitives = { workspace = true, features = ["arbitrary", "test-utils"] } reth-rlp.workspace = true reth-trie = { path = "../../trie", features = ["test-utils"] } +reth-interfaces = { workspace = true, features = ["test-utils"] } parking_lot.workspace = true tempfile = "3.3" assert_matches.workspace = true diff --git a/crates/storage/provider/src/providers/database/mod.rs 
b/crates/storage/provider/src/providers/database/mod.rs index 45529c9f0490..4273964c136e 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -396,9 +396,11 @@ mod tests { use crate::{BlockHashReader, BlockNumReader, BlockWriter, TransactionsProvider}; use assert_matches::assert_matches; use reth_db::{ + tables, test_utils::{create_test_rw_db, ERROR_TEMPDIR}, DatabaseEnv, }; + use reth_interfaces::test_utils::{generators, generators::random_block}; use reth_primitives::{ hex_literal::hex, ChainSpecBuilder, PruneMode, PruneModes, SealedBlock, H256, }; @@ -485,4 +487,33 @@ mod tests { assert_matches!(provider.transaction_id(block.body[0].hash), Ok(None)); } } + + #[test] + fn get_take_block_transaction_range_recover_senders() { + let chain_spec = ChainSpecBuilder::mainnet().build(); + let db = create_test_rw_db(); + let factory = ProviderFactory::new(db, Arc::new(chain_spec)); + + let mut rng = generators::rng(); + let block = random_block(&mut rng, 0, None, Some(3), None); + + { + let provider = factory.provider_rw().unwrap(); + + assert_matches!(provider.insert_block(block.clone(), None, None), Ok(_)); + + let senders = provider.get_or_take::(0..=0); + assert_eq!(senders, Ok(vec![(0, block.body[0].recover_signer().unwrap())])); + assert_eq!(provider.transaction_sender(0), Ok(None)); + + let result = provider.get_take_block_transaction_range::(0..=0); + assert_eq!( + result, + Ok(vec![( + 0, + block.body.iter().cloned().map(|tx| tx.into_ecrecovered().unwrap()).collect() + )]) + ) + } + } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 9b30cbafbb72..ae8d87cabcdc 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -404,7 +404,7 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> DatabaseProvider<'this, TX> { } /// Get requested blocks transaction with signer - fn get_take_block_transaction_range( + pub(crate) fn get_take_block_transaction_range( &self, range: impl RangeBounds + Clone, ) -> Result)>> { From d9334ee6cf6764bd822f06b21b80820db78927e1 Mon Sep 17 00:00:00 2001 From: Alessandro Mazza <121622391+alessandromazza98@users.noreply.github.com> Date: Mon, 4 Sep 2023 16:51:33 +0200 Subject: [PATCH 599/722] feat: pass parent beacon block root to payload builder (#4425) --- crates/payload/basic/src/lib.rs | 4 ++-- crates/payload/builder/src/payload.rs | 6 ++++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 1ae9d0ef896a..c56dba46dcc2 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -828,7 +828,7 @@ where difficulty: U256::ZERO, gas_used: cumulative_gas_used, extra_data: extra_data.into(), - parent_beacon_block_root: None, + parent_beacon_block_root: attributes.parent_beacon_block_root, blob_gas_used, excess_blob_gas, }; @@ -906,7 +906,7 @@ where blob_gas_used: None, excess_blob_gas: None, extra_data: extra_data.into(), - parent_beacon_block_root: None, + parent_beacon_block_root: attributes.parent_beacon_block_root, }; let block = Block { header, body: vec![], ommers: vec![], withdrawals }; diff --git a/crates/payload/builder/src/payload.rs b/crates/payload/builder/src/payload.rs index c5e199ef718e..3b5fa7b5a67a 100644 --- a/crates/payload/builder/src/payload.rs +++ b/crates/payload/builder/src/payload.rs @@ -129,6 
+129,8 @@ pub struct PayloadBuilderAttributes { pub prev_randao: H256, /// Withdrawals for the generated payload pub withdrawals: Vec, + /// Root of the parent beacon block + pub parent_beacon_block_root: Option, } // === impl PayloadBuilderAttributes === @@ -146,6 +148,7 @@ impl PayloadBuilderAttributes { suggested_fee_recipient: attributes.suggested_fee_recipient, prev_randao: attributes.prev_randao, withdrawals: attributes.withdrawals.unwrap_or_default(), + parent_beacon_block_root: attributes.parent_beacon_block_root, } } @@ -204,6 +207,9 @@ pub(crate) fn payload_id(parent: &H256, attributes: &PayloadAttributes) -> Paylo withdrawals.encode(&mut buf); hasher.update(buf); } + if let Some(parent_beacon_block) = attributes.parent_beacon_block_root { + hasher.update(parent_beacon_block); + } let out = hasher.finalize(); PayloadId::new(out.as_slice()[..8].try_into().expect("sufficient length")) } From 8c015c176a6cf541fd81933170cea5b29cb20da9 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 4 Sep 2023 17:03:55 +0200 Subject: [PATCH 600/722] feat(txpool): add `skip_blob_transactions` function to `BestTransactions` (#4455) Co-authored-by: Matthias Seitz --- crates/transaction-pool/src/pool/best.rs | 18 +++++++++++++++++- crates/transaction-pool/src/pool/pending.rs | 1 + crates/transaction-pool/src/traits.rs | 7 +++++++ 3 files changed, 25 insertions(+), 1 deletion(-) diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 5fc5ebc93137..5b0ab07c5684 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -29,6 +29,10 @@ impl crate::traits::BestTransactions for BestTransaction fn no_updates(&mut self) { self.best.no_updates() } + + fn set_skip_blobs(&mut self, skip_blobs: bool) { + self.best.set_skip_blobs(skip_blobs) + } } impl Iterator for BestTransactionsWithBasefee { @@ -72,6 +76,8 @@ pub(crate) struct BestTransactions { /// These new pending transactions are inserted into this iterator's pool before yielding the /// next value pub(crate) new_transaction_receiver: Option>>, + /// Flag to control whether to skip blob transactions (EIP4844). 
+    pub(crate) skip_blobs: bool,
 }
 
 impl<T: TransactionOrdering> BestTransactions<T> {
@@ -134,6 +140,10 @@ impl<T: TransactionOrdering> crate::traits::BestTransactions for BestTransactions<T>
     fn no_updates(&mut self) {
         self.new_transaction_receiver.take();
     }
+
+    fn set_skip_blobs(&mut self, skip_blobs: bool) {
+        self.skip_blobs = skip_blobs;
+    }
 }
 
 impl<T: TransactionOrdering> Iterator for BestTransactions<T> {
@@ -161,7 +171,13 @@ impl<T: TransactionOrdering> Iterator for BestTransactions<T> {
                 self.independent.insert(unlocked.clone());
             }
 
-            return Some(best.transaction)
+            if self.skip_blobs && best.transaction.transaction.is_eip4844() {
+                // blobs should be skipped, marking them as invalid will ensure that no dependent
+                // transactions are returned
+                self.mark_invalid(&best.transaction)
+            } else {
+                return Some(best.transaction)
+            }
         }
     }
 }
 
diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs
index 65be2a7c1e85..0708f1c8840e 100644
--- a/crates/transaction-pool/src/pool/pending.rs
+++ b/crates/transaction-pool/src/pool/pending.rs
@@ -89,6 +89,7 @@ impl<T: TransactionOrdering> PendingPool<T> {
             independent: self.independent_transactions.clone(),
             invalid: Default::default(),
             new_transaction_receiver: Some(self.new_transaction_notifier.subscribe()),
+            skip_blobs: false,
         }
     }
 
diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs
index 862ce14baab9..506ced4941e6 100644
--- a/crates/transaction-pool/src/traits.rs
+++ b/crates/transaction-pool/src/traits.rs
@@ -570,6 +570,11 @@ pub trait BestTransactions: Iterator + Send {
     /// This ensures that iterator will return the best transaction that it currently knows and not
     /// listen to pool updates.
     fn no_updates(&mut self);
+
+    /// Set the skip_blobs flag to control whether to skip blob transactions (is_eip4844).
+    ///
+    /// This flag will control whether the iterator skips blob transactions or not.
+    fn set_skip_blobs(&mut self, skip_blobs: bool);
 }
 
 /// A no-op implementation that yields no transactions.
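// A short sketch of how a consumer could drive the new hook (import paths and
// the helper name here are assumptions for illustration): once `skip_blobs` is
// set, the iterator marks EIP-4844 transactions invalid instead of yielding
// them, so transactions that depend on a skipped blob transaction are not
// returned either.
use reth_transaction_pool::{BestTransactions as _, TransactionPool};

fn best_non_blob_transactions<P: TransactionPool>(pool: &P) {
    let mut best = pool.best_transactions();
    // opt out of blob transactions for this consumer only; the pool itself
    // is untouched
    best.set_skip_blobs(true);
    for tx in best {
        // only non-blob transactions (and none descending from a skipped
        // blob transaction) reach this point
        let _ = tx;
    }
}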
@@ -577,6 +582,8 @@ impl BestTransactions for std::iter::Empty { fn mark_invalid(&mut self, _tx: &T) {} fn no_updates(&mut self) {} + + fn set_skip_blobs(&mut self, _skip_blobs: bool) {} } /// Trait for transaction types used inside the pool From d2647dcc7c05afbf56fdece19a8149a90f89232d Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 4 Sep 2023 08:44:35 -0700 Subject: [PATCH 601/722] chore: add cancun to hive job (#4478) --- .github/workflows/hive.yml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml index fbc598b95ac5..a921f4b94767 100644 --- a/.github/workflows/hive.yml +++ b/.github/workflows/hive.yml @@ -31,11 +31,10 @@ jobs: cache-from: type=gha cache-to: type=gha,mode=max - # TODO: replace when we are not using a fork - name: Checkout hive tests uses: actions/checkout@v3 with: - repository: paradigmxyz/hive + repository: ethereum/hive ref: master path: hivetests @@ -88,6 +87,9 @@ jobs: - sim: ethereum/engine limit: engine-api experimental: true + - sim: ethereum/engine + limit: cancun + experimental: true # eth_ rpc methods - sim: ethereum/rpc-compat include: [eth_blockNumber, eth_call, eth_chainId, eth_createAccessList, eth_estimateGas, eth_feeHistory, eth_getBalance, eth_getBlockBy, eth_getBlockTransactionCountBy, eth_getCode, eth_getStorage, eth_getTransactionBy, eth_getTransactionCount, eth_getTransactionReceipt, eth_sendRawTransaction, eth_syncing] @@ -123,11 +125,10 @@ jobs: mv /tmp/hive /usr/local/bin chmod +x /usr/local/bin/hive - # TODO: replace when we are not using a fork - name: Checkout hive tests uses: actions/checkout@v3 with: - repository: paradigmxyz/hive + repository: ethereum/hive ref: master path: hivetests From 1ec4d462a22574ddf3baa88e507d6fbbf879eb40 Mon Sep 17 00:00:00 2001 From: Alessandro Mazza <121622391+alessandromazza98@users.noreply.github.com> Date: Mon, 4 Sep 2023 19:19:11 +0200 Subject: [PATCH 602/722] add semaphore to EthStateCacheService (#4477) Co-authored-by: Matthias Seitz --- crates/rpc/rpc/src/eth/cache/config.rs | 8 +++++++ crates/rpc/rpc/src/eth/cache/mod.rs | 32 ++++++++++++++++++++++---- 2 files changed, 36 insertions(+), 4 deletions(-) diff --git a/crates/rpc/rpc/src/eth/cache/config.rs b/crates/rpc/rpc/src/eth/cache/config.rs index da7037e72a77..d8a01cfb35f1 100644 --- a/crates/rpc/rpc/src/eth/cache/config.rs +++ b/crates/rpc/rpc/src/eth/cache/config.rs @@ -21,6 +21,9 @@ pub const DEFAULT_RECEIPT_CACHE_MAX_LEN: u32 = 2000; /// Default cache size for the env cache: 1000 envs. pub const DEFAULT_ENV_CACHE_MAX_LEN: u32 = 1000; +/// Default number of concurrent database requests. +pub const DEFAULT_CONCURRENT_DB_REQUESTS: usize = 512; + /// Settings for the [EthStateCache](crate::eth::cache::EthStateCache). #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -37,6 +40,10 @@ pub struct EthStateCacheConfig { /// /// Default is 1000. pub max_envs: u32, + /// Max number of concurrent database requests. + /// + /// Default is 512. 
+ pub max_concurrent_db_requests: usize, } impl Default for EthStateCacheConfig { @@ -45,6 +52,7 @@ impl Default for EthStateCacheConfig { max_blocks: DEFAULT_BLOCK_CACHE_MAX_LEN, max_receipts: DEFAULT_RECEIPT_CACHE_MAX_LEN, max_envs: DEFAULT_ENV_CACHE_MAX_LEN, + max_concurrent_db_requests: DEFAULT_CONCURRENT_DB_REQUESTS, } } } diff --git a/crates/rpc/rpc/src/eth/cache/mod.rs b/crates/rpc/rpc/src/eth/cache/mod.rs index 8209a77ec303..61424c1b1b25 100644 --- a/crates/rpc/rpc/src/eth/cache/mod.rs +++ b/crates/rpc/rpc/src/eth/cache/mod.rs @@ -12,11 +12,12 @@ use schnellru::{ByLength, Limiter}; use std::{ future::Future, pin::Pin, + sync::Arc, task::{ready, Context, Poll}, }; use tokio::sync::{ mpsc::{unbounded_channel, UnboundedSender}, - oneshot, + oneshot, Semaphore, }; use tokio_stream::wrappers::UnboundedReceiverStream; @@ -68,6 +69,7 @@ impl EthStateCache { max_blocks: u32, max_receipts: u32, max_envs: u32, + max_concurrent_db_operations: usize, ) -> (Self, EthStateCacheService) { let (to_service, rx) = unbounded_channel(); let service = EthStateCacheService { @@ -78,6 +80,7 @@ impl EthStateCache { action_tx: to_service.clone(), action_rx: UnboundedReceiverStream::new(rx), action_task_spawner, + rate_limiter: Arc::new(Semaphore::new(max_concurrent_db_operations)), }; let cache = EthStateCache { to_service }; (cache, service) @@ -107,9 +110,16 @@ impl EthStateCache { Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, { - let EthStateCacheConfig { max_blocks, max_receipts, max_envs } = config; - let (this, service) = - Self::create(provider, executor.clone(), max_blocks, max_receipts, max_envs); + let EthStateCacheConfig { max_blocks, max_receipts, max_envs, max_concurrent_db_requests } = + config; + let (this, service) = Self::create( + provider, + executor.clone(), + max_blocks, + max_receipts, + max_envs, + max_concurrent_db_requests, + ); executor.spawn_critical("eth state cache", Box::pin(service)); this } @@ -229,6 +239,8 @@ pub(crate) struct EthStateCacheService< action_rx: UnboundedReceiverStream, /// The type that's used to spawn tasks that do the actual work action_task_spawner: Tasks, + /// Rate limiter + rate_limiter: Arc, } impl EthStateCacheService @@ -308,7 +320,10 @@ where if this.full_block_cache.queue(block_hash, Either::Left(response_tx)) { let provider = this.provider.clone(); let action_tx = this.action_tx.clone(); + let rate_limiter = this.rate_limiter.clone(); this.action_task_spawner.spawn_blocking(Box::pin(async move { + // Acquire permit + let _permit = rate_limiter.acquire().await; // Only look in the database to prevent situations where we // looking up the tree is blocking let res = provider @@ -329,7 +344,10 @@ where if this.full_block_cache.queue(block_hash, Either::Right(response_tx)) { let provider = this.provider.clone(); let action_tx = this.action_tx.clone(); + let rate_limiter = this.rate_limiter.clone(); this.action_task_spawner.spawn_blocking(Box::pin(async move { + // Acquire permit + let _permit = rate_limiter.acquire().await; // Only look in the database to prevent situations where we // looking up the tree is blocking let res = provider @@ -350,7 +368,10 @@ where if this.receipts_cache.queue(block_hash, response_tx) { let provider = this.provider.clone(); let action_tx = this.action_tx.clone(); + let rate_limiter = this.rate_limiter.clone(); this.action_task_spawner.spawn_blocking(Box::pin(async move { + // Acquire permit + let _permit = rate_limiter.acquire().await; let 
res = provider.receipts_by_block(block_hash.into()); let _ = action_tx .send(CacheAction::ReceiptsResult { block_hash, res }); @@ -369,7 +390,10 @@ where if this.evm_env_cache.queue(block_hash, response_tx) { let provider = this.provider.clone(); let action_tx = this.action_tx.clone(); + let rate_limiter = this.rate_limiter.clone(); this.action_task_spawner.spawn_blocking(Box::pin(async move { + // Acquire permit + let _permit = rate_limiter.acquire().await; let mut cfg = CfgEnv::default(); let mut block_env = BlockEnv::default(); let res = provider From b2750e0e9b57c9eba178e22f0e814e15ed6fbe9a Mon Sep 17 00:00:00 2001 From: JosepBove Date: Tue, 5 Sep 2023 10:30:18 +0200 Subject: [PATCH 603/722] Remove redundant clones (#4485) --- bin/reth/src/recover/storage_tries.rs | 2 +- crates/consensus/beacon/src/engine/mod.rs | 2 +- crates/prune/src/pruner.rs | 2 +- crates/storage/provider/src/providers/database/provider.rs | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bin/reth/src/recover/storage_tries.rs b/bin/reth/src/recover/storage_tries.rs index 564410eca06f..d1e8a87f54da 100644 --- a/bin/reth/src/recover/storage_tries.rs +++ b/bin/reth/src/recover/storage_tries.rs @@ -58,7 +58,7 @@ impl Command { debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis"); init_genesis(db.clone(), self.chain.clone())?; - let factory = ProviderFactory::new(&db, self.chain.clone()); + let factory = ProviderFactory::new(&db, self.chain); let mut provider = factory.provider_rw()?; let best_block = provider.best_block_number()?; let best_header = provider diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 5a8279cf547d..03d164d7dd36 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -564,7 +564,7 @@ where // FCU resulted in a fatal error from which we can't recover let err = err.clone(); let _ = tx.send(Err(error)); - return OnForkchoiceUpdateOutcome::Fatal(err.clone()) + return OnForkchoiceUpdateOutcome::Fatal(err) } } let _ = tx.send(Err(error)); diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs index 7d91f9535b55..123e2eb3470b 100644 --- a/crates/prune/src/pruner.rs +++ b/crates/prune/src/pruner.rs @@ -591,7 +591,7 @@ impl Pruner { .collect::>(); // Number of transactions retrieved from the database should match the tx range count - let tx_count = tx_range.clone().count(); + let tx_count = tx_range.count(); if hashes.len() != tx_count { return Err(PrunerError::InconsistentData( "Unexpected number of transaction hashes retrieved by transaction number range", diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index ae8d87cabcdc..e54268530180 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -685,7 +685,7 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> DatabaseProvider<'this, TX> { mut delete_callback: impl FnMut(TableRow), ) -> std::result::Result<(usize, bool), DatabaseError> { let mut cursor = self.tx.cursor_write::()?; - let mut walker = cursor.walk_range(keys.clone())?; + let mut walker = cursor.walk_range(keys)?; let mut deleted = 0; while let Some(row) = walker.next().transpose()? 
{ From 843d5047b9222d3e0a5d8079857bdb1f1ab74fe6 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 5 Sep 2023 14:05:55 +0100 Subject: [PATCH 604/722] feat(storage): better sender recovery if not found in database (#4471) Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> --- .../provider/src/providers/database/mod.rs | 24 ++++-- .../src/providers/database/provider.rs | 77 ++++++++++++++----- crates/transaction-pool/src/pool/best.rs | 2 +- 3 files changed, 76 insertions(+), 27 deletions(-) diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 4273964c136e..e9da00b85059 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -402,10 +402,10 @@ mod tests { }; use reth_interfaces::test_utils::{generators, generators::random_block}; use reth_primitives::{ - hex_literal::hex, ChainSpecBuilder, PruneMode, PruneModes, SealedBlock, H256, + hex_literal::hex, ChainSpecBuilder, PruneMode, PruneModes, SealedBlock, TxNumber, H256, }; use reth_rlp::Decodable; - use std::sync::Arc; + use std::{ops::RangeInclusive, sync::Arc}; #[test] fn common_history_provider() { @@ -497,14 +497,26 @@ mod tests { let mut rng = generators::rng(); let block = random_block(&mut rng, 0, None, Some(3), None); - { + let tx_ranges: Vec> = vec![0..=0, 1..=1, 2..=2, 0..=1, 1..=2]; + for range in tx_ranges { let provider = factory.provider_rw().unwrap(); assert_matches!(provider.insert_block(block.clone(), None, None), Ok(_)); - let senders = provider.get_or_take::(0..=0); - assert_eq!(senders, Ok(vec![(0, block.body[0].recover_signer().unwrap())])); - assert_eq!(provider.transaction_sender(0), Ok(None)); + let senders = provider.get_or_take::(range.clone()); + assert_eq!( + senders, + Ok(range + .clone() + .map(|tx_number| ( + tx_number, + block.body[tx_number as usize].recover_signer().unwrap() + )) + .collect()) + ); + + let db_senders = provider.senders_by_tx_range(range); + assert_eq!(db_senders, Ok(vec![])); let result = provider.get_take_block_transaction_range::(0..=0); assert_eq!( diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index e54268530180..d811e9ab3ccc 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -34,7 +34,7 @@ use reth_primitives::{ ChainInfo, ChainSpec, Hardfork, Head, Header, PruneCheckpoint, PruneModes, PrunePart, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxHash, TxNumber, - Withdrawal, H160, H256, U256, + Withdrawal, H256, U256, }; use reth_revm_primitives::{ config::revm_spec, @@ -435,26 +435,63 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> DatabaseProvider<'this, TX> { self.get_or_take::(first_transaction..=last_transaction)?; // Recover senders manually if not found in db - let senders_len = senders.len(); - let transactions_len = transactions.len(); - let missing_senders = transactions_len - senders_len; - let mut senders_recovered: Vec<(u64, H160)> = (first_transaction.. 
- first_transaction + missing_senders as u64) - .zip( - TransactionSigned::recover_signers( - transactions.iter().take(missing_senders).map(|(_, tx)| tx).collect::>(), - missing_senders, - ) - .ok_or(BlockExecutionError::Validation( - BlockValidationError::SenderRecoveryError, - ))?, + // SAFETY: Transactions are always guaranteed to be in the database whereas + // senders might be pruned. + if senders.len() != transactions.len() { + senders.reserve(transactions.len() - senders.len()); + // Find all missing senders, their corresponding tx numbers and indexes to the original + // `senders` vector at which the recovered senders will be inserted. + let mut missing_senders = Vec::with_capacity(transactions.len() - senders.len()); + { + let mut senders = senders.iter().peekable(); + + // `transactions` contain all entries. `senders` contain _some_ of the senders for + // these transactions. Both are sorted and indexed by `TxNumber`. + // + // The general idea is to iterate on both `transactions` and `senders`, and advance + // the `senders` iteration only if it matches the current `transactions` entry's + // `TxNumber`. Otherwise, add the transaction to the list of missing senders. + for (i, (tx_number, transaction)) in transactions.iter().enumerate() { + if let Some((sender_tx_number, _)) = senders.peek() { + if sender_tx_number == tx_number { + // If current sender's `TxNumber` matches current transaction's + // `TxNumber`, advance the senders iterator. + senders.next(); + } else { + // If current sender's `TxNumber` doesn't match current transaction's + // `TxNumber`, add it to missing senders. + missing_senders.push((i, tx_number, transaction)); + } + } else { + // If there's no more senders left, but we're still iterating over + // transactions, add them to missing senders + missing_senders.push((i, tx_number, transaction)); + } + } + } + + // Recover senders + let recovered_senders = TransactionSigned::recover_signers( + missing_senders.iter().map(|(_, _, tx)| *tx).collect::>(), + missing_senders.len(), ) - .collect(); - // It's only possible to have missing senders at the beginning of the range, and not in the - // middle or in the end, so it's safe to do `senders_recovered.extend(senders.iter())` - senders_recovered.extend(senders.iter()); - senders = senders_recovered; - debug_assert_eq!(senders.len(), transactions_len, "missing one or more senders"); + .ok_or(BlockExecutionError::Validation(BlockValidationError::SenderRecoveryError))?; + + // Insert recovered senders along with tx numbers at the corresponding indexes to the + // original `senders` vector + for ((i, tx_number, _), sender) in missing_senders.into_iter().zip(recovered_senders) { + // Insert will put recovered senders at necessary positions and shift the rest + senders.insert(i, (*tx_number, sender)); + } + + // Debug assertions which are triggered during the test to ensure that all senders are + // present and sorted + debug_assert_eq!(senders.len(), transactions.len(), "missing one or more senders"); + debug_assert!( + senders.iter().tuple_windows().all(|(a, b)| a.0 < b.0), + "senders not sorted" + ); + } if TAKE { // Remove TxHashNumber diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 5b0ab07c5684..2a57f2f22017 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -70,7 +70,7 @@ pub(crate) struct BestTransactions { pub(crate) independent: BTreeSet>, /// There might be the case where a yielded transactions is invalid, 
this will track it. pub(crate) invalid: HashSet, - /// Used to recieve any new pending transactions that have been added to the pool after this + /// Used to receive any new pending transactions that have been added to the pool after this /// iterator was snapshotted /// /// These new pending transactions are inserted into this iterator's pool before yielding the From f6e5826dec3546c068f0c4d5cf8ecff1be31f6cf Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 5 Sep 2023 15:07:49 +0200 Subject: [PATCH 605/722] feat: add get blobs exact (#4482) --- crates/payload/basic/src/lib.rs | 5 +---- crates/transaction-pool/src/blobstore/mem.rs | 18 ++++++++++++++++-- crates/transaction-pool/src/blobstore/mod.rs | 9 +++++++++ crates/transaction-pool/src/blobstore/noop.rs | 7 +++++++ crates/transaction-pool/src/lib.rs | 7 +++++++ crates/transaction-pool/src/noop.rs | 10 ++++++++++ crates/transaction-pool/src/traits.rs | 9 +++++++++ 7 files changed, 59 insertions(+), 6 deletions(-) diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index c56dba46dcc2..49307b506bd0 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -790,13 +790,10 @@ where // only determine cancun fields when active if chain_spec.is_cancun_activated_at_timestamp(attributes.timestamp) { // grab the blob sidecars from the executed txs - let blobs = pool.get_all_blobs( + blob_sidecars = pool.get_all_blobs_exact( executed_txs.iter().filter(|tx| tx.is_eip4844()).map(|tx| tx.hash).collect(), )?; - // map to just the sidecars - blob_sidecars = blobs.into_iter().map(|(_, sidecars)| sidecars).collect(); - excess_blob_gas = if chain_spec.is_cancun_activated_at_timestamp(parent_block.timestamp) { let parent_excess_blob_gas = parent_block.excess_blob_gas.unwrap_or_default(); let parent_blob_gas_used = parent_block.blob_gas_used.unwrap_or_default(); diff --git a/crates/transaction-pool/src/blobstore/mem.rs b/crates/transaction-pool/src/blobstore/mem.rs index 352ecae61520..2a2d2fd861ab 100644 --- a/crates/transaction-pool/src/blobstore/mem.rs +++ b/crates/transaction-pool/src/blobstore/mem.rs @@ -83,7 +83,7 @@ impl BlobStore for InMemoryBlobStore { // Retrieves the decoded blob data for the given transaction hash. 
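// A note on the lock change in the lookups below: `get`, `get_all`, and the
// new `get_exact` only read the map, so they take the RwLock read guard.
// Holding the write guard on a read-only path, as the old code did, forces
// concurrent lookups to queue behind each other; with the read guard,
// readers proceed in parallel and the write guard stays reserved for
// insert/delete.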
fn get(&self, tx: H256) -> Result, BlobStoreError> { - let store = self.inner.store.write(); + let store = self.inner.store.read(); Ok(store.get(&tx).cloned()) } @@ -92,7 +92,7 @@ impl BlobStore for InMemoryBlobStore { txs: Vec, ) -> Result, BlobStoreError> { let mut items = Vec::with_capacity(txs.len()); - let store = self.inner.store.write(); + let store = self.inner.store.read(); for tx in txs { if let Some(item) = store.get(&tx) { items.push((tx, item.clone())); @@ -102,6 +102,20 @@ impl BlobStore for InMemoryBlobStore { Ok(items) } + fn get_exact(&self, txs: Vec) -> Result, BlobStoreError> { + let mut items = Vec::with_capacity(txs.len()); + let store = self.inner.store.read(); + for tx in txs { + if let Some(item) = store.get(&tx) { + items.push(item.clone()); + } else { + return Err(BlobStoreError::MissingSidecar(tx)) + } + } + + Ok(items) + } + fn data_size_hint(&self) -> Option { Some(self.inner.data_size.load(std::sync::atomic::Ordering::Relaxed)) } diff --git a/crates/transaction-pool/src/blobstore/mod.rs b/crates/transaction-pool/src/blobstore/mod.rs index 786bbcd4f4d8..ce2e875bf917 100644 --- a/crates/transaction-pool/src/blobstore/mod.rs +++ b/crates/transaction-pool/src/blobstore/mod.rs @@ -41,6 +41,12 @@ pub trait BlobStore: fmt::Debug + Send + Sync + 'static { txs: Vec, ) -> Result, BlobStoreError>; + /// Returns the exact [BlobTransactionSidecar] for the given transaction hashes in the order + /// they were requested. + /// + /// Returns an error if any of the blobs are not found in the blob store. + fn get_exact(&self, txs: Vec) -> Result, BlobStoreError>; + /// Data size of all transactions in the blob store. fn data_size_hint(&self) -> Option; @@ -51,6 +57,9 @@ pub trait BlobStore: fmt::Debug + Send + Sync + 'static { /// Error variants that can occur when interacting with a blob store. #[derive(Debug, thiserror::Error)] pub enum BlobStoreError { + /// Thrown if the blob sidecar is not found for a given transaction hash but was required. + #[error("blob sidecar not found for transaction {0:?}")] + MissingSidecar(H256), /// Failed to decode the stored blob data. 
#[error("failed to decode blob data: {0}")] DecodeError(#[from] reth_rlp::DecodeError), diff --git a/crates/transaction-pool/src/blobstore/noop.rs b/crates/transaction-pool/src/blobstore/noop.rs index 3cb30a22e9e9..431d34cc8f41 100644 --- a/crates/transaction-pool/src/blobstore/noop.rs +++ b/crates/transaction-pool/src/blobstore/noop.rs @@ -34,6 +34,13 @@ impl BlobStore for NoopBlobStore { Ok(vec![]) } + fn get_exact(&self, txs: Vec) -> Result, BlobStoreError> { + if txs.is_empty() { + return Ok(vec![]) + } + Err(BlobStoreError::MissingSidecar(txs[0])) + } + fn data_size_hint(&self) -> Option { Some(0) } diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index cdf928024fdf..667e611aadde 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -481,6 +481,13 @@ where ) -> Result, BlobStoreError> { self.pool.blob_store().get_all(tx_hashes) } + + fn get_all_blobs_exact( + &self, + tx_hashes: Vec, + ) -> Result, BlobStoreError> { + self.pool.blob_store().get_exact(tx_hashes) + } } impl TransactionPoolExt for Pool diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index d2c8bcd7137f..bfeea1c73d5f 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -192,6 +192,16 @@ impl TransactionPool for NoopTransactionPool { ) -> Result, BlobStoreError> { Ok(vec![]) } + + fn get_all_blobs_exact( + &self, + tx_hashes: Vec, + ) -> Result, BlobStoreError> { + if tx_hashes.is_empty() { + return Ok(vec![]) + } + Err(BlobStoreError::MissingSidecar(tx_hashes[0])) + } } /// A [`TransactionValidator`] that does nothing. diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 506ced4941e6..0df53ed51f0b 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -310,6 +310,15 @@ pub trait TransactionPool: Send + Sync + Clone { &self, tx_hashes: Vec, ) -> Result, BlobStoreError>; + + /// Returns the exact [BlobTransactionSidecar] for the given transaction hashes in the order + /// they were requested. + /// + /// Returns an error if any of the blobs are not found in the blob store. + fn get_all_blobs_exact( + &self, + tx_hashes: Vec, + ) -> Result, BlobStoreError>; } /// Extension for [TransactionPool] trait that allows to set the current block info. 
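An aside on the blob-store contract introduced above: `get_all` is best-effort and may return fewer sidecars than requested, while `get_exact` must return exactly one sidecar per requested hash, in request order, or fail with `MissingSidecar`. A minimal sketch of the two contracts over a plain HashMap; `TxHash`, `Sidecar`, and `InMemoryStore` are simplified stand-ins for the reth types, not the real API:

    use std::collections::HashMap;

    type TxHash = [u8; 32]; // stand-in for H256
    type Sidecar = Vec<u8>; // stand-in for BlobTransactionSidecar

    #[derive(Debug)]
    enum BlobStoreError {
        MissingSidecar(TxHash),
    }

    struct InMemoryStore {
        blobs: HashMap<TxHash, Sidecar>,
    }

    impl InMemoryStore {
        // Best effort: hashes without a stored sidecar are silently skipped.
        fn get_all(&self, txs: Vec<TxHash>) -> Vec<(TxHash, Sidecar)> {
            txs.into_iter()
                .filter_map(|tx| self.blobs.get(&tx).map(|s| (tx, s.clone())))
                .collect()
        }

        // Exact: one sidecar per hash, in request order; the first miss
        // aborts the whole lookup with an error.
        fn get_exact(&self, txs: Vec<TxHash>) -> Result<Vec<Sidecar>, BlobStoreError> {
            txs.into_iter()
                .map(|tx| self.blobs.get(&tx).cloned().ok_or(BlobStoreError::MissingSidecar(tx)))
                .collect()
        }
    }

A caller that cannot tolerate gaps, like the payload builder above, needs every EIP-4844 transaction's sidecar for the block it assembles, so it uses the exact variant and propagates `MissingSidecar` instead of silently producing a block with missing blobs.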
From 297e8870c2e8215ff1e351c8ef8e2fc4bee8d448 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 5 Sep 2023 15:07:56 +0200 Subject: [PATCH 606/722] perf: skip blobs if no blob space available (#4480) --- crates/payload/basic/src/lib.rs | 14 ++++++++++---- crates/transaction-pool/src/pool/best.rs | 8 ++++++++ crates/transaction-pool/src/traits.rs | 16 ++++++++++++++-- 3 files changed, 32 insertions(+), 6 deletions(-) diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 49307b506bd0..d7fbecd060d9 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -24,9 +24,8 @@ use reth_primitives::{ bytes::{Bytes, BytesMut}, calculate_excess_blob_gas, constants::{ - eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, - BEACON_NONCE, EMPTY_RECEIPTS, EMPTY_TRANSACTIONS, EMPTY_WITHDRAWALS, - ETHEREUM_BLOCK_GAS_LIMIT, RETH_CLIENT_VERSION, SLOT_DURATION, + eip4844::MAX_DATA_GAS_PER_BLOCK, BEACON_NONCE, EMPTY_RECEIPTS, EMPTY_TRANSACTIONS, + EMPTY_WITHDRAWALS, ETHEREUM_BLOCK_GAS_LIMIT, RETH_CLIENT_VERSION, SLOT_DURATION, }, proofs, Block, BlockNumberOrTag, ChainSpec, Header, IntoRecoveredTransaction, Receipt, SealedBlock, Withdrawal, EMPTY_OMMER_ROOT, H256, U256, @@ -682,8 +681,10 @@ where // convert tx to a signed transaction let tx = pool_tx.to_recovered_transaction(); + // There's only limited amount of blob space available per block, so we need to check if the + // EIP-4844 can still fit in the block if let Some(blob_tx) = tx.transaction.as_eip4844() { - let tx_blob_gas = blob_tx.blob_versioned_hashes.len() as u64 * DATA_GAS_PER_BLOB; + let tx_blob_gas = blob_tx.blob_gas(); if sum_blob_gas_used + tx_blob_gas > MAX_DATA_GAS_PER_BLOCK { // we can't fit this _blob_ transaction into the block, so we mark it as invalid, // which removes its dependent transactions from the iterator. This is similar to @@ -693,6 +694,11 @@ where } else { // add to the data gas if we're going to execute the transaction sum_blob_gas_used += tx_blob_gas; + + // if we've reached the max data gas per block, we can skip blob txs entirely + if sum_blob_gas_used == MAX_DATA_GAS_PER_BLOCK { + best_txs.skip_blobs(); + } } } diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 2a57f2f22017..fccb4c55ba3d 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -30,6 +30,10 @@ impl crate::traits::BestTransactions for BestTransaction self.best.no_updates() } + fn skip_blobs(&mut self) { + self.set_skip_blobs(true) + } + fn set_skip_blobs(&mut self, skip_blobs: bool) { self.best.set_skip_blobs(skip_blobs) } @@ -141,6 +145,10 @@ impl crate::traits::BestTransactions for BestTransaction self.new_transaction_receiver.take(); } + fn skip_blobs(&mut self) { + self.set_skip_blobs(true); + } + fn set_skip_blobs(&mut self, skip_blobs: bool) { self.skip_blobs = skip_blobs; } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 0df53ed51f0b..49badfd050aa 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -580,9 +580,19 @@ pub trait BestTransactions: Iterator + Send { /// listen to pool updates. fn no_updates(&mut self); - /// Set the skip_blobs flag to control whether to skip blob transactions (is_eip4844). + /// Skip all blob transactions. /// - /// This flag will control whether the iterator skips blob transactions or not. 
+ /// There's only limited blob space available in a block, once exhausted, EIP-4844 transactions + /// can no longer be included. + /// + /// If called then the iterator will no longer yield blob transactions. + /// + /// Note: this will also exclude any transactions that depend on blob transactions. + fn skip_blobs(&mut self); + + /// Controls whether the iterator skips blob transactions or not. + /// + /// If set to true, no blob transactions will be returned. fn set_skip_blobs(&mut self, skip_blobs: bool); } @@ -592,6 +602,8 @@ impl BestTransactions for std::iter::Empty { fn no_updates(&mut self) {} + fn skip_blobs(&mut self) {} + fn set_skip_blobs(&mut self, _skip_blobs: bool) {} } From 3e7e65127f272d0283d466f22a29c461efeb3962 Mon Sep 17 00:00:00 2001 From: chirag-bgh <76247491+chirag-bgh@users.noreply.github.com> Date: Tue, 5 Sep 2023 19:11:15 +0530 Subject: [PATCH 607/722] feat: delete discarded blob txs from blob store (#4408) Co-authored-by: Matthias Seitz --- crates/transaction-pool/src/pool/mod.rs | 33 +++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 8403449945c7..9eb741a6a941 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -336,6 +336,9 @@ where changed_senders, ); + // This will discard outdated transactions based on the account's nonce + self.delete_discarded_blobs(outcome.discarded.iter()); + // notify listeners about updates self.notify_on_new_state(outcome); } @@ -351,6 +354,10 @@ where promoted.iter().for_each(|tx| listener.pending(tx.hash(), None)); discarded.iter().for_each(|tx| listener.discarded(tx.hash())); + + // This deletes outdated blob txs from the blob store, based on the account's nonce. This is + // called during txpool maintenance when the pool drifted. + self.delete_discarded_blobs(discarded.iter()); } /// Add a single validated transaction into the pool. @@ -402,6 +409,7 @@ where // store the sidecar in the blob store self.insert_blob(hash, sidecar); } + if let Some(replaced) = added.replaced_blob_transaction() { // delete the replaced transaction from the blob store self.delete_blob(replaced); @@ -415,6 +423,10 @@ where // Notify tx event listeners self.notify_event_listeners(&added); + if let Some(discarded) = added.discarded_transactions() { + self.delete_discarded_blobs(discarded.iter()); + } + // Notify listeners for _all_ transactions self.on_new_transaction(added.into_new_transaction_event()); @@ -735,6 +747,19 @@ where } self.blob_store_metrics.blobstore_entries.set(self.blob_store.blobs_len() as f64); } + + /// Deletes all blob transactions that were discarded. + fn delete_discarded_blobs<'a>( + &'a self, + transactions: impl IntoIterator>>, + ) { + let blob_txs = transactions + .into_iter() + .filter(|tx| tx.transaction.is_eip4844()) + .map(|tx| *tx.hash()) + .collect(); + self.delete_blobs(blob_txs); + } } impl fmt::Debug for PoolInner { @@ -849,6 +874,14 @@ impl AddedTransaction { } } + /// Returns the discarded transactions if there were any + pub(crate) fn discarded_transactions(&self) -> Option<&[Arc>]> { + match self { + AddedTransaction::Pending(tx) => Some(&tx.discarded), + AddedTransaction::Parked { .. } => None, + } + } + /// Returns the hash of the replaced transaction if it is a blob transaction. 
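// Why the pool also deletes sidecars for discarded transactions: blob
// sidecars live in the separate blob store rather than in the pooled
// transaction itself, so a blob tx dropped for an outdated nonce, replaced,
// or evicted during maintenance would otherwise leave its large sidecar
// behind. `delete_discarded_blobs` (above) filters the discarded set down
// to EIP-4844 transactions and removes their hashes from the store in one
// `delete_blobs` call.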
pub(crate) fn replaced_blob_transaction(&self) -> Option { self.replaced().filter(|tx| tx.transaction.is_eip4844()).map(|tx| *tx.transaction.hash()) From 001cbd75326b8944187edac4e3e0bc1fe6135321 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 5 Sep 2023 18:35:14 +0100 Subject: [PATCH 608/722] feat(storage, tree): respect `Sender Recovery` pruning in the blockchain tree (#4431) --- crates/storage/provider/src/providers/database/mod.rs | 6 ++++++ .../provider/src/providers/database/provider.rs | 10 +++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index e9da00b85059..525d4c7ba822 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -468,6 +468,10 @@ mod tests { { let provider = factory.provider_rw().unwrap(); assert_matches!(provider.insert_block(block.clone(), None, None), Ok(_)); + assert_matches!( + provider.transaction_sender(0), Ok(Some(sender)) + if sender == block.body[0].recover_signer().unwrap() + ); assert_matches!(provider.transaction_id(block.body[0].hash), Ok(Some(0))); } @@ -478,12 +482,14 @@ mod tests { block.clone(), None, Some(&PruneModes { + sender_recovery: Some(PruneMode::Full), transaction_lookup: Some(PruneMode::Full), ..PruneModes::none() }) ), Ok(_) ); + assert_matches!(provider.transaction_sender(0), Ok(None)); assert_matches!(provider.transaction_id(block.body[0].hash), Ok(None)); } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index d811e9ab3ccc..efdf2edbb181 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1980,7 +1980,15 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> BlockWriter for DatabaseProvider<' for (transaction, sender) in tx_iter { let hash = transaction.hash(); - self.tx.put::(next_tx_num, sender)?; + + if prune_modes + .and_then(|modes| modes.sender_recovery) + .filter(|prune_mode| prune_mode.is_full()) + .is_none() + { + self.tx.put::(next_tx_num, sender)?; + } + self.tx.put::(next_tx_num, transaction.into())?; if prune_modes From 01d4933125270a33fa5ac0f7d8315d8ab539419b Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 5 Sep 2023 18:39:49 +0100 Subject: [PATCH 609/722] feat(primitives): remove constraints on `SenderRecovery` pruning (#4488) --- bin/reth/src/args/pruning_args.rs | 2 +- crates/primitives/src/prune/target.rs | 9 ++------- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/bin/reth/src/args/pruning_args.rs b/bin/reth/src/args/pruning_args.rs index ccb6dda741fa..550634f0e995 100644 --- a/bin/reth/src/args/pruning_args.rs +++ b/bin/reth/src/args/pruning_args.rs @@ -24,7 +24,7 @@ impl PruningArgs { Some(PruneConfig { block_interval: 5, parts: PruneModes { - sender_recovery: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), + sender_recovery: Some(PruneMode::Full), transaction_lookup: None, receipts: chain_spec .deposit_contract diff --git a/crates/primitives/src/prune/target.rs b/crates/primitives/src/prune/target.rs index 8789b0cf8f55..e715c94ed787 100644 --- a/crates/primitives/src/prune/target.rs +++ b/crates/primitives/src/prune/target.rs @@ -13,12 +13,7 @@ pub const MINIMUM_PRUNING_DISTANCE: u64 = 128; #[serde(default)] pub struct PruneModes { /// Sender Recovery pruning configuration. 
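// Context for the relaxation below: the TODO being removed says the
// 64-block minimum can go once senders are computed dynamically. With the
// provider now recovering missing senders on the fly (see "better sender
// recovery if not found in database" above), `SenderRecovery` can be pruned
// fully, so the min-blocks deserializer is dropped.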
- // TODO(alexey): removing min blocks restriction is possible if we start calculating the senders - // dynamically on blockchain tree unwind. - #[serde( - skip_serializing_if = "Option::is_none", - deserialize_with = "deserialize_opt_prune_mode_with_min_blocks::<64, _>" - )] + #[serde(skip_serializing_if = "Option::is_none")] pub sender_recovery: Option, /// Transaction Lookup pruning configuration. #[serde(skip_serializing_if = "Option::is_none")] @@ -104,7 +99,7 @@ impl PruneModes { } impl_prune_parts!( - (sender_recovery, SenderRecovery, Some(64)), + (sender_recovery, SenderRecovery, None), (transaction_lookup, TransactionLookup, None), (receipts, Receipts, Some(64)), (account_history, AccountHistory, Some(64)), From ca3753d53f668d8005579b685bcc02797547b8e2 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 5 Sep 2023 21:12:42 +0200 Subject: [PATCH 610/722] feat: add blob costs to cost value (#4489) --- crates/primitives/src/transaction/mod.rs | 9 ++++----- crates/transaction-pool/src/traits.rs | 13 ++++++++++++- 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index ade66d1d9a75..4d2e5d06c32c 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1,6 +1,5 @@ use crate::{ compression::{TRANSACTION_COMPRESSOR, TRANSACTION_DECOMPRESSOR}, - constants::eip4844::DATA_GAS_PER_BLOB, keccak256, Address, Bytes, TxHash, H256, }; pub use access_list::{AccessList, AccessListItem, AccessListWithGasUsed}; @@ -236,10 +235,10 @@ impl Transaction { /// Returns the blob gas used for all blobs of the EIP-4844 transaction if it is an EIP-4844 /// transaction. /// - /// This is the number of blobs times the [DATA_GAS_PER_BLOB] a single blob consumes. - pub fn blob_gas_used(&self) -> Option { - let tx = self.as_eip4844()?; - Some(tx.blob_versioned_hashes.len() as u128 * DATA_GAS_PER_BLOB as u128) + /// This is the number of blobs times the + /// [DATA_GAS_PER_BLOB](crate::constants::eip4844::DATA_GAS_PER_BLOB) a single blob consumes. + pub fn blob_gas_used(&self) -> Option { + self.as_eip4844().map(TxEip4844::blob_gas) } /// Return the max priority fee per gas if the transaction is an EIP-1559 transaction, and diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 49badfd050aa..71ec7c03a46f 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -629,6 +629,8 @@ pub trait PoolTransaction: /// /// For EIP-1559 transactions: `max_fee_per_gas * gas_limit + tx_value`. /// For legacy transactions: `gas_price * gas_limit + tx_value`. + /// For EIP-4844 blob transactions: `max_fee_per_gas * gas_limit + tx_value + + /// max_blob_fee_per_gas * blob_gas_used`. fn cost(&self) -> U256; /// Amount of gas that should be used in executing this transaction. This is paid up-front. @@ -721,6 +723,8 @@ pub struct EthPooledTransaction { /// For EIP-1559 transactions: `max_fee_per_gas * gas_limit + tx_value`. /// For legacy transactions: `gas_price * gas_limit + tx_value`. + /// For EIP-4844 blob transactions: `max_fee_per_gas * gas_limit + tx_value + + /// max_blob_fee_per_gas * blob_gas_used`. 
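// Illustrative arithmetic for the blob case (all numbers hypothetical): a
// blob tx with gas_limit = 21_000, max_fee_per_gas = 100 gwei, value = 1 ETH
// and a single blob (blob_gas_used = DATA_GAS_PER_BLOB = 131_072) priced at
// max_blob_fee_per_gas = 1 gwei reserves
//     100e9 * 21_000 + 1e18 + 1e9 * 131_072
//   = 2_100_000e9  + 1e18 + 131_072e9
//   ≈ 1.002231072e18 wei.
// This is the worst-case debit used for balance checks, not the fee that
// will actually be charged.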
pub(crate) cost: U256, /// The blob side car this transaction @@ -754,7 +758,12 @@ impl EthPooledTransaction { U256::from(t.max_fee_per_gas) * U256::from(t.gas_limit) } }; - let cost = gas_cost + U256::from(transaction.value()); + let mut cost = gas_cost + U256::from(transaction.value()); + + if let Some(blob_tx) = transaction.as_eip4844() { + // add max blob cost + cost += U256::from(blob_tx.max_fee_per_gas * blob_tx.blob_gas() as u128); + } Self { transaction, cost, blob_sidecar } } @@ -806,6 +815,8 @@ impl PoolTransaction for EthPooledTransaction { /// /// For EIP-1559 transactions: `max_fee_per_gas * gas_limit + tx_value`. /// For legacy transactions: `gas_price * gas_limit + tx_value`. + /// For EIP-4844 blob transactions: `max_fee_per_gas * gas_limit + tx_value + + /// max_blob_fee_per_gas * blob_gas_used`. fn cost(&self) -> U256 { self.cost } From cd7e1135d88bf3fb3dd23b1778d7d7c204d77119 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 5 Sep 2023 21:26:01 +0200 Subject: [PATCH 611/722] feat: enforce no nonce gaps for eip-4844 (#4487) Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> --- crates/rpc/rpc/src/eth/error.rs | 3 + crates/transaction-pool/src/error.rs | 4 ++ crates/transaction-pool/src/pool/txpool.rs | 71 +++++++++++++++++++-- crates/transaction-pool/src/validate/mod.rs | 6 ++ 4 files changed, 80 insertions(+), 4 deletions(-) diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index 6a79f59e191d..a2b0e3812683 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -514,6 +514,9 @@ impl From for RpcPoolError { InvalidPoolTransactionError::Underpriced => RpcPoolError::Underpriced, InvalidPoolTransactionError::Other(err) => RpcPoolError::PoolTransactionError(err), InvalidPoolTransactionError::Eip4844(err) => RpcPoolError::Eip4844(err), + InvalidPoolTransactionError::Overdraft => { + RpcPoolError::Invalid(RpcInvalidTransactionError::InsufficientFunds) + } } } } diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs index 8b57b61164f2..1f0442833d41 100644 --- a/crates/transaction-pool/src/error.rs +++ b/crates/transaction-pool/src/error.rs @@ -169,6 +169,9 @@ pub enum InvalidPoolTransactionError { /// Thrown if the transaction's fee is below the minimum fee #[error("transaction underpriced")] Underpriced, + /// Thrown if the transaction's would require an account to be overdrawn + #[error("transaction overdraws from account")] + Overdraft, /// Eip-4844 related errors #[error(transparent)] Eip4844(#[from] Eip4844PoolTransactionError), @@ -228,6 +231,7 @@ impl InvalidPoolTransactionError { // local setting false } + InvalidPoolTransactionError::Overdraft => false, InvalidPoolTransactionError::Other(err) => err.is_bad_transaction(), InvalidPoolTransactionError::Eip4844(eip4844_err) => { match eip4844_err { diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 98e07d39cab1..d3e84ffafb04 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -1,7 +1,7 @@ //! The internal transaction pool implementation. 
use crate::{ config::TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, - error::{InvalidPoolTransactionError, PoolError}, + error::{Eip4844PoolTransactionError, InvalidPoolTransactionError, PoolError}, identifier::{SenderId, TransactionId}, metrics::TxPoolMetrics, pool::{ @@ -396,10 +396,10 @@ impl TxPool { Ok(res) } - Err(e) => { + Err(err) => { // Update invalid transactions metric self.metrics.invalid_transactions.increment(1); - match e { + match err { InsertErr::Underpriced { existing, transaction: _ } => { Err(PoolError::ReplacementUnderpriced(existing)) } @@ -420,6 +420,16 @@ impl TxPool { *transaction.hash(), InvalidPoolTransactionError::ExceedsGasLimit(block_gas_limit, tx_gas_limit), )), + InsertErr::BlobTxHasNonceGap { transaction } => { + Err(PoolError::InvalidTransaction( + *transaction.hash(), + Eip4844PoolTransactionError::Eip4844NonceGap.into(), + )) + } + InsertErr::Overdraft { transaction } => Err(PoolError::InvalidTransaction( + *transaction.hash(), + InvalidPoolTransactionError::Overdraft, + )), } } } @@ -1065,6 +1075,39 @@ impl AllTransactions { Ok(transaction) } + /// Enforces additional constraints for blob transactions before attempting to insert: + /// - new blob transactions must not have any nonce gaps + /// - blob transactions cannot go into overdraft + fn ensure_valid_blob_transaction( + &self, + transaction: ValidPoolTransaction, + on_chain_balance: U256, + ancestor: Option, + ) -> Result, InsertErr> { + if let Some(ancestor) = ancestor { + let Some(tx) = self.txs.get(&ancestor) else { + // ancestor tx is missing, so we can't insert the new blob + return Err(InsertErr::BlobTxHasNonceGap { transaction: Arc::new(transaction) }) + }; + if tx.state.has_nonce_gap() { + // the ancestor transaction already has a nonce gap, so we can't insert the new + // blob + return Err(InsertErr::BlobTxHasNonceGap { transaction: Arc::new(transaction) }) + } + + // check if the new blob would go into overdraft + if tx.next_cumulative_cost() + transaction.cost() > on_chain_balance { + // the transaction would go into overdraft + return Err(InsertErr::Overdraft { transaction: Arc::new(transaction) }) + } + } else if transaction.cost() > on_chain_balance { + // the transaction would go into overdraft + return Err(InsertErr::Overdraft { transaction: Arc::new(transaction) }) + } + + Ok(transaction) + } + /// Returns true if the replacement candidate is underpriced and can't replace the existing /// transaction. 
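// A worked scenario for the rules in `ensure_valid_blob_transaction` above
// (all values hypothetical): with on_chain_nonce = 5 and an on-chain
// balance of 1 ETH,
//  - a new blob tx at nonce 7 is rejected with `BlobTxHasNonceGap` unless a
//    pooled ancestor at nonce 6 exists and itself has no nonce gap;
//  - a new blob tx at nonce 6 costing 0.4 ETH is accepted only if the
//    pooled nonce-5 ancestor's `next_cumulative_cost()` plus 0.4 ETH stays
//    within the 1 ETH balance, otherwise it is rejected with `Overdraft`;
//  - with no ancestor (the tx is at nonce 5, the next expected nonce), its
//    own cost alone must not exceed the balance.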
#[inline] @@ -1122,6 +1165,10 @@ impl AllTransactions { /// These can include: /// - closing nonce gaps of descendant transactions /// - enough balance updates + /// + /// Note: For EIP-4844 blob transactions additional constraints are enforced: + /// - new blob transactions must not have any nonce gaps + /// - blob transactions cannot go into overdraft pub(crate) fn insert_tx( &mut self, transaction: ValidPoolTransaction, @@ -1130,18 +1177,29 @@ impl AllTransactions { ) -> InsertResult { assert!(on_chain_nonce <= transaction.nonce(), "Invalid transaction"); - let transaction = Arc::new(self.ensure_valid(transaction)?); + let mut transaction = self.ensure_valid(transaction)?; + let inserted_tx_id = *transaction.id(); let mut state = TxState::default(); let mut cumulative_cost = U256::ZERO; let mut updates = Vec::new(); + // identifier of the ancestor transaction, will be None if the transaction is the next tx of + // the sender let ancestor = TransactionId::ancestor( transaction.transaction.nonce(), on_chain_nonce, inserted_tx_id.sender, ); + // before attempting to insert a blob transaction, we need to ensure that additional + // constraints are met + if transaction.is_eip4844() { + transaction = + self.ensure_valid_blob_transaction(transaction, on_chain_balance, ancestor)?; + } + let transaction = Arc::new(transaction); + // If there's no ancestor tx then this is the next transaction. if ancestor.is_none() { state.insert(TxState::NO_NONCE_GAPS); @@ -1341,6 +1399,11 @@ pub(crate) enum InsertErr { transaction: Arc>, existing: TxHash, }, + /// Attempted to insert a blob transaction with a nonce gap + BlobTxHasNonceGap { transaction: Arc> }, + /// Attempted to insert a transaction that would overdraft the sender's balance at the time of + /// insertion. + Overdraft { transaction: Arc> }, /// The transactions feeCap is lower than the chain's minimum fee requirement. /// /// See also [`MIN_PROTOCOL_BASE_FEE`] diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index d99100961a87..1a302d14e604 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -285,6 +285,12 @@ impl ValidPoolTransaction { self.origin.is_local() } + /// Whether the transaction is an EIP-4844 blob transaction. + #[inline] + pub fn is_eip4844(&self) -> bool { + self.transaction.is_eip4844() + } + /// The heap allocated size of this transaction. 
pub(crate) fn size(&self) -> usize { self.transaction.size() From ab0fb4147d16d2bf6c996da2d4e3935e8c66eef9 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 5 Sep 2023 17:57:14 -0400 Subject: [PATCH 612/722] fix(primitives): map cancun to timestamp fork (#4491) --- crates/primitives/src/chain/spec.rs | 30 +++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index 6b4bfffa0d2d..39782cdd3219 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -462,7 +462,7 @@ impl ChainSpec { impl From for ChainSpec { fn from(genesis: Genesis) -> Self { // Block-based hardforks - let hardfork_opts = vec![ + let hardfork_opts = [ (Hardfork::Homestead, genesis.config.homestead_block), (Hardfork::Dao, genesis.config.dao_fork_block), (Hardfork::Tangerine, genesis.config.eip150_block), @@ -494,11 +494,16 @@ impl From for ChainSpec { } // Time-based hardforks - let time_hardforks = genesis - .config - .shanghai_time - .map(|time| (Hardfork::Shanghai, ForkCondition::Timestamp(time))) - .into_iter() + let time_hardfork_opts = [ + (Hardfork::Shanghai, genesis.config.shanghai_time), + (Hardfork::Cancun, genesis.config.cancun_time), + ]; + + let time_hardforks = time_hardfork_opts + .iter() + .filter_map(|(hardfork, opt)| { + opt.map(|time| (*hardfork, ForkCondition::Timestamp(time))) + }) .collect::>(); hardforks.extend(time_hardforks); @@ -1751,4 +1756,17 @@ Post-merge hard forks (timestamp based): .unwrap(); assert_eq!(acc.balance, U256::from(1)); } + + #[test] + fn test_parse_cancun_genesis_json() { + let s = r#"{"config":{"ethash":{},"chainId":1337,"homesteadBlock":0,"eip150Block":0,"eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0,"istanbulBlock":0,"berlinBlock":0,"londonBlock":0,"terminalTotalDifficulty":0,"terminalTotalDifficultyPassed":true,"shanghaiTime":0,"cancunTime":4661},"nonce":"0x0","timestamp":"0x0","extraData":"0x","gasLimit":"0x4c4b40","difficulty":"0x1","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"658bdf435d810c91414ec09147daa6db62406379":{"balance":"0x487a9a304539440000"},"aa00000000000000000000000000000000000000":{"code":"0x6042","storage":{"0x0000000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000000","0x0100000000000000000000000000000000000000000000000000000000000000":"0x0100000000000000000000000000000000000000000000000000000000000000","0x0200000000000000000000000000000000000000000000000000000000000000":"0x0200000000000000000000000000000000000000000000000000000000000000","0x0300000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000303"},"balance":"0x1","nonce":"0x1"},"bb00000000000000000000000000000000000000":{"code":"0x600154600354","storage":{"0x0000000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000000","0x0100000000000000000000000000000000000000000000000000000000000000":"0x0100000000000000000000000000000000000000000000000000000000000000","0x0200000000000000000000000000000000000000000000000000000000000000":"0x0200000000000000000000000000000000000000000000000000000000000000","0x0300000000000000000000000000000000000000000000000000000000000
000":"0x0000000000000000000000000000000000000000000000000000000000000303"},"balance":"0x2","nonce":"0x1"}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","baseFeePerGas":"0x3b9aca00"}"#; + let genesis: Genesis = serde_json::from_str(s).unwrap(); + let acc = genesis + .alloc + .get(&"0xaa00000000000000000000000000000000000000".parse::
().unwrap()) + .unwrap(); + assert_eq!(acc.balance, U256::from(1)); + // assert that the cancun time was picked up + assert_eq!(genesis.config.cancun_time, Some(4661)); + } } From ba850169b4e17d313c1567ca1a2b4f981977c2fe Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 5 Sep 2023 17:57:48 -0400 Subject: [PATCH 613/722] feat(engine): enable v3 endpoints (#4490) --- crates/rpc/rpc-engine-api/src/engine_api.rs | 25 +++++++++++---------- 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 136e589cd1ee..30fb4826c752 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -472,11 +472,13 @@ where /// See also async fn new_payload_v3( &self, - _payload: ExecutionPayloadV3, - _versioned_hashes: Vec, - _parent_beacon_block_root: H256, + payload: ExecutionPayloadV3, + versioned_hashes: Vec, + parent_beacon_block_root: H256, ) -> RpcResult { - Err(jsonrpsee_types::error::ErrorCode::MethodNotFound.into()) + trace!(target: "rpc::engine", "Serving engine_newPayloadV3"); + Ok(EngineApi::new_payload_v3(self, payload, versioned_hashes, parent_beacon_block_root) + .await?) } /// Handler for `engine_forkchoiceUpdatedV1` @@ -508,10 +510,11 @@ where /// See also async fn fork_choice_updated_v3( &self, - _fork_choice_state: ForkchoiceState, - _payload_attributes: Option, + fork_choice_state: ForkchoiceState, + payload_attributes: Option, ) -> RpcResult { - Err(jsonrpsee_types::error::ErrorCode::MethodNotFound.into()) + trace!(target: "rpc::engine", "Serving engine_forkchoiceUpdatedV3"); + Ok(EngineApi::fork_choice_updated_v3(self, fork_choice_state, payload_attributes).await?) } /// Handler for `engine_getPayloadV1` @@ -553,11 +556,9 @@ where /// /// Note: /// > Provider software MAY stop the corresponding build process after serving this call. - async fn get_payload_v3( - &self, - _payload_id: PayloadId, - ) -> RpcResult { - Err(jsonrpsee_types::error::ErrorCode::MethodNotFound.into()) + async fn get_payload_v3(&self, payload_id: PayloadId) -> RpcResult { + trace!(target: "rpc::engine", "Serving engine_getPayloadV3"); + Ok(EngineApi::get_payload_v3(self, payload_id).await?) } /// Handler for `engine_getPayloadBodiesByHashV1` From 82f0fe1a5e6a87a0db3bab810fce44ee7b0e2bd4 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 5 Sep 2023 17:58:13 -0400 Subject: [PATCH 614/722] fix(rpc-types): do not flatten in ExecutionPayloadV3 (#4492) --- crates/payload/builder/src/payload.rs | 2 +- crates/rpc/rpc-types/src/eth/engine/payload.rs | 11 +++++++++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/crates/payload/builder/src/payload.rs b/crates/payload/builder/src/payload.rs index 3b5fa7b5a67a..57d0dceff18d 100644 --- a/crates/payload/builder/src/payload.rs +++ b/crates/payload/builder/src/payload.rs @@ -98,7 +98,7 @@ impl From for ExecutionPayloadEnvelopeV3 { let BuiltPayload { block, fees, sidecars, .. 
} = value; ExecutionPayloadEnvelopeV3 { - payload_inner: block.into(), + execution_payload: block.into(), block_value: fees, // From the engine API spec: // diff --git a/crates/rpc/rpc-types/src/eth/engine/payload.rs b/crates/rpc/rpc-types/src/eth/engine/payload.rs index 3cefc20bf465..6b53c4e84cd2 100644 --- a/crates/rpc/rpc-types/src/eth/engine/payload.rs +++ b/crates/rpc/rpc-types/src/eth/engine/payload.rs @@ -107,8 +107,7 @@ impl ExecutionPayloadEnvelopeV2 { #[serde(rename_all = "camelCase")] pub struct ExecutionPayloadEnvelopeV3 { /// Execution payload V3 - #[serde(flatten)] - pub payload_inner: ExecutionPayloadV3, + pub execution_payload: ExecutionPayloadV3, /// The expected value to be received by the feeRecipient in wei pub block_value: U256, /// The blobs, commitments, and proofs associated with the executed payload. @@ -891,4 +890,12 @@ mod tests { let payload: ExecutionPayloadV3 = serde_json::from_str(s).unwrap(); assert_eq!(serde_json::to_string(&payload).unwrap(), s); } + + #[test] + fn serde_roundtrip_execution_payload_envelope_v3() { + // pulled from a geth response getPayloadV3 in hive tests + let response = r#"{"executionPayload":{"parentHash":"0xe927a1448525fb5d32cb50ee1408461a945ba6c39bd5cf5621407d500ecc8de9","feeRecipient":"0x0000000000000000000000000000000000000000","stateRoot":"0x10f8a0830000e8edef6d00cc727ff833f064b1950afd591ae41357f97e543119","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prevRandao":"0xe0d8b4521a7da1582a713244ffb6a86aa1726932087386e2dc7973f43fc6cb24","blockNumber":"0x1","gasLimit":"0x2ffbd2","gasUsed":"0x0","timestamp":"0x1235","extraData":"0xd883010d00846765746888676f312e32312e30856c696e7578","baseFeePerGas":"0x342770c0","blockHash":"0x44d0fa5f2f73a938ebb96a2a21679eb8dea3e7b7dd8fd9f35aa756dda8bf0a8a","transactions":[],"withdrawals":[],"blobGasUsed":"0x0","excessBlobGas":"0x0"},"blockValue":"0x0","blobsBundle":{"commitments":[],"proofs":[],"blobs":[]},"shouldOverrideBuilder":false}"#; + let envelope: ExecutionPayloadEnvelopeV3 = serde_json::from_str(response).unwrap(); + assert_eq!(serde_json::to_string(&envelope).unwrap(), response); + } } From 6299c26b56edc245e622aef88914995190f78215 Mon Sep 17 00:00:00 2001 From: Alessandro Mazza <121622391+alessandromazza98@users.noreply.github.com> Date: Wed, 6 Sep 2023 14:38:15 +0200 Subject: [PATCH 615/722] add metrics counter for finished spawned tasks (#4481) --- crates/tasks/src/lib.rs | 20 ++++++++++++++++---- crates/tasks/src/metrics.rs | 23 +++++++++++++++++++++-- 2 files changed, 37 insertions(+), 6 deletions(-) diff --git a/crates/tasks/src/lib.rs b/crates/tasks/src/lib.rs index 1d27e1477db6..d4c42b76cfbd 100644 --- a/crates/tasks/src/lib.rs +++ b/crates/tasks/src/lib.rs @@ -22,6 +22,7 @@ use futures_util::{ future::{select, BoxFuture}, pin_mut, Future, FutureExt, TryFutureExt, }; +use metrics::IncCounterOnDrop; use std::{ any::Any, fmt::{Display, Formatter}, @@ -271,9 +272,16 @@ impl TaskExecutor { { let on_shutdown = self.on_shutdown.clone(); 
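// The wrapping below is a small RAII guard: `IncCounterOnDrop` stores the
// `Counter` and increments it in its `Drop` impl, so the "finished tasks"
// metric is bumped however the future ends, whether it runs to completion,
// is dropped when the shutdown branch of `select` wins, or unwinds on a
// panic.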
- let task = async move { - pin_mut!(fut); - let _ = select(on_shutdown, fut).await; + // Clone only the specific counter that we need. + let finished_regular_tasks_metrics = self.metrics.finished_regular_tasks.clone(); + // Wrap the original future to increment the finished tasks counter upon completion + let task = { + async move { + // Create an instance of IncCounterOnDrop with the counter to increment + let _inc_counter_on_drop = IncCounterOnDrop::new(finished_regular_tasks_metrics); + pin_mut!(fut); + let _ = select(on_shutdown, fut).await; + } } .in_current_span(); @@ -341,7 +349,11 @@ impl TaskExecutor { }) .in_current_span(); + // Clone only the specific counter that we need. + let finished_critical_tasks_metrics = self.metrics.finished_critical_tasks.clone(); let task = async move { + // Create an instance of IncCounterOnDrop with the counter to increment + let _inc_counter_on_drop = IncCounterOnDrop::new(finished_critical_tasks_metrics); pin_mut!(task); let _ = select(on_shutdown, task).await; }; @@ -403,7 +415,7 @@ impl TaskExecutor { impl TaskSpawner for TaskExecutor { fn spawn(&self, fut: BoxFuture<'static, ()>) -> JoinHandle<()> { - self.metrics.inc_regular_task(); + self.metrics.inc_regular_tasks(); self.spawn(fut) } diff --git a/crates/tasks/src/metrics.rs b/crates/tasks/src/metrics.rs index 5fa6c252f657..08a557d8f544 100644 --- a/crates/tasks/src/metrics.rs +++ b/crates/tasks/src/metrics.rs @@ -7,9 +7,12 @@ use reth_metrics::{metrics::Counter, Metrics}; pub struct TaskExecutorMetrics { /// Number of spawned critical tasks pub(crate) critical_tasks: Counter, - + /// Number of finished spawned critical tasks + pub(crate) finished_critical_tasks: Counter, /// Number of spawned regular tasks pub(crate) regular_tasks: Counter, + /// Number of finished spawned regular tasks + pub(crate) finished_regular_tasks: Counter, } impl TaskExecutorMetrics { @@ -17,7 +20,23 @@ impl TaskExecutorMetrics { self.critical_tasks.increment(1); } - pub(crate) fn inc_regular_task(&self) { + pub(crate) fn inc_regular_tasks(&self) { self.regular_tasks.increment(1); } } + +/// Helper type for increasing counters even if a task fails. +pub struct IncCounterOnDrop(Counter); + +impl IncCounterOnDrop { + /// Create a new `IncCounterOnDrop`. 
+ pub fn new(counter: Counter) -> Self { + IncCounterOnDrop(counter) + } +} + +impl Drop for IncCounterOnDrop { + fn drop(&mut self) { + self.0.increment(1); + } +} From 422d930914ed193b3e217a1ffdd390f90e28290f Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 6 Sep 2023 08:48:15 -0400 Subject: [PATCH 616/722] feat: support blobs in eth_sendRawTransaction (#4495) --- crates/net/eth-wire/testdata/rpc_blob_transaction | 1 + crates/net/eth-wire/tests/pooled_transactions.rs | 12 +++++++++++- crates/primitives/src/transaction/pooled.rs | 10 +++++++++- crates/rpc/rpc/src/eth/api/transactions.rs | 4 ++-- crates/rpc/rpc/src/eth/utils.rs | 14 ++++++++------ crates/rpc/rpc/src/trace.rs | 2 +- 6 files changed, 32 insertions(+), 11 deletions(-) create mode 100644 crates/net/eth-wire/testdata/rpc_blob_transaction diff --git a/crates/net/eth-wire/testdata/rpc_blob_transaction b/crates/net/eth-wire/testdata/rpc_blob_transaction new file mode 100644 index 000000000000..36232f1c1fb5 --- /dev/null +++ b/crates/net/eth-wire/testdata/rpc_blob_transaction @@ -0,0 +1 @@ +03fa0200fdf88f0780843b9aca008506fc23ac00830186a09400000000000000000000000000000000000001008080c001e1a0010657f37554c781402a22917dee2f75def7ab966d7b770905398eba3c44401401a0840650aa8f74d2b07f40067dc33b715078d73422f01da17abdbd11e02bbdfda9a04b2260f6022bf53eadb337b3e59514936f7317d872defb891a708ee279bdca90fa020004ba0200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
[zero-filled hex blob elided]
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
[zero-filled data blob: ~18 lines consisting entirely of `0` characters, elided]
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
[hex-encoded blob-transaction test fixture (testdata/rpc_blob_transaction): several very long lines of mostly zero bytes, elided]
diff --git a/crates/net/eth-wire/tests/pooled_transactions.rs b/crates/net/eth-wire/tests/pooled_transactions.rs
index 66dbdd001985..a204a93e8cfb 100644
--- a/crates/net/eth-wire/tests/pooled_transactions.rs
+++ b/crates/net/eth-wire/tests/pooled_transactions.rs
@@ -1,6 +1,6 @@
 //! Decoding tests for [`PooledTransactions`]
 use reth_eth_wire::PooledTransactions;
-use reth_primitives::{hex, PooledTransactionsElement};
+use reth_primitives::{hex, Bytes, PooledTransactionsElement};
 use reth_rlp::Decodable;
 use std::{fs, path::PathBuf};
 
@@ -21,3 +21,13 @@ fn decode_blob_transaction_data() {
     let hex_data = hex::decode(data.trim()).unwrap();
     let _txs = PooledTransactionsElement::decode(&mut &hex_data[..]).unwrap();
 }
+
+#[test]
+fn decode_blob_rpc_transaction() {
+    // test data pulled from hive test that sends blob transactions
+    let network_data_path =
+        PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("testdata/rpc_blob_transaction");
+    let data = fs::read_to_string(network_data_path).expect("Unable to read file");
+    let hex_data = Bytes::from(hex::decode(data.trim()).unwrap());
+    let _txs = PooledTransactionsElement::decode_enveloped(hex_data).unwrap();
+}
diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs
index e34d05725338..8c5df265750e 100644
--- a/crates/primitives/src/transaction/pooled.rs
+++ b/crates/primitives/src/transaction/pooled.rs
@@ -394,7 +394,7 @@ impl PooledTransactionsElementEcRecovered {
         self.transaction
     }
 
-    /// Transform back to [`PooledTransactionsElement`]
+    /// Transform back to [`TransactionSignedEcRecovered`]
     pub fn into_ecrecovered_transaction(self) -> TransactionSignedEcRecovered {
         let (tx, signer) = self.into_components();
         tx.into_ecrecovered_transaction(signer)
@@ -414,3 +414,11 @@ impl PooledTransactionsElementEcRecovered {
         Self { transaction, signer }
     }
 }
+
+impl From<TransactionSignedEcRecovered> for PooledTransactionsElementEcRecovered {
+    fn from(tx: TransactionSignedEcRecovered) -> Self {
+        let signer = tx.signer;
+        let transaction = tx.signed_transaction.into();
+        Self { transaction, signer }
+    }
+}
diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs
index c5b07d7ab67d..7d2a0e6adb77 100644
--- a/crates/rpc/rpc/src/eth/api/transactions.rs
+++ b/crates/rpc/rpc/src/eth/api/transactions.rs
@@ -14,7 +14,7 @@ use crate::{
 use async_trait::async_trait;
 use reth_network_api::NetworkInfo;
 use reth_primitives::{
-    Address, BlockId, BlockNumberOrTag, Bytes, FromRecoveredTransaction, Header,
+    Address, BlockId, BlockNumberOrTag, Bytes, FromRecoveredPooledTransaction, Header,
     IntoRecoveredTransaction, Receipt, SealedBlock,
     TransactionKind::{Call, Create},
     TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, H256, U128, U256, U64,
@@ -504,7 +504,7 @@ where
         let recovered =
             signed_tx.into_ecrecovered().ok_or(EthApiError::InvalidTransactionSignature)?;
 
-        let pool_transaction = <Pool::Transaction>::from_recovered_transaction(recovered);
+        let pool_transaction = <Pool::Transaction>::from_recovered_transaction(recovered.into());
 
         // submit the transaction to the pool with a `Local` origin
         let hash = self.pool().add_transaction(TransactionOrigin::Local, pool_transaction).await?;
diff --git a/crates/rpc/rpc/src/eth/utils.rs b/crates/rpc/rpc/src/eth/utils.rs
index f4a149514877..9aa1fb752635 100644
--- a/crates/rpc/rpc/src/eth/utils.rs
+++ b/crates/rpc/rpc/src/eth/utils.rs
@@ -1,18 +1,20 @@
 //! Commonly used code snippets
 
 use crate::eth::error::{EthApiError, EthResult};
-use reth_primitives::{Bytes, TransactionSigned, TransactionSignedEcRecovered};
+use reth_primitives::{Bytes, PooledTransactionsElement, PooledTransactionsElementEcRecovered};
 
-/// Recovers a [TransactionSignedEcRecovered] from an enveloped encoded byte stream.
+/// Recovers a [PooledTransactionsElementEcRecovered] from an enveloped encoded byte stream.
 ///
-/// See [TransactionSigned::decode_enveloped]
-pub(crate) fn recover_raw_transaction(data: Bytes) -> EthResult<TransactionSignedEcRecovered> {
+/// See [PooledTransactionsElement::decode_enveloped]
+pub(crate) fn recover_raw_transaction(
+    data: Bytes,
+) -> EthResult<PooledTransactionsElementEcRecovered> {
     if data.is_empty() {
         return Err(EthApiError::EmptyRawTransactionData)
     }
 
-    let transaction = TransactionSigned::decode_enveloped(data)
+    let transaction = PooledTransactionsElement::decode_enveloped(data)
         .map_err(|_| EthApiError::FailedToDecodeSignedTransaction)?;
 
-    transaction.into_ecrecovered().ok_or(EthApiError::InvalidTransactionSignature)
+    transaction.try_into_ecrecovered().or(Err(EthApiError::InvalidTransactionSignature))
 }
diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs
index 494741097c7b..bca7059b752c 100644
--- a/crates/rpc/rpc/src/trace.rs
+++ b/crates/rpc/rpc/src/trace.rs
@@ -110,7 +110,7 @@ where
             .eth_api
             .evm_env_at(block_id.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)))
             .await?;
-        let tx = tx_env_with_recovered(&tx);
+        let tx = tx_env_with_recovered(&tx.into_ecrecovered_transaction());
         let env = Env { cfg, block, tx };
 
         let config = tracing_config(&trace_types);
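
Taken together, the changes above reroute raw-transaction handling through the pooled-transaction type, so that EIP-4844 blob transactions in their network ("pooled") encoding decode correctly. As a rough sketch of the resulting decode-and-recover flow (illustrative, not part of the patch; it only uses the reth_primitives items already imported in the diffs above):

    use reth_primitives::{
        hex, Bytes, PooledTransactionsElement, PooledTransactionsElementEcRecovered,
    };

    /// Decode an enveloped (network-encoded) transaction and recover its signer.
    fn decode_and_recover(
        raw_hex: &str,
    ) -> Result<PooledTransactionsElementEcRecovered, &'static str> {
        // enveloped bytes, as submitted via `eth_sendRawTransaction`
        let data = Bytes::from(hex::decode(raw_hex.trim()).map_err(|_| "invalid hex")?);
        // handles legacy, EIP-2930/1559, and EIP-4844 (blob) pooled transactions
        let tx = PooledTransactionsElement::decode_enveloped(data)
            .map_err(|_| "failed to decode envelope")?;
        // signature recovery is fallible, hence the `try_` conversion
        tx.try_into_ecrecovered().map_err(|_| "invalid signature")
    }

The final step mirrors the new `recover_raw_transaction` helper, which maps a failed recovery to `EthApiError::InvalidTransactionSignature`.
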
From e6f933d53cbfe7440070529cd162d6a1b52d99c9 Mon Sep 17 00:00:00 2001
From: Dan Cline <6798349+Rjected@users.noreply.github.com>
Date: Wed, 6 Sep 2023 18:06:18 -0400
Subject: [PATCH 617/722] chore: remove outdated TODOs (#4498)

---
 crates/payload/builder/src/payload.rs       | 4 ----
 crates/rpc/rpc-engine-api/src/engine_api.rs | 1 -
 2 files changed, 5 deletions(-)

diff --git a/crates/payload/builder/src/payload.rs b/crates/payload/builder/src/payload.rs
index 57d0dceff18d..65e7b0ad2fa0 100644
--- a/crates/payload/builder/src/payload.rs
+++ b/crates/payload/builder/src/payload.rs
@@ -81,10 +81,6 @@ impl From<BuiltPayload> for ExecutionPayloadV1 {
     }
 }
 
 // V2 engine_getPayloadV2 response
-// TODO(rjected): we could improve this by wrapping envelope / payload types by version, so we can
-// have explicitly versioned return types for getPayload. Then BuiltPayload could essentially be a
-// builder for those types, and it would not be possible to e.g. return cancun fields for a
-// pre-cancun endpoint.
 impl From<BuiltPayload> for ExecutionPayloadEnvelopeV2 {
     fn from(value: BuiltPayload) -> Self {
         let BuiltPayload { block, fees, .. } = value;
diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs
index 30fb4826c752..2290bc321650 100644
--- a/crates/rpc/rpc-engine-api/src/engine_api.rs
+++ b/crates/rpc/rpc-engine-api/src/engine_api.rs
@@ -104,7 +104,6 @@ where
         let cancun_fields =
             CancunPayloadFields { versioned_hashes, parent_beacon_block_root };
 
-        // TODO: validate versioned hashes and figure out what to do with parent_beacon_block_root
         Ok(self.inner.beacon_consensus.new_payload(payload, Some(cancun_fields)).await?)
     }
From 685148117aab83d4b38f2e05adb8c6c96943f7db Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Thu, 7 Sep 2023 00:15:16 +0200
Subject: [PATCH 618/722] chore: bump c-kzg and add serde feature (#4499)

---
 Cargo.lock                                 | 22 ++++++++++++----------
 bin/reth/src/node/mod.rs                   |  2 +-
 crates/primitives/Cargo.toml               |  2 +-
 crates/primitives/src/constants/eip4844.rs |  2 +-
 4 files changed, 15 insertions(+), 13 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index db688335d5d7..0a5efd77de0f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -602,36 +602,37 @@ dependencies = [

[[package]]
name = "bindgen"
-version = "0.64.0"
-source = "git+https://github.com/rust-lang/rust-bindgen?rev=0de11f0a521611ac8738b7b01d19dddaf3899e66#0de11f0a521611ac8738b7b01d19dddaf3899e66"
+version = "0.65.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5"
dependencies = [
 "bitflags 1.3.2",
 "cexpr",
 "clang-sys",
 "lazy_static",
 "lazycell",
 "peeking_take_while",
 "prettyplease",
 "proc-macro2 1.0.66",
 "quote 1.0.33",
 "regex",
 "rustc-hash",
 "shlex",
 "syn 2.0.29",
]

[[package]]
name = "bindgen"
-version = "0.65.1"
+version = "0.66.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5"
+checksum = "f2b84e06fc203107bfbad243f4aba2af864eb7db3b1cf46ea0a023b0b433d2a7"
dependencies = [
- "bitflags 1.3.2",
+ "bitflags 2.4.0",
 "cexpr",
 "clang-sys",
 "lazy_static",
 "lazycell",
+ "log",
 "peeking_take_while",
 "prettyplease",
 "proc-macro2 1.0.66",
 "quote 1.0.33",
 "regex",
 "rustc-hash",
 "shlex",
 "syn 2.0.29",
+ "which",
]

@@ -934,9 +936,9 @@ dependencies = [

[[package]]
name = "c-kzg"
version = "0.1.0"
-source = "git+https://github.com/ethereum/c-kzg-4844#d35b0f3854ab114b48daa9b504f6ee085c61508a"
+source = "git+https://github.com/ethereum/c-kzg-4844#f5f6f863d475847876a2bd5ee252058d37c3a15d"
dependencies = [
- "bindgen 0.64.0 (git+https://github.com/rust-lang/rust-bindgen?rev=0de11f0a521611ac8738b7b01d19dddaf3899e66)",
+ "bindgen 0.66.1",
 "blst",
 "cc",
 "glob",
@@ -3737,7 +3739,7 @@
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b18cbf29f8ff3542ba22bdce9ac610fcb75d74bb4e2b306b2a2762242025b4f"
dependencies = [
- "bindgen 0.64.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bindgen 0.64.0",
 "errno 0.2.8",
 "libc",
]
diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs
index cd42ee81fb6a..6f66d56772d7 100644
--- a/bin/reth/src/node/mod.rs
+++ b/bin/reth/src/node/mod.rs
@@ -598,7 +598,7 @@ impl NodeCommand {
     /// `MAINNET_KZG_TRUSTED_SETUP`.
     fn kzg_settings(&self) -> eyre::Result<Arc<KzgSettings>> {
         if let Some(ref trusted_setup_file) = self.trusted_setup_file {
-            let trusted_setup = KzgSettings::load_trusted_setup_file(trusted_setup_file.into())
+            let trusted_setup = KzgSettings::load_trusted_setup_file(trusted_setup_file)
                 .map_err(LoadKzgSettingsError::KzgError)?;
             Ok(Arc::new(trusted_setup))
         } else {
diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml
index d450c419d3ce..c0601bd3dd25 100644
--- a/crates/primitives/Cargo.toml
+++ b/crates/primitives/Cargo.toml
@@ -32,7 +32,7 @@ secp256k1 = { workspace = true, default-features = false, features = [
 ] }
 
 # for eip-4844
-c-kzg = { workspace = true }
+c-kzg = { workspace = true, features = ["serde"] }
 
 # used for forkid
 crc = "3"
diff --git a/crates/primitives/src/constants/eip4844.rs b/crates/primitives/src/constants/eip4844.rs
index 751ddb7390c6..a484fd49d42b 100644
--- a/crates/primitives/src/constants/eip4844.rs
+++ b/crates/primitives/src/constants/eip4844.rs
@@ -51,7 +51,7 @@ pub static MAINNET_KZG_TRUSTED_SETUP: Lazy<Arc<KzgSettings>> = Lazy::new(|| {
 pub fn load_trusted_setup_from_bytes(bytes: &[u8]) -> Result<KzgSettings, LoadKzgSettingsError> {
     let mut file = tempfile::NamedTempFile::new().map_err(LoadKzgSettingsError::TempFileErr)?;
     file.write_all(bytes).map_err(LoadKzgSettingsError::TempFileErr)?;
-    KzgSettings::load_trusted_setup_file(file.path().into()).map_err(LoadKzgSettingsError::KzgError)
+    KzgSettings::load_trusted_setup_file(file.path()).map_err(LoadKzgSettingsError::KzgError)
 }
 
 /// Error type for loading the trusted setup.
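
Aside from the lockfile churn, the functional change in this patch is that `KzgSettings::load_trusted_setup_file` now takes a path directly instead of an owned conversion. A minimal sketch of loading a custom trusted setup the way `kzg_settings` above does (the wrapper function and its string-error handling are illustrative assumptions, including that the c-kzg error type is `Debug`-formattable):

    use std::{path::Path, sync::Arc};

    use c_kzg::KzgSettings;

    /// Load a KZG trusted setup from disk and share it behind an `Arc`,
    /// mirroring the `kzg_settings` helper in the diff above.
    fn load_settings(path: &Path) -> Result<Arc<KzgSettings>, String> {
        KzgSettings::load_trusted_setup_file(path)
            .map(Arc::new)
            .map_err(|err| format!("failed to load trusted setup: {err:?}"))
    }
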
From 59bffd411202ad7c0869868f50c2fdb500d9be97 Mon Sep 17 00:00:00 2001
From: Dan Cline <6798349+Rjected@users.noreply.github.com>
Date: Thu, 7 Sep 2023 06:57:31 -0400
Subject: [PATCH 619/722] fix: filter out pre-genesis timestamp forks (#4501)

---
 crates/net/eth-wire/src/ethstream.rs |  8 +--
 crates/primitives/src/chain/spec.rs  | 91 +++++++++++++++++++++++-----
 crates/primitives/src/forkid.rs      | 12 +++-
 3 files changed, 91 insertions(+), 20 deletions(-)

diff --git a/crates/net/eth-wire/src/ethstream.rs b/crates/net/eth-wire/src/ethstream.rs
index 70472e9e9fbd..3c06b10d4cae 100644
--- a/crates/net/eth-wire/src/ethstream.rs
+++ b/crates/net/eth-wire/src/ethstream.rs
@@ -333,7 +333,7 @@ mod tests {
     #[tokio::test]
     async fn can_handshake() {
         let genesis = H256::random();
-        let fork_filter = ForkFilter::new(Head::default(), genesis, Vec::new());
+        let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new());
 
         let status = Status {
             version: EthVersion::Eth67 as u8,
@@ -380,7 +380,7 @@ mod tests {
     #[tokio::test]
     async fn pass_handshake_on_low_td_bitlen() {
         let genesis = H256::random();
-        let fork_filter = ForkFilter::new(Head::default(), genesis, Vec::new());
+        let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new());
 
         let status = Status {
             version: EthVersion::Eth67 as u8,
@@ -427,7 +427,7 @@ mod tests {
     #[tokio::test]
     async fn fail_handshake_on_high_td_bitlen() {
         let genesis = H256::random();
-        let fork_filter = ForkFilter::new(Head::default(), genesis, Vec::new());
+        let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new());
 
         let status = Status {
             version: EthVersion::Eth67 as u8,
@@ -568,7 +568,7 @@ mod tests {
         );
 
         let genesis = H256::random();
-        let fork_filter = ForkFilter::new(Head::default(), genesis, Vec::new());
+        let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new());
 
         let status = Status {
             version: EthVersion::Eth67 as u8,
diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs
index 39782cdd3219..f73bcc98306a 100644
--- a/crates/primitives/src/chain/spec.rs
+++ b/crates/primitives/src/chain/spec.rs
@@ -339,6 +339,11 @@ impl ChainSpec {
         }
     }
 
+    /// Get the timestamp of the genesis block.
+    pub fn genesis_timestamp(&self) -> u64 {
+        self.genesis.timestamp
+    }
+
     /// Returns the final total difficulty if the given block number is after the Paris hardfork.
     ///
     /// Note: technically this would also be valid for the block before the paris upgrade, but this
@@ -405,7 +410,7 @@ impl ChainSpec {
             })
         });
 
-        ForkFilter::new(head, self.genesis_hash(), forks)
+        ForkFilter::new(head, self.genesis_hash(), self.genesis_timestamp(), forks)
     }
 
     /// Compute the [`ForkId`] for the given [`Head`] folowing eip-6122 spec
@@ -434,19 +439,22 @@ impl ChainSpec {
         }
 
         // timestamp are ALWAYS applied after the merge.
-        for (_, cond) in self.forks_iter() {
-            if let ForkCondition::Timestamp(timestamp) = cond {
-                if cond.active_at_head(head) {
-                    if timestamp != current_applied {
-                        forkhash += timestamp;
-                        current_applied = timestamp;
-                    }
-                } else {
-                    // can safely return here because we have already handled all block forks and
-                    // have handled all active timestamp forks, and set the next value to the
-                    // timestamp that is known but not active yet
-                    return ForkId { hash: forkhash, next: timestamp }
+        //
+        // this filter ensures that no block-based forks are returned
+        for timestamp in self.forks_iter().filter_map(|(_, cond)| {
+            cond.as_timestamp().filter(|time| time > &self.genesis.timestamp)
+        }) {
+            let cond = ForkCondition::Timestamp(timestamp);
+            if cond.active_at_head(head) {
+                if timestamp != current_applied {
+                    forkhash += timestamp;
+                    current_applied = timestamp;
                 }
+            } else {
+                // can safely return here because we have already handled all block forks and
+                // have handled all active timestamp forks, and set the next value to the
+                // timestamp that is known but not active yet
+                return ForkId { hash: forkhash, next: timestamp }
             }
         }
@@ -594,7 +602,7 @@ impl From<Genesis> for ChainSpec {
 }
 
 /// A helper to build custom chain specs
-#[derive(Debug, Default)]
+#[derive(Debug, Default, Clone)]
 pub struct ChainSpecBuilder {
     chain: Option<Chain>,
     genesis: Option<Genesis>,
@@ -1493,6 +1501,61 @@ Post-merge hard forks (timestamp based):
         );
     }
 
+    /// Constructs a [ChainSpec] with the given [ChainSpecBuilder], shanghai, and cancun fork
+    /// timestamps.
+    fn construct_chainspec(
+        builder: ChainSpecBuilder,
+        shanghai_time: u64,
+        cancun_time: u64,
+    ) -> ChainSpec {
+        builder
+            .with_fork(Hardfork::Shanghai, ForkCondition::Timestamp(shanghai_time))
+            .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(cancun_time))
+            .build()
+    }
+
+    /// Tests that time-based forks which are active at genesis are not included in forkid hash.
+    ///
+    /// This is based off of the test vectors here:
+    ///
+    #[test]
+    fn test_timestamp_fork_in_genesis() {
+        let timestamp = 1690475657u64;
+        let default_spec_builder = ChainSpecBuilder::default()
+            .chain(Chain::Id(1337))
+            .genesis(Genesis::default().with_timestamp(timestamp))
+            .paris_activated();
+
+        // test format: (chain spec, expected next value) - the forkhash will be determined by the
+        // genesis hash of the constructed chainspec
+        let tests = [
+            (
+                construct_chainspec(default_spec_builder.clone(), timestamp - 1, timestamp + 1),
+                timestamp + 1,
+            ),
+            (
+                construct_chainspec(default_spec_builder.clone(), timestamp, timestamp + 1),
+                timestamp + 1,
+            ),
+            (
+                construct_chainspec(default_spec_builder.clone(), timestamp + 1, timestamp + 2),
+                timestamp + 1,
+            ),
+        ];
+
+        for (spec, expected_timestamp) in tests {
+            let got_forkid = spec.fork_id(&Head { number: 0, timestamp: 0, ..Default::default() });
+            // This is slightly different from the geth test because we use the shanghai timestamp
+            // to determine whether or not to include a withdrawals root in the genesis header.
+            // This makes the genesis hash different, and as a result makes the ChainSpec fork hash
+            // different.
+            let genesis_hash = spec.genesis_hash();
+            let expected_forkid =
+                ForkId { hash: ForkHash::from(genesis_hash), next: expected_timestamp };
+            assert_eq!(got_forkid, expected_forkid);
+        }
+    }
+
     /// Checks that the fork is not active at a terminal ttd block.
     #[test]
     fn check_terminal_ttd() {
diff --git a/crates/primitives/src/forkid.rs b/crates/primitives/src/forkid.rs
index a737e8430fc3..5132a084e9f6 100644
--- a/crates/primitives/src/forkid.rs
+++ b/crates/primitives/src/forkid.rs
@@ -165,6 +165,7 @@ pub struct ForkFilter {
     /// [eip-6122]: https://eips.ethereum.org/EIPS/eip-6122
     forks: BTreeMap<ForkFilterKey, ForkHash>,
 
+    /// The current head, used to select forks that are active locally.
     head: Head,
 
     cache: Cache,
@@ -173,17 +174,22 @@ impl ForkFilter {
     /// Create the filter from provided head, genesis block hash, past forks and expected future
     /// forks.
-    pub fn new<F>(head: Head, genesis: H256, forks: F) -> Self
+    pub fn new<F>(head: Head, genesis_hash: H256, genesis_timestamp: u64, forks: F) -> Self
     where
         F: IntoIterator<Item = ForkFilterKey>,
     {
-        let genesis_fork_hash = ForkHash::from(genesis);
+        let genesis_fork_hash = ForkHash::from(genesis_hash);
         let mut forks = forks.into_iter().collect::<BTreeSet<_>>();
         forks.remove(&ForkFilterKey::Time(0));
         forks.remove(&ForkFilterKey::Block(0));
 
         let forks = forks
             .into_iter()
+            // filter out forks that are pre-genesis by timestamp
+            .filter(|key| match key {
+                ForkFilterKey::Block(_) => true,
+                ForkFilterKey::Time(time) => *time > genesis_timestamp,
+            })
             .fold(
                 (BTreeMap::from([(ForkFilterKey::Block(0), genesis_fork_hash)]), genesis_fork_hash),
                 |(mut acc, base_hash), key| {
@@ -395,6 +401,7 @@ mod tests {
         let mut filter = ForkFilter::new(
             Head { number: 0, ..Default::default() },
             GENESIS_HASH,
+            0,
             vec![
                 ForkFilterKey::Block(1_150_000),
                 ForkFilterKey::Block(1_920_000),
@@ -568,6 +575,7 @@ mod tests {
         let mut fork_filter = ForkFilter::new(
             Head { number: 0, ..Default::default() },
             GENESIS_HASH,
+            0,
             vec![ForkFilterKey::Block(b1), ForkFilterKey::Block(b2)],
         );
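
The rule this patch implements comes from EIP-6122: a timestamp-based fork that is already active at (or before) the genesis timestamp contributes neither to the fork hash nor to the announced `next` fork. Reduced to a standalone sketch with local types (illustrative, not reth's own):

    /// Simplified stand-in for reth's `ForkFilterKey`.
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    enum ForkKey {
        Block(u64),
        Time(u64),
    }

    /// Keep block forks unconditionally; keep time forks only if they activate
    /// strictly after the genesis timestamp - the same predicate used in
    /// `ForkFilter::new` and `ChainSpec::fork_id` above.
    fn relevant_forks(genesis_timestamp: u64, forks: Vec<ForkKey>) -> Vec<ForkKey> {
        forks
            .into_iter()
            .filter(|key| match key {
                ForkKey::Block(_) => true,
                ForkKey::Time(time) => *time > genesis_timestamp,
            })
            .collect()
    }

With a genesis timestamp `t`, a Shanghai time of `t - 1` or `t` is filtered out, while `t + 1` is kept and becomes the `next` value, which is exactly what the `test_timestamp_fork_in_genesis` vectors assert.
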
From 41b4f46ae1bba898c76a7ac4cf59b991fb18cd0b Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Thu, 7 Sep 2023 14:17:18 +0200
Subject: [PATCH 620/722] fix: skip optional fields in parity trace (#4503)

---
 crates/rpc/rpc-types/src/eth/trace/parity.rs | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/crates/rpc/rpc-types/src/eth/trace/parity.rs b/crates/rpc/rpc-types/src/eth/trace/parity.rs
index ecb73c232c5e..64a4ba7c6c47 100644
--- a/crates/rpc/rpc-types/src/eth/trace/parity.rs
+++ b/crates/rpc/rpc-types/src/eth/trace/parity.rs
@@ -267,14 +267,18 @@ pub struct LocalizedTransactionTrace {
     /// Hash of the block, if not pending
     ///
     /// Note: this deviates from which always returns a block number
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub block_hash: Option<H256>,
     /// Block number the transaction is included in, None if pending.
     ///
     /// Note: this deviates from which always returns a block number
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub block_number: Option<u64>,
     /// Hash of the transaction
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub transaction_hash: Option<H256>,
     /// Transaction index within the block, None if pending.
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub transaction_position: Option<u64>,
 }

From 548d7f16364b988e8b36fbcfa62bb717e9e8da5f Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Thu, 7 Sep 2023 14:20:40 +0200
Subject: [PATCH 621/722] chore: add is_empty check before read lock (#4507)

---
 crates/transaction-pool/src/pool/mod.rs | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs
index 9eb741a6a941..0b53af5658fb 100644
--- a/crates/transaction-pool/src/pool/mod.rs
+++ b/crates/transaction-pool/src/pool/mod.rs
@@ -660,6 +660,9 @@ where
     /// Removes all transactions that are present in the pool.
     pub(crate) fn retain_unknown(&self, hashes: &mut Vec<TxHash>) {
+        if hashes.is_empty() {
+            return
+        }
         let pool = self.pool.read();
         hashes.retain(|tx| !pool.contains(tx))
     }
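
The serde attribute added in the parity trace change above alters the wire format for pending traces: `None` fields are now omitted entirely rather than serialized as `null`. A self-contained sketch with a local type (illustrative, assuming serde with the derive feature and serde_json as dependencies):

    use serde::Serialize;

    #[derive(Serialize)]
    struct Trace {
        action: String,
        // omit the field entirely when it is `None` instead of emitting `null`
        #[serde(skip_serializing_if = "Option::is_none")]
        block_hash: Option<String>,
    }

    fn main() {
        let pending = Trace { action: "call".to_string(), block_hash: None };
        // prints {"action":"call"} - no "block_hash":null for pending traces
        println!("{}", serde_json::to_string(&pending).unwrap());
    }
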
From 00bebfd64d3b743bfc42c734390697111d305a8d Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Thu, 7 Sep 2023 14:21:00 +0200
Subject: [PATCH 622/722] perf: small access list perf (#4505)

---
 .../primitives/src/transaction/access_list.rs | 20 ++++++++++---------
 .../revm/revm-inspectors/src/access_list.rs   |  4 ++--
 crates/rpc/rpc/src/eth/api/call.rs            |  5 +++--
 3 files changed, 16 insertions(+), 13 deletions(-)

diff --git a/crates/primitives/src/transaction/access_list.rs b/crates/primitives/src/transaction/access_list.rs
index a86b33b69eb8..11b119898a53 100644
--- a/crates/primitives/src/transaction/access_list.rs
+++ b/crates/primitives/src/transaction/access_list.rs
@@ -48,15 +48,17 @@ pub struct AccessList(
 impl AccessList {
     /// Converts the list into a vec, expected by revm
     pub fn flattened(self) -> Vec<(Address, Vec<U256>)> {
-        self.0
-            .into_iter()
-            .map(|item| {
-                (
-                    item.address,
-                    item.storage_keys.into_iter().map(|slot| U256::from_be_bytes(slot.0)).collect(),
-                )
-            })
-            .collect()
+        self.flatten().collect()
+    }
+
+    /// Returns an iterator over the list's addresses and storage keys.
+    pub fn flatten(self) -> impl Iterator<Item = (Address, Vec<U256>)> {
+        self.0.into_iter().map(|item| {
+            (
+                item.address,
+                item.storage_keys.into_iter().map(|slot| U256::from_be_bytes(slot.0)).collect(),
+            )
+        })
     }
 
     /// Calculates a heuristic for the in-memory size of the [AccessList].
diff --git a/crates/revm/revm-inspectors/src/access_list.rs b/crates/revm/revm-inspectors/src/access_list.rs
index 41faf88a2b0f..bc9d641d7121 100644
--- a/crates/revm/revm-inspectors/src/access_list.rs
+++ b/crates/revm/revm-inspectors/src/access_list.rs
@@ -31,8 +31,8 @@ impl AccessListInspector {
             excluded: [from, to].iter().chain(precompiles.iter()).copied().collect(),
             access_list: access_list
                 .0
-                .iter()
-                .map(|v| (v.address, v.storage_keys.iter().copied().collect()))
+                .into_iter()
+                .map(|v| (v.address, v.storage_keys.into_iter().collect()))
                 .collect(),
         }
     }
diff --git a/crates/rpc/rpc/src/eth/api/call.rs b/crates/rpc/rpc/src/eth/api/call.rs
index 6bf638cf5a6d..6f27d1a3b338 100644
--- a/crates/rpc/rpc/src/eth/api/call.rs
+++ b/crates/rpc/rpc/src/eth/api/call.rs
@@ -340,7 +340,7 @@ where
 
     pub(crate) async fn create_access_list_at(
         &self,
-        request: CallRequest,
+        mut request: CallRequest,
         at: Option<BlockId>,
     ) -> EthResult<AccessList> {
         let block_id = at.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest));
@@ -373,7 +373,8 @@ where
             get_contract_address(from, nonce).into()
         };
 
-        let initial = request.access_list.clone().unwrap_or_default();
+        // can consume the list since we're not using the request anymore
+        let initial = request.access_list.take().unwrap_or_default();
 
         let precompiles = get_precompiles(&env.cfg.spec_id);
         let mut inspector = AccessListInspector::new(initial, from, to, precompiles);
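
The access-list refactor above is a small allocation win: callers that only iterate, like the inspector that collects into its own map, no longer pay for an intermediate `Vec`. The shape of the refactor reduced to a standalone example with simplified local types (illustrative, not reth's own):

    struct Item {
        address: u64,
        storage_keys: Vec<u64>,
    }

    struct List(Vec<Item>);

    impl List {
        /// Lazy form: callers drive the iteration and collect into whatever they need.
        fn flatten(self) -> impl Iterator<Item = (u64, Vec<u64>)> {
            self.0.into_iter().map(|item| (item.address, item.storage_keys))
        }

        /// Eager form kept for callers (like revm) that want a `Vec`.
        fn flattened(self) -> Vec<(u64, Vec<u64>)> {
            self.flatten().collect()
        }
    }
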
From f490f558c1e7c538f1b6ea4fb78ea6e34c61c1f0 Mon Sep 17 00:00:00 2001
From: Dan Cline <6798349+Rjected@users.noreply.github.com>
Date: Thu, 7 Sep 2023 08:25:39 -0400
Subject: [PATCH 623/722] fix(primitives): set cancun header fields if active
 at genesis (#4500)

---
 crates/primitives/src/chain/spec.rs | 74 ++++++++++++++++++++++++++---
 1 file changed, 67 insertions(+), 7 deletions(-)

diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs
index f73bcc98306a..15bc96bf269e 100644
--- a/crates/primitives/src/chain/spec.rs
+++ b/crates/primitives/src/chain/spec.rs
@@ -1,13 +1,13 @@
 use crate::{
     constants::{
         EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR, EIP1559_DEFAULT_ELASTICITY_MULTIPLIER,
-        EIP1559_INITIAL_BASE_FEE, EMPTY_WITHDRAWALS,
+        EIP1559_INITIAL_BASE_FEE, EMPTY_RECEIPTS, EMPTY_TRANSACTIONS, EMPTY_WITHDRAWALS,
     },
     forkid::ForkFilterKey,
     header::Head,
     proofs::genesis_state_root,
     Address, BlockNumber, Chain, ForkFilter, ForkHash, ForkId, Genesis, Hardfork, Header,
-    PruneBatchSizes, SealedHeader, H160, H256, U256,
+    PruneBatchSizes, SealedHeader, EMPTY_OMMER_ROOT, H160, H256, U256,
 };
 use hex_literal::hex;
 use once_cell::sync::Lazy;
@@ -304,7 +304,24 @@ impl ChainSpec {
             (self.fork(Hardfork::Shanghai).active_at_timestamp(self.genesis.timestamp))
                 .then_some(EMPTY_WITHDRAWALS);
 
+        // If Cancun is activated at genesis, we set:
+        // * parent beacon block root to 0x0
+        // * blob gas used to 0x0
+        // * excess blob gas to 0x0
+        let (parent_beacon_block_root, blob_gas_used, excess_blob_gas) =
+            if self.fork(Hardfork::Cancun).active_at_timestamp(self.genesis.timestamp) {
+                (Some(H256::zero()), Some(0), Some(0))
+            } else {
+                (None, None, None)
+            };
+
         Header {
+            parent_hash: H256::zero(),
+            number: 0,
+            transactions_root: EMPTY_TRANSACTIONS,
+            ommers_hash: EMPTY_OMMER_ROOT,
+            receipts_root: EMPTY_RECEIPTS,
+            logs_bloom: Default::default(),
             gas_limit: self.genesis.gas_limit,
             difficulty: self.genesis.difficulty,
             nonce: self.genesis.nonce,
@@ -313,9 +330,12 @@ impl ChainSpec {
             timestamp: self.genesis.timestamp,
             mix_hash: self.genesis.mix_hash,
             beneficiary: self.genesis.coinbase,
+            gas_used: Default::default(),
             base_fee_per_gas,
             withdrawals_root,
-            ..Default::default()
+            parent_beacon_block_root,
+            blob_gas_used,
+            excess_blob_gas,
         }
     }
 
@@ -728,7 +748,7 @@ impl ChainSpecBuilder {
     /// Enable Cancun at genesis.
     pub fn cancun_activated(mut self) -> Self {
-        self = self.paris_activated();
+        self = self.shanghai_activated();
         self.hardforks.insert(Hardfork::Cancun, ForkCondition::Timestamp(0));
         self
     }
@@ -1061,13 +1081,15 @@ impl DepositContract {
 #[cfg(test)]
 mod tests {
     use crate::{
-        Address, AllGenesisFormats, Chain, ChainSpec, ChainSpecBuilder, DisplayHardforks,
-        ForkCondition, ForkHash, ForkId, Genesis, Hardfork, Head, DEV, GOERLI, H256, MAINNET,
-        SEPOLIA, U256,
+        constants::EMPTY_WITHDRAWALS, Address, AllGenesisFormats, Chain, ChainSpec,
+        ChainSpecBuilder, DisplayHardforks, ForkCondition, ForkHash, ForkId, Genesis, Hardfork,
+        Head, DEV, GOERLI, H256, MAINNET, SEPOLIA, U256,
     };
     use bytes::BytesMut;
     use ethers_core::types as EtherType;
     use reth_rlp::Encodable;
+    use std::str::FromStr;
+
     fn test_fork_ids(spec: &ChainSpec, cases: &[(Head, ForkId)]) {
         for (block, expected_id) in cases {
             let computed_id = spec.fork_id(block);
@@ -1832,4 +1854,42 @@ Post-merge hard forks (timestamp based):
+
+    #[test]
+    fn test_default_cancun_header_forkhash() {
+        // set the gas limit from the hive test genesis according to the hash
+        let genesis = Genesis { gas_limit: 0x2fefd8u64, ..Default::default() };
+        let default_chainspec = ChainSpecBuilder::default()
+            .chain(Chain::Id(1337))
+            .genesis(genesis)
+            .cancun_activated()
+            .build();
+        let mut header = default_chainspec.genesis_header();
+
+        // set the state root to the same as in the hive test the hash was pulled from
+        header.state_root =
+            H256::from_str("0x62e2595e017f0ca23e08d17221010721a71c3ae932f4ea3cb12117786bb392d4")
+                .unwrap();
+
+        // shanghai is activated so we should have a withdrawals root
+        assert_eq!(header.withdrawals_root, Some(EMPTY_WITHDRAWALS));
+
+        // cancun is activated so we should have a zero parent beacon block root, zero blob gas
+        // used, and zero excess blob gas
+        assert_eq!(header.parent_beacon_block_root, Some(H256::zero()));
+        assert_eq!(header.blob_gas_used, Some(0));
+        assert_eq!(header.excess_blob_gas, Some(0));
+        println!("header: {:?}", header);
+
+        // check the genesis hash
+        let genesis_hash = header.hash_slow();
+        let expected_hash = H256::from(hex_literal::hex!(
+            "16bb7c59613a5bad3f7c04a852fd056545ade2483968d9a25a1abb05af0c4d37"
+        ));
+        assert_eq!(genesis_hash, expected_hash);
+
+        // check that the forkhash is correct
+        let expected_forkhash = ForkHash(hex_literal::hex!("8062457a"));
+        assert_eq!(ForkHash::from(genesis_hash), expected_forkhash);
+    }
 }
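
A hedged usage sketch for the genesis-header fix above: building a spec with Cancun active at genesis should now produce a genesis header with zeroed Cancun fields instead of `None`, which is what the new test asserts. The identifiers are the reth_primitives ones exercised by that test; the helper functions themselves are illustrative:

    use reth_primitives::{Chain, ChainSpec, ChainSpecBuilder, Genesis, H256};

    /// Build a chain spec with Cancun (and, transitively, Shanghai) active at genesis.
    fn cancun_genesis_spec() -> ChainSpec {
        ChainSpecBuilder::default()
            .chain(Chain::Id(1337))
            .genesis(Genesis::default())
            .cancun_activated()
            .build()
    }

    fn check() {
        let header = cancun_genesis_spec().genesis_header();
        // mirrors the assertions in `test_default_cancun_header_forkhash`
        assert_eq!(header.parent_beacon_block_root, Some(H256::zero()));
        assert_eq!(header.blob_gas_used, Some(0));
        assert_eq!(header.excess_blob_gas, Some(0));
    }

Note also that `cancun_activated` now builds on `shanghai_activated` rather than `paris_activated`, so a Cancun-at-genesis spec always carries a withdrawals root as well.
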
else { (None, None, None) }; @@ -346,8 +348,11 @@ impl ChainSpec { /// Get the initial base fee of the genesis block. pub fn initial_base_fee(&self) -> Option { + // If the base fee is set in the genesis block, we use that instead of the default. + let genesis_base_fee = self.genesis.base_fee_per_gas.unwrap_or(EIP1559_INITIAL_BASE_FEE); + // If London is activated at genesis, we set the initial base fee as per EIP-1559. - (self.fork(Hardfork::London).active_at_block(0)).then_some(EIP1559_INITIAL_BASE_FEE) + (self.fork(Hardfork::London).active_at_block(0)).then_some(genesis_base_fee) } /// Get the hash of the genesis block. @@ -1833,13 +1838,14 @@ Post-merge hard forks (timestamp based): #[test] fn test_parse_genesis_json() { - let s = r#"{"config":{"ethash":{},"chainId":1337,"homesteadBlock":0,"eip150Block":0,"eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0,"istanbulBlock":0,"berlinBlock":0,"londonBlock":0,"terminalTotalDifficulty":0,"terminalTotalDifficultyPassed":true,"shanghaiTime":0},"nonce":"0x0","timestamp":"0x0","extraData":"0x","gasLimit":"0x4c4b40","difficulty":"0x1","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"658bdf435d810c91414ec09147daa6db62406379":{"balance":"0x487a9a304539440000"},"aa00000000000000000000000000000000000000":{"code":"0x6042","storage":{"0x0000000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000000","0x0100000000000000000000000000000000000000000000000000000000000000":"0x0100000000000000000000000000000000000000000000000000000000000000","0x0200000000000000000000000000000000000000000000000000000000000000":"0x0200000000000000000000000000000000000000000000000000000000000000","0x0300000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000303"},"balance":"0x1","nonce":"0x1"},"bb00000000000000000000000000000000000000":{"code":"0x600154600354","storage":{"0x0000000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000000","0x0100000000000000000000000000000000000000000000000000000000000000":"0x0100000000000000000000000000000000000000000000000000000000000000","0x0200000000000000000000000000000000000000000000000000000000000000":"0x0200000000000000000000000000000000000000000000000000000000000000","0x0300000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000303"},"balance":"0x2","nonce":"0x1"}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","baseFeePerGas":"0x3b9aca00"}"#; + let s = 
r#"{"config":{"ethash":{},"chainId":1337,"homesteadBlock":0,"eip150Block":0,"eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0,"istanbulBlock":0,"berlinBlock":0,"londonBlock":0,"terminalTotalDifficulty":0,"terminalTotalDifficultyPassed":true,"shanghaiTime":0},"nonce":"0x0","timestamp":"0x0","extraData":"0x","gasLimit":"0x4c4b40","difficulty":"0x1","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"658bdf435d810c91414ec09147daa6db62406379":{"balance":"0x487a9a304539440000"},"aa00000000000000000000000000000000000000":{"code":"0x6042","storage":{"0x0000000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000000","0x0100000000000000000000000000000000000000000000000000000000000000":"0x0100000000000000000000000000000000000000000000000000000000000000","0x0200000000000000000000000000000000000000000000000000000000000000":"0x0200000000000000000000000000000000000000000000000000000000000000","0x0300000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000303"},"balance":"0x1","nonce":"0x1"},"bb00000000000000000000000000000000000000":{"code":"0x600154600354","storage":{"0x0000000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000000","0x0100000000000000000000000000000000000000000000000000000000000000":"0x0100000000000000000000000000000000000000000000000000000000000000","0x0200000000000000000000000000000000000000000000000000000000000000":"0x0200000000000000000000000000000000000000000000000000000000000000","0x0300000000000000000000000000000000000000000000000000000000000000":"0x0000000000000000000000000000000000000000000000000000000000000303"},"balance":"0x2","nonce":"0x1"}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","baseFeePerGas":"0x1337"}"#; let genesis: Genesis = serde_json::from_str(s).unwrap(); let acc = genesis .alloc .get(&"0xaa00000000000000000000000000000000000000".parse::
().unwrap()) .unwrap(); assert_eq!(acc.balance, U256::from(1)); + assert_eq!(genesis.base_fee_per_gas, Some(0x1337)); } #[test] diff --git a/crates/primitives/src/genesis.rs b/crates/primitives/src/genesis.rs index 0dd2e4eaf384..97aa9baf0cc6 100644 --- a/crates/primitives/src/genesis.rs +++ b/crates/primitives/src/genesis.rs @@ -38,6 +38,30 @@ pub struct Genesis { pub coinbase: Address, /// The initial state of accounts in the genesis block. pub alloc: HashMap, + // NOTE: the following fields: + // * base_fee_per_gas + // * excess_blob_gas + // * blob_gas_used + // should NOT be set in a real genesis file, but are included here for compatibility with + // consensus tests, which have genesis files with these fields populated. + /// The genesis header base fee + #[serde( + skip_serializing_if = "Option::is_none", + deserialize_with = "deserialize_stringified_u64_opt" + )] + pub base_fee_per_gas: Option, + /// The genesis header excess blob gas + #[serde( + skip_serializing_if = "Option::is_none", + deserialize_with = "deserialize_stringified_u64_opt" + )] + pub excess_blob_gas: Option, + /// The genesis header blob gas used + #[serde( + skip_serializing_if = "Option::is_none", + deserialize_with = "deserialize_stringified_u64_opt" + )] + pub blob_gas_used: Option, } impl Genesis { @@ -83,6 +107,24 @@ impl Genesis { self } + /// Set the base fee. + pub fn with_base_fee(mut self, base_fee: Option) -> Self { + self.base_fee_per_gas = base_fee; + self + } + + /// Set the excess blob gas. + pub fn with_excess_blob_gas(mut self, excess_blob_gas: Option) -> Self { + self.excess_blob_gas = excess_blob_gas; + self + } + + /// Set the blob gas used. + pub fn with_blob_gas_used(mut self, blob_gas_used: Option) -> Self { + self.blob_gas_used = blob_gas_used; + self + } + /// Add accounts to the genesis block. If the address is already present, /// the account is updated. pub fn extend_accounts( @@ -413,6 +455,10 @@ mod ethers_compat { mix_hash: genesis.mix_hash.0.into(), coinbase: genesis.coinbase.0.into(), extra_data: genesis.extra_data.0.into(), + base_fee_per_gas: genesis.base_fee_per_gas.map(|fee| fee.as_u64()), + // TODO: if/when ethers has cancun fields they should be added here + excess_blob_gas: None, + blob_gas_used: None, alloc, } } @@ -1126,6 +1172,9 @@ mod tests { timestamp: 0x123456, extra_data: Bytes::from_str("0xfafbfcfd").unwrap(), gas_limit: 0x2fefd8, + base_fee_per_gas: None, + excess_blob_gas: None, + blob_gas_used: None, alloc: HashMap::from_iter(vec![ ( Address::from_str("0xdbdbdb2cbd23b783741e8d7fcf51e459b497e4a6").unwrap(), From 1ed5ae14bfadeee3dd542dda1bd2dfd65e6ed5ee Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 7 Sep 2023 22:05:32 +0200 Subject: [PATCH 627/722] fix: ensure only canonical state is returned if requested by number/hash (#4517) --- crates/rpc/rpc/src/eth/api/mod.rs | 2 + crates/storage/provider/src/providers/mod.rs | 59 -------------------- crates/storage/provider/src/traits/state.rs | 15 +++-- 3 files changed, 13 insertions(+), 63 deletions(-) diff --git a/crates/rpc/rpc/src/eth/api/mod.rs b/crates/rpc/rpc/src/eth/api/mod.rs index e09c84969f62..35bd40637735 100644 --- a/crates/rpc/rpc/src/eth/api/mod.rs +++ b/crates/rpc/rpc/src/eth/api/mod.rs @@ -200,6 +200,8 @@ where BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + 'static, { /// Returns the state at the given [BlockId] enum. + /// + /// Note: if not [BlockNumberOrTag::Pending] then this will only return canonical state. 
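The optional genesis header fields and builder setters added above compose as in the sketch below; this assumes `Genesis` implements `Default`, and the values are illustrative only:

```rust
use reth_primitives::Genesis;

// A minimal sketch, assuming `Genesis: Default`; the builder methods are the
// ones added in this patch, the values are illustrative.
let genesis = Genesis::default()
    .with_base_fee(Some(0x1337))
    // per the NOTE above, the blob fields should stay unset outside of
    // consensus-test fixtures
    .with_excess_blob_gas(None)
    .with_blob_gas_used(None);

// With London active at genesis, `ChainSpec::initial_base_fee` now prefers
// this value over the `EIP1559_INITIAL_BASE_FEE` default.
assert_eq!(genesis.base_fee_per_gas, Some(0x1337));
```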
See also pub fn state_at_block_id(&self, at: BlockId) -> EthResult> { Ok(self.provider().state_by_block_id(at)?) } diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 4a930e9c17bf..812606a5d397 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -469,65 +469,6 @@ where self.database.latest() } - /// Returns a [StateProviderBox] indexed by the given [BlockId]. - fn state_by_block_id(&self, block_id: BlockId) -> Result> { - match block_id { - BlockId::Number(block_number) => self.state_by_block_number_or_tag(block_number), - BlockId::Hash(rpc_block_hash) => { - let block_hash = rpc_block_hash.into(); - let mut state = self.history_by_block_hash(block_hash); - - // we failed to get the state by hash, from disk, hash block be the pending block - if state.is_err() && !rpc_block_hash.require_canonical.unwrap_or(false) { - if let Ok(Some(pending)) = self.pending_state_by_hash(block_hash) { - // we found pending block by hash - state = Ok(pending) - } - } - - state - } - } - } - - /// Returns a [StateProviderBox] indexed by the given block number or tag. - fn state_by_block_number_or_tag( - &self, - number_or_tag: BlockNumberOrTag, - ) -> Result> { - match number_or_tag { - BlockNumberOrTag::Latest => self.latest(), - BlockNumberOrTag::Finalized => { - // we can only get the finalized state by hash, not by num - let hash = match self.finalized_block_hash()? { - Some(hash) => hash, - None => return Err(ProviderError::FinalizedBlockNotFound.into()), - }; - - self.state_by_block_hash(hash) - } - BlockNumberOrTag::Safe => { - // we can only get the safe state by hash, not by num - let hash = match self.safe_block_hash()? { - Some(hash) => hash, - None => return Err(ProviderError::SafeBlockNotFound.into()), - }; - - self.state_by_block_hash(hash) - } - BlockNumberOrTag::Earliest => self.history_by_block_number(0), - BlockNumberOrTag::Pending => self.pending(), - BlockNumberOrTag::Number(num) => { - let mut state = self.history_by_block_number(num); - if state.is_err() && num == self.chain_info.get_canonical_block_number() + 1 { - // we don't have the block on disk yet but the number is the pending block - state = self.pending(); - } - state - } - } - } - fn history_by_block_number(&self, block_number: BlockNumber) -> Result> { trace!(target: "providers::blockchain", ?block_number, "Getting history by block number"); self.ensure_canonical_block(block_number)?; diff --git a/crates/storage/provider/src/traits/state.rs b/crates/storage/provider/src/traits/state.rs index 139f2b7261b0..66b66bb9eb72 100644 --- a/crates/storage/provider/src/traits/state.rs +++ b/crates/storage/provider/src/traits/state.rs @@ -99,6 +99,8 @@ pub trait StateProviderFactory: BlockIdReader + Send + Sync { fn latest(&self) -> Result>; /// Returns a [StateProvider] indexed by the given [BlockId]. + /// + /// Note: if a number or hash is provided this will only look at historical(canonical) state. fn state_by_block_id(&self, block_id: BlockId) -> Result> { match block_id { BlockId::Number(block_number) => self.state_by_block_number_or_tag(block_number), @@ -107,6 +109,8 @@ pub trait StateProviderFactory: BlockIdReader + Send + Sync { } /// Returns a [StateProvider] indexed by the given block number or tag. + /// + /// Note: if a number is provided this will only look at historical(canonical) state. 
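In caller terms, the contract after this change reads roughly as in the sketch below; `factory` stands for any `StateProviderFactory` implementation and is a hypothetical binding:

```rust
use reth_primitives::BlockNumberOrTag;

// Hedged usage sketch over some `factory: impl StateProviderFactory`.
// Number and hash ids now resolve canonical (historical) state only;
// `Pending` remains the one tag that may surface in-memory tree state.
let _latest = factory.state_by_block_number_or_tag(BlockNumberOrTag::Latest)?;
let _canonical = factory.state_by_block_number_or_tag(BlockNumberOrTag::Number(100))?;
let _pending = factory.state_by_block_number_or_tag(BlockNumberOrTag::Pending)?;
```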
fn state_by_block_number_or_tag( &self, number_or_tag: BlockNumberOrTag, @@ -119,8 +123,8 @@ pub trait StateProviderFactory: BlockIdReader + Send + Sync { Some(hash) => hash, None => return Err(ProviderError::FinalizedBlockNotFound.into()), }; - - self.state_by_block_hash(hash) + // only look at historical state + self.history_by_block_hash(hash) } BlockNumberOrTag::Safe => { // we can only get the safe state by hash, not by num @@ -129,11 +133,14 @@ pub trait StateProviderFactory: BlockIdReader + Send + Sync { None => return Err(ProviderError::SafeBlockNotFound.into()), }; - self.state_by_block_hash(hash) + self.history_by_block_hash(hash) } BlockNumberOrTag::Earliest => self.history_by_block_number(0), BlockNumberOrTag::Pending => self.pending(), - BlockNumberOrTag::Number(num) => self.history_by_block_number(num), + BlockNumberOrTag::Number(num) => { + // Note: The `BlockchainProvider` could also lookup the tree for the given block number, if for example the block number is `latest + 1`, however this should only support canonical state: + self.history_by_block_number(num) + } } } From 99dada929187b3e8466f40056d9e3c232df16b69 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 8 Sep 2023 07:26:44 -0400 Subject: [PATCH 628/722] fix: check for parent root before cancun (#4524) --- crates/rpc/rpc-engine-api/src/engine_api.rs | 24 ++++++++++++++------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 2290bc321650..a96144c220bb 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -388,16 +388,24 @@ where /// After Cancun, `parentBeaconBlockRoot` field must be [Some]. /// Before Cancun, `parentBeaconBlockRoot` field must be [None]. /// + /// If the engine API message version is V1 or V2, and the payload attribute's timestamp is + /// post-Cancun, then this will return [EngineApiError::NoParentBeaconBlockRootPostCancun]. + /// + /// If the engine API message version is V3, but the `parentBeaconBlockRoot` is [None], then + /// this will return [EngineApiError::NoParentBeaconBlockRootPostCancun]. + /// /// If the payload attribute's timestamp is before the Cancun fork and the engine API message /// version is V3, then this will return [EngineApiError::UnsupportedFork]. /// - /// If the engine API message version is V1 or V2, and the payload attribute's timestamp is - /// post-Cancun, then this will return [EngineApiError::NoParentBeaconBlockRootPostCancun]. + /// This implements the following Engine API spec rules: /// - /// Implements the following Engine API spec rule: + /// 1. Client software **MUST** check that provided set of parameters and their fields strictly + /// matches the expected one and return `-32602: Invalid params` error if this check fails. + /// Any field having `null` value **MUST** be considered as not provided. /// - /// * Client software MUST return `-38005: Unsupported fork` error if the timestamp of the - /// payload does not fall within the time frame of the Cancun fork. + /// 2. Client software **MUST** return `-38005: Unsupported fork` error if the + /// `payloadAttributes` is set and the `payloadAttributes.timestamp` does not fall within the + /// time frame of the Cancun fork. 
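The V3 outcomes these rules produce, combined with the reordered checks just below, can be summarized as a small truth table; a hedged sketch using the error names from this patch:

```rust
// (cancun active at timestamp, parentBeaconBlockRoot provided) => outcome.
// The missing-root check now takes precedence over the fork check.
let _v3_outcomes = [
    ((true, true), "Ok"),
    ((true, false), "Err(NoParentBeaconBlockRootPostCancun)"),
    ((false, false), "Err(NoParentBeaconBlockRootPostCancun)"), // root checked first
    ((false, true), "Err(UnsupportedFork)"),
];
```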
fn validate_parent_beacon_block_root_presence( &self, version: EngineApiMessageVersion, @@ -416,10 +424,10 @@ where } } EngineApiMessageVersion::V3 => { - if !is_cancun { - return Err(EngineApiError::UnsupportedFork) - } else if !has_parent_beacon_block_root { + if !has_parent_beacon_block_root { return Err(EngineApiError::NoParentBeaconBlockRootPostCancun) + } else if !is_cancun { + return Err(EngineApiError::UnsupportedFork) } } }; From 83987420f1ec4090696d3aa328f538256db01b6f Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 8 Sep 2023 07:27:33 -0400 Subject: [PATCH 629/722] fix: use `blob_fee` to calculate blob_gas_used in receipts (#4523) --- crates/primitives/src/constants/eip4844.rs | 24 +++++++++---------- crates/primitives/src/header.rs | 6 ++--- crates/primitives/src/transaction/meta.rs | 2 ++ crates/rpc/rpc/src/eth/api/block.rs | 2 ++ crates/rpc/rpc/src/eth/api/transactions.rs | 3 ++- .../src/providers/database/provider.rs | 1 + 6 files changed, 22 insertions(+), 16 deletions(-) diff --git a/crates/primitives/src/constants/eip4844.rs b/crates/primitives/src/constants/eip4844.rs index a484fd49d42b..94d56a1f629c 100644 --- a/crates/primitives/src/constants/eip4844.rs +++ b/crates/primitives/src/constants/eip4844.rs @@ -1,6 +1,6 @@ //! [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#parameters) protocol constants and utils for shard Blob Transactions. -use crate::{kzg::KzgSettings, U256}; +use crate::{kzg::KzgSettings, U128}; use once_cell::sync::Lazy; use std::{io::Write, sync::Arc}; @@ -67,11 +67,11 @@ pub enum LoadKzgSettingsError { } /// Calculates the blob fee for the given excess blob gas. -pub fn blob_fee(excess_blob_gas: u64) -> U256 { +pub fn blob_fee(excess_blob_gas: u64) -> U128 { fake_exponential( - U256::from(BLOB_TX_MIN_BLOB_GASPRICE), - U256::from(excess_blob_gas), - U256::from(BLOB_GASPRICE_UPDATE_FRACTION), + U128::from(BLOB_TX_MIN_BLOB_GASPRICE), + U128::from(excess_blob_gas), + U128::from(BLOB_GASPRICE_UPDATE_FRACTION), ) } @@ -80,14 +80,14 @@ pub fn blob_fee(excess_blob_gas: u64) -> U256 { /// This is used to calculate the blob price. 
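A quick sanity check of the new `U128` code path; the import path matches the `use` added to `transactions.rs` further down in this patch:

```rust
use reth_primitives::{constants::eip4844::blob_fee, U128};

// With zero excess blob gas, `fake_exponential` collapses to the minimum
// blob gas price of 1 wei.
assert_eq!(blob_fee(0), U128::from(1));
```

Receipts then derive `blob_gas_price` from the block's `excess_blob_gas` through this function, rather than from the transaction's `max_fee_per_blob_gas` as before.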
/// /// See also -pub fn fake_exponential(factor: U256, numerator: U256, denominator: U256) -> U256 { - let mut output = U256::ZERO; +pub fn fake_exponential(factor: U128, numerator: U128, denominator: U128) -> U128 { + let mut output = U128::ZERO; let mut numerator_accum = factor.saturating_mul(denominator); - let mut i = U256::from(1u64); - while numerator_accum > U256::ZERO { + let mut i = U128::from(1u64); + while numerator_accum > U128::ZERO { output += numerator_accum; numerator_accum = numerator_accum * numerator / (denominator * i); - i += U256::from(1u64); + i += U128::from(1u64); } output / denominator } @@ -121,8 +121,8 @@ mod tests { (2, 5, 2, 23), // approximate 24.36 (1, 50000000, 2225652, 5709098764), ] { - let res = fake_exponential(U256::from(*factor), U256::from(*num), U256::from(*denom)); - assert_eq!(res, U256::from(*expected)); + let res = fake_exponential(U128::from(*factor), U128::from(*num), U128::from(*denom)); + assert_eq!(res, U128::from(*expected)); } } } diff --git a/crates/primitives/src/header.rs b/crates/primitives/src/header.rs index 69a1516312b8..3aa72842402e 100644 --- a/crates/primitives/src/header.rs +++ b/crates/primitives/src/header.rs @@ -4,7 +4,7 @@ use crate::{ keccak256, proofs::{EMPTY_LIST_HASH, EMPTY_ROOT}, BaseFeeParams, BlockBodyRoots, BlockHash, BlockNumHash, BlockNumber, Bloom, Bytes, H160, H256, - H64, U256, + H64, U128, U256, }; use bytes::{Buf, BufMut, BytesMut}; @@ -187,7 +187,7 @@ impl Header { /// Returns the blob fee for _this_ block according to the EIP-4844 spec. /// /// Returns `None` if `excess_blob_gas` is None - pub fn blob_fee(&self) -> Option { + pub fn blob_fee(&self) -> Option { self.excess_blob_gas.map(blob_fee) } @@ -196,7 +196,7 @@ impl Header { /// Returns `None` if `excess_blob_gas` is None. /// /// See also [Self::next_block_excess_blob_gas] - pub fn next_block_blob_fee(&self) -> Option { + pub fn next_block_blob_fee(&self) -> Option { self.next_block_excess_blob_gas().map(blob_fee) } diff --git a/crates/primitives/src/transaction/meta.rs b/crates/primitives/src/transaction/meta.rs index 810bd8f721b4..10199d827e3b 100644 --- a/crates/primitives/src/transaction/meta.rs +++ b/crates/primitives/src/transaction/meta.rs @@ -13,4 +13,6 @@ pub struct TransactionMeta { pub block_number: u64, /// Base fee of the block. pub base_fee: Option, + /// The excess blob gas of the block. 
+ pub excess_blob_gas: Option, } diff --git a/crates/rpc/rpc/src/eth/api/block.rs b/crates/rpc/rpc/src/eth/api/block.rs index f575998fc385..b778c4d3891a 100644 --- a/crates/rpc/rpc/src/eth/api/block.rs +++ b/crates/rpc/rpc/src/eth/api/block.rs @@ -73,6 +73,7 @@ where let block_number = block.number; let base_fee = block.base_fee_per_gas; let block_hash = block.hash; + let excess_blob_gas = block.excess_blob_gas; let receipts = block .body .into_iter() @@ -85,6 +86,7 @@ where block_hash, block_number, base_fee, + excess_blob_gas, }; build_transaction_receipt_with_block_receipts(tx, meta, receipt, &receipts) }) diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 7d2a0e6adb77..3aa07d973d36 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -14,6 +14,7 @@ use crate::{ use async_trait::async_trait; use reth_network_api::NetworkInfo; use reth_primitives::{ + constants::eip4844::blob_fee, Address, BlockId, BlockNumberOrTag, Bytes, FromRecoveredPooledTransaction, Header, IntoRecoveredTransaction, Receipt, SealedBlock, TransactionKind::{Call, Create}, @@ -884,7 +885,7 @@ pub(crate) fn build_transaction_receipt_with_block_receipts( status_code: if receipt.success { Some(U64::from(1)) } else { Some(U64::from(0)) }, // EIP-4844 fields - blob_gas_price: transaction.transaction.max_fee_per_blob_gas().map(U128::from), + blob_gas_price: meta.excess_blob_gas.map(blob_fee), blob_gas_used: transaction.transaction.blob_gas_used().map(U128::from), }; diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index efdf2edbb181..a599ee47f6ff 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1139,6 +1139,7 @@ impl<'this, TX: DbTx<'this>> TransactionsProvider for DatabaseProvider<'this, TX block_hash, block_number, base_fee: header.base_fee_per_gas, + excess_blob_gas: header.excess_blob_gas, }; return Ok(Some((transaction, meta))) From 123c79775501b73fdb5053dc33001dcec83f2cb1 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 8 Sep 2023 07:33:11 -0400 Subject: [PATCH 630/722] fix: enable cancun in transaction validator if active at genesis (#4522) --- crates/transaction-pool/src/validate/eth.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 01f5253b3c52..5eb8cd498c43 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -380,6 +380,9 @@ pub struct EthTransactionValidatorBuilder { impl EthTransactionValidatorBuilder { /// Creates a new builder for the given [ChainSpec] pub fn new(chain_spec: Arc) -> Self { + // If cancun is enabled at genesis, enable it + let cancun = chain_spec.is_cancun_activated_at_timestamp(chain_spec.genesis_timestamp()); + Self { chain_spec, block_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT, @@ -398,7 +401,7 @@ impl EthTransactionValidatorBuilder { shanghai: true, // TODO: can hard enable by default once mainnet transitioned - cancun: false, + cancun, } } From d0d50a067887ddfd351cf6390fb1aa2560942e5e Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 8 Sep 2023 07:34:06 -0400 Subject: [PATCH 631/722] fix: use max_fee_per_blob_gas in blob gas cost calc (#4521) --- 
crates/transaction-pool/src/traits.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 71ec7c03a46f..7b4e7c03d279 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -762,7 +762,7 @@ impl EthPooledTransaction { if let Some(blob_tx) = transaction.as_eip4844() { // add max blob cost - cost += U256::from(blob_tx.max_fee_per_gas * blob_tx.blob_gas() as u128); + cost += U256::from(blob_tx.max_fee_per_blob_gas * blob_tx.blob_gas() as u128); } Self { transaction, cost, blob_sidecar } From 5f329e78b6988dea29f4929dd421d43aaf3e684b Mon Sep 17 00:00:00 2001 From: Luca Provini Date: Fri, 8 Sep 2023 13:48:57 +0200 Subject: [PATCH 632/722] #2511: extended reth-rpc example with custom rpc ext (#4508) Co-authored-by: Matthias Seitz --- Cargo.lock | 11 ++++ Cargo.toml | 1 + bin/reth/src/lib.rs | 23 +++++++++ bin/reth/src/utils.rs | 5 ++ examples/Cargo.toml | 4 -- examples/rpc-db/Cargo.toml | 13 +++++ examples/{rpc-db.rs => rpc-db/src/main.rs} | 60 ++++++++++++++-------- examples/rpc-db/src/myrpc_ext.rs | 33 ++++++++++++ 8 files changed, 125 insertions(+), 25 deletions(-) create mode 100644 examples/rpc-db/Cargo.toml rename examples/{rpc-db.rs => rpc-db/src/main.rs} (64%) create mode 100644 examples/rpc-db/src/myrpc_ext.rs diff --git a/Cargo.lock b/Cargo.lock index 0a5efd77de0f..989a55bcba8f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6327,6 +6327,17 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afab94fb28594581f62d981211a9a4d53cc8130bbcbbb89a0440d9b8e81a7746" +[[package]] +name = "rpc-db" +version = "0.0.0" +dependencies = [ + "eyre", + "futures", + "jsonrpsee", + "reth", + "tokio", +] + [[package]] name = "ruint" version = "1.10.1" diff --git a/Cargo.toml b/Cargo.toml index 529715acc475..41bd2464bef8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -48,6 +48,7 @@ members = [ "crates/rpc/rpc-types-compat", "examples", "examples/additional-rpc-namespace-in-cli", + "examples/rpc-db", ] default-members = ["bin/reth"] diff --git a/bin/reth/src/lib.rs b/bin/reth/src/lib.rs index c9e02bc34216..2f452eb4610f 100644 --- a/bin/reth/src/lib.rs +++ b/bin/reth/src/lib.rs @@ -46,6 +46,25 @@ pub mod providers { pub use reth_provider::*; } +/// Re-exported from `reth_primitives`. +pub mod primitives { + pub use reth_primitives::*; +} + +/// Re-exported from `reth_beacon_consensus`. +pub mod beacon_consensus { + pub use reth_beacon_consensus::*; +} +/// Re-exported from `reth_blockchain_tree`. +pub mod blockchain_tree { + pub use reth_blockchain_tree::*; +} + +/// Re-exported from `reth_revm`. +pub mod revm { + pub use reth_revm::*; +} + /// Re-exported from `reth_tasks`. pub mod tasks { pub use reth_tasks::*; @@ -79,6 +98,10 @@ pub mod rpc { pub mod api { pub use reth_rpc_api::*; } + /// Re-exported from `reth_rpc::eth`. 
+ pub mod eth { + pub use reth_rpc::eth::*; + } } #[cfg(all(feature = "jemalloc", unix))] diff --git a/bin/reth/src/utils.rs b/bin/reth/src/utils.rs index a40e92625ead..64a798dc9e1e 100644 --- a/bin/reth/src/utils.rs +++ b/bin/reth/src/utils.rs @@ -26,6 +26,11 @@ use std::{ }; use tracing::info; +/// Exposing `open_db_read_only` function +pub mod db { + pub use reth_db::open_db_read_only; +} + /// Get a single header from network pub async fn get_single_header( client: Client, diff --git a/examples/Cargo.toml b/examples/Cargo.toml index fd244ad0fe61..619935b49538 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -28,10 +28,6 @@ futures.workspace = true async-trait.workspace = true tokio.workspace = true -[[example]] -name = "rpc-db" -path = "rpc-db.rs" - [[example]] name = "db-access" path = "db-access.rs" diff --git a/examples/rpc-db/Cargo.toml b/examples/rpc-db/Cargo.toml new file mode 100644 index 000000000000..ed9cc88e793c --- /dev/null +++ b/examples/rpc-db/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "rpc-db" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + +[dependencies] +futures.workspace = true +jsonrpsee.workspace = true +reth.workspace = true +tokio = { workspace = true, features = ["full"] } +eyre = "0.6.8" diff --git a/examples/rpc-db.rs b/examples/rpc-db/src/main.rs similarity index 64% rename from examples/rpc-db.rs rename to examples/rpc-db/src/main.rs index 86f890299a51..54aa243acc36 100644 --- a/examples/rpc-db.rs +++ b/examples/rpc-db/src/main.rs @@ -1,31 +1,45 @@ -// Talking to the DB -use reth_db::open_db_read_only; -use reth_primitives::ChainSpecBuilder; -use reth_provider::{providers::BlockchainProvider, ProviderFactory}; - +//! Example illustrating how to run the ETH JSON RPC API as standalone over a DB file. +//! +//! Run with +//! +//! ```not_rust +//! cargo run -p rpc-db +//! ``` +//! +//! This installs an additional RPC method `myrpcExt_customMethod` that can queried via [cast](https://github.com/foundry-rs/foundry) +//! +//! ```sh +//! cast rpc myrpcExt_customMethod +//! ``` +use reth::{ + primitives::ChainSpecBuilder, + providers::{providers::BlockchainProvider, ProviderFactory}, + utils::db::open_db_read_only, +}; // Bringing up the RPC -use reth_rpc_builder::{ +use reth::rpc::builder::{ RethRpcModule, RpcModuleBuilder, RpcServerConfig, TransportRpcModuleConfig, }; // Code which we'd ideally like to not need to import if you're only spinning up // read-only parts of the API and do not require access to pending state or to // EVM sims -use reth_beacon_consensus::BeaconConsensus; -use reth_blockchain_tree::{ - BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, +use reth::{ + beacon_consensus::BeaconConsensus, + blockchain_tree::{ + BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, + }, + revm::Factory as ExecutionFactory, }; -use reth_revm::Factory as ExecutionFactory; + // Configuring the network parts, ideally also wouldn't ned to think about this. -use reth_network_api::noop::NoopNetwork; -use reth_provider::test_utils::TestCanonStateSubscriptions; -use reth_tasks::TokioTaskExecutor; -use reth_transaction_pool::noop::NoopTransactionPool; +use reth::{providers::test_utils::TestCanonStateSubscriptions, tasks::TokioTaskExecutor}; use std::{path::Path, sync::Arc}; -// Example illustrating how to run the ETH JSON RPC API as standalone over a DB file. 
-// TODO: Add example showing how to spin up your own custom RPC namespace alongside -// the other default name spaces. +use myrpc_ext::{MyRpcExt, MyRpcExtApiServer}; +// Custom rpc extension +pub mod myrpc_ext; + #[tokio::main] async fn main() -> eyre::Result<()> { // 1. Setup the DB @@ -55,16 +69,20 @@ async fn main() -> eyre::Result<()> { }; let rpc_builder = RpcModuleBuilder::default() - .with_provider(provider) + .with_provider(provider.clone()) // Rest is just noops that do nothing - .with_pool(NoopTransactionPool::default()) - .with_network(NoopNetwork::default()) + .with_noop_pool() + .with_noop_network() .with_executor(TokioTaskExecutor::default()) .with_events(TestCanonStateSubscriptions::default()); // Pick which namespaces to expose. let config = TransportRpcModuleConfig::default().with_http([RethRpcModule::Eth]); - let server = rpc_builder.build(config); + let mut server = rpc_builder.build(config); + + // Add a custom rpc namespace + let custom_rpc = MyRpcExt { provider }; + server.merge_configured(custom_rpc.into_rpc())?; // Start the server & keep it alive let server_args = diff --git a/examples/rpc-db/src/myrpc_ext.rs b/examples/rpc-db/src/myrpc_ext.rs new file mode 100644 index 000000000000..d1898b81cb15 --- /dev/null +++ b/examples/rpc-db/src/myrpc_ext.rs @@ -0,0 +1,33 @@ +// Reth block related imports +use reth::{primitives::Block, providers::BlockReaderIdExt}; + +// Rpc related imports +use jsonrpsee::proc_macros::rpc; +use reth::rpc::eth::error::EthResult; + +/// trait interface for a custom rpc namespace: `MyRpc` +/// +/// This defines an additional namespace where all methods are configured as trait functions. +#[rpc(server, namespace = "myrpcExt")] +pub trait MyRpcExtApi { + /// Returns block 0. + #[method(name = "customMethod")] + fn custom_method(&self) -> EthResult>; +} + +/// The type that implements `myRpc` rpc namespace trait +pub struct MyRpcExt { + pub provider: Provider, +} + +impl MyRpcExtApiServer for MyRpcExt +where + Provider: BlockReaderIdExt + 'static, +{ + /// Showcasing how to implement a custom rpc method + /// using the provider. 
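The `myrpcExt` namespace above extends naturally with further methods; a hedged sketch of a second, parameterized one (the trait and method names here are illustrative, not part of this patch):

```rust
use jsonrpsee::proc_macros::rpc;
use reth::{primitives::Block, rpc::eth::error::EthResult};

// Hedged sketch: another server trait registered under the same namespace.
#[rpc(server, namespace = "myrpcExt")]
pub trait MyRpcBlocksApi {
    /// e.g. fetch an arbitrary block by number instead of hardcoding block 0
    #[method(name = "blockByNumber")]
    fn my_block_by_number(&self, number: u64) -> EthResult<Option<Block>>;
}
```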
+ fn custom_method(&self) -> EthResult> { + let block = self.provider.block_by_number(0)?; + Ok(block) + } +} From 95fe20886b2a64051a3c958fe17805b4932553e4 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov <62447812+klkvr@users.noreply.github.com> Date: Fri, 8 Sep 2023 16:29:53 +0300 Subject: [PATCH 633/722] Apply foundry improvements to reth (#4518) --- .../revm/revm-inspectors/src/access_list.rs | 5 +- .../revm-inspectors/src/tracing/js/mod.rs | 10 ++- .../revm/revm-inspectors/src/tracing/mod.rs | 65 +++++++++---------- 3 files changed, 35 insertions(+), 45 deletions(-) diff --git a/crates/revm/revm-inspectors/src/access_list.rs b/crates/revm/revm-inspectors/src/access_list.rs index bc9d641d7121..efe61cc23769 100644 --- a/crates/revm/revm-inspectors/src/access_list.rs +++ b/crates/revm/revm-inspectors/src/access_list.rs @@ -68,10 +68,7 @@ where _data: &mut EVMData<'_, DB>, _is_static: bool, ) -> InstructionResult { - let pc = interpreter.program_counter(); - let op = interpreter.contract.bytecode.bytecode()[pc]; - - match op { + match interpreter.current_opcode() { opcode::SLOAD | opcode::SSTORE => { if let Ok(slot) = interpreter.stack().peek(0) { let cur_contract = interpreter.contract.address; diff --git a/crates/revm/revm-inspectors/src/tracing/js/mod.rs b/crates/revm/revm-inspectors/src/tracing/js/mod.rs index b79b41f648f5..d88b55ee6def 100644 --- a/crates/revm/revm-inspectors/src/tracing/js/mod.rs +++ b/crates/revm/revm-inspectors/src/tracing/js/mod.rs @@ -299,12 +299,11 @@ where let db = EvmDb::new(data.journaled_state.state.clone(), self.to_db_service.clone()); - let pc = interp.program_counter(); let step = StepLog { stack: StackObj(interp.stack.clone()), - op: interp.contract.bytecode.bytecode()[pc].into(), + op: interp.current_opcode().into(), memory: MemoryObj(interp.memory.clone()), - pc: pc as u64, + pc: interp.program_counter() as u64, gas_remaining: interp.gas.remaining(), cost: interp.gas.spend(), depth: data.journaled_state.depth(), @@ -342,12 +341,11 @@ where if matches!(eval, return_revert!()) { let db = EvmDb::new(data.journaled_state.state.clone(), self.to_db_service.clone()); - let pc = interp.program_counter(); let step = StepLog { stack: StackObj(interp.stack.clone()), - op: interp.contract.bytecode.bytecode()[pc].into(), + op: interp.current_opcode().into(), memory: MemoryObj(interp.memory.clone()), - pc: pc as u64, + pc: interp.program_counter() as u64, gas_remaining: interp.gas.remaining(), cost: interp.gas.spend(), depth: data.journaled_state.depth(), diff --git a/crates/revm/revm-inspectors/src/tracing/mod.rs b/crates/revm/revm-inspectors/src/tracing/mod.rs index c01d19c8e94d..e850e523578a 100644 --- a/crates/revm/revm-inspectors/src/tracing/mod.rs +++ b/crates/revm/revm-inspectors/src/tracing/mod.rs @@ -254,14 +254,12 @@ impl TracingInspector { self.step_stack.push(StackStep { trace_idx, step_idx: trace.trace.steps.len() }); - let pc = interp.program_counter(); - let memory = self.config.record_memory_snapshots.then(|| interp.memory.clone()).unwrap_or_default(); let stack = self.config.record_stack_snapshots.then(|| interp.stack.clone()).unwrap_or_default(); - let op = OpCode::try_from_u8(interp.contract.bytecode.bytecode()[pc]) + let op = OpCode::try_from_u8(interp.current_opcode()) .or_else(|| { // if the opcode is invalid, we'll use the invalid opcode to represent it because // this is invoked before the opcode is executed, the evm will eventually return a @@ -273,7 +271,7 @@ impl TracingInspector { trace.trace.steps.push(CallTraceStep { depth: 
data.journaled_state.depth(), - pc, + pc: interp.program_counter(), op, contract: interp.contract.address, stack, @@ -315,39 +313,36 @@ impl TracingInspector { } } - if let Some(pc) = interp.program_counter().checked_sub(1) { - if self.config.record_state_diff { - let op = interp.contract.bytecode.bytecode()[pc]; - - let journal_entry = data - .journaled_state - .journal - .last() - // This should always work because revm initializes it as `vec![vec![]]` - // See [JournaledState::new](revm::JournaledState) - .expect("exists; initialized with vec") - .last(); - - step.storage_change = match (op, journal_entry) { - ( - opcode::SLOAD | opcode::SSTORE, - Some(JournalEntry::StorageChange { address, key, had_value }), - ) => { - // SAFETY: (Address,key) exists if part if StorageChange - let value = - data.journaled_state.state[address].storage[key].present_value(); - let change = StorageChange { key: *key, value, had_value: *had_value }; - Some(change) - } - _ => None, - }; - } - - // The gas cost is the difference between the recorded gas remaining at the start of the - // step the remaining gas here, at the end of the step. - step.gas_cost = step.gas_remaining - self.gas_inspector.gas_remaining(); + if self.config.record_state_diff { + let op = interp.current_opcode(); + + let journal_entry = data + .journaled_state + .journal + .last() + // This should always work because revm initializes it as `vec![vec![]]` + // See [JournaledState::new](revm::JournaledState) + .expect("exists; initialized with vec") + .last(); + + step.storage_change = match (op, journal_entry) { + ( + opcode::SLOAD | opcode::SSTORE, + Some(JournalEntry::StorageChange { address, key, had_value }), + ) => { + // SAFETY: (Address,key) exists if part if StorageChange + let value = data.journaled_state.state[address].storage[key].present_value(); + let change = StorageChange { key: *key, value, had_value: *had_value }; + Some(change) + } + _ => None, + }; } + // The gas cost is the difference between the recorded gas remaining at the start of the + // step the remaining gas here, at the end of the step. 
+ step.gas_cost = step.gas_remaining - self.gas_inspector.gas_remaining(); + // set the status step.status = status; } From d1f8c6ec01cbe2fecd58985b848f66d4b76f874b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 8 Sep 2023 15:52:08 +0200 Subject: [PATCH 634/722] chore: rm todo about polling jobs (#4532) --- crates/payload/builder/src/service.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/payload/builder/src/service.rs b/crates/payload/builder/src/service.rs index 278beef6e9c4..310e2876313d 100644 --- a/crates/payload/builder/src/service.rs +++ b/crates/payload/builder/src/service.rs @@ -229,7 +229,6 @@ where } // marker for exit condition - // TODO(mattsse): this could be optmized so we only poll new jobs let mut new_job = false; // drain all requests From 0fed70773cf603b126b832057f977aa9c0ac42cd Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 8 Sep 2023 15:48:14 +0200 Subject: [PATCH 635/722] test: add backward compat toml test (#4531) --- Cargo.lock | 1 + Cargo.toml | 4 ++ bin/reth/Cargo.toml | 4 +- crates/config/Cargo.toml | 7 ++- crates/config/src/config.rs | 92 +++++++++++++++++++++++++++++++++++++ 5 files changed, 104 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 989a55bcba8f..3de1ce42ea28 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5358,6 +5358,7 @@ dependencies = [ "serde", "serde_json", "tempfile", + "toml 0.7.6", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 41bd2464bef8..efacf44b4f6d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -154,6 +154,10 @@ enr = { version = "0.9", default-features = false, features = ["k256"] } # for eip-4844 c-kzg = { git = "https://github.com/ethereum/c-kzg-4844" } +## config +confy = "0.5" +toml = "0.7" + ### misc-testing proptest = "1.0" arbitrary = "1.1" diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index cba10e316281..96f60f54a39c 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -62,8 +62,8 @@ serde.workspace = true serde_json.workspace = true shellexpand = "3.0.0" dirs-next = "2.0.0" -confy = "0.5" -toml = { version = "0.7", features = ["display"] } +confy.workspace = true +toml = { workspace = true, features = ["display"] } # metrics metrics-exporter-prometheus = "0.12.1" diff --git a/crates/config/Cargo.toml b/crates/config/Cargo.toml index d50b42d0fb9d..f3872f0c787e 100644 --- a/crates/config/Cargo.toml +++ b/crates/config/Cargo.toml @@ -23,6 +23,9 @@ serde_json.workspace = true # crypto secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } -confy = "0.5" +# misc +confy.workspace = true +tempfile = "3.4" -tempfile = "3.4" \ No newline at end of file +[dev-dependencies] +toml.workspace = true \ No newline at end of file diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index 95f92c9a5c33..d5e0f9bb29cc 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -327,4 +327,96 @@ mod tests { assert_eq!(config, loaded_config); }) } + + // ensures config deserialization is backwards compatible + #[test] + fn test_backwards_compatibility() { + let alpha_0_0_8 = r"#[stages.headers] +downloader_max_concurrent_requests = 100 +downloader_min_concurrent_requests = 5 +downloader_max_buffered_responses = 100 +downloader_request_limit = 1000 +commit_threshold = 10000 + +[stages.total_difficulty] +commit_threshold = 100000 + +[stages.bodies] +downloader_request_limit = 200 +downloader_stream_batch_size = 1000 +downloader_max_buffered_blocks_size_bytes = 2147483648 +downloader_min_concurrent_requests 
= 5 +downloader_max_concurrent_requests = 100 + +[stages.sender_recovery] +commit_threshold = 5000000 + +[stages.execution] +max_blocks = 500000 +max_changes = 5000000 + +[stages.account_hashing] +clean_threshold = 500000 +commit_threshold = 100000 + +[stages.storage_hashing] +clean_threshold = 500000 +commit_threshold = 100000 + +[stages.merkle] +clean_threshold = 50000 + +[stages.transaction_lookup] +commit_threshold = 5000000 + +[stages.index_account_history] +commit_threshold = 100000 + +[stages.index_storage_history] +commit_threshold = 100000 + +[peers] +refill_slots_interval = '1s' +trusted_nodes = [] +connect_trusted_nodes_only = false +max_backoff_count = 5 +ban_duration = '12h' + +[peers.connection_info] +max_outbound = 100 +max_inbound = 30 + +[peers.reputation_weights] +bad_message = -16384 +bad_block = -16384 +bad_transactions = -16384 +already_seen_transactions = 0 +timeout = -4096 +bad_protocol = -2147483648 +failed_to_connect = -25600 +dropped = -4096 + +[peers.backoff_durations] +low = '30s' +medium = '3m' +high = '15m' +max = '1h' + +[sessions] +session_command_buffer = 32 +session_event_buffer = 260 + +[sessions.limits] + +[sessions.initial_internal_request_timeout] +secs = 20 +nanos = 0 + +[sessions.protocol_breach_request_timeout] +secs = 120 +nanos = 0 +#"; + + let _conf: Config = toml::from_str(alpha_0_0_8).unwrap(); + } } From d6c8336071bbdd80f8ed4b68cdcdd215d5466035 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 8 Sep 2023 18:24:59 +0300 Subject: [PATCH 636/722] dep: ethers@2.0.10 (#4535) --- Cargo.lock | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3de1ce42ea28..e691c0c23da1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2202,9 +2202,9 @@ dependencies = [ [[package]] name = "ethers-contract" -version = "2.0.9" +version = "2.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02bb80fd2c22631a5eb8a02cbf373cc5fd86937fc966bb670b9a884580c8e71c" +checksum = "d79269278125006bb0552349c03593ffa9702112ca88bc7046cc669f148fb47c" dependencies = [ "const-hex", "ethers-contract-abigen", @@ -2221,9 +2221,9 @@ dependencies = [ [[package]] name = "ethers-contract-abigen" -version = "2.0.9" +version = "2.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22c54db0d393393e732a5b20273e4f8ab89f0cce501c84e75fab9c126799a6e6" +checksum = "ce95a43c939b2e4e2f3191c5ad4a1f279780b8a39139c9905b43a7433531e2ab" dependencies = [ "Inflector", "const-hex", @@ -2243,9 +2243,9 @@ dependencies = [ [[package]] name = "ethers-contract-derive" -version = "2.0.9" +version = "2.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62ee4f216184a1304b707ed258f4f70aa40bf7e1522ab8963d127a8d516eaa1a" +checksum = "8e9ce44906fc871b3ee8c69a695ca7ec7f70e50cb379c9b9cb5e532269e492f6" dependencies = [ "Inflector", "const-hex", @@ -2259,9 +2259,9 @@ dependencies = [ [[package]] name = "ethers-core" -version = "2.0.9" +version = "2.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c29523f73c12753165781c6e5dc11c84d3e44c080a15f7c6cfbd70b514cb6f1" +checksum = "c0a17f0708692024db9956b31d7a20163607d2745953f5ae8125ab368ba280ad" dependencies = [ "arrayvec", "bytes", @@ -2289,9 +2289,9 @@ dependencies = [ [[package]] name = "ethers-etherscan" -version = "2.0.9" +version = "2.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4aab5af432b3fe5b7756b60df5c9ddeb85a13414575ad8a9acd707c24f0a77a5" +checksum = "0e53451ea4a8128fbce33966da71132cf9e1040dcfd2a2084fd7733ada7b2045" dependencies = [ "ethers-core", "reqwest", @@ -2304,9 +2304,9 @@ dependencies = [ [[package]] name = "ethers-middleware" -version = "2.0.9" +version = "2.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "356151d5ded56d4918146366abc9dfc9df367cf0096492a7a5477b21b7693615" +checksum = "473f1ccd0c793871bbc248729fa8df7e6d2981d6226e4343e3bbaa9281074d5d" dependencies = [ "async-trait", "auto_impl", @@ -2331,9 +2331,9 @@ dependencies = [ [[package]] name = "ethers-providers" -version = "2.0.9" +version = "2.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00c84664b294e47fc2860d6db0db0246f79c4c724e552549631bb9505b834bee" +checksum = "6838fa110e57d572336178b7c79e94ff88ef976306852d8cb87d9e5b1fc7c0b5" dependencies = [ "async-trait", "auto_impl", @@ -2369,9 +2369,9 @@ dependencies = [ [[package]] name = "ethers-signers" -version = "2.0.9" +version = "2.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "170b299698702ef1f53d2275af7d6d97409cfa4f9398ee9ff518f6bc9102d0ad" +checksum = "5ea44bec930f12292866166f9ddbea6aa76304850e4d8dcd66dc492b43d00ff1" dependencies = [ "async-trait", "coins-bip32", From 0477f977737203d714827beafa2f2975323e004b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 8 Sep 2023 17:28:00 +0200 Subject: [PATCH 637/722] chore: change best unconnected condition (#4527) --- crates/net/network/src/peers/manager.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/net/network/src/peers/manager.rs b/crates/net/network/src/peers/manager.rs index 1955603c7349..988332fe44cc 100644 --- a/crates/net/network/src/peers/manager.rs +++ b/crates/net/network/src/peers/manager.rs @@ -645,9 +645,9 @@ impl PeersManager { /// Returns `None` if no peer is available. 
fn best_unconnected(&mut self) -> Option<(PeerId, &mut Peer)> { let mut unconnected = self.peers.iter_mut().filter(|(_, peer)| { - peer.state.is_unconnected() && + !peer.is_backed_off() && !peer.is_banned() && - !peer.is_backed_off() && + peer.state.is_unconnected() && (!self.connect_trusted_nodes_only || peer.is_trusted()) }); From 6ae6c9e71b91cf189066924995610f2419323247 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 8 Sep 2023 18:33:02 +0200 Subject: [PATCH 638/722] perf: increase refill slots interval (#4528) --- crates/net/network/src/peers/manager.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/crates/net/network/src/peers/manager.rs b/crates/net/network/src/peers/manager.rs index 988332fe44cc..1c4dc44dc72d 100644 --- a/crates/net/network/src/peers/manager.rs +++ b/crates/net/network/src/peers/manager.rs @@ -759,9 +759,7 @@ impl PeersManager { }) } - if self.refill_slots_interval.poll_tick(cx).is_ready() { - // this ensures the manager will be polled periodically, see [Interval::poll_tick] - let _ = self.refill_slots_interval.poll_tick(cx); + while self.refill_slots_interval.poll_tick(cx).is_ready() { self.fill_outbound_slots(); } @@ -1101,7 +1099,7 @@ pub struct PeersConfig { impl Default for PeersConfig { fn default() -> Self { Self { - refill_slots_interval: Duration::from_millis(1_000), + refill_slots_interval: Duration::from_millis(5_000), connection_info: Default::default(), reputation_weights: Default::default(), ban_list: Default::default(), From ba51d9fb09645d141b7b5b94815068202b5ee675 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Fri, 8 Sep 2023 10:48:19 -0700 Subject: [PATCH 639/722] docs: bump latitude discount code --- book/installation/installation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/book/installation/installation.md b/book/installation/installation.md index 0dd779cb2768..47533c7b1ca5 100644 --- a/book/installation/installation.md +++ b/book/installation/installation.md @@ -73,4 +73,4 @@ Once you're synced to the tip you will need a reliable connection, especially if If you are buying your own NVMe SSD, please consult [this hardware comparison](https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038) which is being actively maintained. We recommend against buying DRAM-less or QLC devices as these are noticeably slower. -All our benchmarks have been produced on [Latitude.sh](https://www.latitude.sh/), a bare metal provider. We use `c3.large.x86` boxes. So far our experience has been smooth with some users reporting that the NVMEs there outperform AWS NVMEs by 3x or more. We're excited for more Reth nodes on Latitude.sh, so for a limited time you can use `RETH200` for a $200 discount. [Run a node now!](https://metal.new/reth) +All our benchmarks have been produced on [Latitude.sh](https://www.latitude.sh/), a bare metal provider. We use `c3.large.x86` boxes, and also recommend trying the `s2.small.x86` box for pruned/full nodes. So far our experience has been smooth with some users reporting that the NVMEs there outperform AWS NVMEs by 3x or more. We're excited for more Reth nodes on Latitude.sh, so for a limited time you can use `RETH400` for a $250 discount. 
[Run a node now!](https://metal.new/reth) From 9ad229bc13e66107481439b0326154021ecfb4c5 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Fri, 8 Sep 2023 10:52:42 -0700 Subject: [PATCH 640/722] docs: add full node size on installation recs --- book/installation/installation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/book/installation/installation.md b/book/installation/installation.md index 47533c7b1ca5..ef2c904a8229 100644 --- a/book/installation/installation.md +++ b/book/installation/installation.md @@ -16,7 +16,7 @@ The most important requirement is by far the disk, whereas CPU and RAM requireme | | Archive Node | Full Node | |-----------|---------------------------------------|-------------------------------------| -| Disk | At least 2.1TB (TLC NVMe recommended) | TBD | +| Disk | At least 2.1TB (TLC NVMe recommended) | At least 1TB (TLC NVMe recommended) | | Memory | 8GB+ | 8GB+ | | CPU | Higher clock speed over core count | Higher clock speeds over core count | | Bandwidth | Stable 24Mbps+ | Stable 24Mbps+ | From b87dfe507d3638a46107570cc737c87f71fb15e8 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Sat, 9 Sep 2023 00:20:39 +0300 Subject: [PATCH 641/722] feat: holesky support (#4359) --- crates/primitives/res/genesis/holesky.json | 1000 ++++++++++++++++++++ crates/primitives/src/chain/mod.rs | 9 +- crates/primitives/src/chain/spec.rs | 50 +- crates/primitives/src/constants/mod.rs | 4 + crates/primitives/src/lib.rs | 5 +- 5 files changed, 1064 insertions(+), 4 deletions(-) create mode 100644 crates/primitives/res/genesis/holesky.json diff --git a/crates/primitives/res/genesis/holesky.json b/crates/primitives/res/genesis/holesky.json new file mode 100644 index 000000000000..0ce62bd939f3 --- /dev/null +++ b/crates/primitives/res/genesis/holesky.json @@ -0,0 +1,1000 @@ +{ + "nonce": "0x1234", + "timestamp": "1694786100", + "extraData": "0x686f77206d7563682069732074686520666973683f", + "gasLimit": "0x17D7840", + "difficulty": "0x01", + "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase": "0x0000000000000000000000000000000000000000", + "stateRoot": "0x69d8c9d72f6fa4ad42d4702b433707212f90db395eb54dc20bc85de253788783", + "alloc": { + "0x0000000000000000000000000000000000000000": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000001": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000002": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000003": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000004": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000005": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000006": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000007": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000008": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000009": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000010": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000011": { + "balance": "1" + }, + 
"0x0000000000000000000000000000000000000012": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000013": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000014": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000015": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000016": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000017": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000018": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000019": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000020": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000021": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000022": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000023": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000024": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000025": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000026": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000027": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000028": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000029": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000030": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000031": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000032": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000033": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000034": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000035": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000036": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000037": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000038": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000039": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000040": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000041": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000042": { + "balance": "1" + }, + 
"0x0000000000000000000000000000000000000043": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000044": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000045": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000046": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000047": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000048": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000049": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000050": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000051": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000052": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000053": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000054": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000055": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000056": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000057": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000058": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000059": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000060": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000061": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000062": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000063": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000064": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000065": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000066": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000067": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000068": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000069": { + "balance": "1" + }, + "0x000000000000000000000000000000000000006a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000006b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000006c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000006d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000006e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000006f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000070": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000071": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000072": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000073": { + "balance": "1" + }, + 
"0x0000000000000000000000000000000000000074": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000075": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000076": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000077": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000078": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000079": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000080": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000081": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000082": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000083": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000084": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000085": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000086": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000087": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000088": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000089": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000090": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000091": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000092": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000093": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000094": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000095": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000096": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000097": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000098": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000099": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009f": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a4": { + "balance": "1" + }, + 
"0x00000000000000000000000000000000000000a5": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000aa": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ab": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ac": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ad": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ae": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000af": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b5": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ba": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000bb": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000bc": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000bd": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000be": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000bf": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c5": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ca": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000cb": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000cc": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000cd": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ce": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000cf": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d5": { + "balance": "1" + }, + 
"0x00000000000000000000000000000000000000d6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000da": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000db": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000dc": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000dd": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000de": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000df": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e5": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ea": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000eb": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ec": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ed": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ee": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ef": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f5": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000fa": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000fb": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000fc": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000fd": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000fe": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ff": { + "balance": "1" + }, + "0x4242424242424242424242424242424242424242": { + "balance": "0", + "code": 
"0x60806040526004361061003f5760003560e01c806301ffc9a71461004457806322895118146100a4578063621fd130146101ba578063c5f2892f14610244575b600080fd5b34801561005057600080fd5b506100906004803603602081101561006757600080fd5b50357fffffffff000000000000000000000000000000000000000000000000000000001661026b565b604080519115158252519081900360200190f35b6101b8600480360360808110156100ba57600080fd5b8101906020810181356401000000008111156100d557600080fd5b8201836020820111156100e757600080fd5b8035906020019184600183028401116401000000008311171561010957600080fd5b91939092909160208101903564010000000081111561012757600080fd5b82018360208201111561013957600080fd5b8035906020019184600183028401116401000000008311171561015b57600080fd5b91939092909160208101903564010000000081111561017957600080fd5b82018360208201111561018b57600080fd5b803590602001918460018302840111640100000000831117156101ad57600080fd5b919350915035610304565b005b3480156101c657600080fd5b506101cf6110b5565b6040805160208082528351818301528351919283929083019185019080838360005b838110156102095781810151838201526020016101f1565b50505050905090810190601f1680156102365780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561025057600080fd5b506102596110c7565b60408051918252519081900360200190f35b60007fffffffff0000000000000000000000000000000000000000000000000000000082167f01ffc9a70000000000000000000000000000000000000000000000000000000014806102fe57507fffffffff0000000000000000000000000000000000000000000000000000000082167f8564090700000000000000000000000000000000000000000000000000000000145b92915050565b6030861461035d576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118056026913960400191505060405180910390fd5b602084146103b6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252603681526020018061179c6036913960400191505060405180910390fd5b6060821461040f576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260298152602001806118786029913960400191505060405180910390fd5b670de0b6b3a7640000341015610470576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118526026913960400191505060405180910390fd5b633b9aca003406156104cd576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260338152602001806117d26033913960400191505060405180910390fd5b633b9aca00340467ffffffffffffffff811115610535576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252602781526020018061182b6027913960400191505060405180910390fd5b6060610540826114ba565b90507f649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c589898989858a8a6105756020546114ba565b6040805160a0808252810189905290819060208201908201606083016080840160c085018e8e80828437600083820152601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690910187810386528c815260200190508c8c808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690920188810386528c5181528c51602091820193918e019250908190849084905b83811015610648578181015183820152602001610630565b50505050905090810190601f1680156106755780820380516001836020036101000a031916815260200191505b5086810383528881526020018989808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169092018881038452895181528951602091820193918b019250908190849084905b838110156106ef5781810151838201526020016106d7565b
50505050905090810190601f16801561071c5780820380516001836020036101000a031916815260200191505b509d505050505050505050505050505060405180910390a1600060028a8a600060801b604051602001808484808284377fffffffffffffffffffffffffffffffff0000000000000000000000000000000090941691909301908152604080517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0818403018152601090920190819052815191955093508392506020850191508083835b602083106107fc57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016107bf565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610859573d6000803e3d6000fd5b5050506040513d602081101561086e57600080fd5b5051905060006002806108846040848a8c6116fe565b6040516020018083838082843780830192505050925050506040516020818303038152906040526040518082805190602001908083835b602083106108f857805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016108bb565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610955573d6000803e3d6000fd5b5050506040513d602081101561096a57600080fd5b5051600261097b896040818d6116fe565b60405160009060200180848480828437919091019283525050604080518083038152602092830191829052805190945090925082918401908083835b602083106109f457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016109b7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610a51573d6000803e3d6000fd5b5050506040513d6020811015610a6657600080fd5b5051604080516020818101949094528082019290925280518083038201815260609092019081905281519192909182918401908083835b60208310610ada57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610a9d565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610b37573d6000803e3d6000fd5b5050506040513d6020811015610b4c57600080fd5b50516040805160208101858152929350600092600292839287928f928f92018383808284378083019250505093505050506040516020818303038152906040526040518082805190602001908083835b60208310610bd957805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610b9c565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610c36573d6000803e3d6000fd5b5050506040513d6020811015610c4b57600080fd5b50516040518651600291889160009188916020918201918291908601908083835b60208310610ca957805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610c6c565b6001836020036101000a0380198251168184511680821785525050505050509050018367ffffffffffffffff191667ffffffffffffffff1916815260180182815260200193505050506040516020818303038152906040526040518082805190602001908083835b60208310610d4e57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610d11565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610dab573d6000803e3d6000fd5b5050506040513d6020811015610dc057600080fd5b50516040805160208181019490945280820192909252805180830382018152606
09092019081905281519192909182918401908083835b60208310610e3457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610df7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610e91573d6000803e3d6000fd5b5050506040513d6020811015610ea657600080fd5b50519050858114610f02576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260548152602001806117486054913960600191505060405180910390fd5b60205463ffffffff11610f60576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260218152602001806117276021913960400191505060405180910390fd5b602080546001019081905560005b60208110156110a9578160011660011415610fa0578260008260208110610f9157fe5b0155506110ac95505050505050565b600260008260208110610faf57fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061102557805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610fe8565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015611082573d6000803e3d6000fd5b5050506040513d602081101561109757600080fd5b50519250600282049150600101610f6e565b50fe5b50505050505050565b60606110c26020546114ba565b905090565b6020546000908190815b60208110156112f05781600116600114156111e6576002600082602081106110f557fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061116b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161112e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156111c8573d6000803e3d6000fd5b5050506040513d60208110156111dd57600080fd5b505192506112e2565b600283602183602081106111f657fe5b015460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061126b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161122e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156112c8573d6000803e3d6000fd5b5050506040513d60208110156112dd57600080fd5b505192505b6002820491506001016110d1565b506002826112ff6020546114ba565b600060401b6040516020018084815260200183805190602001908083835b6020831061135a57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161131d565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790527fffffffffffffffffffffffffffffffffffffffffffffffff000000000000000095909516920191825250604080518083037ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8018152601890920190819052815191955093508392850191508083835b6020831061143f57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101611402565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa15801561149c573d6000803e3d6000fd5b5050506040513d60208110156114b157600080fd5b50519250505090565b604080516008808252818301909252606091602082018180
36833701905050905060c082901b8060071a60f81b826000815181106114f457fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060061a60f81b8260018151811061153757fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060051a60f81b8260028151811061157a57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060041a60f81b826003815181106115bd57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060031a60f81b8260048151811061160057fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060021a60f81b8260058151811061164357fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060011a60f81b8260068151811061168657fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060001a60f81b826007815181106116c957fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a90535050919050565b6000808585111561170d578182fd5b83861115611719578182fd5b505082019391909203915056fe4465706f736974436f6e74726163743a206d65726b6c6520747265652066756c6c4465706f736974436f6e74726163743a207265636f6e7374727563746564204465706f7369744461746120646f6573206e6f74206d6174636820737570706c696564206465706f7369745f646174615f726f6f744465706f736974436f6e74726163743a20696e76616c6964207769746864726177616c5f63726564656e7469616c73206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c7565206e6f74206d756c7469706c65206f6620677765694465706f736974436f6e74726163743a20696e76616c6964207075626b6579206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f20686967684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f206c6f774465706f736974436f6e74726163743a20696e76616c6964207369676e6174757265206c656e677468a26469706673582212201dd26f37a621703009abf16e77e69c93dc50c79db7f6cc37543e3e0e3decdc9764736f6c634300060b0033", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000022": "0xf5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b", + "0x0000000000000000000000000000000000000000000000000000000000000023": "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71", + "0x0000000000000000000000000000000000000000000000000000000000000024": "0xc78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c", + "0x0000000000000000000000000000000000000000000000000000000000000025": "0x536d98837f2dd165a55d5eeae91485954472d56f246df256bf3cae19352a123c", + "0x0000000000000000000000000000000000000000000000000000000000000026": "0x9efde052aa15429fae05bad4d0b1d7c64da64d03d7a1854a588c2cb8430c0d30", + "0x0000000000000000000000000000000000000000000000000000000000000027": "0xd88ddfeed400a8755596b21942c1497e114c302e6118290f91e6772976041fa1", + "0x0000000000000000000000000000000000000000000000000000000000000028": "0x87eb0ddba57e35f6d286673802a4af5975e22506c7cf4c64bb6be5ee11527f2c", + "0x0000000000000000000000000000000000000000000000000000000000000029": "0x26846476fd5fc54a5d43385167c95144f2643f533cc85bb9d16b782f8d7db193", + "0x000000000000000000000000000000000000000000000000000000000000002a": "0x506d86582d252405b840018792cad2bf1259f1ef5aa5f887e13cb2f0094f51e1", + "0x000000000000000000000000000000000000000000000000000000000000002b": "0xffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b", + 
"0x000000000000000000000000000000000000000000000000000000000000002c": "0x6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220", + "0x000000000000000000000000000000000000000000000000000000000000002d": "0xb7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f", + "0x000000000000000000000000000000000000000000000000000000000000002e": "0xdf6af5f5bbdb6be9ef8aa618e4bf8073960867171e29676f8b284dea6a08a85e", + "0x000000000000000000000000000000000000000000000000000000000000002f": "0xb58d900f5e182e3c50ef74969ea16c7726c549757cc23523c369587da7293784", + "0x0000000000000000000000000000000000000000000000000000000000000030": "0xd49a7502ffcfb0340b1d7885688500ca308161a7f96b62df9d083b71fcc8f2bb", + "0x0000000000000000000000000000000000000000000000000000000000000031": "0x8fe6b1689256c0d385f42f5bbe2027a22c1996e110ba97c171d3e5948de92beb", + "0x0000000000000000000000000000000000000000000000000000000000000032": "0x8d0d63c39ebade8509e0ae3c9c3876fb5fa112be18f905ecacfecb92057603ab", + "0x0000000000000000000000000000000000000000000000000000000000000033": "0x95eec8b2e541cad4e91de38385f2e046619f54496c2382cb6cacd5b98c26f5a4", + "0x0000000000000000000000000000000000000000000000000000000000000034": "0xf893e908917775b62bff23294dbbe3a1cd8e6cc1c35b4801887b646a6f81f17f", + "0x0000000000000000000000000000000000000000000000000000000000000035": "0xcddba7b592e3133393c16194fac7431abf2f5485ed711db282183c819e08ebaa", + "0x0000000000000000000000000000000000000000000000000000000000000036": "0x8a8d7fe3af8caa085a7639a832001457dfb9128a8061142ad0335629ff23ff9c", + "0x0000000000000000000000000000000000000000000000000000000000000037": "0xfeb3c337d7a51a6fbf00b9e34c52e1c9195c969bd4e7a0bfd51d5c5bed9c1167", + "0x0000000000000000000000000000000000000000000000000000000000000038": "0xe71f0aa83cc32edfbefa9f4d3e0174ca85182eec9f3a09f6a6c0df6377a510d7", + "0x0000000000000000000000000000000000000000000000000000000000000039": "0x31206fa80a50bb6abe29085058f16212212a60eec8f049fecb92d8c8e0a84bc0", + "0x000000000000000000000000000000000000000000000000000000000000003a": "0x21352bfecbeddde993839f614c3dac0a3ee37543f9b412b16199dc158e23b544", + "0x000000000000000000000000000000000000000000000000000000000000003b": "0x619e312724bb6d7c3153ed9de791d764a366b389af13c58bf8a8d90481a46765", + "0x000000000000000000000000000000000000000000000000000000000000003c": "0x7cdd2986268250628d0c10e385c58c6191e6fbe05191bcc04f133f2cea72c1c4", + "0x000000000000000000000000000000000000000000000000000000000000003d": "0x848930bd7ba8cac54661072113fb278869e07bb8587f91392933374d017bcbe1", + "0x000000000000000000000000000000000000000000000000000000000000003e": "0x8869ff2c22b28cc10510d9853292803328be4fb0e80495e8bb8d271f5b889636", + "0x000000000000000000000000000000000000000000000000000000000000003f": "0xb5fe28e79f1b850f8658246ce9b6a1e7b49fc06db7143e8fe0b4f2b0c5523a5c", + "0x0000000000000000000000000000000000000000000000000000000000000040": "0x985e929f70af28d0bdd1a90a808f977f597c7c778c489e98d3bd8910d31ac0f7" + } + }, + "0x0000006916a87b82333f4245046623b23794C65C": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0x0be949928Ff199c9EBA9E110db210AA5C94EFAd0": { + "balance": "0x7c13bc4b2c133c56000000" + }, + "0x0C100000006d7b5e23a1eAEE637f28cA32Cd5b31": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0x0C35317B7a96C454E2CB3d1A255D775Ab112cCc8": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x0d731cfabC5574329823F26d488416451d2ea376": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x0e79065B5F11b5BD1e62B935A600976ffF3754B9": { + "balance": "0xd3c21bcecceda1000000" + }, + 
"0x105083929bF9bb22C26cB1777Ec92661170D4285": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x10F5d45854e038071485AC9e402308cF80D2d2fE": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0x1268AD189526AC0b386faF06eFfC46779c340eE6": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x12Cba59f5A74DB81a12ff63C349Bd82CBF6007C2": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x1446D7f6dF00380F246d8211dE7f0FaBC4Fd248C": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x164e38a375247A784A81d420201AA8fe4E513921": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x1B7aA44088a0eA95bdc65fef6E5071E946Bf7d8f": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0x222222222222cF64a76AE3d36859958c864fDA2c": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x2f14582947E292a2eCd20C430B46f2d27CFE213c": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0x2f2c75B5Dd5D246194812b00eEb3B09c2c66e2eE": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0x341c40b94bf2afbfa42573cb78f16ee15a056238": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x34f845773D4364999f2fbC7AA26ABDeE902cBb46": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x3C75594181e03E8ECD8468A0037F058a9dAfad79": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x462396E69dBfa455F405f4DD82F3014Af8003B72": { + "balance": "0xa56fa5b99019a5c8000000" + }, + "0x49Df3CCa2670eB0D591146B16359fe336e476F29": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x4D0b04b405c6b62C7cFC3aE54759747e2C0b4662": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x4D496CcC28058B1D74B7a19541663E21154f9c84": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0x509a7667aC8D0320e36172c192506a6188aA84f6": { + "balance": "0x7c13bc4b2c133c56000000" + }, + "0x5180db0237291A6449DdA9ed33aD90a38787621c": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x52730f347dEf6BA09adfF62EaC60D5fEe8205BC4": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x5EAC0fBd3dfef8aE3efa3c5dc1aa193bc6033dFd": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x6a7aA9b882d50Bb7bc5Da1a244719C99f12F06a3": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0x6Cc9397c3B38739daCbfaA68EaD5F5D77Ba5F455": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0x762cA62ca2549ad806763B3Aa1eA317c429bDBDa": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x778F5F13C4Be78A3a4d7141BCB26999702f407CF": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0x875D25Ee4bC604C71BaF6236a8488F22399BED4b": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x8dF7878d3571BEF5e5a744F96287C8D20386d75A": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0x9E415A096fF77650dc925dEA546585B4adB322B6": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xA0766B65A4f7B1da79a1AF79aC695456eFa28644": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xA29B144A449E414A472c60C7AAf1aaFfE329021D": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xa55395566b0b54395B3246f96A0bDc4b8a483df9": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xAC9ba72fb61aA7c31A95df0A8b6ebA6f41EF875e": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xB0498C15879db2eE5471d4926c5fAA25C9a09683": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xB19Fb4c1f280327e60Ed37b1Dc6EE77533539314": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0xC21cB9C99C316d1863142F7dD86dd5496D81A8D6": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xc473d412dc52e349862209924c8981b2ee420768": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xC48E23C5F6e1eA0BaEf6530734edC3968f79Af2e": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0xc6e2459991BfE27cca6d86722F35da23A1E4Cb97": { + "balance": "0x52b7d2dcc80cd2e4000000" 
+ }, + "0xD3994e4d3202dD23c8497d7F75bF1647d1DA1bb1": { + "balance": "0x19D971E4FE8401E74000000" + }, + "0xDCA6e9B48Ea86AeBFDf9929949124042296b6e34": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xe0a2Bd4258D2768837BAa26A28fE71Dc079f84c7": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0xEA28d002042fd9898D0Db016be9758eeAFE35C1E": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xEfA7454f1116807975A4750B46695E967850de5D": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xFBFd6Fa9F73Ac6A058E01259034C28001BEf8247": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0xe0991E844041bE6F11B99da5b114b6bCf84EBd57": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x15E719b6AcAf1E4411Bf0f9576CB1D0dB161DdFc": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x346D827a75F98F0A7a324Ff80b7C3F90252E8baC": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x73b2e0E54510239E22cC936F0b4a6dE1acf0AbdE": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0xBb977B2EE8a111D788B3477D242078d0B837E72b": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x834Dbf5A03e29c25bc55459cCe9c021EeBE676Ad": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xD1F77E4C1C45186e8653C489F90e008a73597296": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xb04aeF2a3d2D86B01006cCD4339A2e943d9c6480": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xC9CA2bA9A27De1Db589d8c33Ab8EDFa2111b31fb": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x4BC656B34De23896fa6069C9862F355b740401aF": { + "balance": "0x084595161401484a000000" + } + }, + "number": "0x0", + "gasUsed": "0x0", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000" +} \ No newline at end of file diff --git a/crates/primitives/src/chain/mod.rs b/crates/primitives/src/chain/mod.rs index 69afd1fe03fc..5ffdf9c2daf8 100644 --- a/crates/primitives/src/chain/mod.rs +++ b/crates/primitives/src/chain/mod.rs @@ -1,4 +1,5 @@ use crate::{ + holesky_nodes, net::{goerli_nodes, mainnet_nodes, sepolia_nodes}, NodeRecord, U256, U64, }; @@ -11,7 +12,7 @@ use std::{fmt, str::FromStr}; mod spec; pub use spec::{ AllGenesisFormats, BaseFeeParams, ChainSpec, ChainSpecBuilder, DisplayHardforks, ForkCondition, - ForkTimestamps, DEV, GOERLI, MAINNET, SEPOLIA, + ForkTimestamps, DEV, GOERLI, HOLESKY, MAINNET, SEPOLIA, }; // The chain info module. @@ -44,6 +45,11 @@ impl Chain { Chain::Named(ethers_core::types::Chain::Sepolia) } + /// Returns the holesky chain. + pub const fn holesky() -> Self { + Chain::Named(ethers_core::types::Chain::Holesky) + } + /// Returns the dev chain. 
pub const fn dev() -> Self { Chain::Named(ethers_core::types::Chain::Dev) @@ -88,6 +94,7 @@ impl Chain { Mainnet => Some(mainnet_nodes()), Goerli => Some(goerli_nodes()), Sepolia => Some(sepolia_nodes()), + Holesky => Some(holesky_nodes()), _ => None, } } diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index 7b4e94d2d9b6..80687d4cbd4f 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -157,6 +157,47 @@ pub static SEPOLIA: Lazy> = Lazy::new(|| { .into() }); +/// The Holesky spec +pub static HOLESKY: Lazy> = Lazy::new(|| { + ChainSpec { + chain: Chain::holesky(), + genesis: serde_json::from_str(include_str!("../../res/genesis/holesky.json")) + .expect("Can't deserialize Holesky genesis json"), + genesis_hash: Some(H256(hex!( + "ff9006519a8ce843ac9c28549d24211420b546e12ce2d170c77a8cca7964f23d" + ))), + paris_block_and_final_difficulty: Some((0, U256::from(1))), + fork_timestamps: ForkTimestamps::default().shanghai(1694790240), + hardforks: BTreeMap::from([ + (Hardfork::Frontier, ForkCondition::Block(0)), + (Hardfork::Homestead, ForkCondition::Block(0)), + (Hardfork::Dao, ForkCondition::Block(0)), + (Hardfork::Tangerine, ForkCondition::Block(0)), + (Hardfork::SpuriousDragon, ForkCondition::Block(0)), + (Hardfork::Byzantium, ForkCondition::Block(0)), + (Hardfork::Constantinople, ForkCondition::Block(0)), + (Hardfork::Petersburg, ForkCondition::Block(0)), + (Hardfork::Istanbul, ForkCondition::Block(0)), + (Hardfork::MuirGlacier, ForkCondition::Block(0)), + (Hardfork::Berlin, ForkCondition::Block(0)), + (Hardfork::London, ForkCondition::Block(0)), + ( + Hardfork::Paris, + ForkCondition::TTD { fork_block: None, total_difficulty: U256::ZERO }, + ), + (Hardfork::Shanghai, ForkCondition::Timestamp(1694790240)), + ]), + deposit_contract: Some(DepositContract::new( + H160(hex!("4242424242424242424242424242424242424242")), + 0, + H256(hex!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5")), + )), + base_fee_params: BaseFeeParams::ethereum(), + prune_batch_sizes: PruneBatchSizes::testnet(), + } + .into() +}); + /// Dev testnet specification /// /// Includes 20 prefunded accounts with 10_000 ETH each derived from mnemonic "test test test test @@ -1088,7 +1129,7 @@ mod tests { use crate::{ constants::EMPTY_WITHDRAWALS, Address, AllGenesisFormats, Chain, ChainSpec, ChainSpecBuilder, DisplayHardforks, ForkCondition, ForkHash, ForkId, Genesis, Hardfork, - Head, DEV, GOERLI, H256, MAINNET, SEPOLIA, U256, + Head, DEV, GOERLI, H256, HOLESKY, MAINNET, SEPOLIA, U256, }; use bytes::BytesMut; use ethers_core::types as EtherType; @@ -1898,4 +1939,11 @@ Post-merge hard forks (timestamp based): let expected_forkhash = ForkHash(hex_literal::hex!("8062457a")); assert_eq!(ForkHash::from(genesis_hash), expected_forkhash); } + + #[test] + fn holesky_paris_activated_at_genesis() { + assert!(HOLESKY + .fork(Hardfork::Paris) + .active_at_ttd(HOLESKY.genesis.difficulty, HOLESKY.genesis.difficulty)); + } } diff --git a/crates/primitives/src/constants/mod.rs b/crates/primitives/src/constants/mod.rs index f3f09c4f3107..7955a6e47d37 100644 --- a/crates/primitives/src/constants/mod.rs +++ b/crates/primitives/src/constants/mod.rs @@ -84,6 +84,10 @@ pub const GOERLI_GENESIS: H256 = pub const SEPOLIA_GENESIS: H256 = H256(hex!("25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9")); +/// Holesky genesis hash. 
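+/// Matches the `genesis_hash` configured in the `HOLESKY` chain spec above.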
+pub const HOLESKY_GENESIS: H256 = + H256(hex!("ff9006519a8ce843ac9c28549d24211420b546e12ce2d170c77a8cca7964f23d")); + /// Testnet genesis hash. pub const DEV_GENESIS: H256 = H256(hex!("2f980576711e3617a5e4d83dd539548ec0f7792007d505a3d2e9674833af2d7c")); diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index a1cfdfa274cb..b08a6e9d610f 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -61,11 +61,12 @@ pub use block::{ pub use bloom::Bloom; pub use chain::{ AllGenesisFormats, BaseFeeParams, Chain, ChainInfo, ChainSpec, ChainSpecBuilder, - DisplayHardforks, ForkCondition, ForkTimestamps, DEV, GOERLI, MAINNET, SEPOLIA, + DisplayHardforks, ForkCondition, ForkTimestamps, DEV, GOERLI, HOLESKY, MAINNET, SEPOLIA, }; pub use compression::*; pub use constants::{ - DEV_GENESIS, EMPTY_OMMER_ROOT, GOERLI_GENESIS, KECCAK_EMPTY, MAINNET_GENESIS, SEPOLIA_GENESIS, + DEV_GENESIS, EMPTY_OMMER_ROOT, GOERLI_GENESIS, HOLESKY_GENESIS, KECCAK_EMPTY, MAINNET_GENESIS, + SEPOLIA_GENESIS, }; pub use eip4844::{calculate_excess_blob_gas, kzg_to_versioned_hash}; pub use forkid::{ForkFilter, ForkHash, ForkId, ForkTransition, ValidationError}; From b151d6ea8d79cf3a0bb511ec00d0d375a7d3882c Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Sat, 9 Sep 2023 06:27:22 -0400 Subject: [PATCH 642/722] fix: wake future after buffering pong (#4536) --- crates/net/eth-wire/src/p2pstream.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crates/net/eth-wire/src/p2pstream.rs b/crates/net/eth-wire/src/p2pstream.rs index 025f0b347e31..081662db09bc 100644 --- a/crates/net/eth-wire/src/p2pstream.rs +++ b/crates/net/eth-wire/src/p2pstream.rs @@ -401,6 +401,9 @@ where _ if id == P2PMessageID::Ping as u8 => { tracing::trace!("Received Ping, Sending Pong"); this.send_pong(); + // This is required because the `Sink` may not be polled externally, and if + // that happens, the pong will never be sent. + cx.waker().wake_by_ref(); } _ if id == P2PMessageID::Disconnect as u8 => { let reason = DisconnectReason::decode(&mut &decompress_buf[1..]).map_err(|err| { From 900ada5aaa4b5d4a633df78764e7dd7169a13405 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 9 Sep 2023 15:33:55 +0200 Subject: [PATCH 643/722] chore: rm redundant clone (#4540) --- crates/transaction-pool/src/pool/pending.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index 0708f1c8840e..91beed0883c9 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -264,7 +264,7 @@ impl PendingPool { self.all.remove(&tx); self.size_of -= tx.transaction.size(); self.independent_transactions.remove(&tx); - Some(tx.transaction.clone()) + Some(tx.transaction) } fn next_id(&mut self) -> u64 { From 34417ff6fc2cd1d0076da94000693d095230eb96 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 10 Sep 2023 02:41:03 +0200 Subject: [PATCH 644/722] fix: add missing holesky variant (#4541) --- bin/reth/src/args/utils.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/bin/reth/src/args/utils.rs b/bin/reth/src/args/utils.rs index c9fe80685d5d..b243c6a592b1 100644 --- a/bin/reth/src/args/utils.rs +++ b/bin/reth/src/args/utils.rs @@ -1,7 +1,7 @@ //! 
Clap parser utilities use reth_primitives::{ - fs, AllGenesisFormats, BlockHashOrNumber, ChainSpec, DEV, GOERLI, MAINNET, SEPOLIA, + fs, AllGenesisFormats, BlockHashOrNumber, ChainSpec, DEV, GOERLI, HOLESKY, MAINNET, SEPOLIA, }; use reth_revm::primitives::B256 as H256; use std::{ @@ -25,6 +25,7 @@ pub fn chain_spec_value_parser(s: &str) -> eyre::Result, eyre::Er "mainnet" => MAINNET.clone(), "goerli" => GOERLI.clone(), "sepolia" => SEPOLIA.clone(), + "holesky" => HOLESKY.clone(), "dev" => DEV.clone(), _ => { let raw = fs::read_to_string(PathBuf::from(shellexpand::full(s)?.into_owned()))?; @@ -40,6 +41,7 @@ pub fn genesis_value_parser(s: &str) -> eyre::Result, eyre::Error "mainnet" => MAINNET.clone(), "goerli" => GOERLI.clone(), "sepolia" => SEPOLIA.clone(), + "holesky" => HOLESKY.clone(), "dev" => DEV.clone(), _ => { let raw = fs::read_to_string(PathBuf::from(shellexpand::full(s)?.into_owned()))?; @@ -110,7 +112,7 @@ mod tests { #[test] fn parse_chain_spec() { - for chain in ["mainnet", "sepolia", "goerli"] { + for chain in ["mainnet", "sepolia", "goerli", "holesky"] { chain_spec_value_parser(chain).unwrap(); genesis_value_parser(chain).unwrap(); } From 3cc169fcfefc7060a4cece89530e48852c4b3223 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 10 Sep 2023 10:04:20 +0200 Subject: [PATCH 645/722] fix: set trace results root trace's gas used to execution result's gas (#4506) --- .../src/tracing/builder/parity.rs | 11 +++++- crates/rpc/rpc-types/src/eth/trace/parity.rs | 39 +++++++++++++++++++ crates/rpc/rpc/src/trace.rs | 3 +- 3 files changed, 50 insertions(+), 3 deletions(-) diff --git a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs index 014aca08e9e6..3c5bafe36fa1 100--- a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs +++ b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs @@ -136,6 +136,7 @@ impl ParityTraceBuilder { res: ExecutionResult, trace_types: &HashSet, ) -> TraceResults { + let gas_used = res.gas_used(); let output = match res { ExecutionResult::Success { output, .. } => output.into_data(), ExecutionResult::Revert { output, .. } => output, @@ -144,12 +145,18 @@ impl ParityTraceBuilder { let (trace, vm_trace, state_diff) = self.into_trace_type_traces(trace_types); - TraceResults { + let mut trace = TraceResults { output: output.into(), trace: trace.unwrap_or_default(), vm_trace, state_diff, - } + }; + + // we're setting the gas used of the root trace explicitly to the gas used of the execution + // result + trace.set_root_trace_gas_used(gas_used); + + trace } /// Consumes the inspector and returns the trace results according to the configured trace diff --git a/crates/rpc/rpc-types/src/eth/trace/parity.rs b/crates/rpc/rpc-types/src/eth/trace/parity.rs index 64a4ba7c6c47..da71213da886 100644 --- a/crates/rpc/rpc-types/src/eth/trace/parity.rs +++ b/crates/rpc/rpc-types/src/eth/trace/parity.rs @@ -39,6 +39,21 @@ pub struct TraceResults { pub vm_trace: Option, } +// === impl TraceResults === + +impl TraceResults { + /// Sets the gas used of the root trace. + /// + /// The root trace's gasUsed should mirror the actual gas used by the transaction. + /// + /// This allows setting it manually by consuming the execution result's gas, for example. 
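+ ///
+ /// A minimal sketch of the intended call site (an editorial example, not part of this
+ /// patch; `res` is assumed to be the transaction's `ExecutionResult` and `results` the
+ /// `TraceResults` built from its traces):
+ ///
+ /// ```ignore
+ /// // Pin the root trace's gas to what execution actually consumed.
+ /// let gas_used = res.gas_used();
+ /// results.set_root_trace_gas_used(gas_used);
+ /// assert_eq!(results.trace[0].result.as_ref().unwrap().gas_used(), U64::from(gas_used));
+ /// ```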
+ pub fn set_root_trace_gas_used(&mut self, gas_used: u64) { + if let Some(r) = self.trace.first_mut().and_then(|t| t.result.as_mut()) { + r.set_gas_used(gas_used) + } + } +} + /// A `FullTrace` with an additional transaction hash #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -240,13 +255,37 @@ pub struct CreateOutput { pub address: Address, } +/// Represents the output of a trace. #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] #[serde(untagged)] pub enum TraceOutput { + /// Output of a regular call transaction. Call(CallOutput), + /// Output of a CREATE transaction. Create(CreateOutput), } +// === impl TraceOutput === + +impl TraceOutput { + /// Returns the gas used by this trace. + pub fn gas_used(&self) -> U64 { + match self { + TraceOutput::Call(call) => call.gas_used, + TraceOutput::Create(create) => create.gas_used, + } + } + + /// Sets the gas used by this trace. + pub fn set_gas_used(&mut self, gas_used: u64) { + match self { + TraceOutput::Call(call) => call.gas_used = U64::from(gas_used), + TraceOutput::Create(create) => create.gas_used = U64::from(gas_used), + } + } +} + +/// A parity style trace of a transaction. #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct TransactionTrace { diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index bca7059b752c..73c20187a73e 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -268,7 +268,8 @@ where .await } - /// Executes all transactions of a block and returns a list of callback results. + /// Executes all transactions of a block and returns a list of callback results invoked for each + /// transaction in the block. /// /// This /// 1. fetches all transactions of the block From 2de63c664179fbcb3d5a2f9f0e499795725cf92e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 10 Sep 2023 10:29:15 +0200 Subject: [PATCH 646/722] chore: bump stale period to 21 days (#4526) --- .github/workflows/stale.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 9caae6965958..31ce92c2685b 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -13,13 +13,13 @@ jobs: steps: - uses: actions/stale@v8 with: - days-before-stale: 14 + days-before-stale: 21 days-before-close: 7 stale-issue-label: "S-stale" stale-pr-label: "S-stale" exempt-issue-labels: "M-prevent-stale" exempt-pr-labels: "M-prevent-stale" - stale-issue-message: "This issue is stale because it has been open for 14 days with no activity." + stale-issue-message: "This issue is stale because it has been open for 21 days with no activity." close-issue-message: "This issue was closed because it has been inactive for 7 days since being marked as stale." 
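# Milestoned or assigned items are never marked stale: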
exempt-all-milestones: true exempt-all-assignees: true From e1d668681d157019abad8f2f6a30353155707ea3 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 10 Sep 2023 10:42:46 +0200 Subject: [PATCH 647/722] chore(deps): weekly `cargo update` (#4542) Co-authored-by: github-merge-queue Co-authored-by: Matthias Seitz --- Cargo.lock | 193 +++++++++++++++++++++++++++-------------------------- 1 file changed, 97 insertions(+), 96 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e691c0c23da1..b40f61ea37e3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -434,7 +434,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -618,7 +618,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -640,7 +640,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.29", + "syn 2.0.31", "which", ] @@ -726,7 +726,7 @@ dependencies = [ [[package]] name = "boa_ast" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#b47087c02866e914356a990267083c04604d1cb9" +source = "git+https://github.com/boa-dev/boa#306709177bf742425a61d0205d6f25e7811f6ebf" dependencies = [ "bitflags 2.4.0", "boa_interner", @@ -739,7 +739,7 @@ dependencies = [ [[package]] name = "boa_engine" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#b47087c02866e914356a990267083c04604d1cb9" +source = "git+https://github.com/boa-dev/boa#306709177bf742425a61d0205d6f25e7811f6ebf" dependencies = [ "bitflags 2.4.0", "boa_ast", @@ -777,7 +777,7 @@ dependencies = [ [[package]] name = "boa_gc" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#b47087c02866e914356a990267083c04604d1cb9" +source = "git+https://github.com/boa-dev/boa#306709177bf742425a61d0205d6f25e7811f6ebf" dependencies = [ "boa_macros", "boa_profiler", @@ -788,7 +788,7 @@ dependencies = [ [[package]] name = "boa_icu_provider" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#b47087c02866e914356a990267083c04604d1cb9" +source = "git+https://github.com/boa-dev/boa#306709177bf742425a61d0205d6f25e7811f6ebf" dependencies = [ "icu_collections", "icu_normalizer", @@ -801,7 +801,7 @@ dependencies = [ [[package]] name = "boa_interner" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#b47087c02866e914356a990267083c04604d1cb9" +source = "git+https://github.com/boa-dev/boa#306709177bf742425a61d0205d6f25e7811f6ebf" dependencies = [ "boa_gc", "boa_macros", @@ -816,18 +816,18 @@ dependencies = [ [[package]] name = "boa_macros" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#b47087c02866e914356a990267083c04604d1cb9" +source = "git+https://github.com/boa-dev/boa#306709177bf742425a61d0205d6f25e7811f6ebf" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.29", + "syn 2.0.31", "synstructure 0.13.0", ] [[package]] name = "boa_parser" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#b47087c02866e914356a990267083c04604d1cb9" +source = "git+https://github.com/boa-dev/boa#306709177bf742425a61d0205d6f25e7811f6ebf" dependencies = [ "bitflags 2.4.0", "boa_ast", @@ -847,7 +847,7 @@ dependencies = [ [[package]] name = "boa_profiler" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#b47087c02866e914356a990267083c04604d1cb9" +source = "git+https://github.com/boa-dev/boa#306709177bf742425a61d0205d6f25e7811f6ebf" [[package]] name = 
"boyer-moore-magiclen" @@ -914,9 +914,9 @@ checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "bytemuck" -version = "1.13.1" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17febce684fd15d89027105661fec94afb475cb995fbc59d2865198446ba2eea" +checksum = "374d28ec25809ee0e23827c2ab573d729e293f281dfe393500e7ad618baa61c6" [[package]] name = "byteorder" @@ -926,9 +926,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" +checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" dependencies = [ "serde", ] @@ -1032,9 +1032,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.28" +version = "0.4.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95ed24df0632f708f5f6d8082675bef2596f7084dee3dd55f632290bf35bfe0f" +checksum = "defd4e7873dbddba6c7c91e199c7fcb946abc4a6a4ac3195400bcfb01b5de877" dependencies = [ "android-tzdata", "iana-time-zone", @@ -1131,7 +1131,7 @@ dependencies = [ "heck", "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -1155,7 +1155,7 @@ dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", "serde", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -1484,9 +1484,9 @@ checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] name = "crypto-bigint" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4c2f4e1afd912bc40bfd6fed5d9dc1f288e0ba01bfcc835cc5bc3eb13efe15" +checksum = "740fe28e594155f10cfc383984cbefd529d7396050557148f79cb0f621204124" dependencies = [ "generic-array", "rand_core 0.6.4", @@ -1524,9 +1524,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.0.0" +version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f711ade317dd348950a9910f81c5947e3d8907ebd2b83f76203ff1807e6a2bc2" +checksum = "622178105f911d937a42cdb140730ba4a3ed2becd8ae6ce39c7d28b5d75d4588" dependencies = [ "cfg-if", "cpufeatures", @@ -1547,7 +1547,7 @@ checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -1595,7 +1595,7 @@ dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", "strsim 0.10.0", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -1617,7 +1617,7 @@ checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ "darling_core 0.20.3", "quote 1.0.33", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -1702,7 +1702,7 @@ checksum = "53e0efad4403bfc52dc201159c4b842a246a14b98c64b55dfd0f2d89729dfeb8" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -1876,7 +1876,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -1949,9 +1949,9 @@ dependencies = [ [[package]] name = "educe" -version = "0.4.22" +version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"079044df30bb07de7d846d41a184c4b00e66ebdac93ee459253474f3a47e50ae" +checksum = "0f0042ff8246a363dbe77d2ceedb073339e85a804b9a47636c6e016a9a32c05f" dependencies = [ "enum-ordinalize", "proc-macro2 1.0.66", @@ -2078,7 +2078,7 @@ dependencies = [ "num-traits", "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -2089,7 +2089,7 @@ checksum = "c2ad8cef1d801a4686bfd8919f0b30eac4c8e48968c437a6405ded4fb5272d2b" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -2236,8 +2236,8 @@ dependencies = [ "regex", "serde", "serde_json", - "syn 2.0.29", - "toml 0.7.6", + "syn 2.0.31", + "toml 0.7.8", "walkdir", ] @@ -2254,7 +2254,7 @@ dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", "serde_json", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -2280,7 +2280,7 @@ dependencies = [ "serde", "serde_json", "strum 0.25.0", - "syn 2.0.29", + "syn 2.0.31", "tempfile", "thiserror", "tiny-keccak", @@ -2483,9 +2483,9 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.1.20" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e825f6987101665dea6ec934c09ec6d721de7bc1bf92248e1d5810c8cd636b77" +checksum = "d0870c84016d4b481be5c9f323c24f65e31e901ae618f0e80f4308fb00de1d2d" [[package]] name = "findshlibs" @@ -2623,7 +2623,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -3422,7 +3422,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi", - "rustix 0.38.11", + "rustix 0.38.12", "windows-sys 0.48.0", ] @@ -3776,9 +3776,9 @@ checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" [[package]] name = "linux-raw-sys" -version = "0.4.5" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503" +checksum = "1a9bad9f94746442c783ca431b22403b519cd7fbeed0533fdd6328b2f2212128" [[package]] name = "litemap" @@ -3926,7 +3926,7 @@ checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -4228,7 +4228,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -4242,9 +4242,9 @@ dependencies = [ [[package]] name = "object" -version = "0.32.0" +version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ac5bbd07aea88c60a577a1ce218075ffd59208b2d7ca97adf9bfc5aeb21ebe" +checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" dependencies = [ "memchr", ] @@ -4519,7 +4519,7 @@ dependencies = [ "phf_shared", "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -4548,7 +4548,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -4727,12 +4727,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.12" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62" +checksum = 
"ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" dependencies = [ "proc-macro2 1.0.66", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -5117,9 +5117,9 @@ checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" [[package]] name = "regress" -version = "0.6.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82a9ecfa0cb04d0b04dddb99b8ccf4f66bc8dfd23df694b398570bd8ae3a50fb" +checksum = "4ed9969cad8051328011596bf549629f1b800cf1731e7964b1eef8dfc480d2c2" dependencies = [ "hashbrown 0.13.2", "memchr", @@ -5236,7 +5236,7 @@ dependencies = [ "tempfile", "thiserror", "tokio", - "toml 0.7.6", + "toml 0.7.8", "tracing", "tui", "vergen", @@ -5358,7 +5358,7 @@ dependencies = [ "serde", "serde_json", "tempfile", - "toml 0.7.6", + "toml 0.7.8", ] [[package]] @@ -5650,7 +5650,7 @@ dependencies = [ "quote 1.0.33", "regex", "serial_test 0.10.0", - "syn 2.0.29", + "syn 2.0.31", "trybuild", ] @@ -5808,7 +5808,7 @@ dependencies = [ "tiny-keccak", "tokio", "tokio-stream", - "toml 0.7.6", + "toml 0.7.8", "tracing", "triehash", "url", @@ -5920,7 +5920,7 @@ version = "0.1.0-alpha.8" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -6421,14 +6421,14 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.11" +version = "0.38.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0c3dde1fc030af041adc40e79c0e7fbcf431dd24870053d187d7c66e4b87453" +checksum = "bdf14a7a466ce88b5eac3da815b53aefc208ce7e74d1c263aabb04d88c4abeb1" dependencies = [ "bitflags 2.4.0", "errno 0.3.3", "libc", - "linux-raw-sys 0.4.5", + "linux-raw-sys 0.4.7", "windows-sys 0.48.0", ] @@ -6719,14 +6719,14 @@ checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] name = "serde_json" -version = "1.0.105" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693151e1ac27563d6dbcec9dee9fbd5da8539b20fa14ad3752b2e6d363ace360" +checksum = "2cc66a619ed80bf7a0f6b17dd063a84b88f6dea1813737cf469aef1d081142c2" dependencies = [ "itoa", "ryu", @@ -6779,7 +6779,7 @@ dependencies = [ "darling 0.20.3", "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -6829,7 +6829,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -6908,9 +6908,9 @@ dependencies = [ [[package]] name = "shlex" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" +checksum = "a7cee0529a6d40f580e7a5e6c495c8fbfe21b7b52795ed4bb5e62cdf92bc6380" [[package]] name = "signal-hook" @@ -7170,7 +7170,7 @@ dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", "rustversion", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -7213,9 +7213,9 @@ dependencies = [ [[package]] name = "symbolic-common" -version = "12.3.0" +version = "12.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "167a4ffd7c35c143fd1030aa3c2caf76ba42220bd5a6b5f4781896434723b8c3" +checksum = "9e0e9bc48b3852f36a84f8d0da275d50cb3c2b88b59b9ec35fdd8b7fa239e37d" dependencies = [ "debugid", "memmap2", @@ -7225,9 +7225,9 @@ dependencies = [ [[package]] name = "symbolic-demangle" -version 
= "12.3.0" +version = "12.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e378c50e80686c1c5c205674e1f86a2858bec3d2a7dfdd690331a8a19330f293" +checksum = "691e53bdc0702aba3a5abc2cffff89346fcbd4050748883c7e2f714b33a69045" dependencies = [ "cpp_demangle", "rustc-demangle", @@ -7258,9 +7258,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.29" +version = "2.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c324c494eba9d92503e6f1ef2e6df781e78f6a7705a0202d9801b198807d518a" +checksum = "718fa2415bcb8d8bd775917a1bf12a7931b6dfa890753378538118181e0cb398" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", @@ -7287,7 +7287,7 @@ checksum = "285ba80e733fac80aa4270fbcdf83772a79b80aa35c97075320abfee4a915b06" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.29", + "syn 2.0.31", "unicode-xid 0.2.4", ] @@ -7306,7 +7306,7 @@ dependencies = [ "cfg-if", "fastrand 2.0.0", "redox_syscall 0.3.5", - "rustix 0.38.11", + "rustix 0.38.12", "windows-sys 0.48.0", ] @@ -7363,7 +7363,7 @@ dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", "subprocess", - "syn 2.0.29", + "syn 2.0.31", "test-fuzz-internal", "toolchain_find", ] @@ -7405,7 +7405,7 @@ checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -7528,7 +7528,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -7592,9 +7592,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.7.6" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17e963a819c331dcacd7ab957d80bc2b9a9c1e71c804826d2f283dd65306542" +checksum = "dd79e69d3b627db300ff956027cc6c3798cef26d22526befdfcd12feeb6d2257" dependencies = [ "serde", "serde_spanned", @@ -7613,9 +7613,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.19.14" +version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a" +checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ "indexmap 2.0.0", "serde", @@ -7732,7 +7732,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -8138,9 +8138,9 @@ dependencies = [ [[package]] name = "walkdir" -version = "2.3.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" +checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" dependencies = [ "same-file", "winapi-util", @@ -8188,7 +8188,7 @@ dependencies = [ "once_cell", "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.29", + "syn 2.0.31", "wasm-bindgen-shared", ] @@ -8222,7 +8222,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.29", + "syn 2.0.31", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8251,13 +8251,14 @@ checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" [[package]] name = "which" -version = "4.4.0" +version = "4.4.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" dependencies = [ "either", - "libc", + "home", "once_cell", + "rustix 0.38.12", ] [[package]] @@ -8524,9 +8525,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.16" +version = "0.8.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47430998a7b5d499ccee752b41567bc3afc57e1327dc855b1a2aa44ce29b5fa1" +checksum = "bab77e97b50aee93da431f2cee7cd0f43b4d1da3c408042f2d7d164187774f0a" [[package]] name = "xmltree" @@ -8605,7 +8606,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] From fc9f1168bc904d7037d9f089df4b8a7cd0e458e9 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 10 Sep 2023 11:15:25 +0200 Subject: [PATCH 648/722] feat: add discovery port function (#4543) --- crates/net/discv4/src/lib.rs | 8 +++++++- crates/net/network/src/config.rs | 35 ++++++++++++++++---------------- 2 files changed, 24 insertions(+), 19 deletions(-) diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index fa7077bc3122..36dde7c6e96a 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -53,7 +53,7 @@ use std::{ cell::RefCell, collections::{btree_map, hash_map::Entry, BTreeMap, HashMap, VecDeque}, io, - net::{IpAddr, SocketAddr}, + net::{IpAddr, Ipv4Addr, SocketAddr, SocketAddrV4}, pin::Pin, rc::Rc, sync::Arc, @@ -96,6 +96,12 @@ pub use reth_net_nat::{external_ip, NatResolver}; /// Note: the default TCP port is the same. pub const DEFAULT_DISCOVERY_PORT: u16 = 30303; +/// The default address for discv4 via UDP: "0.0.0.0:30303" +/// +/// Note: The default TCP address is the same. +pub const DEFAULT_DISCOVERY_ADDRESS: SocketAddr = + SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, DEFAULT_DISCOVERY_PORT)); + /// The maximum size of any packet is 1280 bytes. const MAX_PACKET_SIZE: usize = 1280; diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 0b093e8f1250..c1492f8ae6b0 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -7,7 +7,7 @@ use crate::{ session::SessionsConfig, NetworkHandle, NetworkManager, }; -use reth_discv4::{Discv4Config, Discv4ConfigBuilder, DEFAULT_DISCOVERY_PORT}; +use reth_discv4::{Discv4Config, Discv4ConfigBuilder, DEFAULT_DISCOVERY_ADDRESS}; use reth_dns_discovery::DnsDiscoveryConfig; use reth_ecies::util::pk2id; use reth_eth_wire::{HelloMessage, Status}; @@ -17,11 +17,7 @@ use reth_primitives::{ use reth_provider::{BlockReader, HeaderProvider}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use secp256k1::SECP256K1; -use std::{ - collections::HashSet, - net::{Ipv4Addr, SocketAddr, SocketAddrV4}, - sync::Arc, -}; +use std::{collections::HashSet, net::SocketAddr, sync::Arc}; // re-export for convenience pub use secp256k1::SecretKey; @@ -244,14 +240,15 @@ impl NetworkConfigBuilder { /// This is a convenience function for both [NetworkConfigBuilder::listener_addr] and /// [NetworkConfigBuilder::discovery_addr]. 
/// - /// By default, both are on the same port: [DEFAULT_DISCOVERY_PORT] + /// By default, both are on the same port: + /// [DEFAULT_DISCOVERY_PORT](reth_discv4::DEFAULT_DISCOVERY_PORT) pub fn set_addrs(self, addr: SocketAddr) -> Self { self.listener_addr(addr).discovery_addr(addr) } /// Sets the socket address the network will listen on. /// - /// By default, this is [Ipv4Addr::UNSPECIFIED] on [DEFAULT_DISCOVERY_PORT] + /// By default, this is [DEFAULT_DISCOVERY_ADDRESS] pub fn listener_addr(mut self, listener_addr: SocketAddr) -> Self { self.listener_addr = Some(listener_addr); self @@ -259,11 +256,9 @@ impl NetworkConfigBuilder { /// Sets the port of the address the network will listen on. /// - /// By default, this is [DEFAULT_DISCOVERY_PORT] + /// By default, this is [DEFAULT_DISCOVERY_PORT](reth_discv4::DEFAULT_DISCOVERY_PORT) pub fn listener_port(mut self, port: u16) -> Self { - self.listener_addr - .get_or_insert(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, DEFAULT_DISCOVERY_PORT).into()) - .set_port(port); + self.listener_addr.get_or_insert(DEFAULT_DISCOVERY_ADDRESS).set_port(port); self } @@ -273,6 +268,14 @@ impl NetworkConfigBuilder { self } + /// Sets the port of the address the discovery network will listen on. + /// + /// By default, this is [DEFAULT_DISCOVERY_PORT](reth_discv4::DEFAULT_DISCOVERY_PORT) + pub fn discovery_port(mut self, port: u16) -> Self { + self.discovery_addr.get_or_insert(DEFAULT_DISCOVERY_ADDRESS).set_port(port); + self + } + /// Sets the discv4 config to use. pub fn discovery(mut self, builder: Discv4ConfigBuilder) -> Self { self.discovery_v4_builder = Some(builder); @@ -369,9 +372,7 @@ impl NetworkConfigBuilder { head, } = self; - let listener_addr = listener_addr.unwrap_or_else(|| { - SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, DEFAULT_DISCOVERY_PORT)) - }); + let listener_addr = listener_addr.unwrap_or(DEFAULT_DISCOVERY_ADDRESS); let mut hello_message = hello_message.unwrap_or_else(|| HelloMessage::builder(peer_id).build()); @@ -408,9 +409,7 @@ impl NetworkConfigBuilder { boot_nodes, dns_discovery_config, discovery_v4_config: discovery_v4_builder.map(|builder| builder.build()), - discovery_addr: discovery_addr.unwrap_or_else(|| { - SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, DEFAULT_DISCOVERY_PORT)) - }), + discovery_addr: discovery_addr.unwrap_or(DEFAULT_DISCOVERY_ADDRESS), listener_addr, peers_config: peers_config.unwrap_or_default(), sessions_config: sessions_config.unwrap_or_default(), From 8cb92e35f113688c0dc922b26ae24c9be6837ca4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 11 Sep 2023 13:17:39 +0200 Subject: [PATCH 649/722] feat: add metrics for inflight get pooled tx requests (#4547) --- crates/net/network/src/metrics.rs | 2 ++ crates/net/network/src/transactions.rs | 9 +++++++++ 2 files changed, 11 insertions(+) diff --git a/crates/net/network/src/metrics.rs b/crates/net/network/src/metrics.rs index 7245624ca17b..a25c84e88fb2 100644 --- a/crates/net/network/src/metrics.rs +++ b/crates/net/network/src/metrics.rs @@ -67,6 +67,8 @@ pub struct TransactionsManagerMetrics { pub(crate) messages_with_already_seen_transactions: Counter, /// Number of transactions about to be imported into the pool. pub(crate) pending_pool_imports: Gauge, + /// Currently active outgoing GetPooledTransactions requests. + pub(crate) inflight_transaction_requests: Gauge, /// How often we failed to send a request to the peer because the channel was full. 
pub(crate) egress_peer_channel_full: Counter, } diff --git a/crates/net/network/src/transactions.rs b/crates/net/network/src/transactions.rs index 17318aa67d62..130fc65aa918 100644 --- a/crates/net/network/src/transactions.rs +++ b/crates/net/network/src/transactions.rs @@ -174,10 +174,16 @@ where TransactionsHandle { manager_tx: self.command_tx.clone() } } + #[inline] fn update_import_metrics(&self) { self.metrics.pending_pool_imports.set(self.pool_imports.len() as f64); } + #[inline] + fn update_request_metrics(&self) { + self.metrics.inflight_transaction_requests.set(self.inflight_requests.len() as f64); + } + /// Request handler for an incoming request for transactions fn on_get_pooled_transactions( &mut self, @@ -591,6 +597,8 @@ where this.on_network_tx_event(event); } + this.update_request_metrics(); + // Advance all requests. while let Poll::Ready(Some(GetPooledTxResponse { peer_id, result })) = this.inflight_requests.poll_next_unpin(cx) @@ -609,6 +617,7 @@ where } } + this.update_request_metrics(); this.update_import_metrics(); // Advance all imports From cc576bc8690a3e16e6e5bf1cbbbfdd029e85e3d4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 11 Sep 2023 13:17:51 +0200 Subject: [PATCH 650/722] test: enable geth tests again (#4544) --- crates/net/network/tests/it/connect.rs | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/crates/net/network/tests/it/connect.rs b/crates/net/network/tests/it/connect.rs index b6156ea71884..b7b5e72792b6 100644 --- a/crates/net/network/tests/it/connect.rs +++ b/crates/net/network/tests/it/connect.rs @@ -310,7 +310,7 @@ async fn test_connect_to_trusted_peer() { #[tokio::test(flavor = "multi_thread")] #[serial_test::serial] -#[ignore] // TODO: Re-enable once we figure out why this test is flakey +#[cfg_attr(not(feature = "geth-tests"), ignore)] async fn test_incoming_node_id_blacklist() { reth_tracing::init_test_tracing(); tokio::time::timeout(GETH_TIMEOUT, async move { @@ -363,8 +363,7 @@ async fn test_incoming_node_id_blacklist() { #[tokio::test(flavor = "multi_thread")] #[serial_test::serial] -// #[cfg_attr(not(feature = "geth-tests"), ignore)] -#[ignore] // TODO: Re-enable once we figure out why this test is flakey +#[cfg_attr(not(feature = "geth-tests"), ignore)] async fn test_incoming_connect_with_single_geth() { reth_tracing::init_test_tracing(); tokio::time::timeout(GETH_TIMEOUT, async move { @@ -409,7 +408,6 @@ async fn test_incoming_connect_with_single_geth() { #[tokio::test(flavor = "multi_thread")] #[serial_test::serial] #[cfg_attr(not(feature = "geth-tests"), ignore)] -#[ignore] // TODO: Re-enable once we figure out why this test is flakey async fn test_outgoing_connect_with_single_geth() { reth_tracing::init_test_tracing(); tokio::time::timeout(GETH_TIMEOUT, async move { @@ -456,7 +454,6 @@ async fn test_outgoing_connect_with_single_geth() { #[tokio::test(flavor = "multi_thread")] #[serial_test::serial] #[cfg_attr(not(feature = "geth-tests"), ignore)] -#[ignore] // TODO: Re-enable once we figure out why this test is flakey async fn test_geth_disconnect() { reth_tracing::init_test_tracing(); tokio::time::timeout(GETH_TIMEOUT, async move { @@ -566,13 +563,12 @@ async fn test_shutdown() { #[tokio::test(flavor = "multi_thread")] async fn test_disconnect_incoming_when_exceeded_incoming_connections() { let net = Testnet::create(1).await; - let (reth_p2p, reth_disc) = unused_tcp_udp(); let secret_key = SecretKey::new(&mut rand::thread_rng()); let peers_config = PeersConfig::default().with_max_inbound(0); let 
config = NetworkConfigBuilder::new(secret_key) - .listener_addr(reth_p2p) - .discovery_addr(reth_disc) + .listener_port(0) + .discovery_port(0) .peer_config(peers_config) .build(NoopProvider::default()); let network = NetworkManager::new(config).await.unwrap(); From 692a081d8d207ccfff9bf0226c80bb1c88ab2dd3 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 11 Sep 2023 12:57:46 +0100 Subject: [PATCH 651/722] chore(book): fix imposter USDC address (#4549) --- book/run/config.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/book/run/config.md b/book/run/config.md index e432055f9a42..4a25c3decc1b 100644 --- a/book/run/config.md +++ b/book/run/config.md @@ -377,9 +377,9 @@ We can also prune receipts more granular, using the logs filtering: # by the specified addresses, discarding all others. This setting is overridden by `receipts`. [prune.parts.receipts_log_filter] # Prune all receipts, leaving only those which: -# - Contain logs from address `0x7ea2be2df7ba6e54b1a9c70676f668455e329d29`, starting from the block 17000000 +# - Contain logs from address `0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48`, starting from the block 17000000 # - Contain logs from address `0xdac17f958d2ee523a2206206994597c13d831ec7` in the last 1001 blocks -"0x7ea2be2df7ba6e54b1a9c70676f668455e329d29" = { before = 17000000 } +"0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48" = { before = 17000000 } "0xdac17f958d2ee523a2206206994597c13d831ec7" = { distance = 1000 } ``` From 7dd14a7eba7d0c62d3e5a357e25ab91f48dae710 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Mon, 11 Sep 2023 16:25:35 +0200 Subject: [PATCH 652/722] chore: pin git dependencies to specific revs (#4552) --- Cargo.lock | 80 ++++++++++++++++---------- Cargo.toml | 37 ++++++++---- crates/net/discv4/Cargo.toml | 2 +- crates/net/nat/Cargo.toml | 2 +- crates/revm/revm-inspectors/Cargo.toml | 6 +- deny.toml | 9 ++- 6 files changed, 87 insertions(+), 49 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b40f61ea37e3..529f0e040510 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -141,12 +141,6 @@ dependencies = [ "alloc-no-stdlib", ] -[[package]] -name = "allocator-api2" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" - [[package]] name = "alloy-rlp" version = "0.3.2" @@ -726,7 +720,8 @@ dependencies = [ [[package]] name = "boa_ast" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#306709177bf742425a61d0205d6f25e7811f6ebf" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7c261398db3b2f9ba05f76872721d6a8a142d10ae6c0a58d3ddc5c2853cc02d" dependencies = [ "bitflags 2.4.0", "boa_interner", @@ -739,7 +734,8 @@ dependencies = [ [[package]] name = "boa_engine" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#306709177bf742425a61d0205d6f25e7811f6ebf" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31e7a37b855625f1615a07414fb341361475950e57bb9396afe1389bbc2ccdc" dependencies = [ "bitflags 2.4.0", "boa_ast", @@ -758,7 +754,7 @@ dependencies = [ "num-bigint", "num-integer", "num-traits", - "num_enum", + "num_enum 0.6.1", "once_cell", "pollster", "rand 0.8.5", @@ -777,18 +773,19 @@ dependencies = [ [[package]] name = "boa_gc" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#306709177bf742425a61d0205d6f25e7811f6ebf" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "2346f8ac7b736236de0608a7c75a9a32bac0a1137b98574cfebde6343e4ff6b7" dependencies = [ "boa_macros", "boa_profiler", - "hashbrown 0.14.0", "thin-vec", ] [[package]] name = "boa_icu_provider" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#306709177bf742425a61d0205d6f25e7811f6ebf" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07652c6f1ca97bbe16bd2ab1ebc39313ac81568d2671aeb24a4a45964d2291a4" dependencies = [ "icu_collections", "icu_normalizer", @@ -801,7 +798,8 @@ dependencies = [ [[package]] name = "boa_interner" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#306709177bf742425a61d0205d6f25e7811f6ebf" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b968bd467737cace9723a5d01a3d32fe95471526d36db9654a1779c4b766fb6" dependencies = [ "boa_gc", "boa_macros", @@ -816,7 +814,8 @@ dependencies = [ [[package]] name = "boa_macros" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#306709177bf742425a61d0205d6f25e7811f6ebf" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3de43b7806061fccfba716fef51eea462d636de36803b62d10f902608ffef4" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", @@ -827,7 +826,8 @@ dependencies = [ [[package]] name = "boa_parser" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#306709177bf742425a61d0205d6f25e7811f6ebf" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ff1108bda6d573049191b6452490844c5ba4b12f7bdcc512a33e5c3f5037196" dependencies = [ "bitflags 2.4.0", "boa_ast", @@ -847,7 +847,8 @@ dependencies = [ [[package]] name = "boa_profiler" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#306709177bf742425a61d0205d6f25e7811f6ebf" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a24f6aa1ecc56e797506437b1f9a172e4a5f207894e74196c682cb656d2c2d60" [[package]] name = "boyer-moore-magiclen" @@ -936,7 +937,7 @@ dependencies = [ [[package]] name = "c-kzg" version = "0.1.0" -source = "git+https://github.com/ethereum/c-kzg-4844#f5f6f863d475847876a2bd5ee252058d37c3a15d" +source = "git+https://github.com/ethereum/c-kzg-4844?rev=f5f6f863d475847876a2bd5ee252058d37c3a15d#f5f6f863d475847876a2bd5ee252058d37c3a15d" dependencies = [ "bindgen 0.66.1", "blst", @@ -1841,7 +1842,7 @@ dependencies = [ [[package]] name = "discv5" version = "0.3.1" -source = "git+https://github.com/sigp/discv5#d2e30e04ee62418b9e57278cee907c02b99d5bd1" +source = "git+https://github.com/sigp/discv5?rev=d2e30e04ee62418b9e57278cee907c02b99d5bd1#d2e30e04ee62418b9e57278cee907c02b99d5bd1" dependencies = [ "aes 0.7.5", "aes-gcm", @@ -2272,7 +2273,7 @@ dependencies = [ "ethabi", "generic-array", "k256", - "num_enum", + "num_enum 0.7.0", "once_cell", "open-fastrlp", "rand 0.8.5", @@ -2860,10 +2861,6 @@ name = "hashbrown" version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" -dependencies = [ - "ahash 0.8.3", - "allocator-api2", -] [[package]] name = "hashers" @@ -3241,7 +3238,7 @@ checksum = "cb56e1aa765b4b4f3aadfab769793b7087bb03a4ea4920644a6d238e2df5b9ed" [[package]] name = "igd" version = "0.12.0" -source = "git+https://github.com/stevefan1999-personal/rust-igd#c2d1f83eb1612a462962453cb0703bc93258b173" +source = "git+https://github.com/stevefan1999-personal/rust-igd?rev=c2d1f83eb1612a462962453cb0703bc93258b173#c2d1f83eb1612a462962453cb0703bc93258b173" dependencies = [ 
"attohttpc", "bytes", @@ -4210,13 +4207,34 @@ dependencies = [ "libc", ] +[[package]] +name = "num_enum" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a015b430d3c108a207fd776d2e2196aaf8b1cf8cf93253e3a097ff3085076a1" +dependencies = [ + "num_enum_derive 0.6.1", +] + [[package]] name = "num_enum" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70bf6736f74634d299d00086f02986875b3c2d924781a6a2cb6c201e73da0ceb" dependencies = [ - "num_enum_derive", + "num_enum_derive 0.7.0", +] + +[[package]] +name = "num_enum_derive" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" +dependencies = [ + "proc-macro-crate", + "proc-macro2 1.0.66", + "quote 1.0.33", + "syn 2.0.31", ] [[package]] @@ -5117,9 +5135,9 @@ checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" [[package]] name = "regress" -version = "0.7.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ed9969cad8051328011596bf549629f1b800cf1731e7964b1eef8dfc480d2c2" +checksum = "82a9ecfa0cb04d0b04dddb99b8ccf4f66bc8dfd23df694b398570bd8ae3a50fb" dependencies = [ "hashbrown 0.13.2", "memchr", @@ -6190,7 +6208,7 @@ dependencies = [ [[package]] name = "revm" version = "3.3.0" -source = "git+https://github.com/bluealloy/revm/?branch=release/v25#6084e0fa2d457931cd8c9d29934bca0812b5b8d6" +source = "git+https://github.com/bluealloy/revm?branch=release/v25#6084e0fa2d457931cd8c9d29934bca0812b5b8d6" dependencies = [ "auto_impl", "revm-interpreter", @@ -6200,7 +6218,7 @@ dependencies = [ [[package]] name = "revm-interpreter" version = "1.1.2" -source = "git+https://github.com/bluealloy/revm/?branch=release/v25#6084e0fa2d457931cd8c9d29934bca0812b5b8d6" +source = "git+https://github.com/bluealloy/revm?branch=release/v25#6084e0fa2d457931cd8c9d29934bca0812b5b8d6" dependencies = [ "derive_more", "enumn", @@ -6211,7 +6229,7 @@ dependencies = [ [[package]] name = "revm-precompile" version = "2.0.3" -source = "git+https://github.com/bluealloy/revm/?branch=release/v25#6084e0fa2d457931cd8c9d29934bca0812b5b8d6" +source = "git+https://github.com/bluealloy/revm?branch=release/v25#6084e0fa2d457931cd8c9d29934bca0812b5b8d6" dependencies = [ "k256", "num", @@ -6227,7 +6245,7 @@ dependencies = [ [[package]] name = "revm-primitives" version = "1.1.2" -source = "git+https://github.com/bluealloy/revm/?branch=release/v25#6084e0fa2d457931cd8c9d29934bca0812b5b8d6" +source = "git+https://github.com/bluealloy/revm?branch=release/v25#6084e0fa2d457931cd8c9d29934bca0812b5b8d6" dependencies = [ "arbitrary", "auto_impl", diff --git a/Cargo.toml b/Cargo.toml index efacf44b4f6d..20188dddb388 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -63,9 +63,7 @@ rust-version = "1.70" # Remember to update .clippy.toml and README.md license = "MIT OR Apache-2.0" homepage = "https://paradigmxyz.github.io/reth" repository = "https://github.com/paradigmxyz/reth" -exclude = [ - ".github/", -] +exclude = [".github/"] # Like release, but with full debug symbols. Useful for e.g. `perf`. 
[profile.debug-fast] @@ -78,14 +76,8 @@ lto = "fat" codegen-units = 1 incremental = false -[patch.crates-io] -revm = { git = "https://github.com/bluealloy/revm/", branch = "release/v25" } -revm-primitives = { git = "https://github.com/bluealloy/revm/", branch = "release/v25" } - [workspace.dependencies] ## reth -revm = { version = "3" } -revm-primitives = "1.1" reth = { path = "./bin/reth" } reth-primitives = { path = "./crates/primitives" } reth-interfaces = { path = "./crates/interfaces" } @@ -103,8 +95,12 @@ reth-transaction-pool = { path = "./crates/transaction-pool" } reth-tasks = { path = "./crates/tasks" } reth-network = { path = "./crates/net/network" } reth-network-api = { path = "./crates/net/network-api" } -reth-rpc-types-compat = { path = "./crates/rpc/rpc-types-compat"} +reth-rpc-types-compat = { path = "./crates/rpc/rpc-types-compat" } +revm = { git = "https://github.com/bluealloy/revm", branch = "release/v25" } +revm-interpreter = { git = "https://github.com/bluealloy/revm", branch = "release/v25" } +revm-precompile = { git = "https://github.com/bluealloy/revm", branch = "release/v25" } +revm-primitives = { git = "https://github.com/bluealloy/revm", branch = "release/v25" } ## eth ethers-core = { version = "2.0", default-features = false } @@ -112,6 +108,13 @@ ethers-providers = { version = "2.0", default-features = false } ethers-signers = { version = "2.0", default-features = false } ethers-middleware = { version = "2.0", default-features = false } +discv5 = { git = "https://github.com/sigp/discv5", rev = "d2e30e04ee62418b9e57278cee907c02b99d5bd1" } +igd = { git = "https://github.com/stevefan1999-personal/rust-igd", rev = "c2d1f83eb1612a462962453cb0703bc93258b173" } + +## js +boa_engine = "0.17" +boa_gc = "0.17" + ## misc aquamarine = "0.3" bytes = "1.4" @@ -149,10 +152,14 @@ jsonrpsee-core = { version = "0.20" } jsonrpsee-types = { version = "0.20" } ## crypto -secp256k1 = { version = "0.27.0", default-features = false, features = ["global-context", "rand-std", "recovery"] } +secp256k1 = { version = "0.27.0", default-features = false, features = [ + "global-context", + "rand-std", + "recovery", +] } enr = { version = "0.9", default-features = false, features = ["k256"] } # for eip-4844 -c-kzg = { git = "https://github.com/ethereum/c-kzg-4844" } +c-kzg = { git = "https://github.com/ethereum/c-kzg-4844", rev = "f5f6f863d475847876a2bd5ee252058d37c3a15d" } ## config confy = "0.5" @@ -162,3 +169,9 @@ toml = "0.7" proptest = "1.0" arbitrary = "1.1" assert_matches = "1.5.0" + +[patch.crates-io] +revm = { git = "https://github.com/bluealloy/revm", branch = "release/v25" } +revm-interpreter = { git = "https://github.com/bluealloy/revm", branch = "release/v25" } +revm-precompile = { git = "https://github.com/bluealloy/revm", branch = "release/v25" } +revm-primitives = { git = "https://github.com/bluealloy/revm", branch = "release/v25" } diff --git a/crates/net/discv4/Cargo.toml b/crates/net/discv4/Cargo.toml index 310da6c8a61b..2f3c2293e858 100644 --- a/crates/net/discv4/Cargo.toml +++ b/crates/net/discv4/Cargo.toml @@ -19,7 +19,7 @@ reth-net-common = { path = "../common" } reth-net-nat = { path = "../nat" } # ethereum -discv5 = { git = "https://github.com/sigp/discv5" } +discv5.workspace = true secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery", "serde"] } enr = { workspace = true, default-features = false, features = ["rust-secp256k1"] } diff --git a/crates/net/nat/Cargo.toml b/crates/net/nat/Cargo.toml index ee127ea40277..3ee7893c80ca 100644 --- 
a/crates/net/nat/Cargo.toml +++ b/crates/net/nat/Cargo.toml @@ -15,7 +15,7 @@ Helpers for working around NAT # nat public-ip = "0.2" ## fork of rust-igd with ipv6 support: https://github.com/sbstp/rust-igd/issues/47 -igd = { git = "https://github.com/stevefan1999-personal/rust-igd", features = ["aio", "tokio1"] } +igd = { workspace = true, features = ["aio", "tokio1"] } # misc tracing.workspace = true diff --git a/crates/revm/revm-inspectors/Cargo.toml b/crates/revm/revm-inspectors/Cargo.toml index 3627a826545b..b5e18ffcbc11 100644 --- a/crates/revm/revm-inspectors/Cargo.toml +++ b/crates/revm/revm-inspectors/Cargo.toml @@ -22,10 +22,10 @@ thiserror = { workspace = true, optional = true } serde_json = { workspace = true, optional = true } # js-tracing-inspector -boa_engine = { git = "https://github.com/boa-dev/boa", optional = true } -boa_gc = { git = "https://github.com/boa-dev/boa", optional = true } +boa_engine = { workspace = true, optional = true } +boa_gc = { workspace = true, optional = true } tokio = { version = "1", features = ["sync"], optional = true } [features] default = ["js-tracer"] -js-tracer = ["boa_engine", "boa_gc", "tokio","thiserror", "serde_json"] +js-tracer = ["boa_engine", "boa_gc", "tokio", "thiserror", "serde_json"] diff --git a/deny.toml b/deny.toml index c76bbe889cc0..73c18ff3eda0 100644 --- a/deny.toml +++ b/deny.toml @@ -93,4 +93,11 @@ license-files = [{ path = "LICENSE", hash = 0x001c7e6c }] unknown-registry = "warn" # Lint level for what to happen when a crate from a git repository that is not # in the allow list is encountered -unknown-git = "allow" +unknown-git = "deny" +allow-git = [ + "https://github.com/bluealloy/revm", + "https://github.com/boa-dev/boa", + "https://github.com/ethereum/c-kzg-4844", + "https://github.com/sigp/discv5", + "https://github.com/stevefan1999-personal/rust-igd", +] From 28f511804871de98e046a3d98e2733ea8bc2eba2 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Mon, 11 Sep 2023 16:53:09 +0200 Subject: [PATCH 653/722] chore: update dependencies with breaking changes (#4555) --- Cargo.lock | 304 +++++++++--------- Cargo.toml | 6 +- bin/reth/Cargo.toml | 2 +- crates/blockchain-tree/Cargo.toml | 2 +- crates/interfaces/Cargo.toml | 10 +- crates/metrics/metrics-derive/Cargo.toml | 2 +- crates/net/dns/Cargo.toml | 4 +- crates/net/dns/src/resolver.rs | 8 +- crates/net/ecies/Cargo.toml | 2 +- crates/net/eth-wire/Cargo.toml | 16 +- crates/net/nat/Cargo.toml | 2 +- crates/net/network/Cargo.toml | 2 +- crates/primitives/Cargo.toml | 14 +- crates/revm/revm-inspectors/Cargo.toml | 2 +- crates/rlp/Cargo.toml | 4 +- crates/storage/codecs/Cargo.toml | 4 +- crates/storage/db/Cargo.toml | 6 +- crates/storage/libmdbx-rs/Cargo.toml | 2 +- crates/storage/libmdbx-rs/mdbx-sys/Cargo.toml | 2 +- 19 files changed, 196 insertions(+), 198 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 529f0e040510..3b01df05b5dc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -141,6 +141,12 @@ dependencies = [ "alloc-no-stdlib", ] +[[package]] +name = "allocator-api2" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" + [[package]] name = "alloy-rlp" version = "0.3.2" @@ -189,9 +195,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15c4c2c83f81532e5845a733998b6971faca23490340a418e9b72a3ec9de12ea" 
+checksum = "b84bf0a05bbb2a83e5eb6fa36bb6e87baa08193c35ff52bbf6b38d8af2890e46" [[package]] name = "anstyle-parse" @@ -428,7 +434,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -531,9 +537,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.3" +version = "0.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "414dcefbc63d77c526a76b3afcf6fbb9b5e2791c19c3aa2297733208750c6e53" +checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" [[package]] name = "base64ct" @@ -596,15 +602,16 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.65.1" +version = "0.66.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5" +checksum = "f2b84e06fc203107bfbad243f4aba2af864eb7db3b1cf46ea0a023b0b433d2a7" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.4.0", "cexpr", "clang-sys", "lazy_static", "lazycell", + "log", "peeking_take_while", "prettyplease", "proc-macro2 1.0.66", @@ -612,30 +619,28 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.31", + "syn 2.0.32", + "which", ] [[package]] name = "bindgen" -version = "0.66.1" +version = "0.68.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2b84e06fc203107bfbad243f4aba2af864eb7db3b1cf46ea0a023b0b433d2a7" +checksum = "726e4313eb6ec35d2730258ad4e15b547ee75d6afaa1361a922e78e59b7d8078" dependencies = [ "bitflags 2.4.0", "cexpr", "clang-sys", "lazy_static", "lazycell", - "log", "peeking_take_while", - "prettyplease", "proc-macro2 1.0.66", "quote 1.0.33", "regex", "rustc-hash", "shlex", - "syn 2.0.31", - "which", + "syn 2.0.32", ] [[package]] @@ -819,7 +824,7 @@ checksum = "ca3de43b7806061fccfba716fef51eea462d636de36803b62d10f902608ffef4" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.31", + "syn 2.0.32", "synstructure 0.13.0", ] @@ -852,9 +857,9 @@ checksum = "a24f6aa1ecc56e797506437b1f9a172e4a5f207894e74196c682cb656d2c2d60" [[package]] name = "boyer-moore-magiclen" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c77eb6b3a37f71fcd40e49b56c028ea8795c0e550afd8021e3e6a2369653035" +checksum = "116d76fee857b03ecdd95d5f9555e46aa0cd34e5bb348a520e9445d151182a7e" dependencies = [ "debug-helper", ] @@ -1132,7 +1137,7 @@ dependencies = [ "heck", "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -1156,7 +1161,7 @@ dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", "serde", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -1197,7 +1202,7 @@ version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5286a0843c21f8367f7be734f89df9b822e0321d8bcce8d6e735aadff7d74979" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "bech32", "bs58", "digest 0.10.7", @@ -1468,6 +1473,22 @@ dependencies = [ "winapi", ] +[[package]] +name = "crossterm" +version = "0.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f476fe445d41c9e991fd07515a6f463074b782242ccf4a5b7b1d1012e70824df" +dependencies = [ + "bitflags 2.4.0", + "crossterm_winapi", + "libc", + "mio", + "parking_lot 0.12.1", + "signal-hook", + "signal-hook-mio", + "winapi", +] + [[package]] name = "crossterm_winapi" version = "0.9.1" 
@@ -1548,7 +1569,7 @@ checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -1596,7 +1617,7 @@ dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", "strsim 0.10.0", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -1618,7 +1639,7 @@ checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ "darling_core 0.20.3", "quote 1.0.33", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -1703,7 +1724,7 @@ checksum = "53e0efad4403bfc52dc201159c4b842a246a14b98c64b55dfd0f2d89729dfeb8" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -1877,7 +1898,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -2030,7 +2051,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0be7b2ac146c1f99fe245c02d16af0696450d8e06c135db75e10eeb9e642c20d" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "bytes", "ed25519-dalek", "hex", @@ -2059,14 +2080,14 @@ dependencies = [ [[package]] name = "enum-as-inner" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" +checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" dependencies = [ "heck", "proc-macro2 1.0.66", "quote 1.0.33", - "syn 1.0.109", + "syn 2.0.32", ] [[package]] @@ -2079,7 +2100,7 @@ dependencies = [ "num-traits", "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -2090,7 +2111,7 @@ checksum = "c2ad8cef1d801a4686bfd8919f0b30eac4c8e48968c437a6405ded4fb5272d2b" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -2237,7 +2258,7 @@ dependencies = [ "regex", "serde", "serde_json", - "syn 2.0.31", + "syn 2.0.32", "toml 0.7.8", "walkdir", ] @@ -2255,7 +2276,7 @@ dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", "serde_json", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -2281,7 +2302,7 @@ dependencies = [ "serde", "serde_json", "strum 0.25.0", - "syn 2.0.31", + "syn 2.0.32", "tempfile", "thiserror", "tiny-keccak", @@ -2338,7 +2359,7 @@ checksum = "6838fa110e57d572336178b7c79e94ff88ef976306852d8cb87d9e5b1fc7c0b5" dependencies = [ "async-trait", "auto_impl", - "base64 0.21.3", + "base64 0.21.4", "bytes", "const-hex", "enr", @@ -2624,7 +2645,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -2861,6 +2882,10 @@ name = "hashbrown" version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" +dependencies = [ + "ahash 0.8.3", + "allocator-api2", +] [[package]] name = "hashers" @@ -2925,12 +2950,6 @@ dependencies = [ "serde", ] -[[package]] -name = "hex-literal" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" - [[package]] name = "hex-literal" version = "0.4.1" @@ -3017,9 +3036,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = 
"human_bytes" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27e2b089f28ad15597b48d8c0a8fe94eeb1c1cb26ca99b6f66ac9582ae10c5e6" +checksum = "91f255a4535024abf7640cb288260811fc14794f62b063652ed349f9a6c2348e" [[package]] name = "humantime" @@ -3334,6 +3353,7 @@ checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" dependencies = [ "equivalent", "hashbrown 0.14.0", + "serde", ] [[package]] @@ -3419,7 +3439,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi", - "rustix 0.38.12", + "rustix 0.38.13", "windows-sys 0.48.0", ] @@ -3662,7 +3682,7 @@ version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "pem", "ring", "serde", @@ -3810,11 +3830,11 @@ dependencies = [ [[package]] name = "lru" -version = "0.10.1" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "718e8fae447df0c7e1ba7f5189829e63fd536945c8988d61444c19039f16b670" +checksum = "a4a83fb7698b3643a0e34f9ae6f2e8f0178c0fd42f8b59d493aa271ff3a5bf21" dependencies = [ - "hashbrown 0.13.2", + "hashbrown 0.14.0", ] [[package]] @@ -3903,7 +3923,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a4964177ddfdab1e3a2b37aec7cf320e14169abb0ed73999f558136409178d5" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "hyper", "indexmap 1.9.3", "ipnet", @@ -3923,7 +3943,7 @@ checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -4234,7 +4254,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -4246,7 +4266,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -4343,9 +4363,9 @@ checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "page_size" -version = "0.4.2" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eebde548fbbf1ea81a99b128872779c437752fb99f217c45245e1a61dcd9edcd" +checksum = "30d5b2194ed13191c1999ae0704b7839fb18384fa22e49b57eeaa97d79ce40da" dependencies = [ "libc", "winapi", @@ -4353,9 +4373,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.5" +version = "3.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dec8a8073036902368c2cdc0387e85ff9a37054d7e7c98e592145e0c92cd4fb" +checksum = "f88eaac72ead1b9bd4ce747d577dbd2ad31fb0a56a9a20c611bf27bd1b97fbed" dependencies = [ "arrayvec", "bitvec", @@ -4368,9 +4388,9 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.6.5" +version = "3.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "312270ee71e1cd70289dacf597cab7b207aa107d2f28191c2ae45b2ece18a260" +checksum = "33bdcd446e9400b6ad9fc85b4aea68846c258b07c3efb994679ae82707b133f0" dependencies = [ "proc-macro-crate", "proc-macro2 1.0.66", @@ -4537,7 +4557,7 @@ dependencies = [ "phf_shared", "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -4566,7 +4586,7 @@ checksum = 
"4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -4750,7 +4770,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" dependencies = [ "proc-macro2 1.0.66", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -4863,6 +4883,17 @@ dependencies = [ "syn 0.15.44", ] +[[package]] +name = "proptest-derive" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cf16337405ca084e9c78985114633b6827711d22b9e6ef6c6c0d665eb3f0b6e" +dependencies = [ + "proc-macro2 1.0.66", + "quote 1.0.33", + "syn 1.0.109", +] + [[package]] name = "public-ip" version = "0.2.2" @@ -5149,7 +5180,7 @@ version = "0.11.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "bytes", "encoding_rs", "futures-core", @@ -5198,7 +5229,7 @@ dependencies = [ "comfy-table", "confy", "const-str", - "crossterm 0.25.0", + "crossterm 0.27.0", "dirs-next", "eyre", "fdlimit", @@ -5333,7 +5364,7 @@ dependencies = [ "aquamarine", "assert_matches", "linked_hash_set", - "lru 0.10.1", + "lru 0.11.1", "metrics", "parking_lot 0.12.1", "reth-db", @@ -5355,7 +5386,7 @@ dependencies = [ "codecs-derive", "modular-bitfield", "proptest", - "proptest-derive", + "proptest-derive 0.4.0", "revm-primitives", "serde", "test-fuzz", @@ -5413,7 +5444,7 @@ dependencies = [ "postcard", "pprof", "proptest", - "proptest-derive", + "proptest-derive 0.4.0", "rand 0.8.5", "reth-codecs", "reth-db", @@ -5519,7 +5550,7 @@ dependencies = [ "educe", "futures", "generic-array", - "hex-literal 0.3.4", + "hex-literal", "hmac", "pin-project", "rand 0.8.5", @@ -5547,11 +5578,11 @@ dependencies = [ "ethers-core", "futures", "hex", - "hex-literal 0.3.4", + "hex-literal", "metrics", "pin-project", "proptest", - "proptest-derive", + "proptest-derive 0.4.0", "rand 0.8.5", "reth-codecs", "reth-discv4", @@ -5581,7 +5612,7 @@ dependencies = [ "auto_impl", "clap", "futures", - "hex-literal 0.3.4", + "hex-literal", "modular-bitfield", "parity-scale-codec", "parking_lot 0.12.1", @@ -5627,7 +5658,7 @@ dependencies = [ "byteorder", "criterion", "derive_more", - "indexmap 1.9.3", + "indexmap 2.0.0", "libc", "lifetimed-bytes", "parking_lot 0.12.1", @@ -5643,7 +5674,7 @@ dependencies = [ name = "reth-mdbx-sys" version = "0.1.0-alpha.8" dependencies = [ - "bindgen 0.65.1", + "bindgen 0.68.1", "cc", "libc", ] @@ -5667,8 +5698,8 @@ dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", "regex", - "serial_test 0.10.0", - "syn 2.0.31", + "serial_test", + "syn 2.0.32", "trybuild", ] @@ -5737,7 +5768,7 @@ dependencies = [ "secp256k1", "serde", "serde_json", - "serial_test 2.0.0", + "serial_test", "tempfile", "thiserror", "tokio", @@ -5797,7 +5828,7 @@ dependencies = [ "fixed-hash", "hash-db", "hex", - "hex-literal 0.3.4", + "hex-literal", "impl-serde", "modular-bitfield", "once_cell", @@ -5805,7 +5836,7 @@ dependencies = [ "plain_hasher", "pprof", "proptest", - "proptest-derive", + "proptest-derive 0.4.0", "rand 0.8.5", "rayon", "reth-codecs", @@ -5895,7 +5926,7 @@ version = "0.1.0-alpha.8" dependencies = [ "boa_engine", "boa_gc", - "hashbrown 0.13.2", + "hashbrown 0.14.0", "reth-primitives", "reth-rpc-types", "revm", @@ -5924,7 +5955,7 @@ dependencies = [ "criterion", "ethereum-types", "ethnum", - 
"hex-literal 0.4.1", + "hex-literal", "pprof", "reth-rlp", "reth-rlp-derive", @@ -5938,7 +5969,7 @@ version = "0.1.0-alpha.8" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -6256,10 +6287,10 @@ dependencies = [ "fixed-hash", "hashbrown 0.13.2", "hex", - "hex-literal 0.4.1", + "hex-literal", "primitive-types", "proptest", - "proptest-derive", + "proptest-derive 0.3.0", "rlp", "ruint", "serde", @@ -6439,9 +6470,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.12" +version = "0.38.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdf14a7a466ce88b5eac3da815b53aefc208ce7e74d1c263aabb04d88c4abeb1" +checksum = "d7db8590df6dfcd144d22afd1b83b36c21a18d7cbc1dc4bb5295a8712e9eb662" dependencies = [ "bitflags 2.4.0", "errno 0.3.3", @@ -6480,7 +6511,7 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", ] [[package]] @@ -6737,7 +6768,7 @@ checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -6774,14 +6805,15 @@ dependencies = [ [[package]] name = "serde_with" -version = "2.3.3" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07ff71d2c147a7b57362cead5e22f772cd52f6ab31cfcd9edcd7f6aeb2a0afbe" +checksum = "1ca3b16a3d82c4088f343b7480a93550b3eabe1a358569c2dfe38bbcead07237" dependencies = [ - "base64 0.13.1", + "base64 0.21.4", "chrono", "hex", "indexmap 1.9.3", + "indexmap 2.0.0", "serde", "serde_json", "serde_with_macros", @@ -6790,28 +6822,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "2.3.3" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "881b6f881b17d13214e5d494c939ebab463d01264ce1811e9d4ac3a882e7695f" +checksum = "2e6be15c453eb305019bfa438b1593c731f36a289a7853f7707ee29e870b3b3c" dependencies = [ "darling 0.20.3", "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.31", -] - -[[package]] -name = "serial_test" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c789ec87f4687d022a2405cf46e0cd6284889f1839de292cadeb6c6019506f2" -dependencies = [ - "dashmap", - "futures", - "lazy_static", - "log", - "parking_lot 0.12.1", - "serial_test_derive 0.10.0", + "syn 2.0.32", ] [[package]] @@ -6825,18 +6843,7 @@ dependencies = [ "lazy_static", "log", "parking_lot 0.12.1", - "serial_test_derive 2.0.0", -] - -[[package]] -name = "serial_test_derive" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b64f9e531ce97c88b4778aad0ceee079216071cffec6ac9b904277f8f92e7fe3" -dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", - "syn 1.0.109", + "serial_test_derive", ] [[package]] @@ -6847,7 +6854,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -7188,7 +7195,7 @@ dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", "rustversion", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -7222,9 +7229,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "sucds" -version = "0.5.0" +version = "0.6.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c1c7f814471a34d2355f9eb25ef3517ec491ac243612b1c83137739998c5444" +checksum = "64accd20141dfbef67ad83c51d588146cff7810616e1bda35a975be369059533" dependencies = [ "anyhow", ] @@ -7276,9 +7283,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.31" +version = "2.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "718fa2415bcb8d8bd775917a1bf12a7931b6dfa890753378538118181e0cb398" +checksum = "239814284fd6f1a4ffe4ca893952cdd93c224b6a1571c9a9eadd670295c0c9e2" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", @@ -7305,7 +7312,7 @@ checksum = "285ba80e733fac80aa4270fbcdf83772a79b80aa35c97075320abfee4a915b06" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.31", + "syn 2.0.32", "unicode-xid 0.2.4", ] @@ -7324,7 +7331,7 @@ dependencies = [ "cfg-if", "fastrand 2.0.0", "redox_syscall 0.3.5", - "rustix 0.38.12", + "rustix 0.38.13", "windows-sys 0.48.0", ] @@ -7381,7 +7388,7 @@ dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", "subprocess", - "syn 2.0.31", + "syn 2.0.32", "test-fuzz-internal", "toolchain_find", ] @@ -7423,7 +7430,7 @@ checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -7546,7 +7553,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -7683,7 +7690,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" dependencies = [ "async-compression", - "base64 0.21.3", + "base64 0.21.4", "bitflags 2.4.0", "bytes", "futures-core", @@ -7750,7 +7757,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -7872,20 +7879,20 @@ dependencies = [ [[package]] name = "trust-dns-proto" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f7f83d1e4a0e4358ac54c5c3681e5d7da5efc5a7a632c90bb6d6669ddd9bc26" +checksum = "0dc775440033cb114085f6f2437682b194fa7546466024b1037e82a48a052a69" dependencies = [ "async-trait", "cfg-if", "data-encoding", - "enum-as-inner 0.5.1", + "enum-as-inner 0.6.0", "futures-channel", "futures-io", "futures-util", - "idna 0.2.3", + "idna 0.4.0", "ipnet", - "lazy_static", + "once_cell", "rand 0.8.5", "smallvec 1.11.0", "thiserror", @@ -7897,22 +7904,23 @@ dependencies = [ [[package]] name = "trust-dns-resolver" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aff21aa4dcefb0a1afbfac26deb0adc93888c7d295fb63ab273ef276ba2b7cfe" +checksum = "2dff7aed33ef3e8bf2c9966fccdfed93f93d46f432282ea875cd66faabc6ef2f" dependencies = [ "cfg-if", "futures-util", "ipconfig", - "lazy_static", "lru-cache", + "once_cell", "parking_lot 0.12.1", + "rand 0.8.5", "resolv-conf", "smallvec 1.11.0", "thiserror", "tokio", "tracing", - "trust-dns-proto 0.22.0", + "trust-dns-proto 0.23.0", ] [[package]] @@ -7923,9 +7931,9 @@ checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "trybuild" -version = "1.0.83" +version = "1.0.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6df60d81823ed9c520ee897489573da4b1d79ffbe006b8134f46de1a1aa03555" +checksum = "a5c89fd17b7536f2cf66c97cff6e811e89e728ca0ed13caeed610c779360d8b4" dependencies = [ "basic-toml", "glob", @@ -8206,7 +8214,7 @@ dependencies = [ "once_cell", "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.31", + "syn 2.0.32", "wasm-bindgen-shared", ] @@ -8240,7 +8248,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.31", + "syn 2.0.32", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8276,7 +8284,7 @@ dependencies = [ "either", "home", "once_cell", - "rustix 0.38.12", + "rustix 0.38.13", ] [[package]] @@ -8624,7 +8632,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.33", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 20188dddb388..343a5dbaf287 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -130,6 +130,7 @@ rayon = "1.7" itertools = "0.11" parking_lot = "0.12" metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +hex-literal = "0.4" ### proc-macros proc-macro2 = "1.0" @@ -166,10 +167,13 @@ confy = "0.5" toml = "0.7" ### misc-testing -proptest = "1.0" arbitrary = "1.1" assert_matches = "1.5.0" +proptest = "1.0" +proptest-derive = "0.4" +serial_test = "2" + [patch.crates-io] revm = { git = "https://github.com/bluealloy/revm", branch = "release/v25" } revm-interpreter = { git = "https://github.com/bluealloy/revm", branch = "release/v25" } diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 96f60f54a39c..28b0d8118e34 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -77,7 +77,7 @@ proptest.workspace = true # tui comfy-table = "7.0" -crossterm = "0.25.0" +crossterm = "0.27.0" tui = "0.19.0" human_bytes = "0.4.1" diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index 81a8b81d2e90..211f12b2dc6c 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -23,7 +23,7 @@ reth-stages = { path = "../stages" } # common parking_lot.workspace = true -lru = "0.10" +lru = "0.11" tracing.workspace = true # metrics diff --git a/crates/interfaces/Cargo.toml b/crates/interfaces/Cargo.toml index 26bec4881da6..1f4786f75d74 100644 --- a/crates/interfaces/Cargo.toml +++ b/crates/interfaces/Cargo.toml @@ -45,13 +45,9 @@ reth-db = { path = "../storage/db", features = ["test-utils"] } tokio = { workspace = true, features = ["full"] } tokio-stream = { workspace = true, features = ["sync"] } arbitrary = { workspace = true, features = ["derive"] } -hex-literal = "0.3" -secp256k1 = { workspace = true, features = [ - "alloc", - "recovery", - "rand", -] } +hex-literal.workspace = true +secp256k1 = { workspace = true, features = ["alloc", "recovery", "rand"] } [features] test-utils = ["tokio-stream/sync", "secp256k1", "rand/std_rng"] -cli = ["clap"] \ No newline at end of file +cli = ["clap"] diff --git a/crates/metrics/metrics-derive/Cargo.toml b/crates/metrics/metrics-derive/Cargo.toml index a5e85a8422b3..8af2af4af9b7 100644 --- a/crates/metrics/metrics-derive/Cargo.toml +++ b/crates/metrics/metrics-derive/Cargo.toml @@ -19,5 +19,5 @@ once_cell = "1.17.0" [dev-dependencies] metrics.workspace = true +serial_test.workspace = true trybuild = "1.0" -serial_test = "0.10" diff --git a/crates/net/dns/Cargo.toml b/crates/net/dns/Cargo.toml index b4ef53c89473..012fe4548de0 100644 --- 
a/crates/net/dns/Cargo.toml +++ b/crates/net/dns/Cargo.toml @@ -23,7 +23,7 @@ tokio = { workspace = true, features = ["io-util", "net", "time"] } tokio-stream.workspace = true # trust-dns -trust-dns-resolver = "0.22" +trust-dns-resolver = "0.23" # misc data-encoding = "2" @@ -34,7 +34,7 @@ thiserror.workspace = true tracing.workspace = true parking_lot.workspace = true serde = { workspace = true, optional = true } -serde_with = { version = "2.1.0", optional = true } +serde_with = { version = "3.3.0", optional = true } [dev-dependencies] tokio = { workspace = true, features = ["sync", "rt", "rt-multi-thread"] } diff --git a/crates/net/dns/src/resolver.rs b/crates/net/dns/src/resolver.rs index 3b9e5b0e5729..907a3b054a56 100644 --- a/crates/net/dns/src/resolver.rs +++ b/crates/net/dns/src/resolver.rs @@ -5,7 +5,7 @@ use parking_lot::RwLock; use std::collections::HashMap; use tracing::trace; pub use trust_dns_resolver::{error::ResolveError, TokioAsyncResolver}; -use trust_dns_resolver::{proto::DnsHandle, AsyncResolver, ConnectionProvider}; +use trust_dns_resolver::{name_server::ConnectionProvider, AsyncResolver}; /// A type that can lookup DNS entries #[async_trait] @@ -15,11 +15,7 @@ pub trait Resolver: Send + Sync + Unpin + 'static { } #[async_trait] -impl Resolver for AsyncResolver -where - C: DnsHandle, - P: ConnectionProvider, -{ +impl Resolver for AsyncResolver
{ async fn lookup_txt(&self, query: &str) -> Option { // See: [AsyncResolver::txt_lookup] // > *hint* queries that end with a '.' are fully qualified names and are cheaper lookups diff --git a/crates/net/ecies/Cargo.toml b/crates/net/ecies/Cargo.toml index 9a83e64bc7e3..7b4253d0621e 100644 --- a/crates/net/ecies/Cargo.toml +++ b/crates/net/ecies/Cargo.toml @@ -40,4 +40,4 @@ block-padding = "0.3.2" cipher = { version = "0.4.3", features = ["block-padding"] } [dev-dependencies] -hex-literal = "0.3.4" +hex-literal.workspace = true diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index e6d865246732..bda62bcfdd51 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -17,14 +17,8 @@ serde = { workspace = true, optional = true } reth-codecs = { path = "../../storage/codecs" } reth-primitives.workspace = true reth-ecies = { path = "../ecies" } -reth-rlp = { workspace = true, features = [ - "alloc", - "derive", - "std", - "ethereum-types", - "smol_str", -] } -reth-discv4 = {path = "../discv4" } +reth-rlp = { workspace = true, features = ["alloc", "derive", "std", "ethereum-types", "smol_str"] } +reth-discv4 = { path = "../discv4" } # metrics reth-metrics.workspace = true @@ -46,7 +40,7 @@ async-trait.workspace = true # arbitrary utils arbitrary = { workspace = true, features = ["derive"], optional = true } proptest = { workspace = true, optional = true } -proptest-derive = { version = "0.3", optional = true } +proptest-derive = { workspace = true, optional = true } [dev-dependencies] reth-primitives = { workspace = true, features = ["arbitrary"] } @@ -55,14 +49,14 @@ ethers-core = { workspace = true, default-features = false } test-fuzz = "4" tokio-util = { workspace = true, features = ["io", "codec"] } -hex-literal = "0.3" +hex-literal.workspace = true hex = "0.4" rand.workspace = true secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true -proptest-derive = "0.3" +proptest-derive.workspace = true [features] default = ["serde"] diff --git a/crates/net/nat/Cargo.toml b/crates/net/nat/Cargo.toml index 3ee7893c80ca..54d373908c64 100644 --- a/crates/net/nat/Cargo.toml +++ b/crates/net/nat/Cargo.toml @@ -22,7 +22,7 @@ tracing.workspace = true pin-project-lite = "0.2.9" tokio = { workspace = true, features = ["time"] } thiserror.workspace = true -serde_with = { version = "2.1.0", optional = true } +serde_with = { version = "3.3.0", optional = true } [dev-dependencies] reth-tracing = { path = "../../tracing" } diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index 0f4444a2486f..cbd2ce5ec748 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -88,8 +88,8 @@ enr = { workspace = true, features = ["serde", "rust-secp256k1"] } # misc hex = "0.4" +serial_test.workspace = true tempfile = "3.3" -serial_test = "2.0" [features] default = ["serde"] diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index c0601bd3dd25..a3e1551e8cb0 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -48,11 +48,11 @@ tokio-stream.workspace = true bytes.workspace = true serde.workspace = true serde_json.workspace = true -serde_with = "2.1.0" +serde_with = "3.3.0" thiserror.workspace = true -sucds = "0.5.0" +sucds = "~0.6" hex = "0.4" -hex-literal = "0.3" +hex-literal.workspace = true modular-bitfield = "0.11.2" derive_more = "0.99" url = "2.3" @@ -68,23 +68,23 
@@ sha2 = "0.10.7" triehash = "0.8" # See to replace hashers to simplify libraries plain_hasher = "0.2" -hash-db = "0.15" +hash-db = "~0.15" # arbitrary utils arbitrary = { workspace = true, features = ["derive"], optional = true } proptest = { workspace = true, optional = true } -proptest-derive = { version = "0.3", optional = true } +proptest-derive = { workspace = true, optional = true } strum = { workspace = true, features = ["derive"] } [dev-dependencies] serde_json.workspace = true -hex-literal = "0.3" +hex-literal.workspace = true test-fuzz = "4" rand.workspace = true revm-primitives = { workspace = true, features = ["arbitrary"] } arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true -proptest-derive = "0.3" +proptest-derive.workspace = true assert_matches.workspace = true toml = "0.7.4" diff --git a/crates/revm/revm-inspectors/Cargo.toml b/crates/revm/revm-inspectors/Cargo.toml index b5e18ffcbc11..6994bd8af226 100644 --- a/crates/revm/revm-inspectors/Cargo.toml +++ b/crates/revm/revm-inspectors/Cargo.toml @@ -15,7 +15,7 @@ reth-rpc-types.workspace = true revm.workspace = true # remove from reth and reexport from revm -hashbrown = "0.13" +hashbrown = "0.14" serde = { workspace = true, features = ["derive"] } thiserror = { workspace = true, optional = true } diff --git a/crates/rlp/Cargo.toml b/crates/rlp/Cargo.toml index 98c25c7e31b6..8a92f6388d6a 100644 --- a/crates/rlp/Cargo.toml +++ b/crates/rlp/Cargo.toml @@ -16,7 +16,7 @@ ethnum = { version = "1", default-features = false, optional = true } smol_str = { version = "0.2", default-features = false, optional = true } ethereum-types = { version = "0.14", features = ["codec"], optional = true } revm-primitives = { workspace = true, features = ["serde"] } -reth-rlp-derive = { path = "./rlp-derive", optional = true } +reth-rlp-derive = { path = "./rlp-derive", optional = true } # for eip-4844 c-kzg = { workspace = true, optional = true } @@ -29,7 +29,7 @@ reth-rlp = { workspace = true, features = [ "ethereum-types", "smol_str", ] } -hex-literal = "0.4" +hex-literal.workspace = true criterion = "0.5.0" pprof = { version = "0.12", features = ["flamegraph", "frame-pointer", "criterion"] } diff --git a/crates/storage/codecs/Cargo.toml b/crates/storage/codecs/Cargo.toml index 458ef3063659..80bdefe2be7a 100644 --- a/crates/storage/codecs/Cargo.toml +++ b/crates/storage/codecs/Cargo.toml @@ -23,7 +23,7 @@ revm-primitives = { workspace = true, features = ["serde"] } # arbitrary utils arbitrary = { workspace = true, features = ["derive"], optional = true } proptest = { workspace = true, optional = true } -proptest-derive = { version = "0.3", optional = true } +proptest-derive = { workspace = true, optional = true } [dev-dependencies] revm-primitives = { workspace = true, features = ["serde", "arbitrary"] } @@ -34,4 +34,4 @@ test-fuzz = "4" arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true -proptest-derive = "0.3" +proptest-derive.workspace = true diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index ae4d67ca8f49..76d650b975b0 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -36,7 +36,7 @@ metrics.workspace = true # misc bytes.workspace = true -page_size = "0.4.2" +page_size = "0.6.0" thiserror.workspace = true tempfile = { version = "3.3.0", optional = true } parking_lot.workspace = true @@ -46,7 +46,7 @@ eyre = "0.6.8" # arbitrary utils arbitrary = { workspace = true, features = ["derive"], optional = true } proptest = { 
workspace = true, optional = true } -proptest-derive = { version = "0.3", optional = true } +proptest-derive = { workspace = true, optional = true } [dev-dependencies] # reth libs with arbitrary @@ -70,7 +70,7 @@ async-trait.workspace = true arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true -proptest-derive = "0.3" +proptest-derive.workspace = true serde_json.workspace = true diff --git a/crates/storage/libmdbx-rs/Cargo.toml b/crates/storage/libmdbx-rs/Cargo.toml index 94c8c015781b..e83d067859e3 100644 --- a/crates/storage/libmdbx-rs/Cargo.toml +++ b/crates/storage/libmdbx-rs/Cargo.toml @@ -15,7 +15,7 @@ name = "reth_libmdbx" bitflags.workspace = true byteorder = "1" derive_more = "0.99" -indexmap = "1" +indexmap = "2" libc = "0.2" parking_lot.workspace = true thiserror.workspace = true diff --git a/crates/storage/libmdbx-rs/mdbx-sys/Cargo.toml b/crates/storage/libmdbx-rs/mdbx-sys/Cargo.toml index 51e8da1788a1..1e2e93e1a9ed 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/Cargo.toml +++ b/crates/storage/libmdbx-rs/mdbx-sys/Cargo.toml @@ -16,4 +16,4 @@ libc = "0.2" [build-dependencies] cc = "1.0" -bindgen = { version = "0.65", default-features = false, features = ["runtime"] } +bindgen = { version = "0.68", default-features = false, features = ["runtime"] } From 624d9d581b79d8e28da8693dd8fb04891c69eaf2 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 11 Sep 2023 17:00:01 +0200 Subject: [PATCH 654/722] feat: enforce replacements dont conflict (#4539) --- crates/rpc/rpc/src/eth/error.rs | 9 +++++ crates/transaction-pool/src/error.rs | 9 +++++ crates/transaction-pool/src/pool/txpool.rs | 43 ++++++++++++++++++--- crates/transaction-pool/src/validate/mod.rs | 10 +++++ 4 files changed, 65 insertions(+), 6 deletions(-) diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index a2b0e3812683..6062b00100e6 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -475,6 +475,12 @@ pub enum RpcPoolError { /// Eip-4844 related error #[error(transparent)] Eip4844(#[from] Eip4844PoolTransactionError), + /// Thrown if a conflicting transaction type is already in the pool + /// + /// In other words, thrown if a transaction with the same sender that violates the exclusivity + /// constraint (blob vs normal tx) + #[error("address already reserved")] + AddressAlreadyReserved, #[error(transparent)] Other(Box), } @@ -498,6 +504,9 @@ impl From for RpcPoolError { PoolError::InvalidTransaction(_, err) => err.into(), PoolError::Other(_, err) => RpcPoolError::Other(err), PoolError::AlreadyImported(_) => RpcPoolError::AlreadyKnown, + PoolError::ExistingConflictingTransactionType(_, _, _) => { + RpcPoolError::AddressAlreadyReserved + } } } } diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs index 1f0442833d41..f405f963010e 100644 --- a/crates/transaction-pool/src/error.rs +++ b/crates/transaction-pool/src/error.rs @@ -39,6 +39,9 @@ pub enum PoolError { /// Thrown when the transaction is considered invalid. #[error("[{0:?}] {1:?}")] InvalidTransaction(TxHash, InvalidPoolTransactionError), + /// Thrown if the mutual exclusivity constraint (blob vs normal transaction) is violated. + #[error("[{1:?}] Transaction type {2} conflicts with existing transaction for {0:?}")] + ExistingConflictingTransactionType(Address, TxHash, u8), /// Any other error that occurred while inserting/validating a transaction. e.g. 
IO database /// error #[error("[{0:?}] {1:?}")] @@ -58,6 +61,7 @@ impl PoolError { PoolError::DiscardedOnInsert(hash) => hash, PoolError::InvalidTransaction(hash, _) => hash, PoolError::Other(hash, _) => hash, + PoolError::ExistingConflictingTransactionType(_, hash, _) => hash, } } @@ -110,6 +114,11 @@ impl PoolError { // internal error unrelated to the transaction false } + PoolError::ExistingConflictingTransactionType(_, _, _) => { + // this is not a protocol error but an implementation error since the pool enforces + // exclusivity (blob vs normal tx) for all senders + false + } } } } diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index d3e84ffafb04..80e25d96360f 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -430,6 +430,13 @@ impl TxPool { *transaction.hash(), InvalidPoolTransactionError::Overdraft, )), + InsertErr::TxTypeConflict { transaction } => { + Err(PoolError::ExistingConflictingTransactionType( + transaction.sender(), + *transaction.hash(), + transaction.tx_type(), + )) + } } } } @@ -1169,6 +1176,23 @@ impl AllTransactions { /// Note: For EIP-4844 blob transactions additional constraints are enforced: /// - new blob transactions must not have any nonce gaps /// - blob transactions cannot go into overdraft + /// + /// ## Transaction type Exclusivity + /// + /// The pool enforces exclusivity of eip-4844 blob vs non-blob transactions on a per sender + /// basis: + /// - If the pool already includes a blob transaction from the `transaction`'s sender, then + /// the `transaction` must also be a blob transaction + /// - If the pool already includes a non-blob transaction from the `transaction`'s sender, then + /// the `transaction` must _not_ be a blob transaction. + /// + /// In other words, the presence of blob transactions exclude non-blob transactions and vice + /// versa: + /// + /// ## Replacements + /// + /// The replacement candidate must satisfy given price bump constraints: replacement candidate + /// must not be underpriced pub(crate) fn insert_tx( &mut self, transaction: ValidPoolTransaction, @@ -1225,7 +1249,7 @@ impl AllTransactions { let mut replaced_tx = None; let pool_tx = PoolInternalTransaction { - transaction: transaction.clone(), + transaction: Arc::clone(&transaction), subpool: state.into(), state, cumulative_cost, @@ -1239,13 +1263,18 @@ impl AllTransactions { entry.insert(pool_tx); } Entry::Occupied(mut entry) => { + // Transaction with the same nonce already exists: replacement candidate + let existing_transaction = entry.get().transaction.as_ref(); + let maybe_replacement = transaction.as_ref(); + if existing_transaction.tx_type_conflicts_with(maybe_replacement) { + // blob vs non blob replacement + return Err(InsertErr::TxTypeConflict { transaction: pool_tx.transaction }) + } + // Transaction already exists // Ensure the new transaction is not underpriced - if Self::is_underpriced( - entry.get().transaction.as_ref(), - transaction.as_ref(), - &self.price_bumps, - ) { + if Self::is_underpriced(existing_transaction, maybe_replacement, &self.price_bumps) + { return Err(InsertErr::Underpriced { transaction: pool_tx.transaction, existing: *entry.get().transaction.hash(), @@ -1418,6 +1447,8 @@ pub(crate) enum InsertErr { block_gas_limit: u64, tx_gas_limit: u64, }, + /// Thrown if the mutual exclusivity constraint (blob vs normal transaction) is violated. 
+ TxTypeConflict { transaction: Arc> }, } /// Transaction was successfully inserted into the pool diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 1a302d14e604..e53828b7e95d 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -295,6 +295,16 @@ impl ValidPoolTransaction { pub(crate) fn size(&self) -> usize { self.transaction.size() } + + /// EIP-4844 blob transactions and normal transactions are treated as mutually exclusive per + /// account. + /// + /// Returns true if the transaction is an EIP-4844 blob transaction and the other is not, or + /// vice versa. + #[inline] + pub(crate) fn tx_type_conflicts_with(&self, other: &Self) -> bool { + self.is_eip4844() != other.is_eip4844() + } } impl IntoRecoveredTransaction for ValidPoolTransaction { From eb38c0dc85993506500d9a9eec50b5cb8f7e3c65 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Mon, 11 Sep 2023 17:25:29 +0200 Subject: [PATCH 655/722] chore: fix warnings (#4553) --- crates/stages/src/stages/execution.rs | 18 +++--------------- crates/stages/src/stages/mod.rs | 6 +----- 2 files changed, 4 insertions(+), 20 deletions(-) diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index c8dfed70a930..ef9e1ce49014 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -623,11 +623,7 @@ mod tests { let state_db = create_test_rw_db(); let factory = ProviderFactory::new(state_db.as_ref(), MAINNET.clone()); let provider = factory.provider_rw().unwrap(); - let input = ExecInput { - target: Some(1), - /// The progress of this stage the last time it was executed. - checkpoint: None, - }; + let input = ExecInput { target: Some(1), checkpoint: None }; let mut genesis_rlp = hex!("f901faf901f5a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa045571b40ae66ca7480791bbb2887286e4e4c4b1b298b191c889d6959023a32eda056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808502540be400808000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0").as_slice(); let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = 
hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); @@ -733,11 +729,7 @@ mod tests { let state_db = create_test_rw_db(); let factory = ProviderFactory::new(state_db.as_ref(), MAINNET.clone()); let provider = factory.provider_rw().unwrap(); - let input = ExecInput { - target: Some(1), - /// The progress of this stage the last time it was executed. - checkpoint: None, - }; + let input = ExecInput { target: Some(1), checkpoint: None }; let mut genesis_rlp = hex!("f901faf901f5a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa045571b40ae66ca7480791bbb2887286e4e4c4b1b298b191c889d6959023a32eda056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808502540be400808000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0").as_slice(); let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = 
hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); @@ -811,11 +803,7 @@ mod tests { let test_tx = TestTransaction::default(); let factory = ProviderFactory::new(test_tx.tx.as_ref(), MAINNET.clone()); let provider = factory.provider_rw().unwrap(); - let input = ExecInput { - target: Some(1), - /// The progress of this stage the last time it was executed. - checkpoint: None, - }; + let input = ExecInput { target: Some(1), checkpoint: None }; let mut genesis_rlp = hex!("f901f8f901f3a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa0c9ceb8372c88cb461724d8d3d87e8b933f6fc5f679d4841800e662f4428ffd0da056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000080830f4240808000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0").as_slice(); let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = 
hex!("f9025ff901f7a0c86e8cc0310ae7c531c758678ddbfd16fc51c8cef8cec650b032de9869e8b94fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa050554882fbbda2c2fd93fdc466db9946ea262a67f7a76cc169e714f105ab583da00967f09ef1dfed20c0eacfaa94d5cd4002eda3242ac47eae68972d07b106d192a0e3c8b47fbfc94667ef4cceb17e5cc21e3b1eebd442cebb27f07562b33836290db90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001830f42408238108203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f862f860800a83061a8094095e7baea6a6c7c4c2dfeb977efac326af552d8780801ba072ed817487b84ba367d15d2f039b5fc5f087d0a8882fbdf73e8cb49357e1ce30a0403d800545b8fc544f92ce8124e2255f8c3c6af93f28243a120585d4c4c6a2a3c0").as_slice(); diff --git a/crates/stages/src/stages/mod.rs b/crates/stages/src/stages/mod.rs index fbe9ff00eaec..70ce03c5038b 100644 --- a/crates/stages/src/stages/mod.rs +++ b/crates/stages/src/stages/mod.rs @@ -72,11 +72,7 @@ mod tests { let provider = factory.provider_rw().unwrap(); let tip = 66; - let input = ExecInput { - target: Some(tip), - /// The progress of this stage the last time it was executed. - checkpoint: None, - }; + let input = ExecInput { target: Some(tip), checkpoint: None }; let mut genesis_rlp = hex!("f901faf901f5a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa045571b40ae66ca7480791bbb2887286e4e4c4b1b298b191c889d6959023a32eda056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808502540be400808000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0").as_slice(); let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = 
hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); From 3fdeade3396bf6b183ef1a047805f349bd7eb79f Mon Sep 17 00:00:00 2001 From: Alessandro Mazza <121622391+alessandromazza98@users.noreply.github.com> Date: Mon, 11 Sep 2023 17:44:13 +0200 Subject: [PATCH 656/722] Add inflight transaction requests metric to dashboard (#4554) --- etc/grafana/dashboards/overview.json | 102 +++++++++++++++++++++++++-- 1 file changed, 98 insertions(+), 4 deletions(-) diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 326afbc3c049..c32b4f8a5734 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -3335,6 +3335,100 @@ "title": "Pending pool imports", "type": "timeseries" }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Currently active outgoing GetPooledTransactions requests.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 127 + }, + "id": 104, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_network_inflight_transaction_requests{instance=~\"$instance\"}", + "hide": false, + "legendFormat": "Inflight Transaction Requests", + "range": true, + "refId": "C" + } + ], + "title": "Inflight Transaction Requests", + "type": 
"timeseries" + }, { "datasource": { "type": "prometheus", @@ -3426,7 +3520,7 @@ "gridPos": { "h": 8, "w": 12, - "x": 0, + "x": 12, "y": 127 }, "id": 93, @@ -3569,8 +3663,8 @@ "gridPos": { "h": 8, "w": 12, - "x": 12, - "y": 127 + "x": 0, + "y": 135 }, "id": 95, "options": { @@ -3688,7 +3782,7 @@ "gridPos": { "h": 8, "w": 12, - "x": 0, + "x": 12, "y": 135 }, "id": 115, From e745aadffa7610bb845b76c6f2294a21c68f63de Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 11 Sep 2023 19:44:36 +0200 Subject: [PATCH 657/722] feat: add blob tx replacement overdraft check (#4534) --- crates/transaction-pool/src/pool/txpool.rs | 53 ++++++++++++++++------ 1 file changed, 38 insertions(+), 15 deletions(-) diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 80e25d96360f..70efcd0ec574 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -990,7 +990,7 @@ impl AllTransactions { .take_while(move |(other, _)| sender == other.sender) } - /// Returns all transactions that _follow_ after the given id but have the same sender. + /// Returns all transactions that _follow_ after the given id and have the same sender. /// /// NOTE: The range is _exclusive_ pub(crate) fn descendant_txs_exclusive<'a, 'b: 'a>( @@ -1002,11 +1002,9 @@ impl AllTransactions { /// Returns all transactions that _follow_ after the given id but have the same sender. /// - /// NOTE: The range is _inclusive_: if the transaction that belongs to `id` it field be the + /// NOTE: The range is _inclusive_: if the transaction that belongs to `id` it will be the /// first value. - #[cfg(test)] - #[allow(unused)] - pub(crate) fn descendant_txs<'a, 'b: 'a>( + pub(crate) fn descendant_txs_inclusive<'a, 'b: 'a>( &'a self, id: &'b TransactionId, ) -> impl Iterator)> + '_ { @@ -1085,34 +1083,59 @@ impl AllTransactions { /// Enforces additional constraints for blob transactions before attempting to insert: /// - new blob transactions must not have any nonce gaps /// - blob transactions cannot go into overdraft + /// - replacement blob transaction with a higher fee must not shift an already propagated + /// descending blob transaction into overdraft fn ensure_valid_blob_transaction( &self, - transaction: ValidPoolTransaction, + new_blob_tx: ValidPoolTransaction, on_chain_balance: U256, ancestor: Option, ) -> Result, InsertErr> { if let Some(ancestor) = ancestor { - let Some(tx) = self.txs.get(&ancestor) else { + let Some(ancestor_tx) = self.txs.get(&ancestor) else { // ancestor tx is missing, so we can't insert the new blob - return Err(InsertErr::BlobTxHasNonceGap { transaction: Arc::new(transaction) }) + return Err(InsertErr::BlobTxHasNonceGap { transaction: Arc::new(new_blob_tx) }) }; - if tx.state.has_nonce_gap() { + if ancestor_tx.state.has_nonce_gap() { // the ancestor transaction already has a nonce gap, so we can't insert the new // blob - return Err(InsertErr::BlobTxHasNonceGap { transaction: Arc::new(transaction) }) + return Err(InsertErr::BlobTxHasNonceGap { transaction: Arc::new(new_blob_tx) }) } + // the max cost executing this transaction requires + let mut cumulative_cost = ancestor_tx.next_cumulative_cost() + new_blob_tx.cost(); + // check if the new blob would go into overdraft - if tx.next_cumulative_cost() + transaction.cost() > on_chain_balance { + if cumulative_cost > on_chain_balance { // the transaction would go into overdraft - return Err(InsertErr::Overdraft { transaction: Arc::new(transaction) }) + return 
Err(InsertErr::Overdraft { transaction: Arc::new(new_blob_tx) }) } - } else if transaction.cost() > on_chain_balance { + + // ensure that a replacement would not shift already propagated blob transactions into + // overdraft + let id = new_blob_tx.transaction_id; + let mut descendants = self.descendant_txs_inclusive(&id).peekable(); + if let Some((maybe_replacement, _)) = descendants.peek() { + if **maybe_replacement == new_blob_tx.transaction_id { + // replacement transaction + descendants.next(); + + // check if any of descendant blob transactions should be shifted into overdraft + for (_, tx) in descendants { + cumulative_cost += tx.transaction.cost(); + if tx.transaction.is_eip4844() && cumulative_cost > on_chain_balance { + // the transaction would shift + return Err(InsertErr::Overdraft { transaction: Arc::new(new_blob_tx) }) + } + } + } + } + } else if new_blob_tx.cost() > on_chain_balance { // the transaction would go into overdraft - return Err(InsertErr::Overdraft { transaction: Arc::new(transaction) }) + return Err(InsertErr::Overdraft { transaction: Arc::new(new_blob_tx) }) } - Ok(transaction) + Ok(new_blob_tx) } /// Returns true if the replacement candidate is underpriced and can't replace the existing From 6beb3c43223e3acd7ad3b84cbc745f20b9f7a131 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 11 Sep 2023 20:21:33 +0200 Subject: [PATCH 658/722] fix: set trace results root trace's gas used to execution result gas (#4556) --- .../src/tracing/builder/parity.rs | 20 ++++++++++++++++ .../revm/revm-inspectors/src/tracing/mod.rs | 23 +++++++++++++++++++ crates/rpc/rpc-types/src/eth/trace/parity.rs | 2 +- crates/rpc/rpc/src/trace.rs | 16 ++++++++----- 4 files changed, 54 insertions(+), 7 deletions(-) diff --git a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs index 3c5bafe36fa1..294caa8c4778 100644 --- a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs +++ b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs @@ -41,6 +41,26 @@ impl ParityTraceBuilder { self.nodes.iter().map(|node| node.trace.caller).collect() } + /// Manually the gas used of the root trace. + /// + /// The root trace's gasUsed should mirror the actual gas used by the transaction. + /// + /// This allows setting it manually by consuming the execution result's gas for example. + #[inline] + pub fn set_transaction_gas_used(&mut self, gas_used: u64) { + if let Some(node) = self.nodes.first_mut() { + node.trace.gas_used = gas_used; + } + } + + /// Convenience function for [ParityTraceBuilder::set_transaction_gas_used] that consumes the + /// type. + #[inline] + pub fn with_transaction_gas_used(mut self, gas_used: u64) -> Self { + self.set_transaction_gas_used(gas_used); + self + } + /// Returns the trace addresses of all call nodes in the set /// /// Each entry in the returned vector represents the [Self::trace_address] of the corresponding diff --git a/crates/revm/revm-inspectors/src/tracing/mod.rs b/crates/revm/revm-inspectors/src/tracing/mod.rs index e850e523578a..aad2e9d9a615 100644 --- a/crates/revm/revm-inspectors/src/tracing/mod.rs +++ b/crates/revm/revm-inspectors/src/tracing/mod.rs @@ -82,12 +82,35 @@ impl TracingInspector { } } + /// Manually the gas used of the root trace. + /// + /// This is useful if the root trace's gasUsed should mirror the actual gas used by the + /// transaction. + /// + /// This allows setting it manually by consuming the execution result's gas for example. 
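For illustration, a condensed sketch of how these helpers are consumed (it mirrors the `trace.rs` call sites later in this patch; `inspector`, `res`, and `tx_info` stand in for a configured `TracingInspector`, a revm execution result, and the RPC-layer transaction metadata):

    // Override the root trace's gasUsed with the gas actually consumed by
    // the transaction, then build the localized parity traces from it.
    let traces = inspector
        .with_transaction_gas_used(res.result.gas_used())
        .into_parity_builder()
        .into_localized_transaction_traces(tx_info);
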
+ #[inline] + pub fn set_transaction_gas_used(&mut self, gas_used: u64) { + if let Some(node) = self.traces.arena.first_mut() { + node.trace.gas_used = gas_used; + } + } + + /// Convenience function for [ParityTraceBuilder::set_transaction_gas_used] that consumes the + /// type. + #[inline] + pub fn with_transaction_gas_used(mut self, gas_used: u64) -> Self { + self.set_transaction_gas_used(gas_used); + self + } + /// Consumes the Inspector and returns a [ParityTraceBuilder]. + #[inline] pub fn into_parity_builder(self) -> ParityTraceBuilder { ParityTraceBuilder::new(self.traces.arena, self.spec_id, self.config) } /// Consumes the Inspector and returns a [GethTraceBuilder]. + #[inline] pub fn into_geth_builder(self) -> GethTraceBuilder { GethTraceBuilder::new(self.traces.arena, self.config) } diff --git a/crates/rpc/rpc-types/src/eth/trace/parity.rs b/crates/rpc/rpc-types/src/eth/trace/parity.rs index da71213da886..200460b4a921 100644 --- a/crates/rpc/rpc-types/src/eth/trace/parity.rs +++ b/crates/rpc/rpc-types/src/eth/trace/parity.rs @@ -46,7 +46,7 @@ impl TraceResults { /// /// The root trace's gasUsed should mirror the actual gas used by the transaction. /// - /// This allows setting int manually by consuming the execution result's gas for example. + /// This allows setting it manually by consuming the execution result's gas for example. pub fn set_root_trace_gas_used(&mut self, gas_used: u64) { if let Some(r) = self.trace.first_mut().and_then(|t| t.result.as_mut()) { r.set_gas_used(gas_used) diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 73c20187a73e..ed67c66c6f7d 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -259,9 +259,11 @@ where .spawn_trace_transaction_in_block( hash, TracingInspectorConfig::default_parity(), - move |tx_info, inspector, _, _| { - let traces = - inspector.into_parity_builder().into_localized_transaction_traces(tx_info); + move |tx_info, inspector, res, _| { + let traces = inspector + .with_transaction_gas_used(res.result.gas_used()) + .into_parity_builder() + .into_localized_transaction_traces(tx_info); Ok(traces) }, ) @@ -364,9 +366,11 @@ where let traces = self.trace_block_with( block_id, TracingInspectorConfig::default_parity(), - |tx_info, inspector, _, _, _| { - let traces = - inspector.into_parity_builder().into_localized_transaction_traces(tx_info); + |tx_info, inspector, res, _, _| { + let traces = inspector + .with_transaction_gas_used(res.gas_used()) + .into_parity_builder() + .into_localized_transaction_traces(tx_info); Ok(traces) }, ); From 660ea0c9376c4b03793995f700ee36182cfdf781 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 11 Sep 2023 20:35:50 +0200 Subject: [PATCH 659/722] perf: limit number of new batch of dials (#4530) --- crates/net/network/src/peers/manager.rs | 54 ++++++++++++++++++++----- crates/net/network/src/peers/mod.rs | 3 ++ 2 files changed, 46 insertions(+), 11 deletions(-) diff --git a/crates/net/network/src/peers/manager.rs b/crates/net/network/src/peers/manager.rs index 1c4dc44dc72d..a9a7e48bdb3d 100644 --- a/crates/net/network/src/peers/manager.rs +++ b/crates/net/network/src/peers/manager.rs @@ -2,7 +2,8 @@ use crate::{ error::{BackoffKind, SessionError}, peers::{ reputation::{is_banned_reputation, DEFAULT_REPUTATION}, - ReputationChangeWeights, DEFAULT_MAX_PEERS_INBOUND, DEFAULT_MAX_PEERS_OUTBOUND, + ReputationChangeWeights, DEFAULT_MAX_CONCURRENT_DIALS, DEFAULT_MAX_PEERS_INBOUND, + DEFAULT_MAX_PEERS_OUTBOUND, }, session::{Direction, 
PendingSessionHandshakeError}, }; @@ -336,6 +337,7 @@ impl PeersManager { } } + /// Returns the tracked reputation for a peer. pub(crate) fn get_reputation(&self, peer_id: &PeerId) -> Option { self.peers.get(peer_id).map(|peer| peer.reputation) } @@ -370,17 +372,19 @@ impl PeersManager { } } - /// Gracefully disconnected a pending session + /// Gracefully disconnected a pending _outgoing_ session pub(crate) fn on_pending_session_gracefully_closed(&mut self, peer_id: &PeerId) { if let Some(peer) = self.peers.get_mut(peer_id) { peer.state = PeerConnectionState::Idle; } else { return } - self.connection_info.decr_out() + + self.connection_info.decr_out(); } - /// Invoked when a pending outgoing session was closed during authentication or the handshake. + /// Invoked when an _outgoing_ pending session was closed during authentication or the + /// handshake. pub(crate) fn on_pending_session_dropped( &mut self, remote_addr: &SocketAddr, @@ -428,7 +432,8 @@ impl PeersManager { self.on_connection_failure(remote_addr, peer_id, err, ReputationChangeKind::Dropped) } - /// Called when an attempt to create a pending session failed while setting up a tcp connection. + /// Called when an attempt to create an _outgoing_ pending session failed while setting up a tcp + /// connection. pub(crate) fn on_outgoing_connection_failure( &mut self, remote_addr: &SocketAddr, @@ -513,7 +518,8 @@ impl PeersManager { self.fill_outbound_slots(); } - /// Invoked if a session was disconnected because there's already a connection to the peer. + /// Invoked if a pending session was disconnected because there's already a connection to the + /// peer. /// /// If the session was an outgoing connection, this means that the peer initiated a connection /// to us at the same time and this connection is already established. @@ -580,8 +586,6 @@ impl PeersManager { // disconnecting, See `on_incoming_session_established` peer.remove_after_disconnect = false; } - - return } Entry::Vacant(entry) => { trace!(target : "net::peers", ?peer_id, ?addr, "discovered new node"); @@ -591,8 +595,6 @@ impl PeersManager { self.queued_actions.push_back(PeerAction::PeerAdded(peer_id)); } } - - self.fill_outbound_slots(); } /// Removes the tracked node from the set. @@ -681,6 +683,7 @@ impl PeersManager { self.tick(); // as long as there a slots available try to fill them with the best peers + let mut new_outbound_dials = 1; while self.connection_info.has_out_capacity() { let action = { let (peer_id, peer) = match self.best_unconnected() { @@ -700,7 +703,13 @@ impl PeersManager { }; self.connection_info.inc_out(); + self.queued_actions.push_back(action); + + new_outbound_dials += 1; + if new_outbound_dials > self.connection_info.max_concurrent_outbound_dials { + break + } } } @@ -778,7 +787,7 @@ impl Default for PeersManager { /// Tracks stats about connected nodes #[derive(Debug, Clone, PartialEq)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize), serde(default))] pub struct ConnectionInfo { /// Counter for currently occupied slots for active outbound connections. #[cfg_attr(feature = "serde", serde(skip))] @@ -790,6 +799,9 @@ pub struct ConnectionInfo { max_outbound: usize, /// Maximum allowed inbound connections. max_inbound: usize, + /// Maximum allowed concurrent outbound dials. 
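The loop added to `fill_outbound_slots` above stops queueing `PeerAction::Connect` once this budget is spent, even if more outbound slots are free. A self-contained sketch of that throttling pattern (the names here are illustrative, not the exact reth internals):

    // Queue at most `MAX_CONCURRENT_DIALS` new dials per invocation,
    // regardless of how many outbound slots are currently unoccupied.
    const MAX_CONCURRENT_DIALS: usize = 10; // DEFAULT_MAX_CONCURRENT_DIALS

    fn queue_dials(mut free_slots: usize, candidates: &mut Vec<u64>) -> Vec<u64> {
        let mut queued = Vec::new();
        while free_slots > 0 && queued.len() < MAX_CONCURRENT_DIALS {
            match candidates.pop() {
                Some(peer_id) => {
                    queued.push(peer_id);
                    free_slots -= 1;
                }
                None => break, // no unconnected peers left to dial
            }
        }
        queued
    }
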
+ #[cfg_attr(feature = "serde", serde(default))] + max_concurrent_outbound_dials: usize, } // === impl ConnectionInfo === @@ -837,6 +849,7 @@ impl Default for ConnectionInfo { num_inbound: 0, max_outbound: DEFAULT_MAX_PEERS_OUTBOUND, max_inbound: DEFAULT_MAX_PEERS_INBOUND, + max_concurrent_outbound_dials: DEFAULT_MAX_CONCURRENT_DIALS, } } } @@ -2172,4 +2185,23 @@ mod test { assert_eq!(peer.state, PeerConnectionState::Idle); assert!(!peer.remove_after_disconnect); } + + #[tokio::test] + async fn test_max_concurrent_dials() { + let config = PeersConfig::default(); + let mut peer_manager = PeersManager::new(config); + let ip = IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)); + let socket_addr = SocketAddr::new(ip, 8008); + for _ in 0..peer_manager.connection_info.max_concurrent_outbound_dials * 2 { + peer_manager.add_peer(PeerId::random(), socket_addr, None); + } + + peer_manager.fill_outbound_slots(); + let dials = peer_manager + .queued_actions + .iter() + .filter(|ev| matches!(ev, PeerAction::Connect { .. })) + .count(); + assert_eq!(dials, peer_manager.connection_info.max_concurrent_outbound_dials); + } } diff --git a/crates/net/network/src/peers/mod.rs b/crates/net/network/src/peers/mod.rs index 80f58b01b1fd..fb28d401328a 100644 --- a/crates/net/network/src/peers/mod.rs +++ b/crates/net/network/src/peers/mod.rs @@ -13,3 +13,6 @@ pub(crate) const DEFAULT_MAX_PEERS_OUTBOUND: usize = 100; /// Maximum number of available slots for inbound sessions. pub(crate) const DEFAULT_MAX_PEERS_INBOUND: usize = 30; + +/// Maximum number of available slots concurrent outgoing dials. +pub(crate) const DEFAULT_MAX_CONCURRENT_DIALS: usize = 10; From 25558b36c80782a4af614760bb57c34bda7bcdd9 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 11 Sep 2023 20:32:08 -0400 Subject: [PATCH 660/722] fix: update Status ForkId on new head (#4560) --- crates/net/network/src/session/mod.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index f97a5cbfcbeb..04d3ffe1da58 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -176,12 +176,14 @@ impl SessionManager { /// Invoked on a received status update. /// - /// If the updated activated another fork, this will return a [`ForkTransition`] and updates the - /// active [`ForkId`](ForkId). See also [`ForkFilter::set_head`]. + /// If the updated activated another fork, this will return a [ForkTransition] and updates the + /// active [ForkId]. See also [ForkFilter::set_head]. pub(crate) fn on_status_update(&mut self, head: Head) -> Option { self.status.blockhash = head.hash; self.status.total_difficulty = head.total_difficulty; - self.fork_filter.set_head(head) + let transition = self.fork_filter.set_head(head); + self.status.forkid = self.fork_filter.current(); + transition } /// An incoming TCP connection was received. 
This starts the authentication process to turn this From 261a9f949996eb2f6129cf7f17469d5134c09761 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 12 Sep 2023 12:10:36 +0200 Subject: [PATCH 661/722] chore: add missing is empty check (#4565) --- crates/transaction-pool/src/pool/mod.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 0b53af5658fb..670e9e874789 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -696,6 +696,9 @@ where /// Notify about propagated transactions. pub(crate) fn on_propagated(&self, txs: PropagatedTransactions) { + if txs.0.is_empty() { + return + } let mut listener = self.event_listener.write(); txs.0.into_iter().for_each(|(hash, peers)| listener.propagated(&hash, peers)) From c4956143b0faa5c059fd69162497365f851e71ac Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 12 Sep 2023 13:07:52 -0400 Subject: [PATCH 662/722] fix: enforce unsupported fork rules on get_payload_v3 (#4562) --- crates/payload/basic/src/lib.rs | 4 ++ crates/payload/builder/src/lib.rs | 4 ++ crates/payload/builder/src/service.rs | 41 +++++++++++++++++++++ crates/payload/builder/src/test_utils.rs | 4 ++ crates/payload/builder/src/traits.rs | 3 ++ crates/rpc/rpc-engine-api/src/engine_api.rs | 18 +++++++++ 6 files changed, 74 insertions(+) diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index d7fbecd060d9..4b125238a2f8 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -412,6 +412,10 @@ where build_empty_payload(&self.client, self.config.clone()).map(Arc::new) } + fn payload_attributes(&self) -> Result { + Ok(self.config.attributes.clone()) + } + fn resolve(&mut self) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { let best_payload = self.best_payload.take(); let maybe_better = self.pending_block.take(); diff --git a/crates/payload/builder/src/lib.rs b/crates/payload/builder/src/lib.rs index b1b6e8989d68..d0bfeb1c95f1 100644 --- a/crates/payload/builder/src/lib.rs +++ b/crates/payload/builder/src/lib.rs @@ -86,6 +86,10 @@ //! Ok(Arc::new(payload)) //! } //! +//! fn payload_attributes(&self) -> Result { +//! Ok(self.attributes.clone()) +//! } +//! //! fn resolve(&mut self) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { //! let payload = self.best_payload(); //! (futures_util::future::ready(payload), KeepPayloadJobAlive::No) diff --git a/crates/payload/builder/src/service.rs b/crates/payload/builder/src/service.rs index 310e2876313d..3a46f011b9c2 100644 --- a/crates/payload/builder/src/service.rs +++ b/crates/payload/builder/src/service.rs @@ -48,6 +48,16 @@ impl PayloadStore { ) -> Option, PayloadBuilderError>> { self.inner.best_payload(id).await } + + /// Returns the payload attributes associated with the given identifier. + /// + /// Note: this returns the attributes of the payload and does not resolve the job. + pub async fn payload_attributes( + &self, + id: PayloadId, + ) -> Option> { + self.inner.payload_attributes(id).await + } } impl From for PayloadStore { @@ -94,6 +104,18 @@ impl PayloadBuilderHandle { rx.await.ok()? } + /// Returns the payload attributes associated with the given identifier. + /// + /// Note: this returns the attributes of the payload and does not resolve the job. 
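As a usage sketch, here is how such an accessor is consumed, condensed from the `engine_getPayloadV3` handler later in this patch (`payload_store` and `chain_spec` stand in for fields of the surrounding engine API type, inside an async handler returning an engine API result):

    // The outer Option signals an unknown payload id, the inner Result a
    // job-side failure; both are surfaced with `ok_or(...)??`.
    let attributes = payload_store
        .payload_attributes(payload_id)
        .await
        .ok_or(EngineApiError::UnknownPayload)??;

    // Reject payloads whose timestamp falls outside the Cancun window.
    if !chain_spec.is_cancun_activated_at_timestamp(attributes.timestamp) {
        return Err(EngineApiError::UnsupportedFork)
    }
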
+ pub async fn payload_attributes( + &self, + id: PayloadId, + ) -> Option> { + let (tx, rx) = oneshot::channel(); + self.to_service.send(PayloadServiceCommand::PayloadAttributes(id, tx)).ok()?; + rx.await.ok()? + } + /// Sends a message to the service to start building a new payload for the given payload. /// /// This is the same as [PayloadBuilderHandle::new_payload] but does not wait for the result and @@ -178,6 +200,17 @@ where self.payload_jobs.iter().find(|(_, job_id)| *job_id == id).map(|(j, _)| j.best_payload()) } + /// Returns the payload attributes for the given payload. + fn payload_attributes( + &self, + id: PayloadId, + ) -> Option> { + self.payload_jobs + .iter() + .find(|(_, job_id)| *job_id == id) + .map(|(j, _)| j.payload_attributes()) + } + /// Returns the best payload for the given identifier that has been built so far and terminates /// the job if requested. fn resolve(&mut self, id: PayloadId) -> Option { @@ -262,6 +295,9 @@ where PayloadServiceCommand::BestPayload(id, tx) => { let _ = tx.send(this.best_payload(id)); } + PayloadServiceCommand::PayloadAttributes(id, tx) => { + let _ = tx.send(this.payload_attributes(id)); + } PayloadServiceCommand::Resolve(id, tx) => { let _ = tx.send(this.resolve(id)); } @@ -287,6 +323,11 @@ enum PayloadServiceCommand { ), /// Get the best payload so far BestPayload(PayloadId, oneshot::Sender, PayloadBuilderError>>>), + /// Get the payload attributes for the given payload + PayloadAttributes( + PayloadId, + oneshot::Sender>>, + ), /// Resolve the payload and return the payload Resolve(PayloadId, oneshot::Sender>), } diff --git a/crates/payload/builder/src/test_utils.rs b/crates/payload/builder/src/test_utils.rs index 20b290de89c5..0257c4c0bd01 100644 --- a/crates/payload/builder/src/test_utils.rs +++ b/crates/payload/builder/src/test_utils.rs @@ -68,6 +68,10 @@ impl PayloadJob for TestPayloadJob { ))) } + fn payload_attributes(&self) -> Result { + Ok(self.attr.clone()) + } + fn resolve(&mut self) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { let fut = futures_util::future::ready(self.best_payload()); (fut, KeepPayloadJobAlive::No) diff --git a/crates/payload/builder/src/traits.rs b/crates/payload/builder/src/traits.rs index 4fddbebecb93..ab118709fa41 100644 --- a/crates/payload/builder/src/traits.rs +++ b/crates/payload/builder/src/traits.rs @@ -26,6 +26,9 @@ pub trait PayloadJob: Future> + Send + /// Note: This is never called by the CL. fn best_payload(&self) -> Result, PayloadBuilderError>; + /// Returns the payload attributes for the payload being built. + fn payload_attributes(&self) -> Result; + /// Called when the payload is requested by the CL. /// /// This is invoked on [`engine_getPayloadV2`](https://github.com/ethereum/execution-apis/blob/main/src/engine/shanghai.md#engine_getpayloadv2) and [`engine_getPayloadV1`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#engine_getpayloadv1). diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index a96144c220bb..88513e8de3f5 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -208,6 +208,24 @@ where &self, payload_id: PayloadId, ) -> EngineApiResult { + // First we fetch the payload attributes to check the timestamp + let attributes = self + .inner + .payload_store + .payload_attributes(payload_id) + .await + .ok_or(EngineApiError::UnknownPayload)??; + + // From the Engine API spec: + // + // + // 1. 
Client software **MUST** return `-38005: Unsupported fork` error if the `timestamp` of + // the built payload does not fall within the time frame of the Cancun fork. + if !self.inner.chain_spec.is_cancun_activated_at_timestamp(attributes.timestamp) { + return Err(EngineApiError::UnsupportedFork) + } + + // Now resolve the payload Ok(self .inner .payload_store From e2270cd71cfb5c5553bc523c673878af50d6d88a Mon Sep 17 00:00:00 2001 From: niko-renko <93560662+niko-renko@users.noreply.github.com> Date: Tue, 12 Sep 2023 13:09:46 -0400 Subject: [PATCH 663/722] Update network.rs -- typo fix (#4545) Co-authored-by: Matthias Seitz --- crates/net/network/src/network.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 9199718be288..ced055f58887 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -132,7 +132,9 @@ impl NetworkHandle { /// Announce a block over devp2p /// - /// Caution: in PoS this is a noop, since new block propagation will happen over devp2p + /// Caution: in PoS this is a noop, since new block are no longer announced over devp2p, but are + /// instead sent to node node by the CL. However, they can still be requested over devp2p, but + /// broadcasting them is a considered a protocol violation.. pub fn announce_block(&self, block: NewBlock, hash: H256) { self.send_message(NetworkHandleMessage::AnnounceBlock(block, hash)) } From 76d56cb7ba2a4d0a78b0feadcf9957abdbfe3950 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 12 Sep 2023 21:42:37 +0200 Subject: [PATCH 664/722] docs: add note about eip-4844 broadcast (#4570) --- crates/net/network/src/transactions.rs | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/crates/net/network/src/transactions.rs b/crates/net/network/src/transactions.rs index 130fc65aa918..d0b8b93f1953 100644 --- a/crates/net/network/src/transactions.rs +++ b/crates/net/network/src/transactions.rs @@ -20,7 +20,7 @@ use reth_metrics::common::mpsc::UnboundedMeteredReceiver; use reth_network_api::{Peers, ReputationChangeKind}; use reth_primitives::{ FromRecoveredPooledTransaction, IntoRecoveredTransaction, PeerId, PooledTransactionsElement, - TransactionSigned, TxHash, TxType, H256, + TransactionSigned, TxHash, H256, }; use reth_rlp::Encodable; use reth_transaction_pool::{ @@ -244,7 +244,7 @@ where /// The message for new pooled hashes depends on the negotiated version of the stream. /// See [NewPooledTransactionHashes](NewPooledTransactionHashes) /// - /// TODO add note that this never broadcasts full 4844 transactions + /// Note: EIP-4844 are disallowed from being broadcast in full and are only ever sent as hashes, see also . fn propagate_transactions( &mut self, to_propagate: Vec, @@ -276,7 +276,7 @@ where // via `GetPooledTransactions`. 
// // From: - if tx.tx_type() != TxType::EIP4844 { + if !tx.transaction.is_eip4844() { full_transactions.push(tx); } } @@ -672,10 +672,6 @@ impl PropagateTransaction { self.transaction.hash() } - fn tx_type(&self) -> TxType { - self.transaction.tx_type() - } - fn new(transaction: Arc) -> Self { Self { size: transaction.length(), transaction } } From 60fa4f8457684bdf9bfe984f6e9a67f053a0df4a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 12 Sep 2023 21:59:40 +0200 Subject: [PATCH 665/722] fix: ensure sender transaction types dont conflict (#4567) --- crates/transaction-pool/src/pool/txpool.rs | 64 +++++++++++++++++++--- 1 file changed, 57 insertions(+), 7 deletions(-) diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 70efcd0ec574..a4e3ab1f3e6a 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -1051,12 +1051,29 @@ impl AllTransactions { self.by_hash.remove(internal.transaction.hash()).map(|tx| (tx, internal.subpool)) } + /// Checks if the given transaction's type conflicts with an existing transaction. + /// + /// See also [ValidPoolTransaction::tx_type_conflicts_with]. + /// + /// Caution: This assumes that mutually exclusive invariant is always true for the same sender. + #[inline] + fn contains_conflicting_transaction(&self, tx: &ValidPoolTransaction) -> bool { + let mut iter = self.txs_iter(tx.transaction_id.sender); + if let Some((_, existing)) = iter.next() { + return tx.tx_type_conflicts_with(&existing.transaction) + } + // no existing transaction for this sender + false + } + /// Additional checks for a new transaction. /// /// This will enforce all additional rules in the context of this pool, such as: /// - Spam protection: reject new non-local transaction from a sender that exhausted its slot /// capacity. /// - Gas limit: reject transactions if they exceed a block's maximum gas. + /// - Ensures transaction types are not conflicting for the sender: blob vs normal + /// transactions are mutually exclusive for the same sender. 
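The predicate referenced above, `ValidPoolTransaction::tx_type_conflicts_with` (added earlier in this series), reduces to a single boolean comparison per sender; a runnable sketch of its truth table:

    // Two transactions from the same sender conflict iff exactly one of
    // them is an EIP-4844 blob transaction.
    fn tx_type_conflicts(a_is_blob: bool, b_is_blob: bool) -> bool {
        a_is_blob != b_is_blob
    }

    fn main() {
        assert!(tx_type_conflicts(true, false)); // blob vs. normal: rejected
        assert!(!tx_type_conflicts(true, true)); // blob replacing blob: allowed
        assert!(!tx_type_conflicts(false, false)); // normal vs. normal: allowed
    }
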
fn ensure_valid( &self, transaction: ValidPoolTransaction, @@ -1077,6 +1094,12 @@ impl AllTransactions { transaction: Arc::new(transaction), }) } + + if self.contains_conflicting_transaction(&transaction) { + // blob vs non blob transactions are mutually exclusive for the same sender + return Err(InsertErr::TxTypeConflict { transaction: Arc::new(transaction) }) + } + Ok(transaction) } @@ -1240,7 +1263,7 @@ impl AllTransactions { ); // before attempting to insert a blob transaction, we need to ensure that additional - // constraints are met + // constraints are met that only apply to blob transactions if transaction.is_eip4844() { transaction = self.ensure_valid_blob_transaction(transaction, on_chain_balance, ancestor)?; @@ -1289,12 +1312,7 @@ impl AllTransactions { // Transaction with the same nonce already exists: replacement candidate let existing_transaction = entry.get().transaction.as_ref(); let maybe_replacement = transaction.as_ref(); - if existing_transaction.tx_type_conflicts_with(maybe_replacement) { - // blob vs non blob replacement - return Err(InsertErr::TxTypeConflict { transaction: pool_tx.transaction }) - } - // Transaction already exists // Ensure the new transaction is not underpriced if Self::is_underpriced(existing_transaction, maybe_replacement, &self.price_bumps) { @@ -1375,7 +1393,7 @@ impl AllTransactions { tx.subpool = tx.state.into(); if inserted_tx_id.eq(id) { - // if it is the new transaction, track the state + // if it is the new transaction, track its updated state state = tx.state; } else { // check if anything changed @@ -1700,6 +1718,38 @@ mod tests { assert!(matches!(err, InsertErr::Underpriced { .. })); } + #[test] + fn insert_conflicting_type_normal_to_blob() { + let on_chain_balance = U256::from(10_000); + let on_chain_nonce = 0; + let mut f = MockTransactionFactory::default(); + let mut pool = AllTransactions::default(); + let tx = MockTransaction::eip1559().inc_price().inc_limit(); + let first = f.validated(tx.clone()); + pool.insert_tx(first, on_chain_balance, on_chain_nonce).unwrap(); + let tx = + MockTransaction::eip4844().set_sender(tx.get_sender()).inc_price_by(100).inc_limit(); + let blob = f.validated(tx); + let err = pool.insert_tx(blob, on_chain_balance, on_chain_nonce).unwrap_err(); + assert!(matches!(err, InsertErr::TxTypeConflict { .. }), "{:?}", err); + } + + #[test] + fn insert_conflicting_type_blob_to_normal() { + let on_chain_balance = U256::from(10_000); + let on_chain_nonce = 0; + let mut f = MockTransactionFactory::default(); + let mut pool = AllTransactions::default(); + let tx = MockTransaction::eip4844().inc_price().inc_limit(); + let first = f.validated(tx.clone()); + pool.insert_tx(first, on_chain_balance, on_chain_nonce).unwrap(); + let tx = + MockTransaction::eip1559().set_sender(tx.get_sender()).inc_price_by(100).inc_limit(); + let tx = f.validated(tx); + let err = pool.insert_tx(tx, on_chain_balance, on_chain_nonce).unwrap_err(); + assert!(matches!(err, InsertErr::TxTypeConflict { .. 
}), "{:?}", err); + } + // insert nonce then nonce - 1 #[test] fn insert_previous() { From b8e15fa10b6da6fa8650047a13b9887ceb3d683f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 12 Sep 2023 22:03:20 +0200 Subject: [PATCH 666/722] fix: ensure final message is always delivered (#4569) --- crates/metrics/src/common/mpsc.rs | 13 ++++ crates/net/network/src/session/active.rs | 93 +++++++++++++++--------- crates/net/network/src/session/mod.rs | 1 + 3 files changed, 74 insertions(+), 33 deletions(-) diff --git a/crates/metrics/src/common/mpsc.rs b/crates/metrics/src/common/mpsc.rs index 6148e5a876a7..d1a03dd6fd32 100644 --- a/crates/metrics/src/common/mpsc.rs +++ b/crates/metrics/src/common/mpsc.rs @@ -11,6 +11,7 @@ use std::{ use tokio::sync::mpsc::{ self, error::{SendError, TryRecvError, TrySendError}, + OwnedPermit, }; /// Wrapper around [mpsc::unbounded_channel] that returns a new unbounded metered channel. @@ -142,6 +143,18 @@ impl MeteredSender { Self { sender, metrics: MeteredSenderMetrics::new(scope) } } + /// Tries to acquire a permit to send a message. + /// + /// See also [Sender](mpsc::Sender)'s `try_reserve_owned`. + pub fn try_reserve_owned(&self) -> Result, TrySendError>> { + self.sender.clone().try_reserve_owned() + } + + /// Returns the underlying [Sender](mpsc::Sender). + pub fn inner(&self) -> &mpsc::Sender { + &self.sender + } + /// Calls the underlying [Sender](mpsc::Sender)'s `try_send`, incrementing the appropriate /// metrics depending on the result. pub fn try_send(&self, message: T) -> Result<(), TrySendError> { diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index 180096241a5c..64d0cf8cfae7 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -37,6 +37,7 @@ use tokio::{ time::Interval, }; use tokio_stream::wrappers::ReceiverStream; +use tokio_util::sync::PollSender; use tracing::{debug, info, trace}; /// Constants for timeout updating @@ -79,11 +80,11 @@ pub(crate) struct ActiveSession { pub(crate) to_session_manager: MeteredSender, /// A message that needs to be delivered to the session manager pub(crate) pending_message_to_session: Option, - /// Incoming request to send to delegate to the remote peer. + /// Incoming internal requests which are delegated to the remote peer. pub(crate) internal_request_tx: Fuse>, /// All requests sent to the remote peer we're waiting on a response pub(crate) inflight_requests: FnvHashMap, - /// All requests that were sent by the remote peer. + /// All requests that were sent by the remote peer and we're waiting on an internal response pub(crate) received_requests_from_remote: Vec, /// Buffered messages that should be handled and sent to the peer. pub(crate) queued_outgoing: VecDeque, @@ -94,6 +95,8 @@ pub(crate) struct ActiveSession { /// If an [ActiveSession] does not receive a response at all within this duration then it is /// considered a protocol violation and the session will initiate a drop. pub(crate) protocol_breach_request_timeout: Duration, + /// Used to reserve a slot to guarantee that the termination message is delivered + pub(crate) terminate_message: Option<(PollSender, ActiveSessionMessage)>, } impl ActiveSession { @@ -118,7 +121,7 @@ impl ActiveSession { /// Handle a message read from the connection. /// /// Returns an error if the message is considered to be in violation of the protocol. 
- fn on_incoming(&mut self, msg: EthMessage) -> OnIncomingMessageOutcome { + fn on_incoming_message(&mut self, msg: EthMessage) -> OnIncomingMessageOutcome { /// A macro that handles an incoming request /// This creates a new channel and tries to send the sender half to the session while /// storing the receiver half internally so the pending response can be polled. @@ -247,7 +250,7 @@ impl ActiveSession { } /// Handle a message received from the internal network - fn on_peer_message(&mut self, msg: PeerMessage) { + fn on_internal_peer_message(&mut self, msg: PeerMessage) { match msg { PeerMessage::NewBlockHashes(msg) => { self.queued_outgoing.push_back(EthMessage::NewBlockHashes(msg).into()); @@ -283,6 +286,8 @@ impl ActiveSession { } /// Handle a Response to the peer + /// + /// This will queue the response to be sent to the peer fn handle_outgoing_response(&mut self, id: u64, resp: PeerResponseResult) { match resp.try_into_message(id) { Ok(msg) => { @@ -355,25 +360,28 @@ impl ActiveSession { } /// Report back that this session has been closed. - fn emit_disconnect(&self) { + fn emit_disconnect(&mut self, cx: &mut Context<'_>) -> Poll<()> { trace!(target: "net::session", remote_peer_id=?self.remote_peer_id, "emitting disconnect"); - // NOTE: we clone here so there's enough capacity to deliver this message - let _ = self.to_session_manager.clone().try_send(ActiveSessionMessage::Disconnected { + let msg = ActiveSessionMessage::Disconnected { peer_id: self.remote_peer_id, remote_addr: self.remote_addr, - }); + }; + + self.terminate_message = + Some((PollSender::new(self.to_session_manager.inner().clone()).clone(), msg)); + self.poll_terminate_message(cx).expect("message is set") } /// Report back that this session has been closed due to an error - fn close_on_error(&self, error: EthStreamError) { - // NOTE: we clone here so there's enough capacity to deliver this message - let _ = self.to_session_manager.clone().try_send( - ActiveSessionMessage::ClosedOnConnectionError { - peer_id: self.remote_peer_id, - remote_addr: self.remote_addr, - error, - }, - ); + fn close_on_error(&mut self, error: EthStreamError, cx: &mut Context<'_>) -> Poll<()> { + let msg = ActiveSessionMessage::ClosedOnConnectionError { + peer_id: self.remote_peer_id, + remote_addr: self.remote_addr, + error, + }; + self.terminate_message = + Some((PollSender::new(self.to_session_manager.inner().clone()).clone(), msg)); + self.poll_terminate_message(cx).expect("message is set") } /// Starts the disconnect process @@ -391,8 +399,7 @@ impl ActiveSession { // try to close the flush out the remaining Disconnect message let _ = ready!(self.conn.poll_close_unpin(cx)); - self.emit_disconnect(); - Poll::Ready(()) + self.emit_disconnect(cx) } /// Attempts to disconnect by sending the given disconnect reason @@ -404,8 +411,7 @@ impl ActiveSession { } Err(err) => { debug!(target: "net::session", ?err, remote_peer_id=?self.remote_peer_id, "could not send disconnect"); - self.close_on_error(err); - Poll::Ready(()) + self.close_on_error(err, cx) } } } @@ -443,6 +449,25 @@ impl ActiveSession { self.internal_request_timeout.store(request_timeout.as_millis() as u64, Ordering::Relaxed); self.internal_request_timeout_interval = tokio::time::interval(request_timeout); } + + /// If a termination message is queued this will try to send it + fn poll_terminate_message(&mut self, cx: &mut Context<'_>) -> Option> { + let (mut tx, msg) = self.terminate_message.take()?; + match tx.poll_reserve(cx) { + Poll::Pending => { + self.terminate_message = 
Some((tx, msg));
+                return Some(Poll::Pending)
+            }
+            Poll::Ready(Ok(())) => {
+                let _ = tx.send_item(msg);
+            }
+            Poll::Ready(Err(_)) => {
+                // channel closed
+            }
+        }
+        // terminate the task
+        Some(Poll::Ready(()))
+    }
 }

 impl Future for ActiveSession {
@@ -451,6 +476,11 @@ impl Future for ActiveSession {
     fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll {
         let this = self.get_mut();

+        // if the session is terminating we have to send the termination message before we can close
+        if let Some(terminate) = this.poll_terminate_message(cx) {
+            return terminate
+        }
+
         if this.is_disconnecting() {
             return this.poll_disconnect(cx)
         }
@@ -486,7 +516,7 @@ impl Future for ActiveSession {
                         return this.try_disconnect(reason, cx)
                     }
                     SessionCommand::Message(msg) => {
-                        this.on_peer_message(msg);
+                        this.on_internal_peer_message(msg);
                     }
                 }
             }
@@ -526,8 +556,7 @@ impl Future for ActiveSession {
                 if let Err(err) = res {
                     debug!(target: "net::session", ?err, remote_peer_id=?this.remote_peer_id, "failed to send message");
                     // notify the manager
-                    this.close_on_error(err);
-                    return Poll::Ready(())
+                    return this.close_on_error(err, cx)
                 }
             } else {
                 // no more messages to send over the wire
@@ -571,8 +600,7 @@ impl Future for ActiveSession {
                         break
                     } else {
                         debug!(target: "net::session", remote_peer_id=?this.remote_peer_id, "eth stream completed");
-                        this.emit_disconnect();
-                        return Poll::Ready(())
+                        return this.emit_disconnect(cx)
                     }
                 }
                 Poll::Ready(Some(res)) => {
@@ -580,15 +608,14 @@ impl Future for ActiveSession {
                         Ok(msg) => {
                             trace!(target: "net::session", msg_id=?msg.message_id(), remote_peer_id=?this.remote_peer_id, "received eth message");
                             // decode and handle message
-                            match this.on_incoming(msg) {
+                            match this.on_incoming_message(msg) {
                                 OnIncomingMessageOutcome::Ok => {
                                     // handled successfully
                                     progress = true;
                                 }
                                 OnIncomingMessageOutcome::BadMessage { error, message } => {
                                     debug!(target: "net::session", ?error, msg=?message, remote_peer_id=?this.remote_peer_id, "received invalid protocol message");
-                                    this.close_on_error(error);
-                                    return Poll::Ready(())
+                                    return this.close_on_error(error, cx)
                                 }
                                 OnIncomingMessageOutcome::NoCapacity(msg) => {
                                     // failed to send due to lack of capacity
@@ -599,8 +626,7 @@ impl Future for ActiveSession {
                         }
                         Err(err) => {
                             debug!(target: "net::session", ?err, remote_peer_id=?this.remote_peer_id, "failed to receive message");
-                            this.close_on_error(err);
-                            return Poll::Ready(())
+                            return this.close_on_error(err, cx)
                         }
                     }
                 }
@@ -612,8 +638,7 @@ impl Future for ActiveSession {
             }
         }

-        if this.internal_request_timeout_interval.poll_tick(cx).is_ready() {
-            let _ = this.internal_request_timeout_interval.poll_tick(cx);
+        while this.internal_request_timeout_interval.poll_tick(cx).is_ready() {
             // check for timed out requests
             if this.check_timed_out_requests(Instant::now()) {
                 let _ = this.to_session_manager.clone().try_send(
@@ -664,6 +689,7 @@ impl InflightRequest {
         matches!(self.request, RequestState::Waiting(_))
     }

+    /// This will time out the request by sending an error response to the internal channel
     fn timeout(&mut self) {
         let mut req = RequestState::TimedOut;
         std::mem::swap(&mut self.request, &mut req);
@@ -866,6 +892,7 @@ mod tests {
                         INITIAL_REQUEST_TIMEOUT.as_millis() as u64,
                     )),
                     protocol_breach_request_timeout: PROTOCOL_BREACH_REQUEST_TIMEOUT,
+                    terminate_message: None,
                 }
             }
             ev => {
diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs
index 04d3ffe1da58..b5624c87963f 100644
--- a/crates/net/network/src/session/mod.rs
+++ b/crates/net/network/src/session/mod.rs
@@ -458,6 +458,7 @@ impl SessionManager {
             ),
             internal_request_timeout: Arc::clone(&timeout),
             protocol_breach_request_timeout: self.protocol_breach_request_timeout,
+            terminate_message: None,
         };

         self.spawn(session);

From c4acd08ff0181eeacfd8a5c9cf6e72bb480c824d Mon Sep 17 00:00:00 2001
From: samtvlabs <112424909+samtvlabs@users.noreply.github.com>
Date: Wed, 13 Sep 2023 16:18:54 +0400
Subject: [PATCH 667/722] chore: silence clippy clone warning (#4574)

---
 crates/primitives/src/lib.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs
index b08a6e9d610f..5b63b1a073af 100644
--- a/crates/primitives/src/lib.rs
+++ b/crates/primitives/src/lib.rs
@@ -10,6 +10,7 @@
     no_crate_inject,
     attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables))
 ))]
+#![allow(clippy::incorrect_clone_impl_on_copy_type)]
 //! Commonly used types in reth.
 //!

From 48b75e0ad2f5905bc85ed7b2606b928ef3a40a96 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Wed, 13 Sep 2023 14:48:45 +0200
Subject: [PATCH 668/722] chore(clippy): make clippy happy (#4578)

Co-authored-by: Alexey Shekhirin
---
 crates/rlp/src/encode.rs               | 8 ++++++++
 crates/storage/libmdbx-rs/src/codec.rs | 6 ++++++
 2 files changed, 14 insertions(+)

diff --git a/crates/rlp/src/encode.rs b/crates/rlp/src/encode.rs
index 6d669fd667e1..cbad6be18651 100644
--- a/crates/rlp/src/encode.rs
+++ b/crates/rlp/src/encode.rs
@@ -47,9 +47,17 @@ pub const fn const_add(a: usize, b: usize) -> usize {
     a + b
 }

+/// A trait for types that have a maximum encoded length.
+///
+/// # Safety
+/// An invalid value can cause the encoder to crash.
 #[doc(hidden)]
 pub unsafe trait MaxEncodedLen: Encodable {}

+/// A trait for types that have a maximum encoded length.
+///
+/// # Safety
+/// An invalid value can cause the encoder to crash.
 #[doc(hidden)]
 pub unsafe trait MaxEncodedLenAssoc: Encodable {
     const LEN: usize;

diff --git a/crates/storage/libmdbx-rs/src/codec.rs b/crates/storage/libmdbx-rs/src/codec.rs
index 008e719479b1..f313492d7ac4 100644
--- a/crates/storage/libmdbx-rs/src/codec.rs
+++ b/crates/storage/libmdbx-rs/src/codec.rs
@@ -4,10 +4,16 @@ use std::{borrow::Cow, slice};

 /// Implement this to be able to decode data values
 pub trait TableObject<'tx> {
+    /// Decodes the object from the given bytes.
     fn decode(data_val: &[u8]) -> Result
     where
         Self: Sized;

+    /// Decodes the value directly from the given MDBX_val pointer.
+    ///
+    /// # Safety
+    ///
+    /// This should only be used in the context of an MDBX transaction.
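For illustration, here is a minimal standalone sketch of what a `TableObject`-style `decode` typically does: turn a raw byte slice read from the database into a typed value. The plain `String` error and the fixed-width big-endian `u64` layout are assumptions for the example, not the crate's actual signature (whose error type is elided above); the `decode_val` hunk continues below.

```rust
// Standalone sketch of a byte-slice decoder, in the spirit of `decode` above.
fn decode_u64_be(data_val: &[u8]) -> Result<u64, String> {
    // reject values of the wrong width instead of panicking
    let bytes: [u8; 8] = data_val
        .try_into()
        .map_err(|_| format!("expected 8 bytes, got {}", data_val.len()))?;
    Ok(u64::from_be_bytes(bytes))
}

fn main() {
    let raw = 42u64.to_be_bytes();
    assert_eq!(decode_u64_be(&raw), Ok(42));
    // a truncated value is an error, not undefined behavior
    assert!(decode_u64_be(&raw[..4]).is_err());
}
```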
#[doc(hidden)] unsafe fn decode_val( _: *const ffi::MDBX_txn, From a3952f12811ac33d23b021f33a7e0afaa247ec7d Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 13 Sep 2023 14:09:50 +0100 Subject: [PATCH 669/722] chore(ci): different clippy action (#4579) --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 12589dfeee94..acd32150d30b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -41,10 +41,10 @@ jobs: args: --all --check - name: cargo clippy - uses: actions-rs/clippy-check@v1 + uses: actions-rs/cargo@v1 with: - args: --all --all-features -- -A clippy::incorrect_clone_impl_on_copy_type -A clippy::arc_with_non_send_sync - token: ${{ secrets.GITHUB_TOKEN }} + command: clippy + args: --all --all-features doc-lint: name: doc lint From 9a9a6e009373410cb356acb86d3eef6836368e4b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 13 Sep 2023 18:18:38 +0200 Subject: [PATCH 670/722] feat: add missing caps (#4581) --- crates/rpc/rpc-types/src/eth/engine/mod.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/crates/rpc/rpc-types/src/eth/engine/mod.rs b/crates/rpc/rpc-types/src/eth/engine/mod.rs index 54cf8ccf0c20..ad76be1e309d 100644 --- a/crates/rpc/rpc-types/src/eth/engine/mod.rs +++ b/crates/rpc/rpc-types/src/eth/engine/mod.rs @@ -9,15 +9,18 @@ mod transition; pub use self::{cancun::*, forkchoice::*, payload::*, transition::*}; -/// The list of supported Engine capabilities -pub const CAPABILITIES: [&str; 9] = [ +/// The list of all supported Engine capabilities available over the engine endpoint. +pub const CAPABILITIES: [&str; 12] = [ "engine_forkchoiceUpdatedV1", "engine_forkchoiceUpdatedV2", + "engine_forkchoiceUpdatedV3", "engine_exchangeTransitionConfigurationV1", "engine_getPayloadV1", "engine_getPayloadV2", + "engine_getPayloadV3", "engine_newPayloadV1", "engine_newPayloadV2", + "engine_newPayloadV3", "engine_getPayloadBodiesByHashV1", "engine_getPayloadBodiesByRangeV1", ]; From c531c1b60aac27392fb1b76194cdbf2033a84264 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 13 Sep 2023 23:48:56 +0200 Subject: [PATCH 671/722] fix: limit payload bodies range by best block (#4584) Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> --- crates/rpc/rpc-engine-api/src/engine_api.rs | 37 ++++++++++++++++++--- 1 file changed, 32 insertions(+), 5 deletions(-) diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 88513e8de3f5..1a670df59137 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -266,8 +266,18 @@ where let mut result = Vec::with_capacity(count as usize); - let end = start.saturating_add(count); - for num in start..end { + // -1 so range is inclusive + let mut end = start.saturating_add(count - 1); + + // > Client software MUST NOT return trailing null values if the request extends past the current latest known block. 
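The inclusive-range arithmetic behind the rule quoted above fits in a short standalone sketch. `body_range` is a hypothetical helper written for this note, and `count >= 1` is assumed (the real endpoint is expected to validate its inputs separately); the hunk's comment continues right after this sketch.

```rust
// Standalone sketch of the payload-bodies range: `count` bodies starting at
// `start`, end made inclusive via `count - 1`, then clamped to the best known
// block so no trailing `None`s are produced.
fn body_range(start: u64, count: u64, best_block: u64) -> std::ops::RangeInclusive<u64> {
    assert!(count >= 1, "count is assumed to be validated by the caller");
    // -1 so the range is inclusive; saturating to guard against overflow
    let mut end = start.saturating_add(count - 1);
    // truncate the end if it extends past the latest known block
    if end > best_block {
        end = best_block;
    }
    start..=end
}

fn main() {
    // 5 bodies requested from block 8, but the chain only has 10 blocks:
    // blocks 8, 9, 10 are returned and nothing trails past the tip.
    assert_eq!(body_range(8, 5, 10).collect::<Vec<_>>(), vec![8, 9, 10]);
}
```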
+        // truncate the end if it's greater than the last block
+        if let Ok(best_block) = inner.provider.best_block_number() {
+            if end > best_block {
+                end = best_block;
+            }
+        }
+
+        for num in start..=end {
             let block_result = inner.provider.block(BlockHashOrNumber::Number(num));
             match block_result {
                 Ok(block) => {
@@ -763,10 +773,11 @@ mod tests {
         let expected = blocks
             .iter()
             .cloned()
+            // filter anything after the second missing range to ensure we don't expect trailing
+            // `None`s
+            .filter(|b| !second_missing_range.contains(&b.number))
             .map(|b| {
-                if first_missing_range.contains(&b.number) ||
-                    second_missing_range.contains(&b.number)
-                {
+                if first_missing_range.contains(&b.number) {
                     None
                 } else {
                     Some(b.unseal().into())
@@ -777,6 +788,22 @@ mod tests {
         let res = api.get_payload_bodies_by_range(start, count).await.unwrap();
         assert_eq!(res, expected);

+        let expected = blocks
+            .iter()
+            .cloned()
+            // ensure we still return trailing `None`s here because by-hash will not be aware
+            // of the missing block's number, and cannot compare it to the current best block
+            .map(|b| {
+                if first_missing_range.contains(&b.number) ||
+                    second_missing_range.contains(&b.number)
+                {
+                    None
+                } else {
+                    Some(b.unseal().into())
+                }
+            })
+            .collect::>();
+
         let hashes = blocks.iter().map(|b| b.hash()).collect();
         let res = api.get_payload_bodies_by_hash(hashes).unwrap();
         assert_eq!(res, expected);

From c831d30d2d982d4e72f04609ad5215b97303e970 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Thu, 14 Sep 2023 00:04:02 +0200
Subject: [PATCH 672/722] fix: use cached encoded length (#4577)

---
 crates/net/network/src/transactions.rs | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)

diff --git a/crates/net/network/src/transactions.rs b/crates/net/network/src/transactions.rs
index d0b8b93f1953..80d9a89bde1a 100644
--- a/crates/net/network/src/transactions.rs
+++ b/crates/net/network/src/transactions.rs
@@ -22,7 +22,6 @@ use reth_primitives::{
     FromRecoveredPooledTransaction, IntoRecoveredTransaction, PeerId, PooledTransactionsElement,
     TransactionSigned, TxHash, H256,
 };
-use reth_rlp::Encodable;
 use reth_transaction_pool::{
     error::PoolResult, GetPooledTransactionLimit, PoolTransaction, PropagateKind,
     PropagatedTransactions, TransactionPool, ValidPoolTransaction,
@@ -205,7 +204,7 @@ where
     }

-    /// Invoked when a new transaction is pending.
+    /// Invoked when a new transaction is pending in the local pool.
     ///
     /// When new transactions appear in the pool, we propagate them to the network using the
     /// `Transactions` and `NewPooledTransactionHashes` messages. The Transactions message relays
@@ -224,15 +223,10 @@ where

         trace!(target: "net::tx", "Start propagating transactions");

+        // This fetches all transactions from the pool, including the blob transactions, which are
+        // only ever sent as hashes.
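The hash-only rule for blob transactions described in the comment above can be sketched standalone, with a simplified `Tx` stand-in instead of the real `PropagateTransaction`; the hunk continues right after this note.

```rust
// Standalone sketch of the propagation split: every peer may learn hashes,
// but only non-blob (non-EIP-4844) transactions are eligible for full broadcast.
#[derive(Debug)]
struct Tx {
    hash: u64,
    is_eip4844: bool,
}

fn split_for_propagation(txs: &[Tx]) -> (Vec<u64>, Vec<&Tx>) {
    // hashes are announced for everything
    let hashes = txs.iter().map(|tx| tx.hash).collect();
    // blob transactions are excluded from full broadcast
    let full = txs.iter().filter(|tx| !tx.is_eip4844).collect();
    (hashes, full)
}

fn main() {
    let txs = [Tx { hash: 1, is_eip4844: false }, Tx { hash: 2, is_eip4844: true }];
    let (hashes, full) = split_for_propagation(&txs);
    assert_eq!(hashes, vec![1, 2]);
    assert_eq!(full.len(), 1); // the blob tx is only ever sent as a hash
}
```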
let propagated = self.propagate_transactions( - self.pool - .get_all(hashes) - .into_iter() - .map(|tx| { - let tx = Arc::new(tx.transaction.to_recovered_transaction().into_signed()); - PropagateTransaction::new(tx) - }) - .collect(), + self.pool.get_all(hashes).into_iter().map(PropagateTransaction::new).collect(), ); // notify pool so events get fired @@ -672,8 +666,11 @@ impl PropagateTransaction { self.transaction.hash() } - fn new(transaction: Arc) -> Self { - Self { size: transaction.length(), transaction } + /// Create a new instance from a pooled transaction + fn new(tx: Arc>) -> Self { + let size = tx.encoded_length; + let transaction = Arc::new(tx.transaction.to_recovered_transaction().into_signed()); + Self { size, transaction } } } From 937269ecacbbd0b14bed5a0ff2c7292aee4a7c40 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 14 Sep 2023 12:53:30 +0200 Subject: [PATCH 673/722] chore: new lint name (#4597) --- crates/primitives/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 5b63b1a073af..3d7356942735 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -10,7 +10,7 @@ no_crate_inject, attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) ))] -#![allow(clippy::incorrect_clone_impl_on_copy_type)] +#![allow(clippy::non_canonical_clone_impl)] //! Commonly used types in reth. //! From 09b5f1012c49911bb0bd91b29b83fb18c068e1b0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 14 Sep 2023 13:17:18 +0200 Subject: [PATCH 674/722] chore: new clippy lints (#4598) --- crates/net/network/src/test_utils/testnet.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index e2cbeaca27e8..443b7057e7af 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -130,13 +130,10 @@ where let mut net = self; let handle = tokio::task::spawn(async move { let mut tx = None; - loop { - tokio::select! { - _ = &mut net => { break} - inc = rx => { - tx = inc.ok(); - break - } + tokio::select! 
{ + _ = &mut net => {} + inc = rx => { + tx = inc.ok(); } } if let Some(tx) = tx { From 3d564aa393c72c95d31a8e3e44b09578ddbcfc55 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 14 Sep 2023 15:40:46 +0200 Subject: [PATCH 675/722] fix: remove popped tx from total set (#4599) --- crates/transaction-pool/src/pool/txpool.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index a4e3ab1f3e6a..8badbaa57c39 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -637,9 +637,17 @@ impl TxPool { .$limit .is_exceeded($this.$pool.len(), $this.$pool.size()) { + // pops the worst transaction from the sub-pool if let Some(tx) = $this.$pool.pop_worst() { let id = tx.transaction_id; + + // now that the tx is removed from the sub-pool, we need to remove it also from the total set + $this.all_transactions.remove_transaction(&id); + + // record the removed transaction removed.push(tx); + + // this might have introduced a nonce gap, so we also discard any descendants $this.remove_descendants(&id, &mut $removed); } } From 723036b3109e3af17be296040e436ea62f503010 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 14 Sep 2023 16:23:46 +0200 Subject: [PATCH 676/722] test(txpool): add replacement test (#4596) --- crates/transaction-pool/src/pool/mod.rs | 18 ++++++++ crates/transaction-pool/src/pool/parked.rs | 6 +++ crates/transaction-pool/src/pool/pending.rs | 6 +++ crates/transaction-pool/src/pool/txpool.rs | 51 ++++++++++++++++++++- crates/transaction-pool/src/traits.rs | 10 ++++ 5 files changed, 89 insertions(+), 2 deletions(-) diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 670e9e874789..9786d3e9867a 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -912,6 +912,24 @@ impl AddedTransaction { } } } + + /// Returns the subpool this transaction was added to + #[cfg(test)] + pub(crate) fn subpool(&self) -> SubPool { + match self { + AddedTransaction::Pending(_) => SubPool::Pending, + AddedTransaction::Parked { subpool, .. } => *subpool, + } + } + + /// Returns the [TransactionId] of the added transaction + #[cfg(test)] + pub(crate) fn id(&self) -> &TransactionId { + match self { + AddedTransaction::Pending(added) => added.transaction.id(), + AddedTransaction::Parked { transaction, .. } => transaction.id(), + } + } } /// Contains all state changes after a [`CanonicalStateUpdate`] was processed diff --git a/crates/transaction-pool/src/pool/parked.rs b/crates/transaction-pool/src/pool/parked.rs index 740af02a9717..e04070908d22 100644 --- a/crates/transaction-pool/src/pool/parked.rs +++ b/crates/transaction-pool/src/pool/parked.rs @@ -107,6 +107,12 @@ impl ParkedPool { pub(crate) fn is_empty(&self) -> bool { self.by_id.is_empty() } + + /// Returns `true` if the transaction with the given id is already included in this pool. 
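The bookkeeping rule behind the fix above boils down to: whatever is popped from a sub-pool must also leave the total set, or it lingers there as a phantom entry. A minimal standalone sketch with plain ids in place of real transactions (the diff resumes below):

```rust
// Standalone sketch of the eviction loop: while a sub-pool exceeds its limit,
// pop the worst transaction, drop it from the total set too, and record it so
// descendants can be discarded (the removal may introduce a nonce gap).
use std::collections::BTreeSet;

fn enforce_limit(
    subpool: &mut Vec<u64>, // ids only, worst transaction kept last
    all_transactions: &mut BTreeSet<u64>,
    max_len: usize,
) -> Vec<u64> {
    let mut removed = Vec::new();
    while subpool.len() > max_len {
        if let Some(id) = subpool.pop() {
            // the tx must leave the total set as well
            all_transactions.remove(&id);
            removed.push(id);
            // a real pool would also discard this sender's descendants here
        }
    }
    removed
}

fn main() {
    let mut sub = vec![1, 2, 3];
    let mut all: BTreeSet<u64> = BTreeSet::from([1, 2, 3]);
    let removed = enforce_limit(&mut sub, &mut all, 1);
    assert_eq!(removed, vec![3, 2]);
    assert!(!all.contains(&3) && !all.contains(&2));
}
```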
+ #[cfg(test)] + pub(crate) fn contains(&self, id: &TransactionId) -> bool { + self.by_id.contains_key(id) + } } impl ParkedPool> { diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index 91beed0883c9..2239cbe6fa6e 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -294,6 +294,12 @@ impl PendingPool { pub(crate) fn is_empty(&self) -> bool { self.by_id.is_empty() } + + /// Returns `true` if the transaction with the given id is already included in this pool. + #[cfg(test)] + pub(crate) fn contains(&self, id: &TransactionId) -> bool { + self.by_id.contains_key(id) + } } /// A transaction that is ready to be included in a block. diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 8badbaa57c39..b760dac5d45c 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -257,6 +257,16 @@ impl TxPool { self.all_transactions.contains(tx_hash) } + /// Returns `true` if the transaction with the given id is already included in the given subpool + #[cfg(test)] + pub(crate) fn subpool_contains(&self, subpool: SubPool, id: &TransactionId) -> bool { + match subpool { + SubPool::Queued => self.queued_pool.contains(id), + SubPool::Pending => self.pending_pool.contains(id), + SubPool::BaseFee => self.basefee_pool.contains(id), + } + } + /// Returns the transaction for the given hash. pub(crate) fn get( &self, @@ -376,13 +386,15 @@ impl TxPool { match self.all_transactions.insert_tx(tx, on_chain_balance, on_chain_nonce) { Ok(InsertOk { transaction, move_to, replaced_tx, updates, .. }) => { + // replace the new tx and remove the replaced in the subpool(s) self.add_new_transaction(transaction.clone(), replaced_tx.clone(), move_to); // Update inserted transactions metric self.metrics.inserted_transactions.increment(1); let UpdateOutcome { promoted, discarded } = self.process_updates(updates); - // This transaction was moved to the pending pool. let replaced = replaced_tx.map(|(tx, _)| tx); + + // This transaction was moved to the pending pool. let res = if move_to.is_pending() { AddedTransaction::Pending(AddedPendingTransaction { transaction, @@ -678,6 +690,14 @@ impl TxPool { } } +#[cfg(any(test, feature = "test-utils"))] +impl TxPool { + /// Creates a mock instance for testing. + pub fn mock() -> Self { + Self::new(crate::test_utils::MockOrdering::default(), PoolConfig::default()) + } +} + // Additional test impls #[cfg(any(test, feature = "test-utils"))] #[allow(missing_docs)] @@ -1698,7 +1718,7 @@ mod tests { let mut pool = AllTransactions::default(); let tx = MockTransaction::eip1559().inc_price().inc_limit(); let first = f.validated(tx.clone()); - let _res = pool.insert_tx(first.clone(), on_chain_balance, on_chain_nonce); + let _ = pool.insert_tx(first.clone(), on_chain_balance, on_chain_nonce).unwrap(); let replacement = f.validated(tx.rng_hash().inc_price()); let InsertOk { updates, replaced_tx, .. 
} =
             pool.insert_tx(replacement.clone(), on_chain_balance, on_chain_nonce).unwrap();
@@ -1706,11 +1726,38 @@ mod tests {
         let replaced = replaced_tx.unwrap();
         assert_eq!(replaced.0.hash(), first.hash());

+        // ensure replaced tx is fully removed
         assert!(!pool.contains(first.hash()));
         assert!(pool.contains(replacement.hash()));
         assert_eq!(pool.len(), 1);
     }

+    #[test]
+    fn insert_replace_txpool() {
+        let on_chain_balance = U256::ZERO;
+        let on_chain_nonce = 0;
+        let mut f = MockTransactionFactory::default();
+        let mut pool = TxPool::mock();
+
+        let tx = MockTransaction::eip1559().inc_price().inc_limit();
+        let first = f.validated(tx.clone());
+        let first_added =
+            pool.add_transaction(first.clone(), on_chain_balance, on_chain_nonce).unwrap();
+        let replacement = f.validated(tx.rng_hash().inc_price());
+        let replacement_added =
+            pool.add_transaction(replacement.clone(), on_chain_balance, on_chain_nonce).unwrap();
+
+        // ensure replaced tx removed
+        assert!(!pool.contains(first_added.hash()));
+        // but the replacement is still there
+        assert!(pool.subpool_contains(replacement_added.subpool(), replacement_added.id()));
+
+        assert!(pool.contains(replacement.hash()));
+        let size = pool.size();
+        assert_eq!(size.total, 1);
+        size.assert_invariants();
+    }
+
     #[test]
     fn insert_replace_underpriced() {
         let on_chain_balance = U256::ZERO;
diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs
index 7b4e7c03d279..a3408b93c3ca 100644
--- a/crates/transaction-pool/src/traits.rs
+++ b/crates/transaction-pool/src/traits.rs
@@ -961,6 +961,16 @@ pub struct PoolSize {
     pub total: usize,
 }

+// === impl PoolSize ===
+
+impl PoolSize {
+    /// Asserts that the invariants of the pool size are met.
+    #[cfg(test)]
+    pub(crate) fn assert_invariants(&self) {
+        assert_eq!(self.total, self.pending + self.basefee + self.queued);
+    }
+}
+
 /// Represents the current status of the pool.
 #[derive(Debug, Clone, Copy, Eq, PartialEq)]
 pub struct BlockInfo {

From a8e0880a98142cc339c7d6c6e9d060082094e764 Mon Sep 17 00:00:00 2001
From: Alexey Shekhirin
Date: Thu, 14 Sep 2023 17:19:42 +0100
Subject: [PATCH 677/722] chore(ci): run check and clippy on tests and benches too (#4587)

---
 .github/workflows/bench.yml                 | 20 ++-------------
 .github/workflows/ci.yml                    |  4 +--
 crates/prune/src/pruner.rs                  | 28 +++++++++++----------
 crates/rpc/rpc-engine-api/src/engine_api.rs |  2 +-
 crates/trie/src/trie.rs                     |  1 +
 5 files changed, 21 insertions(+), 34 deletions(-)

diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml
index db26044dfa56..ea9e3cd87aa6 100644
--- a/.github/workflows/bench.yml
+++ b/.github/workflows/bench.yml
@@ -59,26 +59,10 @@ jobs:
       run: |
         ./pr/.github/scripts/compare_iai.sh

-  # Checks that benchmarks not run in CI compile
-  bench-check:
-    name: check
-    runs-on:
-      group: Reth
-    steps:
-      - uses: actions/checkout@v3
-      - name: Install toolchain
-        uses: dtolnay/rust-toolchain@stable
-      - uses: Swatinem/rust-cache@v2
-      - name: Check if benchmarks build
-        run: cargo check --workspace --benches --all-features
-
   bench-success:
     if: always()
     name: bench success
-    needs: bench-check
     runs-on: ubuntu-20.04
     steps:
-      - name: Decide whether the needed jobs succeeded or failed
-        uses: re-actors/alls-green@release/v1
-        with:
-          jobs: ${{ toJSON(needs) }}
+      # Note: This check is a dummy because we don't have any bench checks enabled.
+      - run: echo OK.
\ No newline at end of file diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index acd32150d30b..6b6ba44b874d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -32,7 +32,7 @@ jobs: uses: actions-rs/cargo@v1 with: command: check - args: --all --all-features + args: --all --all-features --benches --tests - name: cargo fmt uses: actions-rs/cargo@v1 @@ -44,7 +44,7 @@ jobs: uses: actions-rs/cargo@v1 with: command: clippy - args: --all --all-features + args: --all --all-features --benches --tests doc-lint: name: doc lint diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs index 123e2eb3470b..a47a36a46da9 100644 --- a/crates/prune/src/pruner.rs +++ b/crates/prune/src/pruner.rs @@ -1282,10 +1282,11 @@ mod tests { .iter() .enumerate() .flat_map(|(block_number, changeset)| { - changeset.into_iter().map(move |change| (block_number, change)) + changeset.iter().map(move |change| (block_number, change)) }) .collect::>(); + #[allow(clippy::skip_while_next)] let pruned = changesets .iter() .enumerate() @@ -1308,11 +1309,13 @@ mod tests { .map(|(block_number, _)| if done { *block_number } else { block_number.saturating_sub(1) } as BlockNumber) .unwrap_or(to_block); - let pruned_changesets = - pruned_changesets.fold(BTreeMap::new(), |mut acc, (block_number, change)| { - acc.entry(block_number).or_insert_with(Vec::new).push(change); + let pruned_changesets = pruned_changesets.fold( + BTreeMap::<_, Vec<_>>::new(), + |mut acc, (block_number, change)| { + acc.entry(block_number).or_default().push(change); acc - }); + }, + ); assert_eq!( tx.table::().unwrap().len(), @@ -1409,12 +1412,13 @@ mod tests { .iter() .enumerate() .flat_map(|(block_number, changeset)| { - changeset.into_iter().flat_map(move |(address, _, entries)| { - entries.into_iter().map(move |entry| (block_number, address, entry)) + changeset.iter().flat_map(move |(address, _, entries)| { + entries.iter().map(move |entry| (block_number, address, entry)) }) }) .collect::>(); + #[allow(clippy::skip_while_next)] let pruned = changesets .iter() .enumerate() @@ -1438,9 +1442,9 @@ mod tests { .unwrap_or(to_block); let pruned_changesets = pruned_changesets.fold( - BTreeMap::new(), + BTreeMap::<_, Vec<_>>::new(), |mut acc, (block_number, address, entry)| { - acc.entry((block_number, address)).or_insert_with(Vec::new).push(entry); + acc.entry((block_number, address)).or_default().push(entry); acc }, ); @@ -1544,9 +1548,7 @@ mod tests { .inner() .get_prune_checkpoint(PrunePart::ContractLogs) .unwrap() - .and_then(|checkpoint| { - Some((checkpoint.block_number.unwrap(), checkpoint.tx_number.unwrap())) - }) + .map(|checkpoint| (checkpoint.block_number.unwrap(), checkpoint.tx_number.unwrap())) .unwrap_or_default(); // All receipts are in the end of the block @@ -1558,7 +1560,7 @@ mod tests { ((pruned_tx + 1) - unprunable) as usize ); - return done + done }; while !run_prune() {} diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 1a670df59137..eed5de58b16f 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -772,10 +772,10 @@ mod tests { let expected = blocks .iter() - .cloned() // filter anything after the second missing range to ensure we don't expect trailing // `None`s .filter(|b| !second_missing_range.contains(&b.number)) + .cloned() .map(|b| { if first_missing_range.contains(&b.number) { None diff --git a/crates/trie/src/trie.rs b/crates/trie/src/trie.rs index cc443d668009..42522cb45574 
100644 --- a/crates/trie/src/trie.rs +++ b/crates/trie/src/trie.rs @@ -533,6 +533,7 @@ where } #[cfg(test)] +#[allow(clippy::mutable_key_type)] mod tests { use super::*; use crate::test_utils::{ From 455af5603b8500a6be845de7f012519ec53b8d5a Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 15 Sep 2023 12:17:54 +0300 Subject: [PATCH 678/722] fix(ci): valgrind installation (#4605) --- .github/workflows/bench.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index ea9e3cd87aa6..ef4ccaa309a0 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -33,7 +33,7 @@ jobs: - name: Install Valgrind run: | - sudo apt install valgrind + sudo apt update && sudo apt install valgrind - name: Install toolchain uses: dtolnay/rust-toolchain@stable From 6b8db8ace484f079db3769761895f8fb1b0f76fd Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 15 Sep 2023 15:47:54 +0300 Subject: [PATCH 679/722] fix(txpool): pending subpool basefee update (#4610) --- crates/transaction-pool/src/pool/pending.rs | 73 +++++++++++---------- crates/transaction-pool/src/pool/size.rs | 7 ++ 2 files changed, 47 insertions(+), 33 deletions(-) diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index 2239cbe6fa6e..286b699b328a 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -65,6 +65,19 @@ impl PendingPool { } } + /// Clear all transactions from the pool without resetting other values. + /// Used for atomic reordering during basefee update. + /// + /// # Returns + /// + /// Returns all transactions by id. + fn clear_transactions(&mut self) -> BTreeMap> { + self.independent_transactions.clear(); + self.all.clear(); + self.size_of.reset(); + std::mem::take(&mut self.by_id) + } + /// Returns an iterator over all transactions that are _currently_ ready. /// /// 1. The iterator _always_ returns transaction in order: It never returns a transaction with @@ -150,44 +163,38 @@ impl PendingPool { &mut self, base_fee: u64, ) -> Vec>> { - // Create a collection for txs to remove . - let mut to_remove = Vec::new(); - - // Iterate over transactions, find the ones we need to remove and update others in place. - { - let mut iter = self.by_id.iter_mut().peekable(); - while let Some((id, tx)) = iter.next() { - if tx.transaction.max_fee_per_gas() < base_fee as u128 { - // This transaction no longer satisfies the basefee: remove it and all its - // descendants. - to_remove.push(*id); - 'this: while let Some((peek, _)) = iter.peek() { - if peek.sender != id.sender { - break 'this - } - to_remove.push(**peek); - iter.next(); + // Create a collection for removed transactions. + let mut removed = Vec::new(); + + // Drain and iterate over all transactions. + let mut transactions_iter = self.clear_transactions().into_iter().peekable(); + while let Some((id, mut tx)) = transactions_iter.next() { + if tx.transaction.max_fee_per_gas() < base_fee as u128 { + // Add this tx to the removed collection since it no longer satisfies the base fee + // condition. Decrease the total pool size. + removed.push(Arc::clone(&tx.transaction)); + + // Remove all dependent transactions. + 'this: while let Some((next_id, next_tx)) = transactions_iter.peek() { + if next_id.sender != id.sender { + break 'this } - } else { - // Update the transaction with new priority. 
- let new_priority = - self.ordering.priority(&tx.transaction.transaction, base_fee); - tx.priority = new_priority; + removed.push(Arc::clone(&next_tx.transaction)); + transactions_iter.next(); + } + } else { + // Re-insert the transaction with new priority. + tx.priority = self.ordering.priority(&tx.transaction.transaction, base_fee); - self.all.insert(tx.clone()); + self.size_of += tx.transaction.size(); + if self.ancestor(&id).is_none() { + self.independent_transactions.insert(tx.clone()); } + self.all.insert(tx.clone()); + self.by_id.insert(id, tx); } } - let mut removed = Vec::with_capacity(to_remove.len()); - for id in to_remove { - removed.push(self.remove_transaction(&id).expect("transaction exists")); - } - - // Clear ordered lists since the priority would be changed. - self.independent_transactions.clear(); - self.all.clear(); - removed } @@ -261,8 +268,8 @@ impl PendingPool { id: &TransactionId, ) -> Option>> { let tx = self.by_id.remove(id)?; - self.all.remove(&tx); self.size_of -= tx.transaction.size(); + self.all.remove(&tx); self.independent_transactions.remove(&tx); Some(tx.transaction) } diff --git a/crates/transaction-pool/src/pool/size.rs b/crates/transaction-pool/src/pool/size.rs index 0c4a63a1345d..93dfb9bc0bd7 100644 --- a/crates/transaction-pool/src/pool/size.rs +++ b/crates/transaction-pool/src/pool/size.rs @@ -10,6 +10,13 @@ use std::ops::{AddAssign, SubAssign}; #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] pub struct SizeTracker(isize); +impl SizeTracker { + /// Reset the size tracker. + pub fn reset(&mut self) { + self.0 = 0; + } +} + impl AddAssign for SizeTracker { fn add_assign(&mut self, rhs: usize) { self.0 += rhs as isize From ee85fa3d44f6385c2d826305d943f390de6d43b0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 15 Sep 2023 16:28:52 +0200 Subject: [PATCH 680/722] test: add blob exclusive test e2e (#4606) --- .../transaction-pool/src/test_utils/mock.rs | 15 ++++++- crates/transaction-pool/tests/it/blobs.rs | 40 +++++++++++++++++++ crates/transaction-pool/tests/it/main.rs | 2 + 3 files changed, 56 insertions(+), 1 deletion(-) create mode 100644 crates/transaction-pool/tests/it/blobs.rs diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index d42e45cdb49c..62ebb47e57e4 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -16,7 +16,8 @@ use reth_primitives::{ hex, Address, FromRecoveredPooledTransaction, FromRecoveredTransaction, IntoRecoveredTransaction, PooledTransactionsElementEcRecovered, Signature, Transaction, TransactionKind, TransactionSigned, TransactionSignedEcRecovered, TxEip1559, TxEip2930, - TxEip4844, TxHash, TxLegacy, TxType, H256, U128, U256, + TxEip4844, TxHash, TxLegacy, TxType, EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, H256, + LEGACY_TX_TYPE_ID, U128, U256, }; use std::{ops::Range, sync::Arc, time::Instant}; @@ -351,6 +352,14 @@ impl MockTransaction { next.with_gas_limit(gas) } + pub fn tx_type(&self) -> u8 { + match self { + Self::Legacy { .. } => LEGACY_TX_TYPE_ID, + Self::Eip1559 { .. } => EIP1559_TX_TYPE_ID, + Self::Eip4844 { .. } => EIP4844_TX_TYPE_ID, + } + } + pub fn is_legacy(&self) -> bool { matches!(self, MockTransaction::Legacy { .. 
})
     }

@@ -715,6 +724,10 @@ impl MockTransactionFactory {
     pub fn create_eip1559(&mut self) -> MockValidTx {
         self.validated(MockTransaction::eip1559())
     }
+
+    pub fn create_eip4844(&mut self) -> MockValidTx {
+        self.validated(MockTransaction::eip4844())
+    }
 }

 #[derive(Clone, Default)]
diff --git a/crates/transaction-pool/tests/it/blobs.rs b/crates/transaction-pool/tests/it/blobs.rs
new file mode 100644
index 000000000000..cdcce9de3dad
--- /dev/null
+++ b/crates/transaction-pool/tests/it/blobs.rs
@@ -0,0 +1,40 @@
+//! Blob transaction tests
+
+use reth_transaction_pool::{
+    error::PoolError,
+    test_utils::{testing_pool, MockTransaction, MockTransactionFactory},
+    TransactionOrigin, TransactionPool,
+};
+
+#[tokio::test(flavor = "multi_thread")]
+async fn blobs_exclusive() {
+    let txpool = testing_pool();
+    let mut mock_tx_factory = MockTransactionFactory::default();
+    let blob_tx = mock_tx_factory.create_eip4844();
+
+    let hash = txpool
+        .add_transaction(TransactionOrigin::External, blob_tx.transaction.clone())
+        .await
+        .unwrap();
+    assert_eq!(hash, blob_tx.transaction.get_hash());
+
+    let mut best_txns = txpool.best_transactions();
+    assert_eq!(best_txns.next().unwrap().transaction.get_hash(), blob_tx.transaction.get_hash());
+    assert!(best_txns.next().is_none());
+
+    let eip1559_tx = MockTransaction::eip1559()
+        .set_sender(blob_tx.transaction.get_sender())
+        .inc_price_by(10_000);
+
+    let res =
+        txpool.add_transaction(TransactionOrigin::External, eip1559_tx.clone()).await.unwrap_err();
+
+    match res {
+        PoolError::ExistingConflictingTransactionType(addr, hash, tx_type) => {
+            assert_eq!(addr, eip1559_tx.get_sender());
+            assert_eq!(hash, eip1559_tx.get_hash());
+            assert_eq!(tx_type, eip1559_tx.tx_type());
+        }
+        _ => unreachable!(),
+    }
+}
diff --git a/crates/transaction-pool/tests/it/main.rs b/crates/transaction-pool/tests/it/main.rs
index 409be67792d4..49a3b058ea95 100644
--- a/crates/transaction-pool/tests/it/main.rs
+++ b/crates/transaction-pool/tests/it/main.rs
@@ -1,5 +1,7 @@
 //! transaction-pool integration tests

+#[cfg(feature = "test-utils")]
+mod blobs;
 #[cfg(feature = "test-utils")]
 mod listeners;
 #[cfg(feature = "test-utils")]

From b156cb98494463c5951d1ff6e17f98e5102f4980 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Fri, 15 Sep 2023 17:13:44 +0200
Subject: [PATCH 681/722] feat: add blob transactions subpool (#4608)

---
 crates/transaction-pool/src/pool/blob.rs    | 167 ++++++++++++++++++++
 crates/transaction-pool/src/pool/mod.rs     |   1 +
 crates/transaction-pool/src/pool/parked.rs  |   2 +-
 crates/transaction-pool/src/pool/pending.rs |   2 +-
 crates/transaction-pool/src/pool/txpool.rs  |   5 +
 5 files changed, 175 insertions(+), 2 deletions(-)
 create mode 100644 crates/transaction-pool/src/pool/blob.rs

diff --git a/crates/transaction-pool/src/pool/blob.rs b/crates/transaction-pool/src/pool/blob.rs
new file mode 100644
index 000000000000..4bda855e5d5e
--- /dev/null
+++ b/crates/transaction-pool/src/pool/blob.rs
@@ -0,0 +1,167 @@
+#![allow(dead_code, unused)]
+use crate::{
+    identifier::TransactionId, pool::size::SizeTracker, PoolTransaction, ValidPoolTransaction,
+};
+use std::{
+    cmp::Ordering,
+    collections::{BTreeMap, BTreeSet},
+    sync::Arc,
+};
+
+/// A set of __all__ validated blob transactions in the pool.
+///
+/// The purpose of this pool is to keep track of blob transactions that are either pending or
+/// queued and to evict the worst blob transactions once the sub-pool is full.
+///
+/// This expects that certain constraints are met:
+/// - blob transactions are always gapless
+pub(crate) struct BlobTransactions {
+    /// Keeps track of transactions inserted in the pool.
+    ///
+    /// This way we can determine when transactions were submitted to the pool.
+    submission_id: u64,
+    /// _All_ Transactions that are currently inside the pool grouped by their identifier.
+    by_id: BTreeMap>,
+    /// _All_ transactions sorted by blob priority.
+    all: BTreeSet>,
+    /// Keeps track of the size of this pool.
+    ///
+    /// See also [`PoolTransaction::size`](crate::traits::PoolTransaction::size).
+    size_of: SizeTracker,
+}
+
+// === impl BlobTransactions ===
+
+impl BlobTransactions {
+    /// Adds a new transaction to the pending queue.
+    ///
+    /// # Panics
+    ///
+    /// - If the transaction is not a blob tx.
+    /// - If the transaction is already included.
+    pub(crate) fn add_transaction(&mut self, tx: Arc>) {
+        assert!(tx.is_eip4844(), "transaction is not a blob tx");
+        let id = *tx.id();
+        assert!(
+            !self.by_id.contains_key(&id),
+            "transaction already included {:?}",
+            id
+        );
+        let submission_id = self.next_id();
+
+        // keep track of size
+        self.size_of += tx.size();
+
+        self.by_id.insert(id, tx.clone());
+
+        let ord = BlobOrd { submission_id };
+        let transaction = BlobTransaction { ord, transaction: tx };
+        self.all.insert(transaction);
+    }
+
+    /// Removes the transaction from the pool
+    pub(crate) fn remove_transaction(
+        &mut self,
+        id: &TransactionId,
+    ) -> Option>> {
+        // remove from queues
+        let tx = self.by_id.remove(id)?;
+
+        // TODO: remove from ordered set
+        // self.best.remove(&tx);
+
+        // keep track of size
+        self.size_of -= tx.transaction.size();
+
+        Some(tx)
+    }
+
+    fn next_id(&mut self) -> u64 {
+        let id = self.submission_id;
+        self.submission_id = self.submission_id.wrapping_add(1);
+        id
+    }
+
+    /// The reported size of all transactions in this pool.
+    pub(crate) fn size(&self) -> usize {
+        self.size_of.into()
+    }
+
+    /// Number of transactions in the entire pool
+    pub(crate) fn len(&self) -> usize {
+        self.by_id.len()
+    }
+
+    /// Returns `true` if the transaction with the given id is already included in this pool.
+    #[cfg(test)]
+    #[allow(unused)]
+    pub(crate) fn contains(&self, id: &TransactionId) -> bool {
+        self.by_id.contains_key(id)
+    }
+}
+
+impl Default for BlobTransactions {
+    fn default() -> Self {
+        Self {
+            submission_id: 0,
+            by_id: Default::default(),
+            all: Default::default(),
+            size_of: Default::default(),
+        }
+    }
+}
+
+/// A blob transaction that is ready to be included in a block.
+struct BlobTransaction {
+    /// Actual blob transaction.
+    transaction: Arc>,
+    /// The value that determines the order of this transaction.
+    ord: BlobOrd,
+}
+
+impl Eq for BlobTransaction {}
+
+impl PartialEq for BlobTransaction {
+    fn eq(&self, other: &Self) -> bool {
+        self.cmp(other) == Ordering::Equal
+    }
+}
+
+impl PartialOrd for BlobTransaction {
+    fn partial_cmp(&self, other: &Self) -> Option {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for BlobTransaction {
+    fn cmp(&self, other: &Self) -> Ordering {
+        self.ord.cmp(&other.ord)
+    }
+}
+
+#[derive(Debug)]
+struct BlobOrd {
+    /// Identifier that tags when transaction was submitted in the pool.
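The `BlobOrd` struct and its `TODO(mattsse): add ord values` follow below. Purely as a hypothetical illustration of what such ord values could look like (not the ordering reth adopted), a fee-aware variant might rank by a blob-fee field first and fall back to submission id:

```rust
// Hypothetical sketch: order blob txs by max blob fee, ties broken by earlier
// submission. `max_fee_per_blob_gas` is an assumed extra ordering value.
use std::cmp::Ordering;

#[derive(Debug, PartialEq, Eq)]
struct BlobOrd {
    max_fee_per_blob_gas: u128,
    submission_id: u64,
}

impl PartialOrd for BlobOrd {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for BlobOrd {
    fn cmp(&self, other: &Self) -> Ordering {
        // higher blob fee ranks higher; older submission wins a fee tie
        self.max_fee_per_blob_gas
            .cmp(&other.max_fee_per_blob_gas)
            .then_with(|| other.submission_id.cmp(&self.submission_id))
    }
}

fn main() {
    let a = BlobOrd { max_fee_per_blob_gas: 10, submission_id: 1 };
    let b = BlobOrd { max_fee_per_blob_gas: 20, submission_id: 2 };
    assert!(b > a);
}
```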
+ pub(crate) submission_id: u64, + // TODO(mattsse): add ord values +} + +impl Eq for BlobOrd {} + +impl PartialEq for BlobOrd { + fn eq(&self, other: &Self) -> bool { + self.cmp(other) == Ordering::Equal + } +} + +impl PartialOrd for BlobOrd { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for BlobOrd { + fn cmp(&self, other: &Self) -> Ordering { + other.submission_id.cmp(&self.submission_id) + } +} diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 9786d3e9867a..aadca293a060 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -110,6 +110,7 @@ pub use listener::{AllTransactionsEvents, TransactionEvents}; use reth_rlp::Encodable; mod best; +mod blob; mod parked; pub(crate) mod pending; pub(crate) mod size; diff --git a/crates/transaction-pool/src/pool/parked.rs b/crates/transaction-pool/src/pool/parked.rs index e04070908d22..075f244f0edb 100644 --- a/crates/transaction-pool/src/pool/parked.rs +++ b/crates/transaction-pool/src/pool/parked.rs @@ -17,7 +17,7 @@ use std::{cmp::Ordering, collections::BTreeSet, ops::Deref, sync::Arc}; pub(crate) struct ParkedPool { /// Keeps track of transactions inserted in the pool. /// - /// This way we can determine when transactions where submitted to the pool. + /// This way we can determine when transactions were submitted to the pool. submission_id: u64, /// _All_ Transactions that are currently inside the pool grouped by their identifier. by_id: FnvHashMap>, diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index 286b699b328a..8ccd96efa807 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -28,7 +28,7 @@ pub(crate) struct PendingPool { ordering: T, /// Keeps track of transactions inserted in the pool. /// - /// This way we can determine when transactions where submitted to the pool. + /// This way we can determine when transactions were submitted to the pool. submission_id: u64, /// _All_ Transactions that are currently inside the pool grouped by their identifier. by_id: BTreeMap>, diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index b760dac5d45c..51e6d7e54564 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -6,6 +6,7 @@ use crate::{ metrics::TxPoolMetrics, pool::{ best::BestTransactions, + blob::BlobTransactions, parked::{BasefeeOrd, ParkedPool, QueuedOrd}, pending::PendingPool, state::{SubPool, TxState}, @@ -86,6 +87,9 @@ pub struct TxPool { /// Holds all parked transactions that currently violate the dynamic fee requirement but could /// be moved to pending if the base fee changes in their favor (decreases) in future blocks. basefee_pool: ParkedPool>, + /// All blob transactions in the pool + #[allow(unused)] + blob_transactions: BlobTransactions, /// All transactions in the pool. 
all_transactions: AllTransactions, /// Transaction pool metrics @@ -102,6 +106,7 @@ impl TxPool { pending_pool: PendingPool::new(ordering), queued_pool: Default::default(), basefee_pool: Default::default(), + blob_transactions: Default::default(), all_transactions: AllTransactions::new(&config), config, metrics: Default::default(), From a572c3b7f29ba3db9deffc7f044f34ab6f86054b Mon Sep 17 00:00:00 2001 From: MetaB0y <95353780+MetaB0y@users.noreply.github.com> Date: Fri, 15 Sep 2023 17:16:41 +0100 Subject: [PATCH 682/722] Make tx pool mock public (#4611) --- crates/transaction-pool/src/test_utils/mock.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 62ebb47e57e4..9c76ea2d98b5 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -21,12 +21,12 @@ use reth_primitives::{ }; use std::{ops::Range, sync::Arc, time::Instant}; -pub(crate) type MockTxPool = TxPool; +pub type MockTxPool = TxPool; pub type MockValidTx = ValidPoolTransaction; /// Create an empty `TxPool` -pub(crate) fn mock_tx_pool() -> MockTxPool { +pub fn mock_tx_pool() -> MockTxPool { MockTxPool::new(Default::default(), Default::default()) } From 873b6f71d83cc0e28d8b84bc81ea8f6f7c6f0c38 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 15 Sep 2023 21:49:19 +0300 Subject: [PATCH 683/722] fix: holesky genesis (#4616) --- crates/primitives/res/genesis/holesky.json | 2 +- crates/primitives/src/chain/spec.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/primitives/res/genesis/holesky.json b/crates/primitives/res/genesis/holesky.json index 0ce62bd939f3..fcac5863e593 100644 --- a/crates/primitives/res/genesis/holesky.json +++ b/crates/primitives/res/genesis/holesky.json @@ -1,7 +1,7 @@ { "nonce": "0x1234", "timestamp": "1694786100", - "extraData": "0x686f77206d7563682069732074686520666973683f", + "extraData": "", "gasLimit": "0x17D7840", "difficulty": "0x01", "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index 80687d4cbd4f..41a5410968bd 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -164,7 +164,7 @@ pub static HOLESKY: Lazy> = Lazy::new(|| { genesis: serde_json::from_str(include_str!("../../res/genesis/holesky.json")) .expect("Can't deserialize Holesky genesis json"), genesis_hash: Some(H256(hex!( - "ff9006519a8ce843ac9c28549d24211420b546e12ce2d170c77a8cca7964f23d" + "fd91bb7c01ae3f608b4d176078ca72bc7846791fdc02324481ca315ede4c9246" ))), paris_block_and_final_difficulty: Some((0, U256::from(1))), fork_timestamps: ForkTimestamps::default().shanghai(1694790240), From 357301cc29536096553fd0284591a85d2ee39622 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 15 Sep 2023 15:11:56 -0400 Subject: [PATCH 684/722] fix: return UnsupportedFork on V2 endpoints (#4593) --- crates/rpc/rpc-engine-api/src/engine_api.rs | 91 ++++++++++++++++----- 1 file changed, 72 insertions(+), 19 deletions(-) diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index eed5de58b16f..f76d883bc72e 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -188,6 +188,18 @@ where &self, payload_id: PayloadId, ) -> EngineApiResult { + // First we 
+        let attributes = self
+            .inner
+            .payload_store
+            .payload_attributes(payload_id)
+            .await
+            .ok_or(EngineApiError::UnknownPayload)??;
+
+        // validate timestamp according to engine rules
+        self.validate_payload_timestamp(EngineApiMessageVersion::V2, attributes.timestamp)?;
+
+        // Now resolve the payload
         Ok(self
             .inner
             .payload_store
@@ -216,14 +228,8 @@
             .await
             .ok_or(EngineApiError::UnknownPayload)??;
 
-        // From the Engine API spec:
-        //
-        //
-        // 1. Client software **MUST** return `-38005: Unsupported fork` error if the `timestamp` of
-        //    the built payload does not fall within the time frame of the Cancun fork.
-        if !self.inner.chain_spec.is_cancun_activated_at_timestamp(attributes.timestamp) {
-            return Err(EngineApiError::UnsupportedFork)
-        }
+        // validate timestamp according to engine rules
+        self.validate_payload_timestamp(EngineApiMessageVersion::V3, attributes.timestamp)?;
 
         // Now resolve the payload
         Ok(self
@@ -376,6 +382,52 @@
         }
     }
 
+    /// Validates the timestamp depending on the version called:
+    ///
+    /// * If V2, this ensures that the payload timestamp is pre-Cancun.
+    /// * If V3, this ensures that the payload timestamp falls within the Cancun fork.
+    ///
+    /// Otherwise, this will return [EngineApiError::UnsupportedFork].
+    fn validate_payload_timestamp(
+        &self,
+        version: EngineApiMessageVersion,
+        timestamp: u64,
+    ) -> EngineApiResult<()> {
+        let is_cancun = self.inner.chain_spec.is_cancun_activated_at_timestamp(timestamp);
+        if version == EngineApiMessageVersion::V2 && is_cancun {
+            // From the Engine API spec:
+            //
+            // ### Update the methods of previous forks
+            //
+            // This document defines how Cancun payload should be handled by the [`Shanghai
+            // API`](https://github.com/ethereum/execution-apis/blob/ff43500e653abde45aec0f545564abfb648317af/src/engine/shanghai.md).
+            //
+            // For the following methods:
+            //
+            // - [`engine_forkchoiceUpdatedV2`](https://github.com/ethereum/execution-apis/blob/ff43500e653abde45aec0f545564abfb648317af/src/engine/shanghai.md#engine_forkchoiceupdatedv2)
+            // - [`engine_newPayloadV2`](https://github.com/ethereum/execution-apis/blob/ff43500e653abde45aec0f545564abfb648317af/src/engine/shanghai.md#engine_newpayloadv2)
+            // - [`engine_getPayloadV2`](https://github.com/ethereum/execution-apis/blob/ff43500e653abde45aec0f545564abfb648317af/src/engine/shanghai.md#engine_getpayloadv2)
+            //
+            // a validation **MUST** be added:
+            //
+            // 1. Client software **MUST** return `-38005: Unsupported fork` error if the
+            //    `timestamp` of payload or payloadAttributes is greater than or equal to the
+            //    Cancun activation timestamp.
+            return Err(EngineApiError::UnsupportedFork)
+        }
+
+        if version == EngineApiMessageVersion::V3 && !is_cancun {
+            // From the Engine API spec:
+            //
+            //
+            // 1. Client software **MUST** return `-38005: Unsupported fork` error if the
+            //    `timestamp` of the built payload does not fall within the time frame of the
+            //    Cancun fork.
+            return Err(EngineApiError::UnsupportedFork)
+        }
+        Ok(())
+    }
+
     /// Validates the presence of the `withdrawals` field according to the payload timestamp.
     /// After Shanghai, withdrawals field must be [Some].
     /// Before Shanghai, withdrawals field must be [None].
     ///
     /// If the engine API message version is V1 or V2, and the payload attribute's timestamp is
-    /// post-Cancun, then this will return [EngineApiError::NoParentBeaconBlockRootPostCancun].
-    ///
-    /// If the engine API message version is V3, but the `parentBeaconBlockRoot` is [None], then
-    /// this will return [EngineApiError::NoParentBeaconBlockRootPostCancun].
+    /// post-Cancun, then this will return [EngineApiError::UnsupportedFork].
     ///
     /// If the payload attribute's timestamp is before the Cancun fork and the engine API message
     /// version is V3, then this will return [EngineApiError::UnsupportedFork].
     ///
+    /// If the engine API message version is V3, but the `parentBeaconBlockRoot` is [None], then
+    /// this will return [EngineApiError::NoParentBeaconBlockRootPostCancun].
+    ///
     /// This implements the following Engine API spec rules:
     ///
     /// 1. Client software **MUST** check that provided set of parameters and their fields strictly
@@ -440,26 +492,27 @@
         timestamp: u64,
         has_parent_beacon_block_root: bool,
     ) -> EngineApiResult<()> {
-        let is_cancun = self.inner.chain_spec.fork(Hardfork::Cancun).active_at_timestamp(timestamp);
-
+        // 1. Client software **MUST** check that provided set of parameters and their fields
+        //    strictly matches the expected one and return `-32602: Invalid params` error if this
+        //    check fails. Any field having `null` value **MUST** be considered as not provided.
         match version {
             EngineApiMessageVersion::V1 | EngineApiMessageVersion::V2 => {
                 if has_parent_beacon_block_root {
                     return Err(EngineApiError::ParentBeaconBlockRootNotSupportedBeforeV3)
                 }
-                if is_cancun {
-                    return Err(EngineApiError::NoParentBeaconBlockRootPostCancun)
-                }
             }
             EngineApiMessageVersion::V3 => {
                 if !has_parent_beacon_block_root {
                     return Err(EngineApiError::NoParentBeaconBlockRootPostCancun)
-                } else if !is_cancun {
-                    return Err(EngineApiError::UnsupportedFork)
                 }
             }
         };
+        // 2. Client software **MUST** return `-38005: Unsupported fork` error if the
+        //    `payloadAttributes` is set and the `payloadAttributes.timestamp` does not fall within
+        //    the time frame of the Cancun fork.
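+        //
+        // As a concrete illustration (the activation time here is hypothetical, not taken
+        // from any real chain spec), suppose Cancun activates at t = 1_000. Then
+        // `validate_payload_timestamp` behaves as follows:
+        //
+        //   * V2, timestamp   999 => Ok(())  (pre-Cancun payload on a pre-Cancun method)
+        //   * V2, timestamp 1_000 => Err(EngineApiError::UnsupportedFork)
+        //   * V3, timestamp   999 => Err(EngineApiError::UnsupportedFork)
+        //   * V3, timestamp 1_000 => Ok(())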
+        self.validate_payload_timestamp(version, timestamp)?;
+
         Ok(())
     }

From 62e7d98202a10bf1333836bf74f9e7e1827aa6d1 Mon Sep 17 00:00:00 2001
From: hack3r-0m <54898623+hack3r-0m@users.noreply.github.com>
Date: Sat, 16 Sep 2023 04:37:24 +0530
Subject: [PATCH 685/722] feat(cli): allow multiple trusted peers (#4619)

---
 bin/reth/src/args/network_args.rs | 21 ++++++++++++++++++++-
 book/cli/node.md                  |  2 +-
 2 files changed, 21 insertions(+), 2 deletions(-)

diff --git a/bin/reth/src/args/network_args.rs b/bin/reth/src/args/network_args.rs
index 086373687124..21a658431396 100644
--- a/bin/reth/src/args/network_args.rs
+++ b/bin/reth/src/args/network_args.rs
@@ -19,7 +19,7 @@ pub struct NetworkArgs {
     /// Target trusted peer enodes
     /// --trusted-peers enode://abcd@192.168.0.1:30303
-    #[arg(long)]
+    #[arg(long, value_delimiter = ',')]
     pub trusted_peers: Vec<NodeRecord>,
 
     /// Connect only to trusted peers
@@ -194,4 +194,23 @@ mod tests {
         assert_eq!(args.max_outbound_peers, Some(75));
         assert_eq!(args.max_inbound_peers, Some(15));
     }
+
+    #[test]
+    fn parse_trusted_peer_args() {
+        let args =
+            CommandParser::<NetworkArgs>::parse_from([
+                "reth",
+                "--trusted-peers",
+                "enode://d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666@18.138.108.67:30303,enode://22a8232c3abc76a16ae9d6c3b164f98775fe226f0917b0ca871128a74a8e9630b458460865bab457221f1d448dd9791d24c4e5d88786180ac185df813a68d4de@3.209.45.79:30303"
+            ])
+            .args;
+
+        assert_eq!(
+            args.trusted_peers,
+            vec![
+                "enode://d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666@18.138.108.67:30303".parse().unwrap(),
+                "enode://22a8232c3abc76a16ae9d6c3b164f98775fe226f0917b0ca871128a74a8e9630b458460865bab457221f1d448dd9791d24c4e5d88786180ac185df813a68d4de@3.209.45.79:30303".parse().unwrap()
+            ]
+        );
+    }
 }
diff --git a/book/cli/node.md b/book/cli/node.md
index 2aa71d1ccefa..07da18faed13 100644
--- a/book/cli/node.md
+++ b/book/cli/node.md
@@ -57,7 +57,7 @@ Networking:
           The UDP port to use for P2P discovery/networking.
default: 30303 --trusted-peers - Target trusted peer enodes --trusted-peers enode://abcd@192.168.0.1:30303 + Target trusted peer enodes --trusted-peers enode://abcd@192.168.0.1:30303,enode://cdef@192.168.0.2:30303 --trusted-only Connect only to trusted peers From f153d8f4d440d4cd8364c44e1ef639f84f852061 Mon Sep 17 00:00:00 2001 From: rakita Date: Sat, 16 Sep 2023 13:00:22 +0200 Subject: [PATCH 686/722] feat(revm): Integrate State (#3512) Co-authored-by: Roman Krasiuk Co-authored-by: Alexey Shekhirin Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> Co-authored-by: Bjerg --- Cargo.lock | 589 +++-- Cargo.toml | 19 +- bin/reth/src/chain/import.rs | 1 + bin/reth/src/debug_cmd/execution.rs | 6 +- bin/reth/src/debug_cmd/in_memory_merkle.rs | 15 +- bin/reth/src/debug_cmd/merkle.rs | 6 +- bin/reth/src/init.rs | 81 +- bin/reth/src/node/mod.rs | 5 +- bin/reth/src/stage/dump/merkle.rs | 6 +- bin/reth/src/stage/run.rs | 1 + crates/blockchain-tree/src/blockchain_tree.rs | 62 +- crates/blockchain-tree/src/chain.rs | 62 +- crates/blockchain-tree/src/lib.rs | 5 +- crates/blockchain-tree/src/post_state_data.rs | 22 +- crates/blockchain-tree/src/shareable.rs | 11 +- crates/config/src/config.rs | 9 +- crates/consensus/auto-seal/src/lib.rs | 84 +- crates/consensus/auto-seal/src/task.rs | 15 +- crates/consensus/beacon/src/engine/mod.rs | 2 +- crates/consensus/beacon/src/engine/sync.rs | 6 +- .../consensus/beacon/src/engine/test_utils.rs | 76 +- crates/consensus/common/src/calc.rs | 12 +- .../interfaces/src/blockchain_tree/error.rs | 1 + crates/interfaces/src/executor.rs | 10 +- crates/payload/basic/Cargo.toml | 1 + crates/payload/basic/src/lib.rs | 104 +- crates/payload/builder/src/database.rs | 20 +- crates/payload/builder/src/payload.rs | 10 +- crates/primitives/src/account.rs | 22 +- crates/primitives/src/bits.rs | 2 + crates/primitives/src/bloom.rs | 2 + crates/primitives/src/proofs.rs | 2 +- crates/primitives/src/prune/part.rs | 2 +- crates/primitives/src/storage.rs | 7 + crates/primitives/src/transaction/mod.rs | 2 +- crates/primitives/src/withdrawal.rs | 9 +- crates/revm/Cargo.toml | 6 +- .../revm/revm-inspectors/src/access_list.rs | 1 - .../revm-inspectors/src/stack/maybe_owned.rs | 30 +- crates/revm/revm-inspectors/src/stack/mod.rs | 21 +- .../src/tracing/builder/parity.rs | 8 +- .../revm-inspectors/src/tracing/fourbyte.rs | 1 - .../revm-inspectors/src/tracing/js/mod.rs | 12 +- .../revm/revm-inspectors/src/tracing/mod.rs | 23 +- .../revm-inspectors/src/tracing/opcount.rs | 1 - crates/revm/revm-primitives/src/compat.rs | 12 +- crates/revm/revm-primitives/src/env.rs | 3 +- crates/revm/src/database.rs | 44 +- crates/revm/src/executor.rs | 1340 ----------- crates/revm/src/factory.rs | 24 +- crates/revm/src/lib.rs | 7 +- crates/revm/src/processor.rs | 531 +++++ crates/revm/src/state_change.rs | 85 + crates/rpc/rpc/src/debug.rs | 20 +- crates/rpc/rpc/src/eth/api/call.rs | 10 +- crates/rpc/rpc/src/eth/api/pending_block.rs | 47 +- crates/rpc/rpc/src/eth/api/transactions.rs | 12 +- crates/rpc/rpc/src/eth/cache/mod.rs | 8 +- crates/rpc/rpc/src/eth/error.rs | 8 +- crates/rpc/rpc/src/trace.rs | 18 +- crates/stages/Cargo.toml | 3 + crates/stages/src/pipeline/mod.rs | 4 +- crates/stages/src/stages/execution.rs | 94 +- crates/stages/src/stages/mod.rs | 7 +- crates/stages/src/stages/tx_lookup.rs | 2 - crates/storage/provider/Cargo.toml | 5 +- .../bundle_state_with_receipts.rs | 1198 ++++++++++ .../storage/provider/src/bundle_state/mod.rs | 11 + .../src/bundle_state/state_changes.rs | 88 + 
.../src/bundle_state/state_reverts.rs | 167 ++ crates/storage/provider/src/chain.rs | 96 +- crates/storage/provider/src/lib.rs | 19 +- .../provider/src/post_state/account.rs | 89 - crates/storage/provider/src/post_state/mod.rs | 2076 ----------------- .../provider/src/post_state/storage.rs | 156 -- ...e_provider.rs => bundle_state_provider.rs} | 54 +- .../src/providers/database/provider.rs | 209 +- crates/storage/provider/src/providers/mod.rs | 16 +- .../src/providers/state/historical.rs | 6 +- .../provider/src/providers/state/latest.rs | 8 +- .../provider/src/providers/state/macros.rs | 2 +- .../storage/provider/src/test_utils/blocks.rs | 117 +- .../provider/src/test_utils/executor.rs | 56 +- .../storage/provider/src/test_utils/mock.rs | 9 +- .../storage/provider/src/test_utils/noop.rs | 12 +- crates/storage/provider/src/traits/block.rs | 15 +- crates/storage/provider/src/traits/chain.rs | 4 +- .../storage/provider/src/traits/executor.rs | 72 +- crates/storage/provider/src/traits/mod.rs | 4 +- crates/storage/provider/src/traits/state.rs | 19 +- crates/storage/provider/src/transaction.rs | 197 -- crates/transaction-pool/src/maintain.rs | 27 +- crates/transaction-pool/src/pool/state.rs | 4 +- 93 files changed, 3434 insertions(+), 4973 deletions(-) delete mode 100644 crates/revm/src/executor.rs create mode 100644 crates/revm/src/processor.rs create mode 100644 crates/revm/src/state_change.rs create mode 100644 crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs create mode 100644 crates/storage/provider/src/bundle_state/mod.rs create mode 100644 crates/storage/provider/src/bundle_state/state_changes.rs create mode 100644 crates/storage/provider/src/bundle_state/state_reverts.rs delete mode 100644 crates/storage/provider/src/post_state/account.rs delete mode 100644 crates/storage/provider/src/post_state/mod.rs delete mode 100644 crates/storage/provider/src/post_state/storage.rs rename crates/storage/provider/src/providers/{post_state_provider.rs => bundle_state_provider.rs} (59%) delete mode 100644 crates/storage/provider/src/transaction.rs diff --git a/Cargo.lock b/Cargo.lock index 3b01df05b5dc..352b0ebc2b13 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -195,9 +195,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.3" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b84bf0a05bbb2a83e5eb6fa36bb6e87baa08193c35ff52bbf6b38d8af2890e46" +checksum = "15c4c2c83f81532e5845a733998b6971faca23490340a418e9b72a3ec9de12ea" [[package]] name = "anstyle-parse" @@ -242,8 +242,8 @@ dependencies = [ "include_dir", "itertools 0.10.5", "proc-macro-error", - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -300,7 +300,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" dependencies = [ - "quote 1.0.33", + "quote", "syn 1.0.109", ] @@ -310,7 +310,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" dependencies = [ - "quote 1.0.33", + "quote", "syn 1.0.109", ] @@ -322,7 +322,7 @@ checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" dependencies = [ "num-bigint", "num-traits", - "quote 1.0.33", + "quote", "syn 1.0.109", ] @@ -334,8 +334,8 @@ checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" dependencies = [ "num-bigint", 
"num-traits", - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -432,9 +432,9 @@ version = "0.1.73" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", - "syn 2.0.32", + "proc-macro2", + "quote", + "syn 2.0.29", ] [[package]] @@ -485,8 +485,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fee3da8ef1276b0bee5dd1c7258010d8fffd31801447323115a25560e1327b89" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -537,9 +537,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.4" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" +checksum = "414dcefbc63d77c526a76b3afcf6fbb9b5e2791c19c3aa2297733208750c6e53" [[package]] name = "base64ct" @@ -592,8 +592,8 @@ dependencies = [ "lazy_static", "lazycell", "peeking_take_while", - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "regex", "rustc-hash", "shlex", @@ -614,12 +614,12 @@ dependencies = [ "log", "peeking_take_while", "prettyplease", - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "regex", "rustc-hash", "shlex", - "syn 2.0.32", + "syn 2.0.29", "which", ] @@ -635,12 +635,12 @@ dependencies = [ "lazy_static", "lazycell", "peeking_take_while", - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "regex", "rustc-hash", "shlex", - "syn 2.0.32", + "syn 2.0.29", ] [[package]] @@ -669,6 +669,10 @@ name = "bitflags" version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" +dependencies = [ + "arbitrary", + "serde", +] [[package]] name = "bitvec" @@ -822,9 +826,9 @@ version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca3de43b7806061fccfba716fef51eea462d636de36803b62d10f902608ffef4" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", - "syn 2.0.32", + "proc-macro2", + "quote", + "syn 2.0.29", "synstructure 0.13.0", ] @@ -857,9 +861,9 @@ checksum = "a24f6aa1ecc56e797506437b1f9a172e4a5f207894e74196c682cb656d2c2d60" [[package]] name = "boyer-moore-magiclen" -version = "0.2.18" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "116d76fee857b03ecdd95d5f9555e46aa0cd34e5bb348a520e9445d151182a7e" +checksum = "6c77eb6b3a37f71fcd40e49b56c028ea8795c0e550afd8021e3e6a2369653035" dependencies = [ "debug-helper", ] @@ -920,9 +924,9 @@ checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "bytemuck" -version = "1.14.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "374d28ec25809ee0e23827c2ab573d729e293f281dfe393500e7ad618baa61c6" +checksum = "17febce684fd15d89027105661fec94afb475cb995fbc59d2865198446ba2eea" [[package]] name = "byteorder" @@ -1038,9 +1042,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.30" +version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defd4e7873dbddba6c7c91e199c7fcb946abc4a6a4ac3195400bcfb01b5de877" 
+checksum = "95ed24df0632f708f5f6d8082675bef2596f7084dee3dd55f632290bf35bfe0f" dependencies = [ "android-tzdata", "iana-time-zone", @@ -1135,9 +1139,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0862016ff20d69b84ef8247369fabf5c008a7417002411897d40ee1f4532b873" dependencies = [ "heck", - "proc-macro2 1.0.66", - "quote 1.0.33", - "syn 2.0.32", + "proc-macro2", + "quote", + "syn 2.0.29", ] [[package]] @@ -1158,10 +1162,10 @@ version = "0.1.0-alpha.8" dependencies = [ "convert_case 0.6.0", "parity-scale-codec", - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "serde", - "syn 2.0.32", + "syn 2.0.29", ] [[package]] @@ -1202,7 +1206,7 @@ version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5286a0843c21f8367f7be734f89df9b822e0321d8bcce8d6e735aadff7d74979" dependencies = [ - "base64 0.21.4", + "base64 0.21.3", "bech32", "bs58", "digest 0.10.7", @@ -1506,9 +1510,9 @@ checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] name = "crypto-bigint" -version = "0.5.3" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "740fe28e594155f10cfc383984cbefd529d7396050557148f79cb0f621204124" +checksum = "cf4c2f4e1afd912bc40bfd6fed5d9dc1f288e0ba01bfcc835cc5bc3eb13efe15" dependencies = [ "generic-array", "rand_core 0.6.4", @@ -1546,9 +1550,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.1.0" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622178105f911d937a42cdb140730ba4a3ed2becd8ae6ce39c7d28b5d75d4588" +checksum = "f711ade317dd348950a9910f81c5947e3d8907ebd2b83f76203ff1807e6a2bc2" dependencies = [ "cfg-if", "cpufeatures", @@ -1567,9 +1571,9 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", - "syn 2.0.32", + "proc-macro2", + "quote", + "syn 2.0.29", ] [[package]] @@ -1600,8 +1604,8 @@ checksum = "f0c960ae2da4de88a91b2d920c2a7233b400bc33cb28453a2987822d8392519b" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "strsim 0.9.3", "syn 1.0.109", ] @@ -1614,10 +1618,10 @@ checksum = "177e3443818124b357d8e76f53be906d60937f0d3a90773a664fa63fa253e621" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "strsim 0.10.0", - "syn 2.0.32", + "syn 2.0.29", ] [[package]] @@ -1627,7 +1631,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9b5a2f4ac4969822c62224815d069952656cadc7084fdca9751e6d959189b72" dependencies = [ "darling_core 0.10.2", - "quote 1.0.33", + "quote", "syn 1.0.109", ] @@ -1638,8 +1642,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ "darling_core 0.20.3", - "quote 1.0.33", - "syn 2.0.32", + "quote", + "syn 2.0.29", ] [[package]] @@ -1711,8 +1715,8 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -1722,9 +1726,9 @@ version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"53e0efad4403bfc52dc201159c4b842a246a14b98c64b55dfd0f2d89729dfeb8" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", - "syn 2.0.32", + "proc-macro2", + "quote", + "syn 2.0.29", ] [[package]] @@ -1735,8 +1739,8 @@ checksum = "a2658621297f2cf68762a6f7dc0bb7e1ff2cfd6583daef8ee0fed6f7ec468ec0" dependencies = [ "darling 0.10.2", "derive_builder_core", - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -1747,8 +1751,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2791ea3e372c8495c0bc2033991d76b512cd799d07491fbd6890124db9458bef" dependencies = [ "darling 0.10.2", - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -1759,8 +1763,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case 0.4.0", - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "rustc_version 0.4.0", "syn 1.0.109", ] @@ -1896,9 +1900,9 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", - "syn 2.0.32", + "proc-macro2", + "quote", + "syn 2.0.29", ] [[package]] @@ -1971,13 +1975,13 @@ dependencies = [ [[package]] name = "educe" -version = "0.4.23" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f0042ff8246a363dbe77d2ceedb073339e85a804b9a47636c6e016a9a32c05f" +checksum = "079044df30bb07de7d846d41a184c4b00e66ebdac93ee459253474f3a47e50ae" dependencies = [ "enum-ordinalize", - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -2051,7 +2055,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0be7b2ac146c1f99fe245c02d16af0696450d8e06c135db75e10eeb9e642c20d" dependencies = [ - "base64 0.21.4", + "base64 0.21.3", "bytes", "ed25519-dalek", "hex", @@ -2073,8 +2077,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "570d109b813e904becc80d8d5da38376818a143348413f7149f1340fe04754d4" dependencies = [ "heck", - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -2085,9 +2089,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" dependencies = [ "heck", - "proc-macro2 1.0.66", - "quote 1.0.33", - "syn 2.0.32", + "proc-macro2", + "quote", + "syn 2.0.29", ] [[package]] @@ -2098,9 +2102,9 @@ checksum = "e4f76552f53cefc9a7f64987c3701b99d982f7690606fd67de1d09712fbf52f1" dependencies = [ "num-bigint", "num-traits", - "proc-macro2 1.0.66", - "quote 1.0.33", - "syn 2.0.32", + "proc-macro2", + "quote", + "syn 2.0.29", ] [[package]] @@ -2109,9 +2113,9 @@ version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2ad8cef1d801a4686bfd8919f0b30eac4c8e48968c437a6405ded4fb5272d2b" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", - "syn 2.0.32", + "proc-macro2", + "quote", + "syn 2.0.29", ] [[package]] @@ -2253,13 +2257,13 @@ dependencies = [ "ethers-core", "eyre", "prettyplease", - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "regex", "serde", "serde_json", - "syn 2.0.32", - "toml 0.7.8", + "syn 2.0.29", + "toml 0.7.6", "walkdir", ] @@ -2273,10 +2277,10 @@ dependencies = [ 
"const-hex", "ethers-contract-abigen", "ethers-core", - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "serde_json", - "syn 2.0.32", + "syn 2.0.29", ] [[package]] @@ -2302,11 +2306,11 @@ dependencies = [ "serde", "serde_json", "strum 0.25.0", - "syn 2.0.32", + "syn 2.0.29", "tempfile", "thiserror", "tiny-keccak", - "unicode-xid 0.2.4", + "unicode-xid", ] [[package]] @@ -2359,7 +2363,7 @@ checksum = "6838fa110e57d572336178b7c79e94ff88ef976306852d8cb87d9e5b1fc7c0b5" dependencies = [ "async-trait", "auto_impl", - "base64 0.21.4", + "base64 0.21.3", "bytes", "const-hex", "enr", @@ -2505,9 +2509,9 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.2.1" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0870c84016d4b481be5c9f323c24f65e31e901ae618f0e80f4308fb00de1d2d" +checksum = "e825f6987101665dea6ec934c09ec6d721de7bc1bf92248e1d5810c8cd636b77" [[package]] name = "findshlibs" @@ -2643,9 +2647,9 @@ version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", - "syn 2.0.32", + "proc-macro2", + "quote", + "syn 2.0.29", ] [[package]] @@ -2874,7 +2878,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ "ahash 0.8.3", - "serde", ] [[package]] @@ -2885,6 +2888,7 @@ checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" dependencies = [ "ahash 0.8.3", "allocator-api2", + "serde", ] [[package]] @@ -3036,9 +3040,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "human_bytes" -version = "0.4.3" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91f255a4535024abf7640cb288260811fc14794f62b063652ed349f9a6c2348e" +checksum = "27e2b089f28ad15597b48d8c0a8fe94eeb1c1cb26ca99b6f66ac9582ae10c5e6" [[package]] name = "humantime" @@ -3216,8 +3220,8 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd8b728b9421e93eff1d9f8681101b78fa745e0748c95c655c83f337044a7e10" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -3304,8 +3308,8 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -3324,8 +3328,8 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b139284b5cf57ecfa712bcc66950bb635b31aff41c188e8a4cfc758eca374a3f" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", ] [[package]] @@ -3439,7 +3443,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi", - "rustix 0.38.13", + "rustix 0.38.11", "windows-sys 0.48.0", ] @@ -3610,8 +3614,8 @@ checksum = "985d4a3753a08aaf120429924567795b2764c5c691489316a7fd076178e708b4" dependencies = [ "heck", "proc-macro-crate", - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -3682,7 +3686,7 @@ version = "8.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.21.4", + "base64 0.21.3", "pem", "ring", "serde", @@ -3793,9 +3797,9 @@ checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" [[package]] name = "linux-raw-sys" -version = "0.4.7" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a9bad9f94746442c783ca431b22403b519cd7fbeed0533fdd6328b2f2212128" +checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503" [[package]] name = "litemap" @@ -3923,7 +3927,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a4964177ddfdab1e3a2b37aec7cf320e14169abb0ed73999f558136409178d5" dependencies = [ - "base64 0.21.4", + "base64 0.21.3", "hyper", "indexmap 1.9.3", "ipnet", @@ -3941,9 +3945,9 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", - "syn 2.0.32", + "proc-macro2", + "quote", + "syn 2.0.29", ] [[package]] @@ -4045,8 +4049,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" dependencies = [ "cfg-if", - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -4066,8 +4070,8 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a7d5f7076603ebc68de2dc6a650ec331a062a13abaa346975be747bbfa4b789" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -4252,9 +4256,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.66", - "quote 1.0.33", - "syn 2.0.32", + "proc-macro2", + "quote", + "syn 2.0.29", ] [[package]] @@ -4264,9 +4268,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56ea360eafe1022f7cc56cd7b869ed57330fb2453d0c7831d99b74c65d2f5597" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.66", - "quote 1.0.33", - "syn 2.0.32", + "proc-macro2", + "quote", + "syn 2.0.29", ] [[package]] @@ -4280,9 +4284,9 @@ dependencies = [ [[package]] name = "object" -version = "0.32.1" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" +checksum = "77ac5bbd07aea88c60a577a1ce218075ffd59208b2d7ca97adf9bfc5aeb21ebe" dependencies = [ "memchr", ] @@ -4329,8 +4333,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "003b2be5c6c53c1cfeb0a238b8a1c3915cd410feb684457a36c10038f764bb1c" dependencies = [ "bytes", - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -4373,9 +4377,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.8" +version = "3.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f88eaac72ead1b9bd4ce747d577dbd2ad31fb0a56a9a20c611bf27bd1b97fbed" +checksum = "0dec8a8073036902368c2cdc0387e85ff9a37054d7e7c98e592145e0c92cd4fb" dependencies = [ "arrayvec", "bitvec", @@ -4388,13 +4392,13 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.6.8" 
+version = "3.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33bdcd446e9400b6ad9fc85b4aea68846c258b07c3efb994679ae82707b133f0" +checksum = "312270ee71e1cd70289dacf597cab7b207aa107d2f28191c2ae45b2ece18a260" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -4555,9 +4559,9 @@ checksum = "3444646e286606587e49f3bcf1679b8cef1dc2c5ecc29ddacaffc305180d464b" dependencies = [ "phf_generator", "phf_shared", - "proc-macro2 1.0.66", - "quote 1.0.33", - "syn 2.0.32", + "proc-macro2", + "quote", + "syn 2.0.29", ] [[package]] @@ -4584,9 +4588,9 @@ version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", - "syn 2.0.32", + "proc-macro2", + "quote", + "syn 2.0.29", ] [[package]] @@ -4765,12 +4769,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.15" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" +checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62" dependencies = [ - "proc-macro2 1.0.66", - "syn 2.0.32", + "proc-macro2", + "syn 2.0.29", ] [[package]] @@ -4804,8 +4808,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "syn 1.0.109", "version_check", ] @@ -4816,20 +4820,11 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "version_check", ] -[[package]] -name = "proc-macro2" -version = "0.4.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" -dependencies = [ - "unicode-xid 0.1.0", -] - [[package]] name = "proc-macro2" version = "1.0.66" @@ -4872,25 +4867,14 @@ dependencies = [ "unarray", ] -[[package]] -name = "proptest-derive" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90b46295382dc76166cb7cf2bb4a97952464e4b7ed5a43e6cd34e1fec3349ddc" -dependencies = [ - "proc-macro2 0.4.30", - "quote 0.6.13", - "syn 0.15.44", -] - [[package]] name = "proptest-derive" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cf16337405ca084e9c78985114633b6827711d22b9e6ef6c6c0d665eb3f0b6e" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -4946,22 +4930,13 @@ dependencies = [ "memchr", ] -[[package]] -name = "quote" -version = "0.6.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" -dependencies = [ - "proc-macro2 0.4.30", -] - [[package]] name = "quote" version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ - "proc-macro2 1.0.66", + "proc-macro2", ] [[package]] @@ -5180,7 +5155,7 @@ version = "0.11.20" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" dependencies = [ - "base64 0.21.4", + "base64 0.21.3", "bytes", "encoding_rs", "futures-core", @@ -5285,7 +5260,7 @@ dependencies = [ "tempfile", "thiserror", "tokio", - "toml 0.7.8", + "toml 0.7.6", "tracing", "tui", "vergen", @@ -5315,6 +5290,7 @@ dependencies = [ "futures-core", "futures-util", "metrics", + "reth-interfaces", "reth-metrics", "reth-payload-builder", "reth-primitives", @@ -5386,7 +5362,7 @@ dependencies = [ "codecs-derive", "modular-bitfield", "proptest", - "proptest-derive 0.4.0", + "proptest-derive", "revm-primitives", "serde", "test-fuzz", @@ -5407,7 +5383,7 @@ dependencies = [ "serde", "serde_json", "tempfile", - "toml 0.7.8", + "toml 0.7.6", ] [[package]] @@ -5444,7 +5420,7 @@ dependencies = [ "postcard", "pprof", "proptest", - "proptest-derive 0.4.0", + "proptest-derive", "rand 0.8.5", "reth-codecs", "reth-db", @@ -5582,7 +5558,7 @@ dependencies = [ "metrics", "pin-project", "proptest", - "proptest-derive 0.4.0", + "proptest-derive", "rand 0.8.5", "reth-codecs", "reth-discv4", @@ -5695,11 +5671,11 @@ version = "0.1.0-alpha.8" dependencies = [ "metrics", "once_cell", - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "regex", "serial_test", - "syn 2.0.32", + "syn 2.0.29", "trybuild", ] @@ -5836,7 +5812,7 @@ dependencies = [ "plain_hasher", "pprof", "proptest", - "proptest-derive 0.4.0", + "proptest-derive", "rand 0.8.5", "rayon", "reth-codecs", @@ -5857,7 +5833,7 @@ dependencies = [ "tiny-keccak", "tokio", "tokio-stream", - "toml 0.7.8", + "toml 0.7.6", "tracing", "triehash", "url", @@ -5870,16 +5846,17 @@ version = "0.1.0-alpha.8" dependencies = [ "assert_matches", "auto_impl", - "derive_more", "itertools 0.11.0", "parking_lot 0.12.1", "pin-project", + "rayon", "reth-db", "reth-interfaces", "reth-primitives", "reth-revm-primitives", "reth-rlp", "reth-trie", + "revm", "tempfile", "tokio", "tokio-stream", @@ -5908,14 +5885,12 @@ dependencies = [ name = "reth-revm" version = "0.1.0-alpha.8" dependencies = [ - "once_cell", "reth-consensus-common", "reth-interfaces", "reth-primitives", "reth-provider", "reth-revm-inspectors", "reth-revm-primitives", - "reth-rlp", "revm", "tracing", ] @@ -5967,9 +5942,9 @@ dependencies = [ name = "reth-rlp-derive" version = "0.1.0-alpha.8" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", - "syn 2.0.32", + "proc-macro2", + "quote", + "syn 2.0.29", ] [[package]] @@ -6153,6 +6128,7 @@ dependencies = [ "reth-revm", "reth-rlp", "reth-trie", + "revm", "serde", "serde_json", "thiserror", @@ -6239,7 +6215,7 @@ dependencies = [ [[package]] name = "revm" version = "3.3.0" -source = "git+https://github.com/bluealloy/revm?branch=release/v25#6084e0fa2d457931cd8c9d29934bca0812b5b8d6" +source = "git+https://github.com/bluealloy/revm#9f00e3796e165efb2c76feb2c335d1d7468a9b3d" dependencies = [ "auto_impl", "revm-interpreter", @@ -6249,7 +6225,7 @@ dependencies = [ [[package]] name = "revm-interpreter" version = "1.1.2" -source = "git+https://github.com/bluealloy/revm?branch=release/v25#6084e0fa2d457931cd8c9d29934bca0812b5b8d6" +source = "git+https://github.com/bluealloy/revm#9f00e3796e165efb2c76feb2c335d1d7468a9b3d" dependencies = [ "derive_more", "enumn", @@ -6260,7 +6236,7 @@ dependencies = [ [[package]] name = "revm-precompile" version = "2.0.3" -source = "git+https://github.com/bluealloy/revm?branch=release/v25#6084e0fa2d457931cd8c9d29934bca0812b5b8d6" +source = 
"git+https://github.com/bluealloy/revm#9f00e3796e165efb2c76feb2c335d1d7468a9b3d" dependencies = [ "k256", "num", @@ -6276,21 +6252,22 @@ dependencies = [ [[package]] name = "revm-primitives" version = "1.1.2" -source = "git+https://github.com/bluealloy/revm?branch=release/v25#6084e0fa2d457931cd8c9d29934bca0812b5b8d6" +source = "git+https://github.com/bluealloy/revm#9f00e3796e165efb2c76feb2c335d1d7468a9b3d" dependencies = [ "arbitrary", "auto_impl", + "bitflags 2.4.0", "bitvec", "bytes", "derive_more", "enumn", "fixed-hash", - "hashbrown 0.13.2", + "hashbrown 0.14.0", "hex", "hex-literal", "primitive-types", "proptest", - "proptest-derive 0.3.0", + "proptest-derive", "rlp", "ruint", "serde", @@ -6366,8 +6343,8 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -6470,14 +6447,14 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.13" +version = "0.38.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7db8590df6dfcd144d22afd1b83b36c21a18d7cbc1dc4bb5295a8712e9eb662" +checksum = "c0c3dde1fc030af041adc40e79c0e7fbcf431dd24870053d187d7c66e4b87453" dependencies = [ "bitflags 2.4.0", "errno 0.3.3", "libc", - "linux-raw-sys 0.4.7", + "linux-raw-sys 0.4.5", "windows-sys 0.48.0", ] @@ -6511,7 +6488,7 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ - "base64 0.21.4", + "base64 0.21.3", ] [[package]] @@ -6591,8 +6568,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "912e55f6d20e0e80d63733872b40e1227c0bce1e1ab81ba67d696339bfd7fd29" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -6766,16 +6743,16 @@ version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", - "syn 2.0.32", + "proc-macro2", + "quote", + "syn 2.0.29", ] [[package]] name = "serde_json" -version = "1.0.106" +version = "1.0.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cc66a619ed80bf7a0f6b17dd063a84b88f6dea1813737cf469aef1d081142c2" +checksum = "693151e1ac27563d6dbcec9dee9fbd5da8539b20fa14ad3752b2e6d363ace360" dependencies = [ "itoa", "ryu", @@ -6809,7 +6786,7 @@ version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ca3b16a3d82c4088f343b7480a93550b3eabe1a358569c2dfe38bbcead07237" dependencies = [ - "base64 0.21.4", + "base64 0.21.3", "chrono", "hex", "indexmap 1.9.3", @@ -6827,9 +6804,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e6be15c453eb305019bfa438b1593c731f36a289a7853f7707ee29e870b3b3c" dependencies = [ "darling 0.20.3", - "proc-macro2 1.0.66", - "quote 1.0.33", - "syn 2.0.32", + "proc-macro2", + "quote", + "syn 2.0.29", ] [[package]] @@ -6852,9 +6829,9 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", - "syn 2.0.32", + "proc-macro2", + "quote", + "syn 2.0.29", ] [[package]] @@ -6933,9 +6910,9 
@@ dependencies = [ [[package]] name = "shlex" -version = "1.2.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7cee0529a6d40f580e7a5e6c495c8fbfe21b7b52795ed4bb5e62cdf92bc6380" +checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" [[package]] name = "signal-hook" @@ -7179,8 +7156,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck", - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "rustversion", "syn 1.0.109", ] @@ -7192,10 +7169,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad8d03b598d3d0fff69bf533ee3ef19b8eeb342729596df84bcc7e1f96ec4059" dependencies = [ "heck", - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "rustversion", - "syn 2.0.32", + "syn 2.0.29", ] [[package]] @@ -7238,9 +7215,9 @@ dependencies = [ [[package]] name = "symbolic-common" -version = "12.4.0" +version = "12.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e0e9bc48b3852f36a84f8d0da275d50cb3c2b88b59b9ec35fdd8b7fa239e37d" +checksum = "167a4ffd7c35c143fd1030aa3c2caf76ba42220bd5a6b5f4781896434723b8c3" dependencies = [ "debugid", "memmap2", @@ -7250,45 +7227,34 @@ dependencies = [ [[package]] name = "symbolic-demangle" -version = "12.4.0" +version = "12.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "691e53bdc0702aba3a5abc2cffff89346fcbd4050748883c7e2f714b33a69045" +checksum = "e378c50e80686c1c5c205674e1f86a2858bec3d2a7dfdd690331a8a19330f293" dependencies = [ "cpp_demangle", "rustc-demangle", "symbolic-common", ] -[[package]] -name = "syn" -version = "0.15.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5" -dependencies = [ - "proc-macro2 0.4.30", - "quote 0.6.13", - "unicode-xid 0.1.0", -] - [[package]] name = "syn" version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.32" +version = "2.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239814284fd6f1a4ffe4ca893952cdd93c224b6a1571c9a9eadd670295c0c9e2" +checksum = "c324c494eba9d92503e6f1ef2e6df781e78f6a7705a0202d9801b198807d518a" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "unicode-ident", ] @@ -7298,10 +7264,10 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "syn 1.0.109", - "unicode-xid 0.2.4", + "unicode-xid", ] [[package]] @@ -7310,10 +7276,10 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "285ba80e733fac80aa4270fbcdf83772a79b80aa35c97075320abfee4a915b06" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", - "syn 2.0.32", - "unicode-xid 0.2.4", + "proc-macro2", + "quote", + "syn 2.0.29", + "unicode-xid", ] [[package]] @@ -7331,7 +7297,7 @@ dependencies = [ "cfg-if", "fastrand 2.0.0", "redox_syscall 0.3.5", - "rustix 0.38.13", + "rustix 0.38.11", 
"windows-sys 0.48.0", ] @@ -7369,8 +7335,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f0528a7ad0bc85f826aa831434a37833aea622a5ae155f5b5dd431b25244213" dependencies = [ "cargo_metadata 0.15.4", - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "serde", "strum_macros 0.25.2", ] @@ -7385,10 +7351,10 @@ dependencies = [ "if_chain", "itertools 0.10.5", "lazy_static", - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "subprocess", - "syn 2.0.32", + "syn 2.0.29", "test-fuzz-internal", "toolchain_find", ] @@ -7428,9 +7394,9 @@ version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", - "syn 2.0.32", + "proc-macro2", + "quote", + "syn 2.0.29", ] [[package]] @@ -7551,9 +7517,9 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", - "syn 2.0.32", + "proc-macro2", + "quote", + "syn 2.0.29", ] [[package]] @@ -7617,9 +7583,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.7.8" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd79e69d3b627db300ff956027cc6c3798cef26d22526befdfcd12feeb6d2257" +checksum = "c17e963a819c331dcacd7ab957d80bc2b9a9c1e71c804826d2f283dd65306542" dependencies = [ "serde", "serde_spanned", @@ -7638,9 +7604,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.19.15" +version = "0.19.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" +checksum = "f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a" dependencies = [ "indexmap 2.0.0", "serde", @@ -7690,7 +7656,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" dependencies = [ "async-compression", - "base64 0.21.4", + "base64 0.21.3", "bitflags 2.4.0", "bytes", "futures-core", @@ -7755,9 +7721,9 @@ version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", - "syn 2.0.32", + "proc-macro2", + "quote", + "syn 2.0.29", ] [[package]] @@ -7931,9 +7897,9 @@ checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "trybuild" -version = "1.0.84" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5c89fd17b7536f2cf66c97cff6e811e89e728ca0ed13caeed610c779360d8b4" +checksum = "6df60d81823ed9c520ee897489573da4b1d79ffbe006b8134f46de1a1aa03555" dependencies = [ "basic-toml", "glob", @@ -8048,12 +8014,6 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" -[[package]] -name = "unicode-xid" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" - [[package]] name = "unicode-xid" version = "0.2.4" @@ -8164,9 +8124,9 @@ dependencies = [ [[package]] name = "walkdir" -version = "2.4.0" +version = "2.3.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" +checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" dependencies = [ "same-file", "winapi-util", @@ -8212,9 +8172,9 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.66", - "quote 1.0.33", - "syn 2.0.32", + "proc-macro2", + "quote", + "syn 2.0.29", "wasm-bindgen-shared", ] @@ -8236,7 +8196,7 @@ version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ - "quote 1.0.33", + "quote", "wasm-bindgen-macro-support", ] @@ -8246,9 +8206,9 @@ version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", - "syn 2.0.32", + "proc-macro2", + "quote", + "syn 2.0.29", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8277,14 +8237,13 @@ checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" [[package]] name = "which" -version = "4.4.2" +version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" +checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" dependencies = [ "either", - "home", + "libc", "once_cell", - "rustix 0.38.13", ] [[package]] @@ -8551,9 +8510,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.18" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab77e97b50aee93da431f2cee7cd0f43b4d1da3c408042f2d7d164187774f0a" +checksum = "47430998a7b5d499ccee752b41567bc3afc57e1327dc855b1a2aa44ce29b5fa1" [[package]] name = "xmltree" @@ -8588,8 +8547,8 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af46c169923ed7516eef0aa32b56d2651b229f57458ebe46b49ddd6efef5b7a2" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "syn 1.0.109", "synstructure 0.12.6", ] @@ -8609,8 +8568,8 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4eae7c1f7d4b8eafce526bc0771449ddc2f250881ae31c50d22c032b5a1c499" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "syn 1.0.109", "synstructure 0.12.6", ] @@ -8630,9 +8589,9 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", - "syn 2.0.32", + "proc-macro2", + "quote", + "syn 2.0.29", ] [[package]] @@ -8652,8 +8611,8 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "486558732d5dde10d0f8cb2936507c1bb21bc539d924c949baf5f36a58e51bac" dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.33", + "proc-macro2", + "quote", "syn 1.0.109", "synstructure 0.12.6", ] diff --git a/Cargo.toml b/Cargo.toml index 343a5dbaf287..2bd859afca64 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -97,10 +97,9 @@ reth-network = { path = "./crates/net/network" } reth-network-api = { path = "./crates/net/network-api" } reth-rpc-types-compat = { path = "./crates/rpc/rpc-types-compat" } -revm = { git = "https://github.com/bluealloy/revm", branch = "release/v25" } 
-revm-interpreter = { git = "https://github.com/bluealloy/revm", branch = "release/v25" } -revm-precompile = { git = "https://github.com/bluealloy/revm", branch = "release/v25" } -revm-primitives = { git = "https://github.com/bluealloy/revm", branch = "release/v25" } +# revm +revm = { git = "https://github.com/bluealloy/revm" } +revm-primitives = { git = "https://github.com/bluealloy/revm" } ## eth ethers-core = { version = "2.0", default-features = false } @@ -117,7 +116,7 @@ boa_gc = "0.17" ## misc aquamarine = "0.3" -bytes = "1.4" +bytes = "1.5" bitflags = "2.3" tracing = "0.1.0" tracing-appender = "0.2" @@ -129,7 +128,7 @@ strum = "0.25" rayon = "1.7" itertools = "0.11" parking_lot = "0.12" -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation hex-literal = "0.4" ### proc-macros @@ -172,10 +171,4 @@ assert_matches = "1.5.0" proptest = "1.0" proptest-derive = "0.4" -serial_test = "2" - -[patch.crates-io] -revm = { git = "https://github.com/bluealloy/revm", branch = "release/v25" } -revm-interpreter = { git = "https://github.com/bluealloy/revm", branch = "release/v25" } -revm-precompile = { git = "https://github.com/bluealloy/revm", branch = "release/v25" } -revm-primitives = { git = "https://github.com/bluealloy/revm", branch = "release/v25" } +serial_test = "2" \ No newline at end of file diff --git a/bin/reth/src/chain/import.rs b/bin/reth/src/chain/import.rs index 429e5c796194..19d6ec1d56ba 100644 --- a/bin/reth/src/chain/import.rs +++ b/bin/reth/src/chain/import.rs @@ -184,6 +184,7 @@ impl ImportCommand { ExecutionStageThresholds { max_blocks: config.stages.execution.max_blocks, max_changes: config.stages.execution.max_changes, + max_cumulative_gas: config.stages.execution.max_cumulative_gas, }, config .stages diff --git a/bin/reth/src/debug_cmd/execution.rs b/bin/reth/src/debug_cmd/execution.rs index db6f2146aa31..afd83856958a 100644 --- a/bin/reth/src/debug_cmd/execution.rs +++ b/bin/reth/src/debug_cmd/execution.rs @@ -135,7 +135,11 @@ impl Command { }) .set(ExecutionStage::new( factory, - ExecutionStageThresholds { max_blocks: None, max_changes: None }, + ExecutionStageThresholds { + max_blocks: None, + max_changes: None, + max_cumulative_gas: None, + }, stage_conf .merkle .clean_threshold diff --git a/bin/reth/src/debug_cmd/in_memory_merkle.rs b/bin/reth/src/debug_cmd/in_memory_merkle.rs index 837c31c752bd..bddece7650e6 100644 --- a/bin/reth/src/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/debug_cmd/in_memory_merkle.rs @@ -14,8 +14,9 @@ use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; use reth_primitives::{fs, stage::StageId, BlockHashOrNumber, ChainSpec}; use reth_provider::{ - AccountExtReader, BlockExecutor, BlockWriter, ExecutorFactory, HashingWriter, HeaderProvider, - LatestStateProviderRef, ProviderFactory, StageCheckpointReader, StorageReader, + AccountExtReader, BlockWriter, ExecutorFactory, HashingWriter, HeaderProvider, + LatestStateProviderRef, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, + StorageReader, }; use reth_tasks::TaskExecutor; use reth_trie::{hashed_cursor::HashedPostStateCursorFactory, updates::TrieKey, StateRoot}; @@ -164,17 +165,19 @@ impl Command { .await?; let executor_factory = reth_revm::Factory::new(self.chain.clone()); - let mut executor = executor_factory.with_sp(LatestStateProviderRef::new(provider.tx_ref())); + let mut executor = + 
executor_factory.with_state(LatestStateProviderRef::new(provider.tx_ref())); let merkle_block_td = provider.header_td_by_number(merkle_block_number)?.unwrap_or_default(); - let block_state = executor.execute_and_verify_receipt( + executor.execute_and_verify_receipt( &block.clone().unseal(), merkle_block_td + block.difficulty, None, )?; + let block_state = executor.take_output_state(); - // Unpacked `PostState::state_root_slow` function + // Unpacked `BundleState::state_root_slow` function let hashed_post_state = block_state.hash_state_slow().sorted(); let (account_prefix_set, storage_prefix_set) = hashed_post_state.construct_prefix_sets(); let tx = provider.tx_ref(); @@ -194,7 +197,7 @@ impl Command { // Insert block, state and hashes provider_rw.insert_block(block.clone(), None, None)?; - block_state.write_to_db(provider_rw.tx_ref(), block.number)?; + block_state.write_to_db(provider_rw.tx_ref(), OriginalValuesKnown::No)?; let storage_lists = provider_rw.changed_storages_with_range(block.number..=block.number)?; let storages = provider_rw.plainstate_storages(storage_lists)?; provider_rw.insert_storage_for_hashing(storages)?; diff --git a/bin/reth/src/debug_cmd/merkle.rs b/bin/reth/src/debug_cmd/merkle.rs index add25f6cd505..37e41e8fc6ed 100644 --- a/bin/reth/src/debug_cmd/merkle.rs +++ b/bin/reth/src/debug_cmd/merkle.rs @@ -205,7 +205,11 @@ impl Command { let factory = reth_revm::Factory::new(self.chain.clone()); let mut execution_stage = ExecutionStage::new( factory, - ExecutionStageThresholds { max_blocks: Some(1), max_changes: None }, + ExecutionStageThresholds { + max_blocks: Some(1), + max_changes: None, + max_cumulative_gas: None, + }, MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, PruneModes::all(), ); diff --git a/bin/reth/src/init.rs b/bin/reth/src/init.rs index 0f46139a675a..3f6d44b4e5a0 100644 --- a/bin/reth/src/init.rs +++ b/bin/reth/src/init.rs @@ -6,8 +6,15 @@ use reth_db::{ transaction::{DbTx, DbTxMut}, }; use reth_primitives::{stage::StageId, Account, Bytecode, ChainSpec, StorageEntry, H256, U256}; -use reth_provider::{DatabaseProviderRW, HashingWriter, HistoryWriter, PostState, ProviderFactory}; -use std::{collections::BTreeMap, sync::Arc}; +use reth_provider::{ + bundle_state::{BundleStateInit, RevertsInit}, + BundleStateWithReceipts, DatabaseProviderRW, HashingWriter, HistoryWriter, OriginalValuesKnown, + ProviderFactory, +}; +use std::{ + collections::{BTreeMap, HashMap}, + sync::Arc, +}; use tracing::debug; /// Database initialization error type. @@ -85,31 +92,63 @@ pub fn insert_genesis_state( tx: &>::TXMut, genesis: &reth_primitives::Genesis, ) -> Result<(), InitDatabaseError> { - let mut state = PostState::default(); + let mut state_init: BundleStateInit = HashMap::new(); + let mut reverts_init = HashMap::new(); + let mut contracts: HashMap = HashMap::new(); for (address, account) in &genesis.alloc { - let mut bytecode_hash = None; - if let Some(code) = &account.code { + let bytecode_hash = if let Some(code) = &account.code { let bytecode = Bytecode::new_raw(code.0.clone()); - // FIXME: Can bytecode_hash be Some(Bytes::new()) here? 
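// The in_memory_merkle.rs hunk above shows the reshaped executor contract:
// execution accumulates state internally and the caller takes it out
// afterwards. A minimal sketch of that flow under the trait surface in this
// patch; import paths and error plumbing are approximate, not the exact API.
fn execute_one<EF: ExecutorFactory>(
    factory: &EF,
    provider: impl StateProvider,
    block: &Block,
    total_difficulty: U256,
) -> Result<BundleStateWithReceipts, BlockExecutionError> {
    // `with_state` replaces the old `with_sp` constructor
    let mut executor = factory.with_state(provider);
    // executes the block and checks the receipt root; no state is returned
    executor.execute_and_verify_receipt(block, total_difficulty, None)?;
    // the accumulated output state is taken out explicitly
    Ok(executor.take_output_state())
}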
- bytecode_hash = Some(bytecode.hash); - state.add_bytecode(bytecode.hash, bytecode); - } - state.create_account( - 0, + let hash = bytecode.hash_slow(); + contracts.insert(hash, bytecode); + Some(hash) + } else { + None + }; + + // get state + let storage = account + .storage + .as_ref() + .map(|m| { + m.iter() + .map(|(key, value)| { + let value = U256::from_be_bytes(value.0); + (*key, (U256::ZERO, value)) + }) + .collect::>() + }) + .unwrap_or_default(); + + reverts_init.insert( *address, - Account { nonce: account.nonce.unwrap_or(0), balance: account.balance, bytecode_hash }, + (Some(None), storage.keys().map(|k| StorageEntry::new(*k, U256::ZERO)).collect()), + ); + + state_init.insert( + *address, + ( + None, + Some(Account { + nonce: account.nonce.unwrap_or_default(), + balance: account.balance, + bytecode_hash, + }), + storage, + ), ); - if let Some(storage) = &account.storage { - let mut storage_changes = reth_provider::post_state::StorageChangeset::new(); - for (&key, &value) in storage { - storage_changes - .insert(U256::from_be_bytes(key.0), (U256::ZERO, U256::from_be_bytes(value.0))); - } - state.change_storage(0, *address, storage_changes); - } } - state.write_to_db(tx, 0)?; + let all_reverts_init: RevertsInit = HashMap::from([(0, reverts_init)]); + + let bundle = BundleStateWithReceipts::new_init( + state_init, + all_reverts_init, + contracts.into_iter().collect(), + vec![], + 0, + ); + + bundle.write_to_db(tx, OriginalValuesKnown::Yes)?; Ok(()) } diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 6f66d56772d7..8905dba4d0c5 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -691,7 +691,7 @@ impl NodeCommand { /// NOTE: The download is attempted with infinite retries. async fn lookup_or_fetch_tip( &self, - db: &DB, + db: DB, client: Client, tip: H256, ) -> Result @@ -707,7 +707,7 @@ impl NodeCommand { /// NOTE: The download is attempted with infinite retries. 
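// The init.rs rewrite above assembles genesis state as plain maps and writes
// them once as a BundleStateWithReceipts. A sketch of one BundleStateInit
// entry, assuming storage maps a slot to an (old, new) value pair whose old
// side is zero at genesis; types follow the diff, the helper is hypothetical.
use std::collections::HashMap;

fn genesis_entry(
    nonce: u64,
    balance: U256,
    bytecode_hash: Option<H256>,
    storage: HashMap<H256, (U256, U256)>,
) -> (Option<Account>, Option<Account>, HashMap<H256, (U256, U256)>) {
    // (state before, state after, storage changes); "before" is None because
    // nothing exists prior to genesis
    (None, Some(Account { nonce, balance, bytecode_hash }), storage)
}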
async fn fetch_tip( &self, - db: &DB, + db: DB, client: Client, tip: BlockHashOrNumber, ) -> Result @@ -846,6 +846,7 @@ impl NodeCommand { ExecutionStageThresholds { max_blocks: stage_config.execution.max_blocks, max_changes: stage_config.execution.max_changes, + max_cumulative_gas: stage_config.execution.max_cumulative_gas, }, stage_config .merkle diff --git a/bin/reth/src/stage/dump/merkle.rs b/bin/reth/src/stage/dump/merkle.rs index 69ca457fd2dc..55eef819f1c5 100644 --- a/bin/reth/src/stage/dump/merkle.rs +++ b/bin/reth/src/stage/dump/merkle.rs @@ -69,7 +69,11 @@ async fn unwind_and_copy( // Bring Plainstate to TO (hashing stage execution requires it) let mut exec_stage = ExecutionStage::new( reth_revm::Factory::new(db_tool.chain.clone()), - ExecutionStageThresholds { max_blocks: Some(u64::MAX), max_changes: None }, + ExecutionStageThresholds { + max_blocks: Some(u64::MAX), + max_changes: None, + max_cumulative_gas: None, + }, MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, PruneModes::all(), ); diff --git a/bin/reth/src/stage/run.rs b/bin/reth/src/stage/run.rs index 079db67837cd..d06ee8e7b2f8 100644 --- a/bin/reth/src/stage/run.rs +++ b/bin/reth/src/stage/run.rs @@ -201,6 +201,7 @@ impl Command { ExecutionStageThresholds { max_blocks: Some(batch_size), max_changes: None, + max_cumulative_gas: None, }, config.stages.merkle.clean_threshold, config.prune.map(|prune| prune.parts).unwrap_or_default(), diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index ac17cd2f463b..7302baab47cd 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -3,7 +3,8 @@ use crate::{ canonical_chain::CanonicalChain, chain::{BlockChainId, BlockKind}, metrics::TreeMetrics, - AppendableChain, BlockBuffer, BlockIndices, BlockchainTreeConfig, PostStateData, TreeExternals, + AppendableChain, BlockBuffer, BlockIndices, BlockchainTreeConfig, BundleStateData, + TreeExternals, }; use reth_db::{cursor::DbCursorRO, database::Database, tables, transaction::DbTx}; use reth_interfaces::{ @@ -21,10 +22,9 @@ use reth_primitives::{ }; use reth_provider::{ chain::{ChainSplit, SplitAt}, - post_state::PostState, - BlockExecutionWriter, BlockNumReader, BlockWriter, CanonStateNotification, - CanonStateNotificationSender, CanonStateNotifications, Chain, DatabaseProvider, - DisplayBlocksChain, ExecutorFactory, HeaderProvider, + BlockExecutionWriter, BlockNumReader, BlockWriter, BundleStateWithReceipts, + CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, Chain, + DatabaseProvider, DisplayBlocksChain, ExecutorFactory, HeaderProvider, }; use reth_stages::{MetricEvent, MetricEventsSender}; use std::{ @@ -233,7 +233,7 @@ impl BlockchainTree /// Returns the block's receipts with matching hash from any side-chain. /// /// Caution: This will not return blocks from the canonical chain. - pub fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option<&[Receipt]> { + pub fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option> { let id = self.block_indices.get_blocks_chain_id(&block_hash)?; let chain = self.chains.get(&id)?; chain.receipts_by_block_hash(block_hash) @@ -254,11 +254,11 @@ impl BlockchainTree /// This includes: /// * `BlockHash` of canonical block that chain connects to. Needed for creating database /// provider for the rest of the state. 
- /// * `PostState` changes that happened at the asked `block_hash` + /// * `BundleState` changes that happened at the asked `block_hash` /// * `BTreeMap` list of past pending and canonical hashes, That are /// needed for evm `BLOCKHASH` opcode. /// Return none if block is not known. - pub fn post_state_data(&self, block_hash: BlockHash) -> Option { + pub fn post_state_data(&self, block_hash: BlockHash) -> Option { trace!(target: "blockchain_tree", ?block_hash, "Searching for post state data"); // if it is part of the chain if let Some(chain_id) = self.block_indices.get_blocks_chain_id(&block_hash) { @@ -281,15 +281,15 @@ impl BlockchainTree // get canonical fork. let canonical_fork = self.canonical_fork(chain_id)?; - return Some(PostStateData { state, parent_block_hashed, canonical_fork }) + return Some(BundleStateData { state, parent_block_hashed, canonical_fork }) } // check if there is canonical block if let Some(canonical_number) = self.canonical_chain().canonical_number(block_hash) { trace!(target: "blockchain_tree", ?block_hash, "Constructing post state data based on canonical chain"); - return Some(PostStateData { + return Some(BundleStateData { canonical_fork: ForkBlock { number: canonical_number, hash: block_hash }, - state: PostState::new(), + state: BundleStateWithReceipts::default(), parent_block_hashed: self.canonical_chain().inner().clone(), }) } @@ -844,12 +844,16 @@ impl BlockchainTree let chain = chain.into_inner(); match chain.split(split_at) { ChainSplit::Split { canonical, pending } => { + trace!(target: "blockchain_tree", ?canonical, ?pending, "Split chain"); // rest of split chain is inserted back with same chain_id. self.block_indices.insert_chain(chain_id, &pending); self.chains.insert(chain_id, AppendableChain::new(pending)); canonical } - ChainSplit::NoSplitCanonical(canonical) => canonical, + ChainSplit::NoSplitCanonical(canonical) => { + trace!(target: "blockchain_tree", "No split on canonical chain"); + canonical + } ChainSplit::NoSplitPending(_) => { unreachable!("Should not happen as block indices guarantee structure of blocks") } @@ -931,6 +935,8 @@ impl BlockchainTree }; let chain = self.chains.remove(&chain_id).expect("To be present"); + trace!(target: "blockchain_tree", ?chain, "Found chain to make canonical"); + // we are splitting chain at the block hash that we want to make canonical let canonical = self.split_chain(chain_id, chain, SplitAt::Hash(*block_hash)); @@ -942,6 +948,7 @@ impl BlockchainTree while let Some(chain_id) = self.block_indices.get_blocks_chain_id(&block_fork.hash) { let chain = self.chains.remove(&chain_id).expect("To fork to be present"); block_fork = chain.fork_block(); + // canonical chain is lower part of the chain. let canonical = self.split_chain(chain_id, chain, SplitAt::Number(block_fork_number)); block_fork_number = canonical.fork_block_number(); chains_to_promote.push(canonical); @@ -950,10 +957,17 @@ impl BlockchainTree let old_tip = self.block_indices.canonical_tip(); // Merge all chain into one chain. 
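// `make_canonical` merges the promoted pieces just below; a standalone
// sketch of that merge, assuming `append_chain` extends blocks and bundle
// state in order and only fails when segments do not connect:
fn merge_promoted(mut chains_to_promote: Vec<Chain>) -> Chain {
    // the piece split off last sits lowest, i.e. it joins the canonical chain
    let mut new_canon_chain = chains_to_promote.pop().expect("at least one chain");
    // stack the remaining segments on top, oldest first
    for chain in chains_to_promote.into_iter().rev() {
        new_canon_chain.append_chain(chain).expect("chains connect by construction");
    }
    new_canon_chain
}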
let mut new_canon_chain = chains_to_promote.pop().expect("There is at least one block"); + trace!(target: "blockchain_tree", ?new_canon_chain, "Merging chains"); + let mut chain_appended = false; for chain in chains_to_promote.into_iter().rev() { + chain_appended = true; + trace!(target: "blockchain_tree", ?chain, "Appending chain"); new_canon_chain.append_chain(chain).expect("We have just build the chain."); } + if chain_appended { + trace!(target: "blockchain_tree", ?new_canon_chain, "Canonical appended chain"); + } // update canonical index self.block_indices.canonicalize_blocks(new_canon_chain.blocks()); @@ -963,6 +977,7 @@ impl BlockchainTree target: "blockchain_tree", "Committing new canonical chain: {}", DisplayBlocksChain(new_canon_chain.blocks()) ); + // if joins to the tip; if new_canon_chain.fork_block_hash() == old_tip.hash { chain_notification = @@ -1000,7 +1015,6 @@ impl BlockchainTree } Ok(val) => val, }; - // commit new canonical chain. self.commit_canonical(new_canon_chain.clone())?; @@ -1051,7 +1065,7 @@ impl BlockchainTree let (blocks, state) = chain.into_inner(); provider - .append_blocks_with_post_state( + .append_blocks_with_bundle_state( blocks.into_blocks().collect(), state, self.prune_modes.as_ref(), @@ -1106,7 +1120,7 @@ impl BlockchainTree if blocks_and_execution.is_empty() { Ok(None) } else { - Ok(Some(Chain::new(blocks_and_execution))) + Ok(Some(blocks_and_execution)) } } @@ -1148,14 +1162,13 @@ mod tests { proofs::EMPTY_ROOT, stage::StageCheckpoint, ChainSpecBuilder, H256, MAINNET, }; use reth_provider::{ - post_state::PostState, test_utils::{blocks::BlockChainTestData, TestExecutorFactory}, - BlockWriter, ProviderFactory, + BlockWriter, BundleStateWithReceipts, ProviderFactory, }; use std::{collections::HashSet, sync::Arc}; fn setup_externals( - exec_res: Vec, + exec_res: Vec, ) -> TreeExternals, Arc, TestExecutorFactory> { let db = create_test_rw_db(); let consensus = Arc::new(TestConsensus::default()); @@ -1289,10 +1302,10 @@ mod tests { BlockchainTree::new(externals, sender, config, None).expect("failed to create tree"); // genesis block 10 is already canonical - assert!(tree.make_canonical(&H256::zero()).is_ok()); + tree.make_canonical(&H256::zero()).unwrap(); // make sure is_block_hash_canonical returns true for genesis block - assert!(tree.is_block_hash_canonical(&H256::zero()).unwrap()); + tree.is_block_hash_canonical(&H256::zero()).unwrap(); // make genesis block 10 as finalized tree.finalize_block(10); @@ -1365,12 +1378,12 @@ mod tests { ); // make block1 canonical - assert!(tree.make_canonical(&block1.hash()).is_ok()); + tree.make_canonical(&block1.hash()).unwrap(); // check notification assert_matches!(canon_notif.try_recv(), Ok(CanonStateNotification::Commit{ new}) if *new.blocks() == BTreeMap::from([(block1.number,block1.clone())])); // make block2 canonicals - assert!(tree.make_canonical(&block2.hash()).is_ok()); + tree.make_canonical(&block2.hash()).unwrap(); // check notification. 
assert_matches!(canon_notif.try_recv(), Ok(CanonStateNotification::Commit{ new}) if *new.blocks() == BTreeMap::from([(block2.number,block2.clone())])); @@ -1502,8 +1515,7 @@ mod tests { assert!(tree.is_block_hash_canonical(&block1a.hash).unwrap()); // make b2 canonical - assert!(tree.make_canonical(&block2.hash()).is_ok()); - + tree.make_canonical(&block2.hash()).unwrap(); // Trie state: // b2 b2a (side chain) // | / @@ -1572,7 +1584,7 @@ mod tests { .assert(&tree); // commit b2a - assert!(tree.make_canonical(&block2.hash).is_ok()); + tree.make_canonical(&block2.hash).unwrap(); // Trie state: // b2 b2a (side chain) diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index 8a3b1b615556..258976fde3e1 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -3,7 +3,7 @@ //! A [`Chain`] contains the state of accounts for the chain after execution of its constituent //! blocks, as well as a list of the blocks the chain is composed of. use super::externals::TreeExternals; -use crate::{post_state::PostState, PostStateDataRef}; +use crate::BundleStateDataRef; use reth_db::database::Database; use reth_interfaces::{ blockchain_tree::error::{BlockchainTreeError, InsertBlockError}, @@ -14,8 +14,8 @@ use reth_primitives::{ BlockHash, BlockNumber, ForkBlock, SealedBlockWithSenders, SealedHeader, U256, }; use reth_provider::{ - providers::PostStateProvider, BlockExecutor, Chain, ExecutorFactory, PostStateDataProvider, - StateRootProvider, + providers::BundleStateProvider, BundleStateDataProvider, BundleStateWithReceipts, Chain, + ExecutorFactory, StateRootProvider, }; use std::{ collections::BTreeMap, @@ -72,17 +72,17 @@ impl AppendableChain { C: Consensus, EF: ExecutorFactory, { - let state = PostState::default(); + let state = BundleStateWithReceipts::default(); let empty = BTreeMap::new(); - let state_provider = PostStateDataRef { + let state_provider = BundleStateDataRef { state: &state, sidechain_block_hashes: &empty, canonical_block_hashes, canonical_fork, }; - let changeset = Self::validate_and_execute_canonical_head_descendant( + let bundle_state = Self::validate_and_execute_canonical_head_descendant( block.clone(), parent_header, state_provider, @@ -90,7 +90,7 @@ impl AppendableChain { ) .map_err(|err| InsertBlockError::new(block.block.clone(), err.into()))?; - Ok(Self { chain: Chain::new(vec![(block, changeset)]) }) + Ok(Self { chain: Chain::new(vec![block], bundle_state) }) } /// Create a new chain that forks off of the canonical chain. @@ -106,17 +106,17 @@ impl AppendableChain { C: Consensus, EF: ExecutorFactory, { - let state = PostState::default(); + let state = BundleStateWithReceipts::default(); let empty = BTreeMap::new(); - let state_provider = PostStateDataRef { + let state_provider = BundleStateDataRef { state: &state, sidechain_block_hashes: &empty, canonical_block_hashes, canonical_fork, }; - let changeset = Self::validate_and_execute_sidechain( + let bundle_state = Self::validate_and_execute_sidechain( block.clone(), parent_header, state_provider, @@ -124,7 +124,7 @@ impl AppendableChain { ) .map_err(|err| InsertBlockError::new(block.block.clone(), err.into()))?; - Ok(Self { chain: Chain::new(vec![(block, changeset)]) }) + Ok(Self { chain: Chain::new(vec![block], bundle_state) }) } /// Create a new chain that forks off of an existing sidechain. @@ -157,7 +157,7 @@ impl AppendableChain { state.revert_to(parent.number); // Revert changesets to get the state of the parent that we need to apply the change. 
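// Forking off an existing side chain above starts from a truncated copy of
// that chain's state; a minimal sketch, assuming `revert_to` drops every
// per-block change recorded after `parent_number`, as used in the diff:
fn state_at_fork(
    mut state: BundleStateWithReceipts,
    parent_number: BlockNumber,
) -> BundleStateWithReceipts {
    // roll the accumulated bundle state back to the fork parent
    state.revert_to(parent_number);
    state
}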
- let post_state_data = PostStateDataRef { + let post_state_data = BundleStateDataRef { state: &state, sidechain_block_hashes: &side_chain_block_hashes, canonical_block_hashes, @@ -177,15 +177,15 @@ impl AppendableChain { /// Validate and execute the given block that _extends the canonical chain_, validating its /// state root after execution. - fn validate_and_execute( + fn validate_and_execute( block: SealedBlockWithSenders, parent_block: &SealedHeader, - post_state_data_provider: PSDP, + post_state_data_provider: BSDP, externals: &TreeExternals, block_kind: BlockKind, - ) -> Result + ) -> Result where - PSDP: PostStateDataProvider, + BSDP: BundleStateDataProvider, DB: Database, C: Consensus, EF: ExecutorFactory, @@ -201,15 +201,16 @@ impl AppendableChain { let canonical_fork = post_state_data_provider.canonical_fork(); let state_provider = db.history_by_block_number(canonical_fork.number)?; - let provider = PostStateProvider::new(state_provider, post_state_data_provider); + let provider = BundleStateProvider::new(state_provider, post_state_data_provider); - let mut executor = externals.executor_factory.with_sp(&provider); - let post_state = executor.execute_and_verify_receipt(&block, U256::MAX, Some(senders))?; + let mut executor = externals.executor_factory.with_state(&provider); + executor.execute_and_verify_receipt(&block, U256::MAX, Some(senders))?; + let bundle_state = executor.take_output_state(); // check state root if the block extends the canonical chain. if block_kind.extends_canonical_head() { // check state root - let state_root = provider.state_root(post_state.clone())?; + let state_root = provider.state_root(bundle_state.clone())?; if block.state_root != state_root { return Err(ConsensusError::BodyStateRootDiff { got: state_root, @@ -219,19 +220,19 @@ impl AppendableChain { } } - Ok(post_state) + Ok(bundle_state) } /// Validate and execute the given block that _extends the canonical chain_, validating its /// state root after execution. - fn validate_and_execute_canonical_head_descendant( + fn validate_and_execute_canonical_head_descendant( block: SealedBlockWithSenders, parent_block: &SealedHeader, - post_state_data_provider: PSDP, + post_state_data_provider: BSDP, externals: &TreeExternals, - ) -> Result + ) -> Result where - PSDP: PostStateDataProvider, + BSDP: BundleStateDataProvider, DB: Database, C: Consensus, EF: ExecutorFactory, @@ -246,14 +247,14 @@ impl AppendableChain { } /// Validate and execute the given sidechain block, skipping state root validation. - fn validate_and_execute_sidechain( + fn validate_and_execute_sidechain( block: SealedBlockWithSenders, parent_block: &SealedHeader, - post_state_data_provider: PSDP, + post_state_data_provider: BSDP, externals: &TreeExternals, - ) -> Result + ) -> Result where - PSDP: PostStateDataProvider, + BSDP: BundleStateDataProvider, DB: Database, C: Consensus, EF: ExecutorFactory, @@ -295,7 +296,7 @@ impl AppendableChain { { let (_, parent_block) = self.blocks.last_key_value().expect("Chain has at least one block"); - let post_state_data = PostStateDataRef { + let post_state_data = BundleStateDataRef { state: &self.state, sidechain_block_hashes: &side_chain_block_hashes, canonical_block_hashes, @@ -310,6 +311,7 @@ impl AppendableChain { block_kind, ) .map_err(|err| InsertBlockError::new(block.block.clone(), err.into()))?; + // extend the state. 
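// Only blocks that extend the canonical head pay for the state-root check
// above; a condensed sketch of that step, with the provider-error conversion
// elided for brevity:
fn verify_state_root(
    provider: &impl StateRootProvider,
    block: &SealedBlock,
    bundle_state: BundleStateWithReceipts,
) -> Result<(), ConsensusError> {
    let state_root = provider.state_root(bundle_state).expect("error conversion elided");
    if block.state_root != state_root {
        return Err(ConsensusError::BodyStateRootDiff {
            got: state_root,
            expected: block.state_root,
        })
    }
    Ok(())
}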
self.state.extend(block_state); self.blocks.insert(block.number, block); Ok(()) diff --git a/crates/blockchain-tree/src/lib.rs b/crates/blockchain-tree/src/lib.rs index 15aa1da16a45..9f48a41d819f 100644 --- a/crates/blockchain-tree/src/lib.rs +++ b/crates/blockchain-tree/src/lib.rs @@ -22,9 +22,6 @@ //! //! - `test-utils`: Export utilities for testing -/// Execution result types. -pub use reth_provider::post_state; - pub mod blockchain_tree; pub use blockchain_tree::{BlockHashes, BlockchainTree}; @@ -44,7 +41,7 @@ pub mod shareable; pub use shareable::ShareableBlockchainTree; pub mod post_state_data; -pub use post_state_data::{PostStateData, PostStateDataRef}; +pub use post_state_data::{BundleStateData, BundleStateDataRef}; /// Buffer of not executed blocks. pub mod block_buffer; diff --git a/crates/blockchain-tree/src/post_state_data.rs b/crates/blockchain-tree/src/post_state_data.rs index 324400281a86..b03f25e1556b 100644 --- a/crates/blockchain-tree/src/post_state_data.rs +++ b/crates/blockchain-tree/src/post_state_data.rs @@ -1,14 +1,14 @@ //! Substate for blockchain trees use reth_primitives::{BlockHash, BlockNumber, ForkBlock}; -use reth_provider::{post_state::PostState, PostStateDataProvider}; +use reth_provider::{BundleStateDataProvider, BundleStateWithReceipts}; use std::collections::BTreeMap; -/// Structure that bundles references of data needs to implement [`PostStateDataProvider`] +/// Structure that bundles references of data needs to implement [`BundleStateDataProvider`] #[derive(Clone, Debug)] -pub struct PostStateDataRef<'a> { +pub struct BundleStateDataRef<'a> { /// The wrapped state after execution of one or more transactions and/or blocks. - pub state: &'a PostState, + pub state: &'a BundleStateWithReceipts, /// The blocks in the sidechain. pub sidechain_block_hashes: &'a BTreeMap, /// The blocks in the canonical chain. @@ -17,8 +17,8 @@ pub struct PostStateDataRef<'a> { pub canonical_fork: ForkBlock, } -impl<'a> PostStateDataProvider for PostStateDataRef<'a> { - fn state(&self) -> &PostState { +impl<'a> BundleStateDataProvider for BundleStateDataRef<'a> { + fn state(&self) -> &BundleStateWithReceipts { self.state } @@ -36,11 +36,11 @@ impl<'a> PostStateDataProvider for PostStateDataRef<'a> { } } -/// Structure that contains data needs to implement [`PostStateDataProvider`] +/// Structure that contains data needs to implement [`BundleStateDataProvider`] #[derive(Clone, Debug)] -pub struct PostStateData { +pub struct BundleStateData { /// Post state with changes - pub state: PostState, + pub state: BundleStateWithReceipts, /// Parent block hashes needs for evm BLOCKHASH opcode. /// NOTE: it does not mean that all hashes are there but all until finalized are there. 
/// Other hashes can be obtained from provider @@ -49,8 +49,8 @@ pub struct PostStateData { pub canonical_fork: ForkBlock, } -impl PostStateDataProvider for PostStateData { - fn state(&self) -> &PostState { +impl BundleStateDataProvider for BundleStateData { + fn state(&self) -> &BundleStateWithReceipts { &self.state } diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index ab191385ec9a..6b9c6380b818 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -15,8 +15,8 @@ use reth_primitives::{ SealedHeader, }; use reth_provider::{ - BlockchainTreePendingStateProvider, CanonStateSubscriptions, ExecutorFactory, - PostStateDataProvider, + BlockchainTreePendingStateProvider, BundleStateDataProvider, CanonStateSubscriptions, + ExecutorFactory, }; use std::{ collections::{BTreeMap, HashSet}, @@ -181,13 +181,14 @@ impl BlockchainTreeViewer fn pending_block_and_receipts(&self) -> Option<(SealedBlock, Vec)> { let tree = self.tree.read(); let pending_block = tree.pending_block()?.clone(); - let receipts = tree.receipts_by_block_hash(pending_block.hash)?.to_vec(); + let receipts = + tree.receipts_by_block_hash(pending_block.hash)?.into_iter().cloned().collect(); Some((pending_block, receipts)) } fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option> { let tree = self.tree.read(); - Some(tree.receipts_by_block_hash(block_hash)?.to_vec()) + Some(tree.receipts_by_block_hash(block_hash)?.into_iter().cloned().collect()) } } @@ -197,7 +198,7 @@ impl BlockchainTreePendingState fn find_pending_state_provider( &self, block_hash: BlockHash, - ) -> Option> { + ) -> Option> { trace!(target: "blockchain_tree", ?block_hash, "Finding pending state provider"); let provider = self.tree.read().post_state_data(block_hash)?; Some(Box::new(provider)) diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index d5e0f9bb29cc..5e2dbe39a16c 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -210,11 +210,18 @@ pub struct ExecutionConfig { pub max_blocks: Option, /// The maximum amount of state changes to keep in memory before the execution stage commits. pub max_changes: Option, + /// The maximum gas to process before the execution stage commits. 
+ pub max_cumulative_gas: Option, } impl Default for ExecutionConfig { fn default() -> Self { - Self { max_blocks: Some(500_000), max_changes: Some(5_000_000) } + Self { + max_blocks: Some(500_000), + max_changes: Some(5_000_000), + // 50k full blocks of 30M gas + max_cumulative_gas: Some(30_000_000 * 50_000), + } } } diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index f68d1271a7b9..ea6b87f5e49f 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -31,8 +31,14 @@ use reth_primitives::{ Header, ReceiptWithBloom, SealedBlock, SealedHeader, TransactionSigned, EMPTY_OMMER_ROOT, H256, U256, }; -use reth_provider::{BlockReaderIdExt, CanonStateNotificationSender, PostState, StateProvider}; -use reth_revm::executor::Executor; +use reth_provider::{ + BlockExecutor, BlockReaderIdExt, BundleStateWithReceipts, CanonStateNotificationSender, + StateProviderFactory, +}; +use reth_revm::{ + database::StateProviderDatabase, db::states::bundle_state::BundleRetention, + processor::EVMProcessor, State, +}; use reth_transaction_pool::TransactionPool; use std::{ collections::HashMap, @@ -289,41 +295,51 @@ impl StorageInner { header } - /// Executes the block with the given block and senders, on the provided [Executor]. + /// Executes the block with the given block and senders, on the provided [EVMProcessor]. /// /// This returns the poststate from execution and post-block changes, as well as the gas used. - pub(crate) fn execute( + pub(crate) fn execute( &mut self, block: &Block, - executor: &mut Executor, + executor: &mut EVMProcessor<'_>, senders: Vec
<Address>
, - ) -> Result<(PostState, u64), BlockExecutionError> { + ) -> Result<(BundleStateWithReceipts, u64), BlockExecutionError> { trace!(target: "consensus::auto", transactions=?&block.body, "executing transactions"); - let (post_state, gas_used) = + let (receipts, gas_used) = executor.execute_transactions(block, U256::ZERO, Some(senders))?; - // apply post block changes - let post_state = executor.apply_post_block_changes(block, U256::ZERO, post_state)?; + // Save receipts. + executor.save_receipts(receipts)?; + + // add post execution state change + // Withdrawals, rewards etc. + executor.apply_post_execution_state_change(block, U256::ZERO)?; + + // merge transitions + executor.db_mut().merge_transitions(BundleRetention::Reverts); - Ok((post_state, gas_used)) + // apply post block changes + Ok((executor.take_output_state(), gas_used)) } - /// Fills in the post-execution header fields based on the given PostState and gas used. + /// Fills in the post-execution header fields based on the given BundleState and gas used. /// In doing this, the state root is calculated and the final header is returned. - pub(crate) fn complete_header( + pub(crate) fn complete_header( &self, mut header: Header, - post_state: &PostState, - executor: &mut Executor, + bundle_state: &BundleStateWithReceipts, + client: &S, gas_used: u64, - ) -> Header { - let receipts = post_state.receipts(header.number); + ) -> Result { + let receipts = bundle_state.receipts_by_block(header.number); header.receipts_root = if receipts.is_empty() { EMPTY_RECEIPTS } else { - let receipts_with_bloom = - receipts.iter().map(|r| r.clone().into()).collect::>(); + let receipts_with_bloom = receipts + .iter() + .map(|r| (*r).clone().expect("receipts have not been pruned").into()) + .collect::>(); header.logs_bloom = receipts_with_bloom.iter().fold(Bloom::zero(), |bloom, r| bloom | r.bloom); proofs::calculate_receipt_root(&receipts_with_bloom) @@ -332,21 +348,25 @@ impl StorageInner { header.gas_used = gas_used; // calculate the state root - let state_root = executor.db().db.0.state_root(post_state.clone()).unwrap(); + let state_root = client + .latest() + .map_err(|_| BlockExecutionError::ProviderError)? + .state_root(bundle_state.clone()) + .unwrap(); header.state_root = state_root; - header + Ok(header) } - /// Builds and executes a new block with the given transactions, on the provided [Executor]. + /// Builds and executes a new block with the given transactions, on the provided [EVMProcessor]. /// /// This returns the header of the executed block, as well as the poststate from execution. 
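// `complete_header` above now reads receipts out of the bundle state, where
// each entry is an Option because receipts may have been pruned; a condensed
// sketch of its receipts-root step, mirroring the diff's expect():
fn receipts_root_for_header(receipts: &[Option<Receipt>]) -> H256 {
    if receipts.is_empty() {
        EMPTY_RECEIPTS
    } else {
        let with_bloom: Vec<ReceiptWithBloom> =
            receipts.iter().map(|r| r.clone().expect("receipt not pruned").into()).collect();
        proofs::calculate_receipt_root(&with_bloom)
    }
}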
- pub(crate) fn build_and_execute( + pub(crate) fn build_and_execute( &mut self, transactions: Vec, - executor: &mut Executor, + client: &impl StateProviderFactory, chain_spec: Arc, - ) -> Result<(SealedHeader, PostState), BlockExecutionError> { - let header = self.build_header_template(&transactions, chain_spec); + ) -> Result<(SealedHeader, BundleStateWithReceipts), BlockExecutionError> { + let header = self.build_header_template(&transactions, chain_spec.clone()); let block = Block { header, body: transactions, ommers: vec![], withdrawals: None }; @@ -356,15 +376,21 @@ impl StorageInner { trace!(target: "consensus::auto", transactions=?&block.body, "executing transactions"); // now execute the block - let (post_state, gas_used) = self.execute(&block, executor, senders)?; + let db = State::builder() + .with_database_boxed(Box::new(StateProviderDatabase::new(client.latest().unwrap()))) + .with_bundle_update() + .build(); + let mut executor = EVMProcessor::new_with_state(chain_spec, db); + + let (bundle_state, gas_used) = self.execute(&block, &mut executor, senders)?; let Block { header, body, .. } = block; let body = BlockBody { transactions: body, ommers: vec![], withdrawals: None }; - trace!(target: "consensus::auto", ?post_state, ?header, ?body, "executed block, calculating state root and completing header"); + trace!(target: "consensus::auto", ?bundle_state, ?header, ?body, "executed block, calculating state root and completing header"); // fill in the rest of the fields - let header = self.complete_header(header, &post_state, executor, gas_used); + let header = self.complete_header(header, &bundle_state, client, gas_used)?; trace!(target: "consensus::auto", root=?header.state_root, ?body, "calculated root"); @@ -374,6 +400,6 @@ impl StorageInner { // set new header with hash that should have been updated by insert_new_block let new_header = header.seal(self.best_hash); - Ok((new_header, post_state)) + Ok((new_header, bundle_state)) } } diff --git a/crates/consensus/auto-seal/src/task.rs b/crates/consensus/auto-seal/src/task.rs index 4abdd19b12b1..2ace064890e2 100644 --- a/crates/consensus/auto-seal/src/task.rs +++ b/crates/consensus/auto-seal/src/task.rs @@ -4,10 +4,6 @@ use reth_beacon_consensus::{BeaconEngineMessage, ForkchoiceStatus}; use reth_interfaces::consensus::ForkchoiceState; use reth_primitives::{Block, ChainSpec, IntoRecoveredTransaction, SealedBlockWithSenders}; use reth_provider::{CanonChainTracker, CanonStateNotificationSender, Chain, StateProviderFactory}; -use reth_revm::{ - database::{State, SubState}, - executor::Executor, -}; use reth_stages::PipelineEvent; use reth_transaction_pool::{TransactionPool, ValidPoolTransaction}; use std::{ @@ -127,13 +123,8 @@ where }) .unzip(); - // execute the new block - let substate = SubState::new(State::new(client.latest().unwrap())); - let mut executor = Executor::new(Arc::clone(&chain_spec), substate); - - match storage.build_and_execute(transactions.clone(), &mut executor, chain_spec) - { - Ok((new_header, post_state)) => { + match storage.build_and_execute(transactions.clone(), &client, chain_spec) { + Ok((new_header, bundle_state)) => { // clear all transactions from pool pool.remove_transactions( transactions.iter().map(|tx| tx.hash()).collect(), @@ -202,7 +193,7 @@ where debug!(target: "consensus::auto", header=?sealed_block_with_senders.hash(), "sending block notification"); let chain = - Arc::new(Chain::new(vec![(sealed_block_with_senders, post_state)])); + Arc::new(Chain::new(vec![sealed_block_with_senders], 
bundle_state)); // send block notification let _ = canon_state_notification diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 03d164d7dd36..71a3577f26f7 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -2057,7 +2057,7 @@ mod tests { } fn insert_blocks<'a, DB: Database>( - db: &DB, + db: DB, chain: Arc, mut blocks: impl Iterator, ) { diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index 24b4d04a3b4c..2042dd14293d 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -402,7 +402,7 @@ mod tests { constants::ETHEREUM_BLOCK_GAS_LIMIT, stage::StageCheckpoint, BlockBody, ChainSpec, ChainSpecBuilder, Header, SealedHeader, MAINNET, }; - use reth_provider::{test_utils::TestExecutorFactory, PostState}; + use reth_provider::{test_utils::TestExecutorFactory, BundleStateWithReceipts}; use reth_stages::{test_utils::TestStages, ExecOutput, StageError}; use reth_tasks::TokioTaskExecutor; use std::{collections::VecDeque, future::poll_fn, sync::Arc}; @@ -410,7 +410,7 @@ mod tests { struct TestPipelineBuilder { pipeline_exec_outputs: VecDeque>, - executor_results: Vec, + executor_results: Vec, max_block: Option, } @@ -435,7 +435,7 @@ mod tests { /// Set the executor results to use for the test consensus engine. #[allow(dead_code)] - fn with_executor_results(mut self, executor_results: Vec) -> Self { + fn with_executor_results(mut self, executor_results: Vec) -> Self { self.executor_results = executor_results; self } diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index d3959f4495c1..cf2a47c3e9df 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -4,8 +4,7 @@ use crate::{ MIN_BLOCKS_FOR_PIPELINE_RUN, }; use reth_blockchain_tree::{ - config::BlockchainTreeConfig, externals::TreeExternals, post_state::PostState, BlockchainTree, - ShareableBlockchainTree, + config::BlockchainTreeConfig, externals::TreeExternals, BlockchainTree, ShareableBlockchainTree, }; use reth_db::{test_utils::create_test_rw_db, DatabaseEnv}; use reth_downloaders::{ @@ -22,8 +21,8 @@ use reth_interfaces::{ use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::{BlockNumber, ChainSpec, PruneBatchSizes, PruneModes, H256, U256}; use reth_provider::{ - providers::BlockchainProvider, test_utils::TestExecutorFactory, BlockExecutor, ExecutorFactory, - ProviderFactory, StateProvider, + providers::BlockchainProvider, test_utils::TestExecutorFactory, BlockExecutor, + BundleStateWithReceipts, ExecutorFactory, ProviderFactory, PrunableBlockExecutor, }; use reth_prune::Pruner; use reth_revm::Factory; @@ -143,7 +142,7 @@ impl Default for TestPipelineConfig { /// Represents either test executor results, or real executor configuration. enum TestExecutorConfig { /// Test executor results. - Test(Vec), + Test(Vec), /// Real executor configuration. 
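// In the auto-seal task above, the executed block and its execution output
// now travel together as a one-block Chain; a minimal sketch of that
// hand-off, with a plain broadcast channel standing in for the canon-state
// notification sender:
fn notify_canon(
    sender: &tokio::sync::broadcast::Sender<Arc<Chain>>,
    block: SealedBlockWithSenders,
    state: BundleStateWithReceipts,
) {
    let chain = Arc::new(Chain::new(vec![block], state));
    // receivers may have been dropped; the task ignores the error too
    let _ = sender.send(chain);
}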
Real, } @@ -172,18 +171,17 @@ pub enum EitherBlockExecutor { Right(B), } -impl BlockExecutor for EitherBlockExecutor +impl BlockExecutor for EitherBlockExecutor where - A: BlockExecutor, - B: BlockExecutor, - SP: StateProvider, + A: BlockExecutor, + B: BlockExecutor, { fn execute( &mut self, block: &reth_primitives::Block, total_difficulty: U256, senders: Option>, - ) -> Result { + ) -> Result<(), BlockExecutionError> { match self { EitherBlockExecutor::Left(a) => a.execute(block, total_difficulty, senders), EitherBlockExecutor::Right(b) => b.execute(block, total_difficulty, senders), @@ -195,7 +193,7 @@ where block: &reth_primitives::Block, total_difficulty: U256, senders: Option>, - ) -> Result { + ) -> Result<(), BlockExecutionError> { match self { EitherBlockExecutor::Left(a) => { a.execute_and_verify_receipt(block, total_difficulty, senders) @@ -205,6 +203,47 @@ where } } } + + fn take_output_state(&mut self) -> BundleStateWithReceipts { + match self { + EitherBlockExecutor::Left(a) => a.take_output_state(), + EitherBlockExecutor::Right(b) => b.take_output_state(), + } + } + + fn stats(&self) -> reth_provider::BlockExecutorStats { + match self { + EitherBlockExecutor::Left(a) => a.stats(), + EitherBlockExecutor::Right(b) => b.stats(), + } + } + + fn size_hint(&self) -> Option { + match self { + EitherBlockExecutor::Left(a) => a.size_hint(), + EitherBlockExecutor::Right(b) => b.size_hint(), + } + } +} + +impl PrunableBlockExecutor for EitherBlockExecutor +where + B: PrunableBlockExecutor, + A: PrunableBlockExecutor, +{ + fn set_prune_modes(&mut self, prune_modes: PruneModes) { + match self { + EitherBlockExecutor::Left(a) => a.set_prune_modes(prune_modes), + EitherBlockExecutor::Right(b) => b.set_prune_modes(prune_modes), + } + } + + fn set_tip(&mut self, tip: BlockNumber) { + match self { + EitherBlockExecutor::Left(a) => a.set_tip(tip), + EitherBlockExecutor::Right(b) => b.set_tip(tip), + } + } } impl ExecutorFactory for EitherExecutorFactory @@ -212,8 +251,6 @@ where A: ExecutorFactory, B: ExecutorFactory, { - type Executor = EitherBlockExecutor, B::Executor>; - fn chain_spec(&self) -> &ChainSpec { match self { EitherExecutorFactory::Left(a) => a.chain_spec(), @@ -221,10 +258,13 @@ where } } - fn with_sp(&self, sp: SP) -> Self::Executor { + fn with_state<'a, SP: reth_provider::StateProvider + 'a>( + &'a self, + sp: SP, + ) -> Box { match self { - EitherExecutorFactory::Left(a) => EitherBlockExecutor::Left(a.with_sp(sp)), - EitherExecutorFactory::Right(b) => EitherBlockExecutor::Right(b.with_sp(sp)), + EitherExecutorFactory::Left(a) => a.with_state::<'a, SP>(sp), + EitherExecutorFactory::Right(b) => b.with_state::<'a, SP>(sp), } } } @@ -263,7 +303,7 @@ impl TestConsensusEngineBuilder { } /// Set the executor results to use for the test consensus engine. - pub fn with_executor_results(mut self, executor_results: Vec) -> Self { + pub fn with_executor_results(mut self, executor_results: Vec) -> Self { self.executor_config = TestExecutorConfig::Test(executor_results); self } @@ -343,7 +383,7 @@ where /// Set the executor results to use for the test consensus engine. 
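// EitherBlockExecutor above forwards every trait method to whichever variant
// is live; the same dispatch shape, reduced to a self-contained example on
// Iterator:
enum Either<A, B> {
    Left(A),
    Right(B),
}

impl<A, B, T> Iterator for Either<A, B>
where
    A: Iterator<Item = T>,
    B: Iterator<Item = T>,
{
    type Item = T;

    fn next(&mut self) -> Option<T> {
        match self {
            Either::Left(a) => a.next(),
            Either::Right(b) => b.next(),
        }
    }
}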
#[allow(dead_code)] - pub fn with_executor_results(mut self, executor_results: Vec) -> Self { + pub fn with_executor_results(mut self, executor_results: Vec) -> Self { self.base_config.executor_config = TestExecutorConfig::Test(executor_results); self } diff --git a/crates/consensus/common/src/calc.rs b/crates/consensus/common/src/calc.rs index 304a081e5ce2..10e1409895f8 100644 --- a/crates/consensus/common/src/calc.rs +++ b/crates/consensus/common/src/calc.rs @@ -61,7 +61,7 @@ pub fn base_block_reward( /// // The base block reward is 5 ETH, and the ommer inclusion reward is 1/32th of 5 ETH. /// assert_eq!( /// reward.unwrap(), -/// U256::from(ETH_TO_WEI * 5 + ((ETH_TO_WEI * 5) >> 5)) +/// ETH_TO_WEI * 5 + ((ETH_TO_WEI * 5) >> 5) /// ); /// ``` /// @@ -70,8 +70,8 @@ pub fn base_block_reward( /// - Definition: [Yellow Paper][yp] (page 15, 11.3) /// /// [yp]: https://ethereum.github.io/yellowpaper/paper.pdf -pub fn block_reward(base_block_reward: u128, ommers: usize) -> U256 { - U256::from(base_block_reward + (base_block_reward >> 5) * ommers as u128) +pub fn block_reward(base_block_reward: u128, ommers: usize) -> u128 { + base_block_reward + (base_block_reward >> 5) * ommers as u128 } /// Calculate the reward for an ommer. @@ -98,8 +98,8 @@ pub fn ommer_reward( base_block_reward: u128, block_number: BlockNumber, ommer_block_number: BlockNumber, -) -> U256 { - U256::from(((8 + ommer_block_number - block_number) as u128 * base_block_reward) >> 3) +) -> u128 { + ((8 + ommer_block_number - block_number) as u128 * base_block_reward) >> 3 } #[cfg(test)] @@ -139,7 +139,7 @@ mod tests { ]; for (num_ommers, expected_reward) in cases { - assert_eq!(block_reward(base_reward, num_ommers), U256::from(expected_reward)); + assert_eq!(block_reward(base_reward, num_ommers), expected_reward); } } } diff --git a/crates/interfaces/src/blockchain_tree/error.rs b/crates/interfaces/src/blockchain_tree/error.rs index edcdb569baef..83bdf0023802 100644 --- a/crates/interfaces/src/blockchain_tree/error.rs +++ b/crates/interfaces/src/blockchain_tree/error.rs @@ -189,6 +189,7 @@ impl InsertBlockErrorKind { } // these are internal errors, not caused by an invalid block BlockExecutionError::ProviderError | + BlockExecutionError::Pruning(_) | BlockExecutionError::CanonicalRevert { .. } | BlockExecutionError::CanonicalCommit { .. } | BlockExecutionError::BlockHashNotFoundInChain { .. 
} | diff --git a/crates/interfaces/src/executor.rs b/crates/interfaces/src/executor.rs index f57d26fb58d4..cbddf4017e20 100644 --- a/crates/interfaces/src/executor.rs +++ b/crates/interfaces/src/executor.rs @@ -1,4 +1,4 @@ -use reth_primitives::{BlockHash, BlockNumHash, Bloom, H256}; +use reth_primitives::{BlockHash, BlockNumHash, Bloom, PrunePartError, H256}; use thiserror::Error; /// Transaction validation errors @@ -9,6 +9,8 @@ pub enum BlockValidationError { EVM { hash: H256, message: String }, #[error("Failed to recover sender for transaction")] SenderRecoveryError, + #[error("Incrementing balance in post execution failed")] + IncrementBalanceFailed, #[error("Receipt root {got:?} is different than expected {expected:?}.")] ReceiptRootDiff { got: H256, expected: H256 }, #[error("Header bloom filter {got:?} is different than expected {expected:?}.")] @@ -18,8 +20,8 @@ pub enum BlockValidationError { transaction_gas_limit: u64, block_available_gas: u64, }, - #[error("Block gas used {got} is different from expected gas used {expected}.")] - BlockGasUsed { got: u64, expected: u64 }, + #[error("Block gas used {got} is different from expected gas used {expected}.\nGas spent by each transaction: {gas_spent_by_tx:?}\n")] + BlockGasUsed { got: u64, expected: u64, gas_spent_by_tx: Vec<(u64, u64)> }, #[error("Block {hash:?} is pre merge")] BlockPreMerge { hash: H256 }, #[error("Missing total difficulty")] @@ -32,6 +34,8 @@ pub enum BlockValidationError { pub enum BlockExecutionError { #[error(transparent)] Validation(#[from] BlockValidationError), + #[error(transparent)] + Pruning(#[from] PrunePartError), // === misc provider error === #[error("Provider error")] diff --git a/crates/payload/basic/Cargo.toml b/crates/payload/basic/Cargo.toml index 5af296de3e3c..c6d505b6d0ba 100644 --- a/crates/payload/basic/Cargo.toml +++ b/crates/payload/basic/Cargo.toml @@ -17,6 +17,7 @@ reth-rlp.workspace = true reth-provider.workspace = true reth-payload-builder.workspace = true reth-tasks.workspace = true +reth-interfaces.workspace = true ## ethereum revm.workspace = true diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 4b125238a2f8..c209e985877e 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -16,6 +16,7 @@ use crate::metrics::PayloadBuilderMetrics; use futures_core::ready; use futures_util::FutureExt; +use reth_interfaces::Error; use reth_payload_builder::{ database::CachedReads, error::PayloadBuilderError, BuiltPayload, KeepPayloadJobAlive, PayloadBuilderAttributes, PayloadJob, PayloadJobGenerator, @@ -30,21 +31,18 @@ use reth_primitives::{ proofs, Block, BlockNumberOrTag, ChainSpec, Header, IntoRecoveredTransaction, Receipt, SealedBlock, Withdrawal, EMPTY_OMMER_ROOT, H256, U256, }; -use reth_provider::{BlockReaderIdExt, BlockSource, PostState, StateProviderFactory}; +use reth_provider::{BlockReaderIdExt, BlockSource, BundleStateWithReceipts, StateProviderFactory}; use reth_revm::{ - database::{State, SubState}, - env::tx_env_with_recovered, - executor::{ - commit_state_changes, increment_account_balance, post_block_withdrawals_balance_increments, - }, - into_reth_log, + database::StateProviderDatabase, env::tx_env_with_recovered, into_reth_log, + state_change::post_block_withdrawals_balance_increments, }; use reth_rlp::Encodable; use reth_tasks::TaskSpawner; use reth_transaction_pool::TransactionPool; use revm::{ - db::{CacheDB, DatabaseRef}, + db::states::bundle_state::BundleRetention, primitives::{BlockEnv, CfgEnv, EVMError, Env, 
InvalidTransaction, ResultAndState}, + Database, DatabaseCommit, State, }; use std::{ future::Future, @@ -640,6 +638,10 @@ where { let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args; + let state_provider = client.state_by_block_hash(config.parent_block.hash)?; + let state = StateProviderDatabase::new(&state_provider); + let mut db = + State::builder().with_database_ref(cached_reads.as_db(&state)).with_bundle_update().build(); let PayloadConfig { initialized_block_env, initialized_cfg, @@ -650,11 +652,6 @@ where } = config; debug!(parent_hash=?parent_block.hash, parent_number=parent_block.number, "building new payload"); - - let state = State::new(client.state_by_block_hash(parent_block.hash)?); - let mut db = CacheDB::new(cached_reads.as_db(&state)); - let mut post_state = PostState::default(); - let mut cumulative_gas_used = 0; let mut sum_blob_gas_used = 0; let block_gas_limit: u64 = initialized_block_env.gas_limit.try_into().unwrap_or(u64::MAX); @@ -667,6 +664,7 @@ where let block_number = initialized_block_env.number.to::(); + let mut receipts = Vec::new(); while let Some(pool_tx) = best_txs.next() { // ensure we still have capacity for this transaction if cumulative_gas_used + pool_tx.gas_limit() > block_gas_limit { @@ -741,23 +739,19 @@ where }; let gas_used = result.gas_used(); - // commit changes - commit_state_changes(&mut db, &mut post_state, block_number, state, true); + db.commit(state); // add gas used by the transaction to cumulative gas used, before creating the receipt cumulative_gas_used += gas_used; // Push transaction changeset and calculate header bloom filter for receipt. - post_state.add_receipt( - block_number, - Receipt { - tx_type: tx.tx_type(), - success: result.is_success(), - cumulative_gas_used, - logs: result.logs().into_iter().map(into_reth_log).collect(), - }, - ); + receipts.push(Some(Receipt { + tx_type: tx.tx_type(), + success: result.is_success(), + cumulative_gas_used, + logs: result.logs().into_iter().map(into_reth_log).collect(), + })); // update add to total fees let miner_fee = @@ -774,20 +768,18 @@ where return Ok(BuildOutcome::Aborted { fees: total_fees, cached_reads }) } - let WithdrawalsOutcome { withdrawals_root, withdrawals } = commit_withdrawals( - &mut db, - &mut post_state, - &chain_spec, - block_number, - attributes.timestamp, - attributes.withdrawals, - )?; + let WithdrawalsOutcome { withdrawals_root, withdrawals } = + commit_withdrawals(&mut db, &chain_spec, attributes.timestamp, attributes.withdrawals)?; + + // merge all transitions into bundle state. 
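// The payload-building loop above no longer threads a PostState through each
// transaction: state changes are committed straight into the revm State and
// receipts are collected in a Vec. A condensed sketch, where `execute_tx` is
// a hypothetical helper returning (gas_used, success, logs) after committing:
fn collect_receipts(txs: Vec<TransactionSignedEcRecovered>) -> Vec<Option<Receipt>> {
    let mut cumulative_gas_used = 0;
    let mut receipts = Vec::new();
    for tx in txs {
        let (gas_used, success, logs) = execute_tx(&tx); // hypothetical helper
        cumulative_gas_used += gas_used;
        receipts.push(Some(Receipt {
            tx_type: tx.tx_type(),
            success,
            cumulative_gas_used,
            logs,
        }));
    }
    receipts
}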
+ db.merge_transitions(BundleRetention::PlainState); - let receipts_root = post_state.receipts_root(block_number); - let logs_bloom = post_state.logs_bloom(block_number); + let bundle = BundleStateWithReceipts::new(db.take_bundle(), vec![receipts], block_number); + let receipts_root = bundle.receipts_root_slow(block_number).expect("Number is in range"); + let logs_bloom = bundle.block_logs_bloom(block_number).expect("Number is in range"); // calculate the state root - let state_root = state.state().state_root(post_state)?; + let state_root = state_provider.state_root(bundle)?; // create the block header let transactions_root = proofs::calculate_transaction_root(&executed_txs); @@ -844,6 +836,7 @@ where let block = Block { header, body: executed_txs, ommers: vec![], withdrawals }; let sealed_block = block.seal_slow(); + let mut payload = BuiltPayload::new(attributes.id, sealed_block, total_fees); if !blob_sidecars.is_empty() { @@ -874,24 +867,24 @@ where debug!(parent_hash=?parent_block.hash, parent_number=parent_block.number, "building empty payload"); let state = client.state_by_block_hash(parent_block.hash)?; - let mut db = SubState::new(State::new(state)); - let mut post_state = PostState::default(); + let mut db = State::builder() + .with_database_boxed(Box::new(StateProviderDatabase::new(&state))) + .with_bundle_update() + .build(); let base_fee = initialized_block_env.basefee.to::(); let block_number = initialized_block_env.number.to::(); let block_gas_limit: u64 = initialized_block_env.gas_limit.try_into().unwrap_or(u64::MAX); - let WithdrawalsOutcome { withdrawals_root, withdrawals } = commit_withdrawals( - &mut db, - &mut post_state, - &chain_spec, - block_number, - attributes.timestamp, - attributes.withdrawals, - )?; + let WithdrawalsOutcome { withdrawals_root, withdrawals } = + commit_withdrawals(&mut db, &chain_spec, attributes.timestamp, attributes.withdrawals)?; + + // merge transition, this will apply the withdrawal balance changes. + db.merge_transitions(BundleRetention::PlainState); // calculate the state root - let state_root = db.db.0.state_root(post_state)?; + let bundle_state = BundleStateWithReceipts::new(db.take_bundle(), vec![], block_number); + let state_root = state.state_root(bundle_state)?; let header = Header { parent_hash: parent_block.hash, @@ -924,6 +917,7 @@ where /// Represents the outcome of committing withdrawals to the runtime database and post state. /// Pre-shanghai these are `None` values. +#[derive(Default)] struct WithdrawalsOutcome { withdrawals: Option>, withdrawals_root: Option, @@ -940,23 +934,17 @@ impl WithdrawalsOutcome { } } -/// Executes the withdrawals and commits them to the _runtime_ Database and PostState. +/// Executes the withdrawals and commits them to the _runtime_ Database and BundleState. /// /// Returns the withdrawals root. 
/// /// Returns `None` values pre shanghai -#[allow(clippy::too_many_arguments)] -fn commit_withdrawals( - db: &mut CacheDB, - post_state: &mut PostState, +fn commit_withdrawals>( + db: &mut State, chain_spec: &ChainSpec, - block_number: u64, timestamp: u64, withdrawals: Vec, -) -> Result::Error> -where - DB: DatabaseRef, -{ +) -> Result { if !chain_spec.is_shanghai_activated_at_timestamp(timestamp) { return Ok(WithdrawalsOutcome::pre_shanghai()) } @@ -968,9 +956,7 @@ where let balance_increments = post_block_withdrawals_balance_increments(chain_spec, timestamp, &withdrawals); - for (address, increment) in balance_increments { - increment_account_balance(db, post_state, block_number, address, increment)?; - } + db.increment_balances(balance_increments)?; let withdrawals_root = proofs::calculate_withdrawals_root(&withdrawals); diff --git a/crates/payload/builder/src/database.rs b/crates/payload/builder/src/database.rs index c69f7bafaae6..0de967c57de0 100644 --- a/crates/payload/builder/src/database.rs +++ b/crates/payload/builder/src/database.rs @@ -12,24 +12,22 @@ use std::{ /// A container type that caches reads from an underlying [DatabaseRef]. /// -/// This is intended to be used in conjunction with [CacheDB](reth_revm_primitives::db::CacheDB) +/// This is intended to be used in conjunction with [State](reth_revm_primitives::db::State) /// during payload building which repeatedly accesses the same data. /// /// # Example /// /// ``` -/// use revm_primitives::db::DatabaseRef; /// use reth_payload_builder::database::CachedReads; -/// use reth_revm_primitives::db::CacheDB; +/// use reth_revm_primitives::db::State; +/// use revm_primitives::db::DatabaseRef; /// /// fn build_payload(db: DB) { -/// let mut cached_reads = CachedReads::default(); -/// let db_ref = cached_reads.as_db(db); -/// -/// // this is `Database` and can be used to build a payload, it never writes to `CachedReads` or the underlying database, but all reads from the underlying database are cached in `CachedReads`. -/// // Subsequent payload build attempts can use cached reads and avoid hitting the underlying database. -/// let db = CacheDB::new(db_ref); -/// +/// let mut cached_reads = CachedReads::default(); +/// let db_ref = cached_reads.as_db(db); +/// // this is `Database` and can be used to build a payload, it never writes to `CachedReads` or the underlying database, but all reads from the underlying database are cached in `CachedReads`. +/// // Subsequent payload build attempts can use cached reads and avoid hitting the underlying database. +/// let db = State::builder().with_database_ref(db_ref).build(); /// } /// ``` #[derive(Debug, Clone, Default)] @@ -121,7 +119,7 @@ impl<'a, DB: DatabaseRef> Database for CachedReadsDbMut<'a, DB> { /// A [DatabaseRef] that caches reads inside [CachedReads]. /// /// This is intended to be used as the [DatabaseRef] for -/// [CacheDB](reth_revm_primitives::db::CacheDB) for repeated payload build jobs. +/// [State](reth_revm_primitives::db::State) for repeated payload build jobs. #[derive(Debug)] pub struct CachedReadsDBRef<'a, DB> { inner: RefCell>, diff --git a/crates/payload/builder/src/payload.rs b/crates/payload/builder/src/payload.rs index 65e7b0ad2fa0..9170bc5343a6 100644 --- a/crates/payload/builder/src/payload.rs +++ b/crates/payload/builder/src/payload.rs @@ -159,12 +159,10 @@ impl PayloadBuilderAttributes { /// NOTE: This is only intended for beacon consensus (after merge). 
pub fn cfg_and_block_env(&self, chain_spec: &ChainSpec, parent: &Header) -> (CfgEnv, BlockEnv) { // configure evm env based on parent block - let cfg = CfgEnv { - chain_id: U256::from(chain_spec.chain().id()), - // ensure we're not missing any timestamp based hardforks - spec_id: revm_spec_by_timestamp_after_merge(chain_spec, self.timestamp), - ..Default::default() - }; + let mut cfg = CfgEnv::default(); + cfg.chain_id = chain_spec.chain().id(); + // ensure we're not missing any timestamp based hardforks + cfg.spec_id = revm_spec_by_timestamp_after_merge(chain_spec, self.timestamp); let block_env = BlockEnv { number: U256::from(parent.number + 1), diff --git a/crates/primitives/src/account.rs b/crates/primitives/src/account.rs index 708ebd186787..876c8e30c325 100644 --- a/crates/primitives/src/account.rs +++ b/crates/primitives/src/account.rs @@ -48,9 +48,6 @@ impl Account { /// Bytecode for an account. /// /// A wrapper around [`revm::primitives::Bytecode`][RevmBytecode] with encoding/decoding support. -/// -/// Note: Upon decoding bytecode from the database, you *should* set the code hash using -/// [`Self::with_code_hash`]. #[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)] pub struct Bytecode(pub RevmBytecode); @@ -61,18 +58,6 @@ impl Bytecode { pub fn new_raw(bytes: Bytes) -> Self { Self(RevmBytecode::new_raw(bytes)) } - - /// Create new bytecode from raw bytes and its hash. - pub fn new_raw_with_hash(bytes: Bytes, code_hash: H256) -> Self { - let revm_bytecode = unsafe { RevmBytecode::new_raw_with_hash(bytes, code_hash) }; - Self(revm_bytecode) - } - - /// Set the hash of the inner bytecode. - pub fn with_code_hash(mut self, code_hash: H256) -> Self { - self.0.hash = code_hash; - self - } } impl Deref for Bytecode { @@ -121,15 +106,10 @@ impl Compact for Bytecode { let decoded = match variant { 0 => Bytecode(RevmBytecode::new_raw(bytes)), 1 => Bytecode(unsafe { - RevmBytecode::new_checked( - bytes, - buf.read_u64::().unwrap() as usize, - None, - ) + RevmBytecode::new_checked(bytes, buf.read_u64::().unwrap() as usize) }), 2 => Bytecode(RevmBytecode { bytecode: bytes, - hash: KECCAK_EMPTY, state: BytecodeState::Analysed { len: buf.read_u64::().unwrap() as usize, jump_map: JumpMap::from_slice(buf), diff --git a/crates/primitives/src/bits.rs b/crates/primitives/src/bits.rs index 2ffd8f3d47ea..daa377b4710e 100644 --- a/crates/primitives/src/bits.rs +++ b/crates/primitives/src/bits.rs @@ -1,4 +1,6 @@ //! Fixed hash types +#![allow(clippy::non_canonical_clone_impl)] + use bytes::Buf; use derive_more::{AsRef, Deref}; use fixed_hash::construct_fixed_hash; diff --git a/crates/primitives/src/bloom.rs b/crates/primitives/src/bloom.rs index b973cef334c6..947a2a1286f5 100644 --- a/crates/primitives/src/bloom.rs +++ b/crates/primitives/src/bloom.rs @@ -2,6 +2,8 @@ //! //! Adapted from #![allow(missing_docs)] +#![allow(clippy::non_canonical_clone_impl)] + use crate::{impl_fixed_hash_type, keccak256, Log}; use bytes::Buf; use core::{mem, ops}; diff --git a/crates/primitives/src/proofs.rs b/crates/primitives/src/proofs.rs index 95fd279d12b5..cdfef6bb54e6 100644 --- a/crates/primitives/src/proofs.rs +++ b/crates/primitives/src/proofs.rs @@ -99,7 +99,7 @@ pub fn calculate_receipt_root(receipts: &[ReceiptWithBloom]) -> H256 { /// Calculates the receipt root for a header for the reference type of [ReceiptWithBloom]. /// /// NOTE: Prefer [calculate_receipt_root] if you have log blooms memoized. 
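// account.rs above drops the stored code hash because revm's Bytecode no
// longer carries one; the hash is derived on demand instead. A sketch,
// assuming `hash_slow` is a keccak over the original bytes as used in the
// init.rs hunk earlier in this patch:
fn bytecode_with_hash(code: Bytes) -> (Bytecode, H256) {
    let bytecode = Bytecode::new_raw(code);
    let hash = bytecode.hash_slow();
    (bytecode, hash)
}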
-pub fn calculate_receipt_root_ref(receipts: &[T]) -> H256 +pub fn calculate_receipt_root_ref(receipts: &[&T]) -> H256 where for<'a> ReceiptWithBloomRef<'a>: From<&'a T>, { diff --git a/crates/primitives/src/prune/part.rs b/crates/primitives/src/prune/part.rs index 03b64b916309..7d1139c25728 100644 --- a/crates/primitives/src/prune/part.rs +++ b/crates/primitives/src/prune/part.rs @@ -21,7 +21,7 @@ pub enum PrunePart { } /// PrunePart error type. -#[derive(Debug, Error, PartialEq, Eq)] +#[derive(Debug, Error, PartialEq, Eq, Clone)] pub enum PrunePartError { /// Invalid configuration of a prune part. #[error("The configuration provided for {0} is invalid.")] diff --git a/crates/primitives/src/storage.rs b/crates/primitives/src/storage.rs index 07434f4b6fc8..89879aa49e6d 100644 --- a/crates/primitives/src/storage.rs +++ b/crates/primitives/src/storage.rs @@ -14,6 +14,13 @@ pub struct StorageEntry { pub value: U256, } +impl StorageEntry { + /// Create a new StorageEntry with given key and value. + pub fn new(key: H256, value: U256) -> Self { + Self { key, value } + } +} + impl From<(H256, U256)> for StorageEntry { fn from((key, value): (H256, U256)) -> Self { StorageEntry { key, value } diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 4d2e5d06c32c..f479ec6f2b3c 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1083,7 +1083,7 @@ impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned { } /// Signed transaction with recovered signer. -#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref, Default)] +#[derive(Debug, Clone, PartialEq, Hash, Eq, AsRef, Deref, Default)] pub struct TransactionSignedEcRecovered { /// Signer of the transaction signer: Address, diff --git a/crates/primitives/src/withdrawal.rs b/crates/primitives/src/withdrawal.rs index 098110803c47..6df6d73ecb75 100644 --- a/crates/primitives/src/withdrawal.rs +++ b/crates/primitives/src/withdrawal.rs @@ -1,8 +1,7 @@ -use std::mem; - -use crate::{constants::GWEI_TO_WEI, serde_helper::u64_hex, Address, U256}; +use crate::{constants::GWEI_TO_WEI, serde_helper::u64_hex, Address}; use reth_codecs::{main_codec, Compact}; use reth_rlp::{RlpDecodable, RlpEncodable}; +use std::mem; /// Withdrawal represents a validator withdrawal from the consensus layer. #[main_codec] @@ -23,8 +22,8 @@ pub struct Withdrawal { impl Withdrawal { /// Return the withdrawal amount in wei. - pub fn amount_wei(&self) -> U256 { - U256::from(self.amount) * U256::from(GWEI_TO_WEI) + pub fn amount_wei(&self) -> u128 { + self.amount as u128 * GWEI_TO_WEI as u128 } /// Calculate a heuristic for the in-memory size of the [Withdrawal]. 
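The `Withdrawal::amount_wei` change above trades `U256` math for widening u128 arithmetic. A standalone mirror of the new computation, assuming reth's `GWEI_TO_WEI` constant is 10^9:

```rust
/// Gwei-to-wei factor; assumed to match reth's `GWEI_TO_WEI` (10^9).
const GWEI_TO_WEI: u64 = 1_000_000_000;

/// Mirror of the patched `amount_wei`: widen both operands to u128 before
/// multiplying, so the product cannot overflow.
fn amount_wei(amount_gwei: u64) -> u128 {
    amount_gwei as u128 * GWEI_TO_WEI as u128
}

fn main() {
    // Consensus-layer withdrawals are denominated in gwei; 32 ETH is 32e9 gwei.
    assert_eq!(amount_wei(32_000_000_000), 32_000_000_000_000_000_000u128);
    // Even the largest possible amount stays in range: u64::MAX * 10^9 is
    // roughly 1.8e28, far below u128::MAX (about 3.4e38).
    let _ = amount_wei(u64::MAX);
}
```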
diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 037a717d41af..c6032f436bd1 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -21,8 +21,4 @@ reth-consensus-common = { path = "../consensus/common" } revm.workspace = true # common -tracing.workspace = true - -[dev-dependencies] -reth-rlp.workspace = true -once_cell = "1.17.0" +tracing.workspace = true \ No newline at end of file diff --git a/crates/revm/revm-inspectors/src/access_list.rs b/crates/revm/revm-inspectors/src/access_list.rs index efe61cc23769..49b1913a43d5 100644 --- a/crates/revm/revm-inspectors/src/access_list.rs +++ b/crates/revm/revm-inspectors/src/access_list.rs @@ -66,7 +66,6 @@ where &mut self, interpreter: &mut Interpreter, _data: &mut EVMData<'_, DB>, - _is_static: bool, ) -> InstructionResult { match interpreter.current_opcode() { opcode::SLOAD | opcode::SSTORE => { diff --git a/crates/revm/revm-inspectors/src/stack/maybe_owned.rs b/crates/revm/revm-inspectors/src/stack/maybe_owned.rs index 897546f16723..1449093e2480 100644 --- a/crates/revm/revm-inspectors/src/stack/maybe_owned.rs +++ b/crates/revm/revm-inspectors/src/stack/maybe_owned.rs @@ -1,3 +1,4 @@ +use reth_primitives::U256; use revm::{ interpreter::{CallInputs, CreateInputs, Gas, InstructionResult, Interpreter}, primitives::{db::Database, Bytes, B160, B256}, @@ -72,11 +73,10 @@ where &mut self, interp: &mut Interpreter, data: &mut EVMData<'_, DB>, - is_static: bool, ) -> InstructionResult { match self { MaybeOwnedInspector::Owned(insp) => { - return insp.borrow_mut().initialize_interp(interp, data, is_static) + return insp.borrow_mut().initialize_interp(interp, data) } MaybeOwnedInspector::Stacked(_) => {} } @@ -84,16 +84,9 @@ where InstructionResult::Continue } - fn step( - &mut self, - interp: &mut Interpreter, - data: &mut EVMData<'_, DB>, - is_static: bool, - ) -> InstructionResult { + fn step(&mut self, interp: &mut Interpreter, data: &mut EVMData<'_, DB>) -> InstructionResult { match self { - MaybeOwnedInspector::Owned(insp) => { - return insp.borrow_mut().step(interp, data, is_static) - } + MaybeOwnedInspector::Owned(insp) => return insp.borrow_mut().step(interp, data), MaybeOwnedInspector::Stacked(_) => {} } @@ -119,12 +112,11 @@ where &mut self, interp: &mut Interpreter, data: &mut EVMData<'_, DB>, - is_static: bool, eval: InstructionResult, ) -> InstructionResult { match self { MaybeOwnedInspector::Owned(insp) => { - return insp.borrow_mut().step_end(interp, data, is_static, eval) + return insp.borrow_mut().step_end(interp, data, eval) } MaybeOwnedInspector::Stacked(_) => {} } @@ -136,12 +128,9 @@ where &mut self, data: &mut EVMData<'_, DB>, inputs: &mut CallInputs, - is_static: bool, ) -> (InstructionResult, Gas, Bytes) { match self { - MaybeOwnedInspector::Owned(insp) => { - return insp.borrow_mut().call(data, inputs, is_static) - } + MaybeOwnedInspector::Owned(insp) => return insp.borrow_mut().call(data, inputs), MaybeOwnedInspector::Stacked(_) => {} } @@ -155,11 +144,10 @@ where remaining_gas: Gas, ret: InstructionResult, out: Bytes, - is_static: bool, ) -> (InstructionResult, Gas, Bytes) { match self { MaybeOwnedInspector::Owned(insp) => { - return insp.borrow_mut().call_end(data, inputs, remaining_gas, ret, out, is_static) + return insp.borrow_mut().call_end(data, inputs, remaining_gas, ret, out) } MaybeOwnedInspector::Stacked(_) => {} } @@ -198,10 +186,10 @@ where (ret, address, remaining_gas, out) } - fn selfdestruct(&mut self, contract: B160, target: B160) { + fn selfdestruct(&mut self, contract: B160, 
target: B160, value: U256) { match self { MaybeOwnedInspector::Owned(insp) => { - return insp.borrow_mut().selfdestruct(contract, target) + return insp.borrow_mut().selfdestruct(contract, target, value) } MaybeOwnedInspector::Stacked(_) => {} } diff --git a/crates/revm/revm-inspectors/src/stack/mod.rs b/crates/revm/revm-inspectors/src/stack/mod.rs index 7144c269d360..482e09d4ce95 100644 --- a/crates/revm/revm-inspectors/src/stack/mod.rs +++ b/crates/revm/revm-inspectors/src/stack/mod.rs @@ -1,6 +1,6 @@ use std::fmt::Debug; -use reth_primitives::{bytes::Bytes, Address, TxHash, H256}; +use reth_primitives::{bytes::Bytes, Address, TxHash, H256, U256}; use revm::{ inspectors::CustomPrintTracer, interpreter::{CallInputs, CreateInputs, Gas, InstructionResult, Interpreter}, @@ -105,10 +105,9 @@ where &mut self, interpreter: &mut Interpreter, data: &mut EVMData<'_, DB>, - is_static: bool, ) -> InstructionResult { call_inspectors!(inspector, [&mut self.custom_print_tracer], { - let status = inspector.initialize_interp(interpreter, data, is_static); + let status = inspector.initialize_interp(interpreter, data); // Allow inspectors to exit early if status != InstructionResult::Continue { @@ -123,10 +122,9 @@ where &mut self, interpreter: &mut Interpreter, data: &mut EVMData<'_, DB>, - is_static: bool, ) -> InstructionResult { call_inspectors!(inspector, [&mut self.custom_print_tracer], { - let status = inspector.step(interpreter, data, is_static); + let status = inspector.step(interpreter, data); // Allow inspectors to exit early if status != InstructionResult::Continue { @@ -153,11 +151,10 @@ where &mut self, interpreter: &mut Interpreter, data: &mut EVMData<'_, DB>, - is_static: bool, eval: InstructionResult, ) -> InstructionResult { call_inspectors!(inspector, [&mut self.custom_print_tracer], { - let status = inspector.step_end(interpreter, data, is_static, eval); + let status = inspector.step_end(interpreter, data, eval); // Allow inspectors to exit early if status != InstructionResult::Continue { @@ -172,10 +169,9 @@ where &mut self, data: &mut EVMData<'_, DB>, inputs: &mut CallInputs, - is_static: bool, ) -> (InstructionResult, Gas, Bytes) { call_inspectors!(inspector, [&mut self.custom_print_tracer], { - let (status, gas, retdata) = inspector.call(data, inputs, is_static); + let (status, gas, retdata) = inspector.call(data, inputs); // Allow inspectors to exit early if status != InstructionResult::Continue { @@ -193,11 +189,10 @@ where remaining_gas: Gas, ret: InstructionResult, out: Bytes, - is_static: bool, ) -> (InstructionResult, Gas, Bytes) { call_inspectors!(inspector, [&mut self.custom_print_tracer], { let (new_ret, new_gas, new_out) = - inspector.call_end(data, inputs, remaining_gas, ret, out.clone(), is_static); + inspector.call_end(data, inputs, remaining_gas, ret, out.clone()); // If the inspector returns a different ret or a revert with a non-empty message, // we assume it wants to tell us something @@ -247,9 +242,9 @@ where (ret, address, remaining_gas, out) } - fn selfdestruct(&mut self, contract: Address, target: Address) { + fn selfdestruct(&mut self, contract: Address, target: Address, value: U256) { call_inspectors!(inspector, [&mut self.custom_print_tracer], { - Inspector::::selfdestruct(inspector, contract, target); + Inspector::::selfdestruct(inspector, contract, target, value); }); } } diff --git a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs index 294caa8c4778..b6420c37544f 100644 --- 
a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs +++ b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs @@ -424,7 +424,7 @@ impl ParityTraceBuilder { opcode::ADD | opcode::EXP | opcode::CALLER | - opcode::SHA3 | + opcode::KECCAK256 | opcode::SUB | opcode::ADDRESS | opcode::GAS | @@ -535,7 +535,7 @@ where /// /// iteratively fill the [VmTrace] code fields pub(crate) fn populate_vm_trace_bytecodes( - db: &DB, + db: DB, trace: &mut VmTrace, breadth_first_addresses: I, ) -> Result<(), DB::Error> @@ -571,8 +571,8 @@ where /// in the [ExecutionResult] state map and compares the balance and nonce against what's in the /// `db`, which should point to the beginning of the transaction. /// -/// It's expected that `DB` is a [CacheDB](revm::db::CacheDB) which at this point already contains -/// all the accounts that are in the state map and never has to fetch them from disk. +/// It's expected that `DB` is a revm [Database](revm::db::Database) which at this point already +/// contains all the accounts that are in the state map and never has to fetch them from disk. pub fn populate_account_balance_nonce_diffs( state_diff: &mut StateDiff, db: DB, diff --git a/crates/revm/revm-inspectors/src/tracing/fourbyte.rs b/crates/revm/revm-inspectors/src/tracing/fourbyte.rs index ffebf976c5d4..977d2a5595e4 100644 --- a/crates/revm/revm-inspectors/src/tracing/fourbyte.rs +++ b/crates/revm/revm-inspectors/src/tracing/fourbyte.rs @@ -51,7 +51,6 @@ where &mut self, _data: &mut EVMData<'_, DB>, call: &mut CallInputs, - _is_static: bool, ) -> (InstructionResult, Gas, Bytes) { if call.input.len() >= 4 { let selector = Selector::try_from(&call.input[..4]).expect("input is at least 4 bytes"); diff --git a/crates/revm/revm-inspectors/src/tracing/js/mod.rs b/crates/revm/revm-inspectors/src/tracing/js/mod.rs index d88b55ee6def..3be256be7f2d 100644 --- a/crates/revm/revm-inspectors/src/tracing/js/mod.rs +++ b/crates/revm/revm-inspectors/src/tracing/js/mod.rs @@ -287,12 +287,7 @@ impl Inspector for JsInspector where DB: Database, { - fn step( - &mut self, - interp: &mut Interpreter, - data: &mut EVMData<'_, DB>, - _is_static: bool, - ) -> InstructionResult { + fn step(&mut self, interp: &mut Interpreter, data: &mut EVMData<'_, DB>) -> InstructionResult { if self.step_fn.is_none() { return InstructionResult::Continue } @@ -331,7 +326,6 @@ where &mut self, interp: &mut Interpreter, data: &mut EVMData<'_, DB>, - _is_static: bool, eval: InstructionResult, ) -> InstructionResult { if self.step_fn.is_none() { @@ -364,7 +358,6 @@ where &mut self, data: &mut EVMData<'_, DB>, inputs: &mut CallInputs, - _is_static: bool, ) -> (InstructionResult, Gas, Bytes) { self.register_precompiles(&data.precompiles); @@ -408,7 +401,6 @@ where remaining_gas: Gas, ret: InstructionResult, out: Bytes, - _is_static: bool, ) -> (InstructionResult, Gas, Bytes) { if self.exit_fn.is_some() { let frame_result = @@ -476,7 +468,7 @@ where (ret, address, remaining_gas, out) } - fn selfdestruct(&mut self, _contract: B160, _target: B160) { + fn selfdestruct(&mut self, _contract: B160, _target: B160, _value: U256) { if self.enter_fn.is_some() { let call = self.active_call(); let frame = diff --git a/crates/revm/revm-inspectors/src/tracing/mod.rs b/crates/revm/revm-inspectors/src/tracing/mod.rs index aad2e9d9a615..75dfefdc5f62 100644 --- a/crates/revm/revm-inspectors/src/tracing/mod.rs +++ b/crates/revm/revm-inspectors/src/tracing/mod.rs @@ -379,19 +379,13 @@ where &mut self, interp: &mut Interpreter, data: &mut EVMData<'_, DB>, - is_static: bool, 
) -> InstructionResult { - self.gas_inspector.initialize_interp(interp, data, is_static) + self.gas_inspector.initialize_interp(interp, data) } - fn step( - &mut self, - interp: &mut Interpreter, - data: &mut EVMData<'_, DB>, - is_static: bool, - ) -> InstructionResult { + fn step(&mut self, interp: &mut Interpreter, data: &mut EVMData<'_, DB>) -> InstructionResult { if self.config.record_steps { - self.gas_inspector.step(interp, data, is_static); + self.gas_inspector.step(interp, data); self.start_step(interp, data); } @@ -420,11 +414,10 @@ where &mut self, interp: &mut Interpreter, data: &mut EVMData<'_, DB>, - is_static: bool, eval: InstructionResult, ) -> InstructionResult { if self.config.record_steps { - self.gas_inspector.step_end(interp, data, is_static, eval); + self.gas_inspector.step_end(interp, data, eval); self.fill_step_on_step_end(interp, data, eval); } InstructionResult::Continue @@ -434,9 +427,8 @@ where &mut self, data: &mut EVMData<'_, DB>, inputs: &mut CallInputs, - is_static: bool, ) -> (InstructionResult, Gas, Bytes) { - self.gas_inspector.call(data, inputs, is_static); + self.gas_inspector.call(data, inputs); // determine correct `from` and `to` based on the call scheme let (from, to) = match inputs.context.scheme { @@ -482,9 +474,8 @@ where gas: Gas, ret: InstructionResult, out: Bytes, - is_static: bool, ) -> (InstructionResult, Gas, Bytes) { - self.gas_inspector.call_end(data, inputs, gas, ret, out.clone(), is_static); + self.gas_inspector.call_end(data, inputs, gas, ret, out.clone()); self.fill_trace_on_call_end(data, ret, &gas, out.clone(), None); @@ -546,7 +537,7 @@ where (status, address, gas, retdata) } - fn selfdestruct(&mut self, _contract: Address, target: Address) { + fn selfdestruct(&mut self, _contract: Address, target: Address, _value: U256) { let trace_idx = self.last_trace_idx(); let trace = &mut self.traces.arena[trace_idx].trace; trace.selfdestruct_refund_target = Some(target) diff --git a/crates/revm/revm-inspectors/src/tracing/opcount.rs b/crates/revm/revm-inspectors/src/tracing/opcount.rs index c13154747716..623443579e4e 100644 --- a/crates/revm/revm-inspectors/src/tracing/opcount.rs +++ b/crates/revm/revm-inspectors/src/tracing/opcount.rs @@ -29,7 +29,6 @@ where &mut self, _interp: &mut Interpreter, _data: &mut EVMData<'_, DB>, - _is_static: bool, ) -> InstructionResult { self.count += 1; InstructionResult::Continue diff --git a/crates/revm/revm-primitives/src/compat.rs b/crates/revm/revm-primitives/src/compat.rs index 35aaa4eb5a42..507580c9b4c3 100644 --- a/crates/revm/revm-primitives/src/compat.rs +++ b/crates/revm/revm-primitives/src/compat.rs @@ -24,7 +24,7 @@ pub fn into_reth_log(log: Log) -> RethLog { /// Create reth primitive [Account] from [revm::primitives::AccountInfo]. /// Check if revm bytecode hash is [KECCAK_EMPTY] and put None to reth [Account] -pub fn to_reth_acc(revm_acc: &AccountInfo) -> Account { +pub fn into_reth_acc(revm_acc: AccountInfo) -> Account { let code_hash = revm_acc.code_hash; Account { balance: revm_acc.balance, @@ -32,3 +32,13 @@ pub fn to_reth_acc(revm_acc: &AccountInfo) -> Account { bytecode_hash: (code_hash != KECCAK_EMPTY).then_some(code_hash), } } + +/// Create revm primitive [AccountInfo] from [reth_primitives::Account]. 
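The inspector hunks above make two trait-level changes: every hook loses its `is_static` parameter, and `selfdestruct` now receives the balance swept to the target. A self-contained mirror of the new `selfdestruct` shape, with simplified stand-ins for revm's `B160` and `U256`:

```rust
type Address = [u8; 20];
type U256 = u128;

// Miniature of the post-bump hook shape.
// Old: fn selfdestruct(&mut self, contract: Address, target: Address);
trait SelfdestructHook {
    fn selfdestruct(&mut self, contract: Address, target: Address, value: U256);
}

#[derive(Default)]
struct SelfdestructTracer {
    swept: Vec<(Address, Address, U256)>,
}

impl SelfdestructHook for SelfdestructTracer {
    fn selfdestruct(&mut self, contract: Address, target: Address, value: U256) {
        // With `value` passed in, a tracer no longer has to query the
        // contract's balance itself to learn how much moved to `target`.
        self.swept.push((contract, target, value));
    }
}

fn main() {
    let mut tracer = SelfdestructTracer::default();
    tracer.selfdestruct([0x01; 20], [0x02; 20], 1_000);
    assert_eq!(tracer.swept, vec![([0x01; 20], [0x02; 20], 1_000)]);
}
```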
+pub fn into_revm_acc(reth_acc: Account) -> AccountInfo { + AccountInfo { + balance: reth_acc.balance, + nonce: reth_acc.nonce, + code_hash: reth_acc.bytecode_hash.unwrap_or(KECCAK_EMPTY), + code: None, + } +} diff --git a/crates/revm/revm-primitives/src/env.rs b/crates/revm/revm-primitives/src/env.rs index 0d717a9e790e..2ddea714dd2a 100644 --- a/crates/revm/revm-primitives/src/env.rs +++ b/crates/revm/revm-primitives/src/env.rs @@ -36,9 +36,8 @@ pub fn fill_cfg_env( }, ); - cfg_env.chain_id = U256::from(chain_spec.chain().id()); + cfg_env.chain_id = chain_spec.chain().id(); cfg_env.spec_id = spec_id; - cfg_env.perf_all_precompiles_have_balance = false; cfg_env.perf_analyse_created_bytecodes = AnalysisKind::Analyse; } diff --git a/crates/revm/src/database.rs b/crates/revm/src/database.rs index 1cbb304acd85..030553597338 100644 --- a/crates/revm/src/database.rs +++ b/crates/revm/src/database.rs @@ -4,16 +4,20 @@ use reth_provider::StateProvider; use revm::{ db::{CacheDB, DatabaseRef}, primitives::{AccountInfo, Bytecode}, + Database, StateDBBox, }; /// SubState of database. Uses revm internal cache with binding to reth StateProvider trait. -pub type SubState = CacheDB>; +pub type SubState = CacheDB>; + +/// State boxed database with reth Error. +pub type RethStateDBBox<'a> = StateDBBox<'a, Error>; /// Wrapper around StateProvider that implements revm database trait #[derive(Debug, Clone)] -pub struct State(pub DB); +pub struct StateProviderDatabase(pub DB); -impl State { +impl StateProviderDatabase { /// Create new State with generic StateProvider. pub fn new(db: DB) -> Self { Self(db) @@ -35,9 +39,39 @@ impl State { } } -impl DatabaseRef for State { +impl Database for StateProviderDatabase { type Error = Error; + fn basic(&mut self, address: H160) -> Result, Self::Error> { + Ok(self.0.basic_account(address)?.map(|account| AccountInfo { + balance: account.balance, + nonce: account.nonce, + code_hash: account.bytecode_hash.unwrap_or(KECCAK_EMPTY), + code: None, + })) + } + + fn code_by_hash(&mut self, code_hash: H256) -> Result { + let bytecode = self.0.bytecode_by_hash(code_hash)?; + + Ok(bytecode.map(|b| b.0).unwrap_or_else(Bytecode::new)) + } + + fn storage(&mut self, address: H160, index: U256) -> Result { + let index = H256(index.to_be_bytes()); + let ret = self.0.storage(address, index)?.unwrap_or_default(); + Ok(ret) + } + + fn block_hash(&mut self, number: U256) -> Result { + // The `number` represents the block number, so it is safe to cast it to u64. 
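`into_reth_acc` and `into_revm_acc` round-trip the bytecode hash through a sentinel: reth models "no code" as `None`, revm as the hash of empty code, and the `basic` implementation above applies the same `unwrap_or(KECCAK_EMPTY)` when handing accounts to revm. A sketch of the mapping with a placeholder constant (the real `KECCAK_EMPTY` is keccak256 of the empty byte string):

```rust
type H256 = [u8; 32];

// Placeholder value for illustration only; not the real empty-code hash.
const KECCAK_EMPTY: H256 = [0xAB; 32];

/// reth -> revm: absent code becomes the empty-code hash.
fn to_revm_hash(bytecode_hash: Option<H256>) -> H256 {
    bytecode_hash.unwrap_or(KECCAK_EMPTY)
}

/// revm -> reth: the empty-code hash collapses back to `None`.
fn to_reth_hash(code_hash: H256) -> Option<H256> {
    (code_hash != KECCAK_EMPTY).then_some(code_hash)
}

fn main() {
    // An EOA has no code on either side of the conversion.
    assert_eq!(to_reth_hash(to_revm_hash(None)), None);
    // A contract's real hash survives the round trip.
    let hash = [0x01; 32];
    assert_eq!(to_reth_hash(to_revm_hash(Some(hash))), Some(hash));
}
```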
+ Ok(self.0.block_hash(number.try_into().unwrap())?.unwrap_or_default()) + } +} + +impl DatabaseRef for StateProviderDatabase { + type Error = ::Error; + fn basic(&self, address: H160) -> Result, Self::Error> { Ok(self.0.basic_account(address)?.map(|account| AccountInfo { balance: account.balance, @@ -51,7 +85,7 @@ impl DatabaseRef for State { let bytecode = self.0.bytecode_by_hash(code_hash)?; if let Some(bytecode) = bytecode { - Ok(bytecode.with_code_hash(code_hash).0) + Ok(bytecode.0) } else { Ok(Bytecode::new()) } diff --git a/crates/revm/src/executor.rs b/crates/revm/src/executor.rs deleted file mode 100644 index 6ba25f4192aa..000000000000 --- a/crates/revm/src/executor.rs +++ /dev/null @@ -1,1340 +0,0 @@ -use crate::{ - database::SubState, - env::{fill_cfg_and_block_env, fill_tx_env}, - eth_dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, - into_reth_log, - stack::{InspectorStack, InspectorStackConfig}, - to_reth_acc, -}; -use reth_consensus_common::calc; -use reth_interfaces::executor::{BlockExecutionError, BlockValidationError}; -use reth_primitives::{ - Account, Address, Block, BlockNumber, Bloom, Bytecode, ChainSpec, Hardfork, Header, Receipt, - ReceiptWithBloom, TransactionSigned, Withdrawal, H256, U256, -}; -use reth_provider::{BlockExecutor, PostState, StateProvider}; -use revm::{ - db::{AccountState, CacheDB, DatabaseRef}, - primitives::{ - hash_map::{self, Entry}, - Account as RevmAccount, AccountInfo, ResultAndState, - }, - EVM, -}; -use std::{ - collections::{BTreeMap, HashMap}, - sync::Arc, -}; - -/// Main block executor -pub struct Executor -where - DB: StateProvider, -{ - /// The configured chain-spec - pub chain_spec: Arc, - evm: EVM>, - stack: InspectorStack, -} - -impl From> for Executor -where - DB: StateProvider, -{ - /// Instantiates a new executor from the chainspec. Must call - /// `with_db` to set the database before executing. - fn from(chain_spec: Arc) -> Self { - let evm = EVM::new(); - Executor { chain_spec, evm, stack: InspectorStack::new(InspectorStackConfig::default()) } - } -} - -impl Executor -where - DB: StateProvider, -{ - /// Creates a new executor from the given chain spec and database. - pub fn new(chain_spec: Arc, db: SubState) -> Self { - let mut evm = EVM::new(); - evm.database(db); - - Executor { chain_spec, evm, stack: InspectorStack::new(InspectorStackConfig::default()) } - } - - /// Configures the executor with the given inspectors. - pub fn with_stack(mut self, stack: InspectorStack) -> Self { - self.stack = stack; - self - } - - /// Gives a reference to the database - pub fn db(&mut self) -> &mut SubState { - self.evm.db().expect("db to not be moved") - } - - fn recover_senders( - &self, - body: &[TransactionSigned], - senders: Option>, - ) -> Result, BlockExecutionError> { - if let Some(senders) = senders { - if body.len() == senders.len() { - Ok(senders) - } else { - Err(BlockValidationError::SenderRecoveryError.into()) - } - } else { - TransactionSigned::recover_signers(body, body.len()) - .ok_or(BlockValidationError::SenderRecoveryError.into()) - } - } - - /// Initializes the config and block env. - fn init_env(&mut self, header: &Header, total_difficulty: U256) { - fill_cfg_and_block_env( - &mut self.evm.env.cfg, - &mut self.evm.env.block, - &self.chain_spec, - header, - total_difficulty, - ); - } - - /// Commit change to the run-time database, and update the given [PostState] with the changes - /// made in the transaction, which can be persisted to the database. 
- fn commit_changes( - &mut self, - block_number: BlockNumber, - changes: hash_map::HashMap, - has_state_clear_eip: bool, - post_state: &mut PostState, - ) { - let db = self.db(); - commit_state_changes(db, post_state, block_number, changes, has_state_clear_eip); - } - - /// Collect all balance changes at the end of the block. - /// - /// Balance changes might include the block reward, uncle rewards, withdrawals, or irregular - /// state changes (DAO fork). - fn post_block_balance_increments(&self, block: &Block, td: U256) -> HashMap { - post_block_balance_increments( - &self.chain_spec, - block.number, - block.difficulty, - block.beneficiary, - block.timestamp, - td, - &block.ommers, - block.withdrawals.as_deref(), - ) - } - - /// Irregular state change at Ethereum DAO hardfork - fn apply_dao_fork_changes( - &mut self, - block_number: BlockNumber, - post_state: &mut PostState, - ) -> Result<(), BlockExecutionError> { - let db = self.db(); - - let mut drained_balance = U256::ZERO; - - // drain all accounts ether - for address in DAO_HARDKFORK_ACCOUNTS { - let db_account = - db.load_account(address).map_err(|_| BlockExecutionError::ProviderError)?; - let old = to_reth_acc(&db_account.info); - // drain balance - drained_balance += core::mem::take(&mut db_account.info.balance); - let new = to_reth_acc(&db_account.info); - // assume it is changeset as it is irregular state change - post_state.change_account(block_number, address, old, new); - } - - // add drained ether to beneficiary. - let beneficiary = DAO_HARDFORK_BENEFICIARY; - self.increment_account_balance(block_number, beneficiary, drained_balance, post_state)?; - - Ok(()) - } - - /// Increment the balance for the given account in the [PostState]. - fn increment_account_balance( - &mut self, - block_number: BlockNumber, - address: Address, - increment: U256, - post_state: &mut PostState, - ) -> Result<(), BlockExecutionError> { - increment_account_balance(self.db(), post_state, block_number, address, increment) - .map_err(|_| BlockExecutionError::ProviderError) - } - - /// Runs a single transaction in the configured environment and proceeds - /// to return the result and state diff (without applying it). - /// - /// Assumes the rest of the block environment has been filled via `init_block_env`. - pub fn transact( - &mut self, - transaction: &TransactionSigned, - sender: Address, - ) -> Result { - // Fill revm structure. - fill_tx_env(&mut self.evm.env.tx, transaction, sender); - - let hash = transaction.hash(); - let out = if self.stack.should_inspect(&self.evm.env, hash) { - // execution with inspector. - let output = self.evm.inspect(&mut self.stack); - tracing::trace!( - target: "evm", - ?hash, ?output, ?transaction, env = ?self.evm.env, - "Executed transaction" - ); - output - } else { - // main execution. - self.evm.transact() - }; - out.map_err(|e| BlockValidationError::EVM { hash, message: format!("{e:?}") }.into()) - } - - /// Runs the provided transactions and commits their state to the run-time database. - /// - /// The returned [PostState] can be used to persist the changes to disk, and contains the - /// changes made by each transaction. - /// - /// The changes in [PostState] have a transition ID associated with them: there is one - /// transition ID for each transaction (with the first executed tx having transition ID 0, and - /// so on). - /// - /// The second returned value represents the total gas used by this block of transactions. 
- pub fn execute_transactions( - &mut self, - block: &Block, - total_difficulty: U256, - senders: Option>, - ) -> Result<(PostState, u64), BlockExecutionError> { - // perf: do not execute empty blocks - if block.body.is_empty() { - return Ok((PostState::default(), 0)) - } - let senders = self.recover_senders(&block.body, senders)?; - - self.init_env(&block.header, total_difficulty); - - let mut cumulative_gas_used = 0; - let mut post_state = PostState::with_tx_capacity(block.number, block.body.len()); - for (transaction, sender) in block.body.iter().zip(senders) { - // The sum of the transaction’s gas limit, Tg, and the gas utilised in this block prior, - // must be no greater than the block’s gasLimit. - let block_available_gas = block.header.gas_limit - cumulative_gas_used; - if transaction.gas_limit() > block_available_gas { - return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { - transaction_gas_limit: transaction.gas_limit(), - block_available_gas, - } - .into()) - } - // Execute transaction. - let ResultAndState { result, state } = self.transact(transaction, sender)?; - - // commit changes - self.commit_changes( - block.number, - state, - self.chain_spec.fork(Hardfork::SpuriousDragon).active_at_block(block.number), - &mut post_state, - ); - - // append gas used - cumulative_gas_used += result.gas_used(); - - tracing::trace!( - target: "revm::executor", - hash = ?transaction.hash, - gas_used = result.gas_used(), - "transaction executed" - ); - - // Push transaction changeset and calculate header bloom filter for receipt. - post_state.add_receipt( - block.number, - Receipt { - tx_type: transaction.tx_type(), - // Success flag was added in `EIP-658: Embedding transaction status code in - // receipts`. - success: result.is_success(), - cumulative_gas_used, - // convert to reth log - logs: result.into_logs().into_iter().map(into_reth_log).collect(), - }, - ); - } - - Ok((post_state, cumulative_gas_used)) - } - - /// Applies the post-block changes, assuming the poststate is generated after executing - /// tranactions - pub fn apply_post_block_changes( - &mut self, - block: &Block, - total_difficulty: U256, - mut post_state: PostState, - ) -> Result { - // Add block rewards - let balance_increments = self.post_block_balance_increments(block, total_difficulty); - for (address, increment) in balance_increments.into_iter() { - self.increment_account_balance(block.number, address, increment, &mut post_state)?; - } - - // Perform DAO irregular state change - if self.chain_spec.fork(Hardfork::Dao).transitions_at_block(block.number) { - self.apply_dao_fork_changes(block.number, &mut post_state)?; - } - Ok(post_state) - } -} - -impl BlockExecutor for Executor -where - DB: StateProvider, -{ - fn execute( - &mut self, - block: &Block, - total_difficulty: U256, - senders: Option>, - ) -> Result { - let (post_state, cumulative_gas_used) = - self.execute_transactions(block, total_difficulty, senders)?; - - // Check if gas used matches the value set in header. 
- if block.gas_used != cumulative_gas_used { - return Err(BlockValidationError::BlockGasUsed { - got: cumulative_gas_used, - expected: block.gas_used, - } - .into()) - } - - self.apply_post_block_changes(block, total_difficulty, post_state) - } - - fn execute_and_verify_receipt( - &mut self, - block: &Block, - total_difficulty: U256, - senders: Option>, - ) -> Result { - let post_state = self.execute(block, total_difficulty, senders)?; - - // TODO Before Byzantium, receipts contained state root that would mean that expensive - // operation as hashing that is needed for state root got calculated in every - // transaction This was replaced with is_success flag. - // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 - if self.chain_spec.fork(Hardfork::Byzantium).active_at_block(block.header.number) { - verify_receipt( - block.header.receipts_root, - block.header.logs_bloom, - post_state.receipts(block.number).iter(), - )?; - } - - Ok(post_state) - } -} - -/// Increment the balance for the given account in the [PostState]. -/// -/// Returns an error if the database encountered an error while loading the account. -pub fn increment_account_balance( - db: &mut CacheDB, - post_state: &mut PostState, - block_number: BlockNumber, - address: Address, - increment: U256, -) -> Result<(), ::Error> -where - DB: DatabaseRef, -{ - let beneficiary = db.load_account(address)?; - let old = to_reth_acc(&beneficiary.info); - // Increment beneficiary balance by mutating db entry in place. - beneficiary.info.balance += increment; - let new = to_reth_acc(&beneficiary.info); - match beneficiary.account_state { - AccountState::NotExisting => { - // if account was not existing that means that storage is not - // present. - beneficiary.account_state = AccountState::StorageCleared; - - // if account was not present append `Created` changeset - post_state.create_account( - block_number, - address, - Account { nonce: 0, balance: new.balance, bytecode_hash: None }, - ) - } - - AccountState::StorageCleared | AccountState::Touched | AccountState::None => { - // If account is None that means that EVM didn't touch it. - // we are changing the state to Touched as account can have - // storage in db. - if beneficiary.account_state == AccountState::None { - beneficiary.account_state = AccountState::Touched; - } - // if account was present, append changed changeset. - post_state.change_account(block_number, address, old, new); - } - } - - Ok(()) -} - -/// Commit change to the _run-time_ database [CacheDB], and update the given [PostState] with the -/// changes made in the transaction, which can be persisted to the database. -/// -/// Note: This does _not_ commit to the underlying database [DatabaseRef], but only to the -/// [CacheDB]. -pub fn commit_state_changes( - db: &mut CacheDB, - post_state: &mut PostState, - block_number: BlockNumber, - changes: hash_map::HashMap, - has_state_clear_eip: bool, -) where - DB: DatabaseRef, -{ - // iterate over all changed accounts - for (address, account) in changes { - if account.is_destroyed { - // get old account that we are destroying. 
- let db_account = match db.accounts.entry(address) { - Entry::Occupied(entry) => entry.into_mut(), - Entry::Vacant(_entry) => { - panic!("Left panic to critically jumpout if happens, as every account should be hot loaded."); - } - }; - - let account_exists = !matches!(db_account.account_state, AccountState::NotExisting); - if account_exists { - // Insert into `change` a old account and None for new account - // and mark storage to be wiped - post_state.destroy_account(block_number, address, to_reth_acc(&db_account.info)); - } - - // clear cached DB and mark account as not existing - db_account.storage.clear(); - db_account.account_state = AccountState::NotExisting; - db_account.info = AccountInfo::default(); - - continue - } else { - // check if account code is new or old. - // does it exist inside cached contracts if it doesn't it is new bytecode that - // we are inserting inside `change` - if let Some(ref code) = account.info.code { - if !code.is_empty() && !db.contracts.contains_key(&account.info.code_hash) { - db.contracts.insert(account.info.code_hash, code.clone()); - post_state.add_bytecode(account.info.code_hash, Bytecode(code.clone())); - } - } - - // get old account that is going to be overwritten or none if it does not exist - // and get new account that was just inserted. new account mut ref is used for - // inserting storage - let cached_account = match db.accounts.entry(address) { - Entry::Vacant(entry) => { - let entry = entry.insert(Default::default()); - entry.info = account.info.clone(); - entry.account_state = AccountState::NotExisting; // we will promote account state down the road - let new_account = to_reth_acc(&entry.info); - - #[allow(clippy::nonminimal_bool)] - // If account was touched before state clear EIP, create it. - if !has_state_clear_eip || - // If account was touched after state clear EIP, create it only if it is not empty. - (has_state_clear_eip && !new_account.is_empty()) - { - post_state.create_account(block_number, address, new_account); - } - - entry - } - Entry::Occupied(entry) => { - let entry = entry.into_mut(); - - let old_account = to_reth_acc(&entry.info); - let new_account = to_reth_acc(&account.info); - - let account_non_existent = - matches!(entry.account_state, AccountState::NotExisting); - - // Before state clear EIP, create account if it doesn't exist - if (!has_state_clear_eip && account_non_existent) - // After state clear EIP, create account only if it is not empty - || (has_state_clear_eip && entry.info.is_empty() && !new_account.is_empty()) - { - post_state.create_account(block_number, address, new_account); - } else if old_account != new_account { - post_state.change_account( - block_number, - address, - to_reth_acc(&entry.info), - new_account, - ); - } else if has_state_clear_eip && new_account.is_empty() && !account_non_existent - { - // The account was touched, but it is empty, so it should be deleted. - // This also deletes empty accounts which were created before state clear - // EIP. 
- post_state.destroy_account(block_number, address, new_account); - } - - entry.info = account.info.clone(); - entry - } - }; - - cached_account.account_state = if account.storage_cleared { - cached_account.storage.clear(); - AccountState::StorageCleared - } else if cached_account.account_state.is_storage_cleared() { - // the account already exists and its storage was cleared, preserve its previous - // state - AccountState::StorageCleared - } else if has_state_clear_eip && - matches!(cached_account.account_state, AccountState::NotExisting) && - cached_account.info.is_empty() - { - AccountState::NotExisting - } else { - AccountState::Touched - }; - - // Insert storage. - let mut storage_changeset = BTreeMap::new(); - - // insert storage into new db account. - cached_account.storage.extend(account.storage.into_iter().map(|(key, value)| { - if value.is_changed() { - storage_changeset.insert(key, (value.original_value(), value.present_value())); - } - (key, value.present_value()) - })); - - // Insert into change. - if !storage_changeset.is_empty() { - post_state.change_storage(block_number, address, storage_changeset); - } - } - } -} - -/// Verify receipts -pub fn verify_receipt<'a>( - expected_receipts_root: H256, - expected_logs_bloom: Bloom, - receipts: impl Iterator + Clone, -) -> Result<(), BlockExecutionError> { - // Check receipts root. - let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::>(); - let receipts_root = reth_primitives::proofs::calculate_receipt_root(&receipts_with_bloom); - if receipts_root != expected_receipts_root { - return Err(BlockValidationError::ReceiptRootDiff { - got: receipts_root, - expected: expected_receipts_root, - } - .into()) - } - - // Create header log bloom. - let logs_bloom = receipts_with_bloom.iter().fold(Bloom::zero(), |bloom, r| bloom | r.bloom); - if logs_bloom != expected_logs_bloom { - return Err(BlockValidationError::BloomLogDiff { - expected: Box::new(expected_logs_bloom), - got: Box::new(logs_bloom), - } - .into()) - } - - Ok(()) -} - -/// Collect all balance changes at the end of the block. -/// -/// Balance changes might include the block reward, uncle rewards, withdrawals, or irregular -/// state changes (DAO fork). -#[allow(clippy::too_many_arguments)] -#[inline] -pub fn post_block_balance_increments( - chain_spec: &ChainSpec, - block_number: u64, - block_difficulty: U256, - beneficiary: Address, - block_timestamp: u64, - total_difficulty: U256, - ommers: &[Header], - withdrawals: Option<&[Withdrawal]>, -) -> HashMap { - let mut balance_increments = HashMap::new(); - - // Add block rewards if they are enabled. - if let Some(base_block_reward) = - calc::base_block_reward(chain_spec, block_number, block_difficulty, total_difficulty) - { - // Ommer rewards - for ommer in ommers { - *balance_increments.entry(ommer.beneficiary).or_default() += - calc::ommer_reward(base_block_reward, block_number, ommer.number); - } - - // Full block reward - *balance_increments.entry(beneficiary).or_default() += - calc::block_reward(base_block_reward, ommers.len()); - } - - // process withdrawals - insert_post_block_withdrawals_balance_increments( - chain_spec, - block_timestamp, - withdrawals, - &mut balance_increments, - ); - - balance_increments -} - -/// Returns a map of addresses to their balance increments if shanghai is active at the given -/// timestamp. 
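`post_block_balance_increments` above folds block and ommer rewards into the increments map via `calc::base_block_reward`, `calc::ommer_reward`, and `calc::block_reward`. The classic pre-merge formulas those helpers correspond to, sketched with u128 in place of `U256`:

```rust
/// Ommer reward: (8 + ommer_number - block_number) / 8 of the base reward.
/// Valid for ommers at most 8 blocks older than the including block; older
/// ommers would underflow here (and earn nothing on mainnet anyway).
fn ommer_reward(base_block_reward: u128, block_number: u64, ommer_number: u64) -> u128 {
    ((8 + ommer_number - block_number) as u128) * base_block_reward / 8
}

/// Nephew bonus: the beneficiary earns base + base/32 per included ommer.
fn block_reward(base_block_reward: u128, num_ommers: usize) -> u128 {
    base_block_reward + (base_block_reward / 32) * num_ommers as u128
}

fn main() {
    const ETH_TO_WEI: u128 = 1_000_000_000_000_000_000;
    let base = 2 * ETH_TO_WEI; // post-Constantinople base reward

    // An ommer one block behind the including block earns 7/8 of the base.
    assert_eq!(ommer_reward(base, 100, 99), base * 7 / 8);
    // Including one ommer adds base/32 on top of the full block reward.
    assert_eq!(block_reward(base, 1), base + base / 32);
}
```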
-#[inline] -pub fn post_block_withdrawals_balance_increments( - chain_spec: &ChainSpec, - block_timestamp: u64, - withdrawals: &[Withdrawal], -) -> HashMap { - let mut balance_increments = HashMap::with_capacity(withdrawals.len()); - insert_post_block_withdrawals_balance_increments( - chain_spec, - block_timestamp, - Some(withdrawals), - &mut balance_increments, - ); - balance_increments -} - -/// Applies all withdrawal balance increments if shanghai is active at the given timestamp to the -/// given `balance_increments` map. -#[inline] -pub fn insert_post_block_withdrawals_balance_increments( - chain_spec: &ChainSpec, - block_timestamp: u64, - withdrawals: Option<&[Withdrawal]>, - balance_increments: &mut HashMap, -) { - // Process withdrawals - if chain_spec.fork(Hardfork::Shanghai).active_at_timestamp(block_timestamp) { - if let Some(withdrawals) = withdrawals { - for withdrawal in withdrawals { - *balance_increments.entry(withdrawal.address).or_default() += - withdrawal.amount_wei(); - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::database::State; - use once_cell::sync::Lazy; - use reth_consensus_common::calc; - use reth_primitives::{ - constants::ETH_TO_WEI, hex_literal::hex, keccak256, Account, Address, BlockNumber, - Bytecode, Bytes, ChainSpecBuilder, ForkCondition, StorageKey, H256, MAINNET, U256, - }; - use reth_provider::{ - post_state::{AccountChanges, Storage, StorageTransition, StorageWipe}, - AccountReader, BlockHashReader, StateProvider, StateRootProvider, - }; - use reth_rlp::Decodable; - use std::{collections::HashMap, str::FromStr}; - - static DEFAULT_REVM_ACCOUNT: Lazy = Lazy::new(|| RevmAccount { - info: AccountInfo::default(), - storage: hash_map::HashMap::default(), - is_destroyed: false, - is_touched: false, - storage_cleared: false, - is_not_existing: false, - }); - - #[derive(Debug, Default, Clone, Eq, PartialEq)] - struct StateProviderTest { - accounts: HashMap, Account)>, - contracts: HashMap, - block_hash: HashMap, - } - - impl StateProviderTest { - /// Insert account. 
- fn insert_account( - &mut self, - address: Address, - mut account: Account, - bytecode: Option, - storage: HashMap, - ) { - if let Some(bytecode) = bytecode { - let hash = keccak256(&bytecode); - account.bytecode_hash = Some(hash); - self.contracts.insert(hash, Bytecode::new_raw(bytecode.into())); - } - self.accounts.insert(address, (storage, account)); - } - } - - impl AccountReader for StateProviderTest { - fn basic_account(&self, address: Address) -> reth_interfaces::Result> { - let ret = Ok(self.accounts.get(&address).map(|(_, acc)| *acc)); - ret - } - } - - impl BlockHashReader for StateProviderTest { - fn block_hash(&self, number: u64) -> reth_interfaces::Result> { - Ok(self.block_hash.get(&number).cloned()) - } - - fn canonical_hashes_range( - &self, - start: BlockNumber, - end: BlockNumber, - ) -> reth_interfaces::Result> { - let range = start..end; - Ok(self - .block_hash - .iter() - .filter_map(|(block, hash)| range.contains(block).then_some(*hash)) - .collect()) - } - } - - impl StateRootProvider for StateProviderTest { - fn state_root(&self, _post_state: PostState) -> reth_interfaces::Result { - todo!() - } - } - - impl StateProvider for StateProviderTest { - fn storage( - &self, - account: Address, - storage_key: reth_primitives::StorageKey, - ) -> reth_interfaces::Result> { - Ok(self - .accounts - .get(&account) - .and_then(|(storage, _)| storage.get(&storage_key).cloned())) - } - - fn bytecode_by_hash(&self, code_hash: H256) -> reth_interfaces::Result> { - Ok(self.contracts.get(&code_hash).cloned()) - } - - fn proof( - &self, - _address: Address, - _keys: &[H256], - ) -> reth_interfaces::Result<(Vec, H256, Vec>)> { - todo!() - } - } - - #[test] - fn sanity_execution() { - // Got rlp block from: src/GeneralStateTestsFiller/stChainId/chainIdGasCostFiller.json - - let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); - let mut block = Block::decode(&mut block_rlp).unwrap(); - - let mut ommer = Header::default(); - let ommer_beneficiary = - Address::from_str("3000000000000000000000000000000000000000").unwrap(); - ommer.beneficiary = ommer_beneficiary; - ommer.number = block.number; - block.ommers = vec![ommer]; - - let mut db = StateProviderTest::default(); - - let account1 = Address::from_str("1000000000000000000000000000000000000000").unwrap(); - let account2 = 
Address::from_str("2adc25665018aa1fe0e6bc666dac8fc2697ff9ba").unwrap(); - let account3 = Address::from_str("a94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(); - - // pre state - db.insert_account( - account1, - Account { balance: U256::ZERO, nonce: 0x00, bytecode_hash: None }, - Some(hex!("5a465a905090036002900360015500").into()), - HashMap::new(), - ); - - let account3_old_info = Account { - balance: U256::from(0x3635c9adc5dea00000u128), - nonce: 0x00, - bytecode_hash: None, - }; - - db.insert_account( - account3, - Account { - balance: U256::from(0x3635c9adc5dea00000u128), - nonce: 0x00, - bytecode_hash: None, - }, - None, - HashMap::new(), - ); - - // spec at berlin fork - let chain_spec = Arc::new(ChainSpecBuilder::mainnet().berlin_activated().build()); - - let db = SubState::new(State::new(db)); - - // execute chain and verify receipts - let mut executor = Executor::new(chain_spec, db); - let post_state = executor.execute_and_verify_receipt(&block, U256::ZERO, None).unwrap(); - - let base_block_reward = ETH_TO_WEI * 2; - let block_reward = calc::block_reward(base_block_reward, 1); - - let account1_info = Account { balance: U256::ZERO, nonce: 0x00, bytecode_hash: None }; - let account2_info = Account { - // Block reward decrease - balance: U256::from(0x1bc16d674ece94bau128 - 0x1bc16d674ec80000u128), - nonce: 0x00, - bytecode_hash: None, - }; - let account2_info_with_block_reward = Account { - balance: account2_info.balance + block_reward, - nonce: 0x00, - bytecode_hash: None, - }; - let account3_info = Account { - balance: U256::from(0x3635c9adc5de996b46u128), - nonce: 0x01, - bytecode_hash: None, - }; - let ommer_beneficiary_info = Account { - nonce: 0, - balance: calc::ommer_reward(base_block_reward, block.number, block.ommers[0].number), - bytecode_hash: None, - }; - - // Check if cache is set - // account1 - let db = executor.db(); - let cached_acc1 = db.accounts.get(&account1).unwrap(); - assert_eq!(cached_acc1.info.balance, account1_info.balance); - assert_eq!(cached_acc1.info.nonce, account1_info.nonce); - assert_eq!(cached_acc1.account_state, AccountState::Touched); - assert_eq!(cached_acc1.storage.len(), 1); - assert_eq!(cached_acc1.storage.get(&U256::from(1)), Some(&U256::from(2))); - - // account2 Block reward - let cached_acc2 = db.accounts.get(&account2).unwrap(); - assert_eq!(cached_acc2.info.balance, account2_info.balance + block_reward); - assert_eq!(cached_acc2.info.nonce, account2_info.nonce); - assert_eq!(cached_acc2.account_state, AccountState::Touched); - assert_eq!(cached_acc2.storage.len(), 0); - - // account3 - let cached_acc3 = db.accounts.get(&account3).unwrap(); - assert_eq!(cached_acc3.info.balance, account3_info.balance); - assert_eq!(cached_acc3.info.nonce, account3_info.nonce); - assert_eq!(cached_acc3.account_state, AccountState::Touched); - assert_eq!(cached_acc3.storage.len(), 0); - - assert!( - post_state.accounts().get(&account1).is_none(), - "Account should not be present in post-state since it was not changed" - ); - - // Clone and sort to make the test deterministic - assert_eq!( - post_state.account_changes().inner, - BTreeMap::from([( - block.number, - BTreeMap::from([ - // New account - (account2, None), - // Changed account - (account3, Some(account3_old_info)), - // Ommer reward - (ommer_beneficiary, None) - ]) - ),]), - "Account changeset did not match" - ); - assert_eq!( - post_state.storage_changes().inner, - BTreeMap::from([( - block.number, - BTreeMap::from([( - account1, - StorageTransition { - wipe: StorageWipe::None, - // Slot 1 
changed from 0 to 2 - storage: BTreeMap::from([(U256::from(1), U256::ZERO)]) - } - )]) - )]), - "Storage changeset did not match" - ); - - // Check final post-state - assert_eq!( - post_state.storage(), - &BTreeMap::from([( - account1, - Storage { - times_wiped: 0, - storage: BTreeMap::from([(U256::from(1), U256::from(2))]) - } - )]), - "Should have changed 1 storage slot" - ); - assert_eq!(post_state.bytecodes().len(), 0, "Should have zero new bytecodes"); - - let accounts = post_state.accounts(); - assert_eq!( - accounts.len(), - 3, - "Should have 4 accounts (account 2, 3 and the ommer beneficiary)" - ); - assert_eq!( - accounts.get(&account2).unwrap(), - &Some(account2_info_with_block_reward), - "Account 2 state is wrong" - ); - assert_eq!( - accounts.get(&account3).unwrap(), - &Some(account3_info), - "Account 3 state is wrong" - ); - assert_eq!( - accounts.get(&ommer_beneficiary).unwrap(), - &Some(ommer_beneficiary_info), - "Ommer beneficiary state is wrong" - ); - } - - #[test] - fn dao_hardfork_irregular_state_change() { - let header = Header { number: 1, ..Header::default() }; - - let mut db = StateProviderTest::default(); - - let mut beneficiary_balance = 0; - for (i, dao_address) in DAO_HARDKFORK_ACCOUNTS.iter().enumerate() { - db.insert_account( - *dao_address, - Account { balance: U256::from(i), nonce: 0x00, bytecode_hash: None }, - None, - HashMap::new(), - ); - beneficiary_balance += i; - } - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .homestead_activated() - .with_fork(Hardfork::Dao, ForkCondition::Block(1)) - .build(), - ); - - let db = SubState::new(State::new(db)); - // execute chain and verify receipts - let mut executor = Executor::new(chain_spec, db); - let out = executor - .execute_and_verify_receipt( - &Block { header, body: vec![], ommers: vec![], withdrawals: None }, - U256::ZERO, - None, - ) - .unwrap(); - - // Check if cache is set - // beneficiary - let db = executor.db(); - let dao_beneficiary = db.accounts.get(&DAO_HARDFORK_BENEFICIARY).unwrap(); - - assert_eq!(dao_beneficiary.info.balance, U256::from(beneficiary_balance)); - for address in DAO_HARDKFORK_ACCOUNTS.iter() { - let account = db.accounts.get(address).unwrap(); - assert_eq!(account.info.balance, U256::ZERO); - } - - // check changesets - let beneficiary_state = out.accounts().get(&DAO_HARDFORK_BENEFICIARY).unwrap().unwrap(); - assert_eq!( - beneficiary_state, - Account { balance: U256::from(beneficiary_balance), ..Default::default() }, - ); - for address in DAO_HARDKFORK_ACCOUNTS.iter() { - let updated_account = out.accounts().get(address).unwrap().unwrap(); - assert_eq!(updated_account, Account { balance: U256::ZERO, ..Default::default() }); - } - } - - #[test] - fn test_selfdestruct() { - // Modified version of eth test. Storage is added for selfdestructed account to see - // that changeset is set. 
- // Got rlp block from: src/GeneralStateTestsFiller/stArgsZeroOneBalance/suicideNonConst.json - - let mut block_rlp = hex!("f9025ff901f7a0c86e8cc0310ae7c531c758678ddbfd16fc51c8cef8cec650b032de9869e8b94fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa050554882fbbda2c2fd93fdc466db9946ea262a67f7a76cc169e714f105ab583da00967f09ef1dfed20c0eacfaa94d5cd4002eda3242ac47eae68972d07b106d192a0e3c8b47fbfc94667ef4cceb17e5cc21e3b1eebd442cebb27f07562b33836290db90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001830f42408238108203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f862f860800a83061a8094095e7baea6a6c7c4c2dfeb977efac326af552d8780801ba072ed817487b84ba367d15d2f039b5fc5f087d0a8882fbdf73e8cb49357e1ce30a0403d800545b8fc544f92ce8124e2255f8c3c6af93f28243a120585d4c4c6a2a3c0").as_slice(); - let block = Block::decode(&mut block_rlp).unwrap(); - let mut db = StateProviderTest::default(); - - let address_caller = Address::from_str("a94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(); - let address_selfdestruct = - Address::from_str("095e7baea6a6c7c4c2dfeb977efac326af552d87").unwrap(); - - // pre state - let pre_account_caller = Account { - balance: U256::from(0x0de0b6b3a7640000u64), - nonce: 0x00, - bytecode_hash: None, - }; - - db.insert_account(address_caller, pre_account_caller, None, HashMap::new()); - - // insert account that will selfd - - let pre_account_selfdestroyed = Account { - balance: U256::ZERO, - nonce: 0x00, - bytecode_hash: Some(H256(hex!( - "56a7d44a4ecf086c34482ad1feb1007087fc56fae6dbefbd3f416002933f1705" - ))), - }; - - let selfdestroyed_storage = - BTreeMap::from([(H256::zero(), U256::ZERO), (H256::from_low_u64_be(1), U256::from(1))]); - db.insert_account( - address_selfdestruct, - pre_account_selfdestroyed, - Some(hex!("73095e7baea6a6c7c4c2dfeb977efac326af552d8731ff00").into()), - selfdestroyed_storage.into_iter().collect::>(), - ); - - // spec at berlin fork - let chain_spec = Arc::new(ChainSpecBuilder::mainnet().berlin_activated().build()); - - let db = SubState::new(State::new(db)); - - // execute chain and verify receipts - let mut executor = Executor::new(chain_spec, db); - let out = executor.execute_and_verify_receipt(&block, U256::ZERO, None).unwrap(); - - assert_eq!(out.bytecodes().len(), 0, "Should have zero new bytecodes"); - - let post_account_caller = Account { - balance: U256::from(0x0de0b6b3a761cf60u64), - nonce: 0x01, - bytecode_hash: None, - }; - - assert_eq!( - out.accounts().get(&address_caller).unwrap().unwrap(), - post_account_caller, - "Caller account has changed and fee is deduced" - ); - - assert_eq!( - out.accounts().get(&address_selfdestruct).unwrap(), - &None, - "Selfdestructed account should have been deleted" - ); - assert!( - out.storage().get(&address_selfdestruct).unwrap().wiped(), - "Selfdestructed account should have its storage wiped" - ); - } - - // Test vector from 
https://github.com/ethereum/tests/blob/3156db5389921125bb9e04142d18e0e7b0cf8d64/BlockchainTests/EIPTests/bc4895-withdrawals/twoIdenticalIndexDifferentValidator.json - #[test] - fn test_withdrawals() { - let block_rlp = hex!("f9028cf90219a0151934ad9b654c50197f37018ee5ee9bb922dec0a1b5e24a6d679cb111cdb107a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa048cd9a5957e45beebf80278a5208b0cbe975ab4b4adb0da1509c67b26f2be3ffa056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008001887fffffffffffffff8082079e42a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b42188000000000000000009a04a220ebe55034d51f8a58175bb504b6ebf883105010a1f6d42e557c18bbd5d69c0c0f86cda808094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710da028094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710da018094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710da020194c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710"); - let block = Block::decode(&mut block_rlp.as_slice()).unwrap(); - let withdrawals = block.withdrawals.as_ref().unwrap(); - assert_eq!(withdrawals.len(), 4); - - let withdrawal_beneficiary = - Address::from_str("c94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(); - - // spec at shanghai fork - let chain_spec = Arc::new(ChainSpecBuilder::mainnet().shanghai_activated().build()); - - let db = SubState::new(State::new(StateProviderTest::default())); - - // execute chain and verify receipts - let mut executor = Executor::new(chain_spec, db); - let out = executor.execute_and_verify_receipt(&block, U256::ZERO, None).unwrap(); - - let withdrawal_sum = withdrawals.iter().fold(U256::ZERO, |sum, w| sum + w.amount_wei()); - let beneficiary_account = executor.db().accounts.get(&withdrawal_beneficiary).unwrap(); - assert_eq!(beneficiary_account.info.balance, withdrawal_sum); - assert_eq!(beneficiary_account.info.nonce, 0); - assert_eq!(beneficiary_account.account_state, AccountState::StorageCleared); - - assert_eq!( - out.accounts().get(&withdrawal_beneficiary).unwrap(), - &Some(Account { nonce: 0, balance: withdrawal_sum, bytecode_hash: None }), - "Withdrawal account should have gotten its balance set" - ); - - // Execute same block again - let out = executor.execute_and_verify_receipt(&block, U256::ZERO, None).unwrap(); - - assert_eq!( - out.accounts().get(&withdrawal_beneficiary).unwrap(), - &Some(Account { - nonce: 0, - balance: withdrawal_sum + withdrawal_sum, - bytecode_hash: None - }), - "Withdrawal account should have gotten its balance set" - ); - } - - #[test] - fn test_account_state_preserved() { - let account = Address::from_str("c94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(); - - let mut db = StateProviderTest::default(); - db.insert_account(account, Account::default(), None, HashMap::default()); - - let chain_spec = Arc::new(ChainSpecBuilder::mainnet().istanbul_activated().build()); - let db = SubState::new(State::new(db)); - - let mut executor = Executor::new(chain_spec, 
db); - // touch account - executor.commit_changes( - 1, - hash_map::HashMap::from([(account, DEFAULT_REVM_ACCOUNT.clone())]), - true, - &mut PostState::default(), - ); - // destroy account - executor.commit_changes( - 1, - hash_map::HashMap::from([( - account, - RevmAccount { - is_destroyed: true, - is_touched: true, - ..DEFAULT_REVM_ACCOUNT.clone() - }, - )]), - true, - &mut PostState::default(), - ); - // re-create account - executor.commit_changes( - 1, - hash_map::HashMap::from([( - account, - RevmAccount { - is_touched: true, - storage_cleared: true, - ..DEFAULT_REVM_ACCOUNT.clone() - }, - )]), - true, - &mut PostState::default(), - ); - // touch account - executor.commit_changes( - 1, - hash_map::HashMap::from([(account, DEFAULT_REVM_ACCOUNT.clone())]), - true, - &mut PostState::default(), - ); - - let db = executor.db(); - - let account = db.load_account(account).unwrap(); - assert_eq!(account.account_state, AccountState::StorageCleared); - } - - /// If the account is created and destroyed within the same transaction, we shouldn't generate - /// the changeset. - #[test] - fn test_account_created_destroyed() { - let address = Address::random(); - - let mut db = SubState::new(State::new(StateProviderTest::default())); - db.load_account(address).unwrap(); // hot load the non-existing account - - let chain_spec = Arc::new(ChainSpecBuilder::mainnet().shanghai_activated().build()); - let mut executor = Executor::new(chain_spec, db); - let mut post_state = PostState::default(); - - executor.commit_changes( - 1, - hash_map::HashMap::from([( - address, - RevmAccount { - is_destroyed: true, - storage_cleared: true, - ..DEFAULT_REVM_ACCOUNT.clone() - }, - )]), - true, - &mut post_state, - ); - - assert!(post_state.account_changes().is_empty()); - } - - /// If the account was touched, but remained unchanged over the course of multiple transactions, - /// no changeset should be generated. - #[test] - fn test_touched_unchanged_account() { - let address = Address::random(); - - let db = SubState::new(State::new(StateProviderTest::default())); - - let chain_spec = Arc::new(ChainSpecBuilder::mainnet().shanghai_activated().build()); - let mut executor = Executor::new(chain_spec, db); - let mut post_state = PostState::default(); - - executor.commit_changes( - 1, - hash_map::HashMap::from([( - address, - RevmAccount { is_touched: true, ..DEFAULT_REVM_ACCOUNT.clone() }, - )]), - true, - &mut post_state, - ); - assert!(post_state.account_changes().is_empty()); - - executor.commit_changes( - 1, - hash_map::HashMap::from([( - address, - RevmAccount { is_touched: true, ..DEFAULT_REVM_ACCOUNT.clone() }, - )]), - true, - &mut post_state, - ); - assert_eq!(post_state.account_changes(), &AccountChanges::default()); - } - - #[test] - fn test_state_clear_eip_touch_account() { - let address = Address::random(); - - let mut state_provider = StateProviderTest::default(); - state_provider.insert_account(address, Account::default(), None, HashMap::default()); - let mut db = SubState::new(State::new(state_provider)); - db.load_account(address).unwrap(); // hot load the account - - let chain_spec = Arc::new(ChainSpecBuilder::mainnet().shanghai_activated().build()); - let mut executor = Executor::new(chain_spec, db); - let mut post_state = PostState::default(); - - // Touch an empty account before state clearing EIP. Nothing should happen. 
- executor.commit_changes( - 1, - hash_map::HashMap::from([( - address, - RevmAccount { is_touched: true, ..DEFAULT_REVM_ACCOUNT.clone() }, - )]), - false, - &mut post_state, - ); - assert_eq!(post_state.accounts(), &BTreeMap::default()); - assert_eq!(post_state.account_changes(), &AccountChanges::default()); - - // Touch an empty account after state clearing EIP. The account should be destroyed. - executor.commit_changes( - 2, - hash_map::HashMap::from([( - address, - RevmAccount { is_touched: true, ..DEFAULT_REVM_ACCOUNT.clone() }, - )]), - true, - &mut post_state, - ); - assert_eq!(post_state.accounts(), &BTreeMap::from([(address, None)])); - assert_eq!( - post_state.account_changes(), - &AccountChanges { - size: 1, - inner: BTreeMap::from([(2, BTreeMap::from([(address, Some(Account::default()))]))]) - } - ); - } - - #[test] - fn test_state_clear_eip_create_account() { - let address1 = Address::random(); - let address2 = Address::random(); - let address3 = Address::random(); - let address4 = Address::random(); - - let state_provider = StateProviderTest::default(); - let mut db = SubState::new(State::new(state_provider)); - db.load_account(address1).unwrap(); // hot load account 1 - - let chain_spec = Arc::new(ChainSpecBuilder::mainnet().shanghai_activated().build()); - let mut executor = Executor::new(chain_spec, db); - - // Create empty accounts before state clearing EIP. - let mut post_state_before_state_clear = PostState::default(); - executor.commit_changes( - 1, - hash_map::HashMap::from([ - (address1, RevmAccount { is_touched: true, ..DEFAULT_REVM_ACCOUNT.clone() }), - (address2, RevmAccount { is_touched: true, ..DEFAULT_REVM_ACCOUNT.clone() }), - ]), - false, - &mut post_state_before_state_clear, - ); - assert_eq!( - post_state_before_state_clear.accounts(), - &BTreeMap::from([ - (address1, Some(Account::default())), - (address2, Some(Account::default())) - ]) - ); - assert_eq!( - post_state_before_state_clear.account_changes(), - &AccountChanges { - size: 2, - inner: BTreeMap::from([(1, BTreeMap::from([(address1, None), (address2, None)]))]) - } - ); - - // Empty accounts should not be created after state clearing EIP. - let mut post_state_after_state_clear = PostState::default(); - executor.commit_changes( - 2, - hash_map::HashMap::from([ - (address3, RevmAccount { is_touched: true, ..DEFAULT_REVM_ACCOUNT.clone() }), - (address4, RevmAccount { is_touched: true, ..DEFAULT_REVM_ACCOUNT.clone() }), - ]), - true, - &mut post_state_after_state_clear, - ); - assert_eq!(post_state_after_state_clear.accounts(), &BTreeMap::default()); - assert_eq!(post_state_after_state_clear.account_changes(), &AccountChanges::default()); - } -} diff --git a/crates/revm/src/factory.rs b/crates/revm/src/factory.rs index b3a2fe59e851..6e326b5cd362 100644 --- a/crates/revm/src/factory.rs +++ b/crates/revm/src/factory.rs @@ -1,11 +1,10 @@ use crate::{ - database::{State, SubState}, + database::StateProviderDatabase, + processor::EVMProcessor, stack::{InspectorStack, InspectorStackConfig}, }; use reth_primitives::ChainSpec; -use reth_provider::{ExecutorFactory, StateProvider}; - -use crate::executor::Executor; +use reth_provider::{ExecutorFactory, PrunableBlockExecutor, StateProvider}; use std::sync::Arc; /// Factory that spawn Executor. 
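Editorial note: the hunk below replaces the factory's concrete `Executor` associated type with a boxed `PrunableBlockExecutor` trait object. A minimal usage sketch, assuming the names introduced in this patch and hypothetical `state_provider`, `max_block` and `prune_modes` values:

    let factory = Factory::new(chain_spec.clone());
    // `with_state` now returns a boxed PrunableBlockExecutor, so pruning
    // parameters can be set on the executor before running blocks.
    let mut executor = factory.with_state(state_provider);
    executor.set_tip(max_block);
    executor.set_prune_modes(prune_modes);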
@@ -35,17 +34,16 @@ impl Factory {
 }
 
 impl ExecutorFactory for Factory {
-    type Executor<T: StateProvider> = Executor<T>;
-
-    /// Executor with [`StateProvider`]
-    fn with_sp<SP: StateProvider>(&self, sp: SP) -> Self::Executor<SP> {
-        let substate = SubState::new(State::new(sp));
-
-        let mut executor = Executor::new(self.chain_spec.clone(), substate);
+    fn with_state<'a, SP: StateProvider + 'a>(
+        &'a self,
+        sp: SP,
+    ) -> Box<dyn PrunableBlockExecutor + 'a> {
+        let database_state = StateProviderDatabase::new(sp);
+        let mut evm = Box::new(EVMProcessor::new_with_db(self.chain_spec.clone(), database_state));
         if let Some(ref stack) = self.stack {
-            executor = executor.with_stack(stack.clone());
+            evm.set_stack(stack.clone());
         }
-        executor
+        evm
     }
 
     /// Return internal chainspec
diff --git a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs
index 6a99b020381d..ac5da0b58bbf 100644
--- a/crates/revm/src/lib.rs
+++ b/crates/revm/src/lib.rs
@@ -17,9 +17,14 @@ pub mod database;
 
 /// revm implementation of reth block and transaction executors.
-pub mod executor;
 mod factory;
 
+/// new revm account state executor
+pub mod processor;
+
+/// State changes that are not related to transactions.
+pub mod state_change;
+
 /// revm executor factory.
 pub use factory::Factory;
 
diff --git a/crates/revm/src/processor.rs b/crates/revm/src/processor.rs
new file mode 100644
index 000000000000..9ded17dd1a6d
--- /dev/null
+++ b/crates/revm/src/processor.rs
@@ -0,0 +1,531 @@
+use crate::{
+    database::StateProviderDatabase,
+    env::{fill_cfg_and_block_env, fill_tx_env},
+    eth_dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS},
+    into_reth_log,
+    stack::{InspectorStack, InspectorStackConfig},
+    state_change::post_block_balance_increments,
+};
+use reth_interfaces::{
+    executor::{BlockExecutionError, BlockValidationError},
+    Error,
+};
+use reth_primitives::{
+    Address, Block, BlockNumber, Bloom, ChainSpec, Hardfork, Header, PruneMode, PruneModes,
+    PrunePartError, Receipt, ReceiptWithBloom, TransactionSigned, H256, MINIMUM_PRUNING_DISTANCE,
+    U256,
+};
+use reth_provider::{
+    BlockExecutor, BlockExecutorStats, BundleStateWithReceipts, PrunableBlockExecutor,
+    StateProvider,
+};
+use revm::{
+    db::{states::bundle_state::BundleRetention, StateDBBox},
+    primitives::ResultAndState,
+    DatabaseCommit, State, EVM,
+};
+use std::{sync::Arc, time::Instant};
+use tracing::{debug, trace};
+
+/// EVMProcessor is a block executor that uses revm to execute single blocks or ranges of blocks.
+///
+/// Output is obtained by calling the `take_output_state` function.
+///
+/// It is capable of pruning the data that will be written to the database
+/// and implements the [PrunableBlockExecutor] trait.
+///
+/// It implements [BlockExecutor], which gives it the ability to take a block,
+/// apply the pre-execution state (Cancun system contract call), execute its transactions and
+/// apply their state changes, and finally apply the post-execution changes (block reward,
+/// withdrawals, irregular DAO hardfork state change). If `execute_and_verify_receipt` is
+/// called, the receipts are verified as well.
+///
+/// The [InspectorStack] is used to optionally inspect execution, and the processor also
+/// records how long the individual parts of execution take.
+pub struct EVMProcessor<'a> {
+    /// The configured chain-spec
+    chain_spec: Arc<ChainSpec>,
+    /// revm instance that contains database and env environment.
+    evm: EVM<StateDBBox<'a, Error>>,
+    /// Hook and inspector stack that we want to invoke on that hook.
+    stack: InspectorStack,
+    /// The collection of receipts.
+    /// Outer vector stores receipts for each block sequentially.
+    /// The inner vector stores receipts ordered by transaction number.
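+    ///
+    /// For example (shape only, values assumed): after executing two blocks with two and
+    /// one transactions respectively, `receipts` is `[[Some(r0), Some(r1)], [Some(r2)]]`.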
+    ///
+    /// If receipt is None it means it is pruned.
+    receipts: Vec<Vec<Option<Receipt>>>,
+    /// First block will be initialized to `None`
+    /// and be set to the block number of the first block executed.
+    first_block: Option<BlockNumber>,
+    /// The maximum known block.
+    tip: Option<BlockNumber>,
+    /// Pruning configuration.
+    prune_modes: PruneModes,
+    /// Memoized address pruning filter.
+    /// An empty filter means that there are going to be addresses to include in the filter
+    /// in a future block. `None` means no filter is configured at all.
+    pruning_address_filter: Option<(u64, Vec<Address>)>,
+    /// Execution stats
+    stats: BlockExecutorStats,
+}
+
+impl<'a> EVMProcessor<'a> {
+    /// Return chain spec.
+    pub fn chain_spec(&self) -> &Arc<ChainSpec> {
+        &self.chain_spec
+    }
+
+    /// Create a new processor with the given chain spec.
+    pub fn new(chain_spec: Arc<ChainSpec>) -> Self {
+        let evm = EVM::new();
+        EVMProcessor {
+            chain_spec,
+            evm,
+            stack: InspectorStack::new(InspectorStackConfig::default()),
+            receipts: Vec::new(),
+            first_block: None,
+            tip: None,
+            prune_modes: PruneModes::none(),
+            pruning_address_filter: None,
+            stats: BlockExecutorStats::default(),
+        }
+    }
+
+    /// Creates a new executor from the given chain spec and database.
+    pub fn new_with_db<DB: StateProvider + 'a>(
+        chain_spec: Arc<ChainSpec>,
+        db: StateProviderDatabase<DB>,
+    ) -> Self {
+        let state = State::builder()
+            .with_database_boxed(Box::new(db))
+            .with_bundle_update()
+            .without_state_clear()
+            .build();
+        EVMProcessor::new_with_state(chain_spec, state)
+    }
+
+    /// Create a new EVM processor with the given revm state.
+    pub fn new_with_state(chain_spec: Arc<ChainSpec>, revm_state: StateDBBox<'a, Error>) -> Self {
+        let mut evm = EVM::new();
+        evm.database(revm_state);
+        EVMProcessor {
+            chain_spec,
+            evm,
+            stack: InspectorStack::new(InspectorStackConfig::default()),
+            receipts: Vec::new(),
+            first_block: None,
+            tip: None,
+            prune_modes: PruneModes::none(),
+            pruning_address_filter: None,
+            stats: BlockExecutorStats::default(),
+        }
+    }
+
+    /// Configures the executor with the given inspectors.
+    pub fn set_stack(&mut self, stack: InspectorStack) {
+        self.stack = stack;
+    }
+
+    /// Returns a mutable reference to the database
+    pub fn db_mut(&mut self) -> &mut StateDBBox<'a, Error> {
+        // Option will be removed from EVM in the future,
+        // as it is always Some.
+        // https://github.com/bluealloy/revm/issues/697
+        self.evm.db().expect("Database inside EVM is always set")
+    }
+
+    fn recover_senders(
+        &mut self,
+        body: &[TransactionSigned],
+        senders: Option<Vec<Address>>,
+    ) -> Result<Vec<Address>, BlockExecutionError> {
+        if let Some(senders) = senders {
+            if body.len() == senders.len() {
+                Ok(senders)
+            } else {
+                Err(BlockValidationError::SenderRecoveryError.into())
+            }
+        } else {
+            let time = Instant::now();
+            let ret = TransactionSigned::recover_signers(body, body.len())
+                .ok_or(BlockValidationError::SenderRecoveryError.into());
+            self.stats.sender_recovery_duration += time.elapsed();
+            ret
+        }
+    }
+
+    /// Initializes the config and block env.
+    fn init_env(&mut self, header: &Header, total_difficulty: U256) {
+        // Set state clear flag.
+        let state_clear_flag =
+            self.chain_spec.fork(Hardfork::SpuriousDragon).active_at_block(header.number);
+
+        self.db_mut().set_state_clear_flag(state_clear_flag);
+
+        fill_cfg_and_block_env(
+            &mut self.evm.env.cfg,
+            &mut self.evm.env.block,
+            &self.chain_spec,
+            header,
+            total_difficulty,
+        );
+    }
+
+    /// Apply post execution state changes, including block rewards, withdrawals, and the
+    /// irregular DAO hardfork state change.
+    pub fn apply_post_execution_state_change(
+        &mut self,
+        block: &Block,
+        total_difficulty: U256,
+    ) -> Result<(), BlockExecutionError> {
+        let mut balance_increments = post_block_balance_increments(
+            &self.chain_spec,
+            block.number,
+            block.difficulty,
+            block.beneficiary,
+            block.timestamp,
+            total_difficulty,
+            &block.ommers,
+            block.withdrawals.as_deref(),
+        );
+
+        // Irregular state change at Ethereum DAO hardfork
+        if self.chain_spec.fork(Hardfork::Dao).transitions_at_block(block.number) {
+            // drain balances from hardcoded addresses.
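+            // (Editorial note: on mainnet this fires exactly once, at DAO fork block
+            // 1_920_000, moving the balances of the hardcoded DAO accounts to the
+            // beneficiary below.)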
+ let drained_balance: u128 = self + .db_mut() + .drain_balances(DAO_HARDKFORK_ACCOUNTS) + .map_err(|_| BlockValidationError::IncrementBalanceFailed)? + .into_iter() + .sum(); + + // return balance to DAO beneficiary. + *balance_increments.entry(DAO_HARDFORK_BENEFICIARY).or_default() += drained_balance; + } + // increment balances + self.db_mut() + .increment_balances(balance_increments.into_iter().map(|(k, v)| (k, v))) + .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; + + Ok(()) + } + + /// Runs a single transaction in the configured environment and proceeds + /// to return the result and state diff (without applying it). + /// + /// Assumes the rest of the block environment has been filled via `init_block_env`. + pub fn transact( + &mut self, + transaction: &TransactionSigned, + sender: Address, + ) -> Result { + // Fill revm structure. + fill_tx_env(&mut self.evm.env.tx, transaction, sender); + + let hash = transaction.hash(); + let out = if self.stack.should_inspect(&self.evm.env, hash) { + // execution with inspector. + let output = self.evm.inspect(&mut self.stack); + tracing::trace!( + target: "evm", + ?hash, ?output, ?transaction, env = ?self.evm.env, + "Executed transaction" + ); + output + } else { + // main execution. + self.evm.transact() + }; + out.map_err(|e| BlockValidationError::EVM { hash, message: format!("{e:?}") }.into()) + } + + /// Runs the provided transactions and commits their state to the run-time database. + /// + /// The returned [BundleStateWithReceipts] can be used to persist the changes to disk, and + /// contains the changes made by each transaction. + /// + /// The changes in [BundleStateWithReceipts] have a transition ID associated with them: there is + /// one transition ID for each transaction (with the first executed tx having transition ID + /// 0, and so on). + /// + /// The second returned value represents the total gas used by this block of transactions. + pub fn execute_transactions( + &mut self, + block: &Block, + total_difficulty: U256, + senders: Option>, + ) -> Result<(Vec, u64), BlockExecutionError> { + // perf: do not execute empty blocks + if block.body.is_empty() { + return Ok((Vec::new(), 0)) + } + + let senders = self.recover_senders(&block.body, senders)?; + + self.init_env(&block.header, total_difficulty); + + let mut cumulative_gas_used = 0; + let mut receipts = Vec::with_capacity(block.body.len()); + for (transaction, sender) in block.body.iter().zip(senders) { + let time = Instant::now(); + // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, + // must be no greater than the block’s gasLimit. + let block_available_gas = block.header.gas_limit - cumulative_gas_used; + if transaction.gas_limit() > block_available_gas { + return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { + transaction_gas_limit: transaction.gas_limit(), + block_available_gas, + } + .into()) + } + // Execute transaction. + let ResultAndState { result, state } = self.transact(transaction, sender)?; + trace!( + target: "evm", + ?transaction, ?result, ?state, + "Executed transaction" + ); + self.stats.execution_duration += time.elapsed(); + let time = Instant::now(); + + self.db_mut().commit(state); + + self.stats.apply_state_duration += time.elapsed(); + + // append gas used + cumulative_gas_used += result.gas_used(); + + // Push transaction changeset and calculate header bloom filter for receipt. 
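+            // (Editorial example, values assumed: for two plain transfers of 21_000 gas
+            // each, the receipts record cumulative_gas_used = 21_000 and 42_000, and the
+            // header's gas_used must equal the final 42_000.)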
+ receipts.push(Receipt { + tx_type: transaction.tx_type(), + // Success flag was added in `EIP-658: Embedding transaction status code in + // receipts`. + success: result.is_success(), + cumulative_gas_used, + // convert to reth log + logs: result.into_logs().into_iter().map(into_reth_log).collect(), + }); + } + + Ok((receipts, cumulative_gas_used)) + } + + /// Execute the block, verify gas usage and apply post-block state changes. + fn execute_inner( + &mut self, + block: &Block, + total_difficulty: U256, + senders: Option>, + ) -> Result, BlockExecutionError> { + let (receipts, cumulative_gas_used) = + self.execute_transactions(block, total_difficulty, senders)?; + + // Check if gas used matches the value set in header. + if block.gas_used != cumulative_gas_used { + return Err(BlockValidationError::BlockGasUsed { + got: cumulative_gas_used, + expected: block.gas_used, + gas_spent_by_tx: self + .receipts + .last() + .map(|block_r| { + block_r + .iter() + .enumerate() + .map(|(id, tx_r)| { + ( + id as u64, + tx_r.as_ref() + .expect("receipts have not been pruned") + .cumulative_gas_used, + ) + }) + .collect() + }) + .unwrap_or_default(), + } + .into()) + } + let time = Instant::now(); + self.apply_post_execution_state_change(block, total_difficulty)?; + self.stats.apply_post_execution_state_changes_duration += time.elapsed(); + + let time = Instant::now(); + let retention = if self.tip.map_or(true, |tip| { + !self.prune_modes.should_prune_account_history(block.number, tip) && + !self.prune_modes.should_prune_storage_history(block.number, tip) + }) { + BundleRetention::Reverts + } else { + BundleRetention::PlainState + }; + self.db_mut().merge_transitions(retention); + self.stats.merge_transitions_duration += time.elapsed(); + + if self.first_block.is_none() { + self.first_block = Some(block.number); + } + + Ok(receipts) + } + + /// Save receipts to the executor. + pub fn save_receipts(&mut self, receipts: Vec) -> Result<(), BlockExecutionError> { + let mut receipts = receipts.into_iter().map(Option::Some).collect(); + // Prune receipts if necessary. + self.prune_receipts(&mut receipts)?; + // Save receipts. + self.receipts.push(receipts); + Ok(()) + } + + /// Prune receipts according to the pruning configuration. + fn prune_receipts( + &mut self, + receipts: &mut Vec>, + ) -> Result<(), PrunePartError> { + let (first_block, tip) = match self.first_block.zip(self.tip) { + Some((block, tip)) => (block, tip), + _ => return Ok(()), + }; + + let block_number = first_block + self.receipts.len() as u64; + + // Block receipts should not be retained + if self.prune_modes.receipts == Some(PruneMode::Full) || + // [`PrunePart::Receipts`] takes priority over [`PrunePart::ContractLogs`] + self.prune_modes.should_prune_receipts(block_number, tip) + { + receipts.clear(); + return Ok(()) + } + + // All receipts from the last 128 blocks are required for blockchain tree, even with + // [`PrunePart::ContractLogs`]. 
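+        // (Editorial example, numbers assumed: with PruneMode::Distance(128) and a tip at
+        // block 10_000, only receipts of blocks further than 128 blocks behind the tip are
+        // considered prunable here.)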
+        let prunable_receipts =
+            PruneMode::Distance(MINIMUM_PRUNING_DISTANCE).should_prune(block_number, tip);
+        if !prunable_receipts {
+            return Ok(())
+        }
+
+        let contract_log_pruner = self.prune_modes.receipts_log_filter.group_by_block(tip, None)?;
+
+        if !contract_log_pruner.is_empty() {
+            let (prev_block, filter) = self.pruning_address_filter.get_or_insert((0, Vec::new()));
+            for (_, addresses) in contract_log_pruner.range(*prev_block..=block_number) {
+                filter.extend(addresses.iter().copied());
+            }
+        }
+
+        for receipt in receipts.iter_mut() {
+            let inner_receipt = receipt.as_ref().expect("receipts have not been pruned");
+
+            // If there is an address filter and it does not contain any of the contract
+            // addresses in the logs, then remove this receipt.
+            if let Some((_, filter)) = &self.pruning_address_filter {
+                if !inner_receipt.logs.iter().any(|log| filter.contains(&log.address)) {
+                    receipt.take();
+                }
+            }
+        }
+
+        Ok(())
+    }
+}
+
+impl<'a> BlockExecutor for EVMProcessor<'a> {
+    fn execute(
+        &mut self,
+        block: &Block,
+        total_difficulty: U256,
+        senders: Option<Vec<Address>>,
+    ) -> Result<(), BlockExecutionError> {
+        let receipts = self.execute_inner(block, total_difficulty, senders)?;
+        self.save_receipts(receipts)
+    }
+
+    fn execute_and_verify_receipt(
+        &mut self,
+        block: &Block,
+        total_difficulty: U256,
+        senders: Option<Vec<Address>>,
+    ) -> Result<(), BlockExecutionError> {
+        // execute block
+        let receipts = self.execute_inner(block, total_difficulty, senders)?;
+
+        // Before Byzantium, receipts contained a state root, which meant that the expensive
+        // state root hashing had to be computed after every transaction. EIP-658 replaced
+        // that root with a simple `is_success` flag.
+        // See more about the EIP here: https://eips.ethereum.org/EIPS/eip-658
+        if self.chain_spec.fork(Hardfork::Byzantium).active_at_block(block.header.number) {
+            let time = Instant::now();
+            if let Err(error) =
+                verify_receipt(block.header.receipts_root, block.header.logs_bloom, receipts.iter())
+            {
+                debug!(target: "evm", ?error, ?receipts, "receipts verification failed");
+                return Err(error)
+            };
+            self.stats.receipt_root_duration += time.elapsed();
+        }
+
+        self.save_receipts(receipts)
+    }
+
+    fn take_output_state(&mut self) -> BundleStateWithReceipts {
+        let receipts = std::mem::take(&mut self.receipts);
+        BundleStateWithReceipts::new(
+            self.evm.db().unwrap().take_bundle(),
+            receipts,
+            self.first_block.unwrap_or_default(),
+        )
+    }
+
+    fn stats(&self) -> BlockExecutorStats {
+        self.stats.clone()
+    }
+
+    fn size_hint(&self) -> Option<usize> {
+        self.evm.db.as_ref().map(|db| db.bundle_size_hint())
+    }
+}
+
+impl<'a> PrunableBlockExecutor for EVMProcessor<'a> {
+    fn set_tip(&mut self, tip: BlockNumber) {
+        self.tip = Some(tip);
+    }
+
+    fn set_prune_modes(&mut self, prune_modes: PruneModes) {
+        self.prune_modes = prune_modes;
+    }
+}
+
+/// Verify receipts
+pub fn verify_receipt<'a>(
+    expected_receipts_root: H256,
+    expected_logs_bloom: Bloom,
+    receipts: impl Iterator<Item = &'a Receipt> + Clone,
+) -> Result<(), BlockExecutionError> {
+    // Check receipts root.
+    let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::<Vec<ReceiptWithBloom>>();
+    let receipts_root = reth_primitives::proofs::calculate_receipt_root(&receipts_with_bloom);
+    if receipts_root != expected_receipts_root {
+        return Err(BlockValidationError::ReceiptRootDiff {
+            got: receipts_root,
+            expected: expected_receipts_root,
+        }
+        .into())
+    }
+
+    // Create header log bloom.
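+    // (Editorial note: the header bloom is the bitwise OR of all receipt blooms, so a
+    // single diverging log in any receipt changes the aggregate and fails this check.)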
+ let logs_bloom = receipts_with_bloom.iter().fold(Bloom::zero(), |bloom, r| bloom | r.bloom); + if logs_bloom != expected_logs_bloom { + return Err(BlockValidationError::BloomLogDiff { + expected: Box::new(expected_logs_bloom), + got: Box::new(logs_bloom), + } + .into()) + } + + Ok(()) +} diff --git a/crates/revm/src/state_change.rs b/crates/revm/src/state_change.rs new file mode 100644 index 000000000000..6b4cbeff55b5 --- /dev/null +++ b/crates/revm/src/state_change.rs @@ -0,0 +1,85 @@ +use reth_consensus_common::calc; +use reth_primitives::{Address, ChainSpec, Hardfork, Header, Withdrawal, U256}; +use std::collections::HashMap; + +/// Collect all balance changes at the end of the block. +/// +/// Balance changes might include the block reward, uncle rewards, withdrawals, or irregular +/// state changes (DAO fork). +#[allow(clippy::too_many_arguments)] +#[inline] +pub fn post_block_balance_increments( + chain_spec: &ChainSpec, + block_number: u64, + block_difficulty: U256, + beneficiary: Address, + block_timestamp: u64, + total_difficulty: U256, + ommers: &[Header], + withdrawals: Option<&[Withdrawal]>, +) -> HashMap { + let mut balance_increments = HashMap::new(); + + // Add block rewards if they are enabled. + if let Some(base_block_reward) = + calc::base_block_reward(chain_spec, block_number, block_difficulty, total_difficulty) + { + // Ommer rewards + for ommer in ommers { + *balance_increments.entry(ommer.beneficiary).or_default() += + calc::ommer_reward(base_block_reward, block_number, ommer.number); + } + + // Full block reward + *balance_increments.entry(beneficiary).or_default() += + calc::block_reward(base_block_reward, ommers.len()); + } + + // process withdrawals + insert_post_block_withdrawals_balance_increments( + chain_spec, + block_timestamp, + withdrawals, + &mut balance_increments, + ); + + balance_increments +} + +/// Returns a map of addresses to their balance increments if shanghai is active at the given +/// timestamp. +#[inline] +pub fn post_block_withdrawals_balance_increments( + chain_spec: &ChainSpec, + block_timestamp: u64, + withdrawals: &[Withdrawal], +) -> HashMap { + let mut balance_increments = HashMap::with_capacity(withdrawals.len()); + insert_post_block_withdrawals_balance_increments( + chain_spec, + block_timestamp, + Some(withdrawals), + &mut balance_increments, + ); + balance_increments +} + +/// Applies all withdrawal balance increments if shanghai is active at the given timestamp to the +/// given `balance_increments` map. 
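+///
+/// Illustrative note (amounts assumed): withdrawal amounts are denominated in gwei and
+/// `amount_wei()` scales them by 10^9, so a withdrawal of 1_000_000_000 gwei credits
+/// exactly 1 ETH (10^18 wei) to the withdrawal address.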
+#[inline] +pub fn insert_post_block_withdrawals_balance_increments( + chain_spec: &ChainSpec, + block_timestamp: u64, + withdrawals: Option<&[Withdrawal]>, + balance_increments: &mut HashMap, +) { + // Process withdrawals + if chain_spec.fork(Hardfork::Shanghai).active_at_timestamp(block_timestamp) { + if let Some(withdrawals) = withdrawals { + for withdrawal in withdrawals { + *balance_increments.entry(withdrawal.address).or_default() += + withdrawal.amount_wei(); + } + } + } +} diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index a18a5cf6ff06..264c567365b8 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -17,7 +17,7 @@ use reth_primitives::{ }; use reth_provider::{BlockReaderIdExt, HeaderProvider, StateProviderBox}; use reth_revm::{ - database::{State, SubState}, + database::{StateProviderDatabase, SubState}, env::tx_env_with_recovered, tracing::{ js::{JsDbRequest, JsInspector}, @@ -96,7 +96,7 @@ where .eth_api .spawn_with_state_at_block(at, move |state| { let mut results = Vec::with_capacity(transactions.len()); - let mut db = SubState::new(State::new(state)); + let mut db = SubState::new(StateProviderDatabase::new(state)); let mut transactions = transactions.into_iter().peekable(); while let Some(tx) = transactions.next() { @@ -190,7 +190,7 @@ where // configure env for the target transaction let tx = transaction.into_recovered(); - let mut db = SubState::new(State::new(state)); + let mut db = SubState::new(StateProviderDatabase::new(state)); // replay all transactions prior to the targeted transaction replay_transactions_until( &mut db, @@ -289,7 +289,7 @@ where // because JSTracer and all JS types are not Send let (_, _, at) = self.inner.eth_api.evm_env_at(at).await?; let state = self.inner.eth_api.state_at(at)?; - let db = SubState::new(State::new(state)); + let db = SubState::new(StateProviderDatabase::new(state)); let has_state_overrides = overrides.has_state(); // If the caller provided state overrides we need to clone the DB so the js @@ -379,7 +379,7 @@ where .eth_api .spawn_with_state_at_block(at.into(), move |state| { let mut results = Vec::with_capacity(bundles.len()); - let mut db = SubState::new(State::new(state)); + let mut db = SubState::new(StateProviderDatabase::new(state)); if replay_block_txs { // only need to replay the transactions in the block if not all transactions are @@ -585,9 +585,15 @@ where let db = if let Some(db) = db { let CacheDB { accounts, contracts, logs, block_hashes, .. 
} = db; - CacheDB { accounts, contracts, logs, block_hashes, db: State::new(state) } + CacheDB { + accounts, + contracts, + logs, + block_hashes, + db: StateProviderDatabase::new(state), + } } else { - CacheDB::new(State::new(state)) + CacheDB::new(StateProviderDatabase::new(state)) }; let mut stream = ReceiverStream::new(rx); diff --git a/crates/rpc/rpc/src/eth/api/call.rs b/crates/rpc/rpc/src/eth/api/call.rs index 6f27d1a3b338..246ae4050928 100644 --- a/crates/rpc/rpc/src/eth/api/call.rs +++ b/crates/rpc/rpc/src/eth/api/call.rs @@ -19,7 +19,7 @@ use reth_provider::{ }; use reth_revm::{ access_list::AccessListInspector, - database::{State, SubState}, + database::{StateProviderDatabase, SubState}, env::tx_env_with_recovered, }; use reth_rpc_types::{ @@ -110,7 +110,7 @@ where self.spawn_with_state_at_block(at.into(), move |state| { let mut results = Vec::with_capacity(transactions.len()); - let mut db = SubState::new(State::new(state)); + let mut db = SubState::new(StateProviderDatabase::new(state)); if replay_block_txs { // only need to replay the transactions in the block if not all transactions are @@ -199,7 +199,7 @@ where // Configure the evm env let mut env = build_call_evm_env(cfg, block, request)?; - let mut db = SubState::new(State::new(state)); + let mut db = SubState::new(StateProviderDatabase::new(state)); // if the request is a simple transfer we can optimize if env.tx.data.is_empty() { @@ -358,7 +358,7 @@ where // env.cfg.disable_base_fee = true; - let mut db = SubState::new(State::new(state)); + let mut db = SubState::new(StateProviderDatabase::new(state)); if request.gas.is_none() && env.tx.gas_price > U256::ZERO { // no gas limit was provided in the request, so we need to cap the request's gas limit @@ -400,7 +400,7 @@ where fn map_out_of_gas_err( env_gas_limit: U256, mut env: Env, - mut db: &mut CacheDB>, + mut db: &mut CacheDB>, ) -> EthApiError where S: StateProvider, diff --git a/crates/rpc/rpc/src/eth/api/pending_block.rs b/crates/rpc/rpc/src/eth/api/pending_block.rs index 3700de63d6f0..0cf7c36e1dbd 100644 --- a/crates/rpc/rpc/src/eth/api/pending_block.rs +++ b/crates/rpc/rpc/src/eth/api/pending_block.rs @@ -6,12 +6,10 @@ use reth_primitives::{ proofs, Block, Header, IntoRecoveredTransaction, Receipt, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT, H256, U256, }; -use reth_provider::{PostState, StateProviderFactory}; -use reth_revm::{ - database::State, env::tx_env_with_recovered, executor::commit_state_changes, into_reth_log, -}; +use reth_provider::{BundleStateWithReceipts, StateProviderFactory}; +use reth_revm::{database::StateProviderDatabase, env::tx_env_with_recovered, into_reth_log}; use reth_transaction_pool::TransactionPool; -use revm::db::CacheDB; +use revm::{db::states::bundle_state::BundleRetention, DatabaseCommit, State}; use revm_primitives::{BlockEnv, CfgEnv, EVMError, Env, InvalidTransaction, ResultAndState}; use std::time::Instant; @@ -40,9 +38,9 @@ impl PendingBlockEnv { let Self { cfg, block_env, origin } = self; let parent_hash = origin.build_target_hash(); - let state = State::new(client.history_by_block_hash(parent_hash)?); - let mut db = CacheDB::new(state); - let mut post_state = PostState::default(); + let state_provider = client.history_by_block_hash(parent_hash)?; + let state = StateProviderDatabase::new(&state_provider); + let mut db = State::builder().with_database(Box::new(state)).with_bundle_update().build(); let mut cumulative_gas_used = 0; let block_gas_limit: u64 = block_env.gas_limit.try_into().unwrap_or(u64::MAX); @@ -52,6 +50,8 @@ impl 
PendingBlockEnv { let mut executed_txs = Vec::new(); let mut best_txs = pool.best_transactions_with_base_fee(base_fee); + let mut receipts = Vec::new(); + while let Some(pool_tx) = best_txs.next() { // ensure we still have capacity for this transaction if cumulative_gas_used + pool_tx.gas_limit() > block_gas_limit { @@ -93,34 +93,35 @@ impl PendingBlockEnv { } } }; + // commit changes + db.commit(state); let gas_used = result.gas_used(); - // commit changes - commit_state_changes(&mut db, &mut post_state, block_number, state, true); - // add gas used by the transaction to cumulative gas used, before creating the receipt cumulative_gas_used += gas_used; // Push transaction changeset and calculate header bloom filter for receipt. - post_state.add_receipt( - block_number, - Receipt { - tx_type: tx.tx_type(), - success: result.is_success(), - cumulative_gas_used, - logs: result.logs().into_iter().map(into_reth_log).collect(), - }, - ); + receipts.push(Some(Receipt { + tx_type: tx.tx_type(), + success: result.is_success(), + cumulative_gas_used, + logs: result.logs().into_iter().map(into_reth_log).collect(), + })); + // append transaction to the list of executed transactions executed_txs.push(tx.into_signed()); } + // merge all transitions into bundle state. + db.merge_transitions(BundleRetention::PlainState); + + let bundle = BundleStateWithReceipts::new(db.take_bundle(), vec![receipts], block_number); - let receipts_root = post_state.receipts_root(block_number); - let logs_bloom = post_state.logs_bloom(block_number); + let receipts_root = bundle.receipts_root_slow(block_number).expect("Block is present"); + let logs_bloom = bundle.block_logs_bloom(block_number).expect("Block is present"); // calculate the state root - let state_root = db.db.state().state_root(post_state)?; + let state_root = state_provider.state_root(bundle)?; // create the block header let transactions_root = proofs::calculate_transaction_root(&executed_txs); diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 3aa07d973d36..d28a8a960a0d 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -26,7 +26,7 @@ use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderBox, StateProviderFactory, }; use reth_revm::{ - database::{State, SubState}, + database::{StateProviderDatabase, SubState}, env::{fill_block_env_with_coinbase, tx_env_with_recovered}, tracing::{TracingInspector, TracingInspectorConfig}, }; @@ -43,7 +43,7 @@ use revm::{ use revm_primitives::{utilities::create_address, Env, ResultAndState, SpecId}; /// Helper alias type for the state's [CacheDB] -pub(crate) type StateCacheDB<'r> = CacheDB>>; +pub(crate) type StateCacheDB<'r> = CacheDB>>; /// Commonly used transaction related functions for the [EthApi] type in the `eth_` namespace. 
/// @@ -530,7 +530,7 @@ where .tracing_call_pool .spawn(move || { let state = this.state_at(at)?; - let mut db = SubState::new(State::new(state)); + let mut db = SubState::new(StateProviderDatabase::new(state)); let env = prepare_call_env( cfg, @@ -581,7 +581,7 @@ where F: FnOnce(TracingInspector, ResultAndState) -> EthResult, { self.with_state_at_block(at, |state| { - let db = SubState::new(State::new(state)); + let db = SubState::new(StateProviderDatabase::new(state)); let mut inspector = TracingInspector::new(config); let (res, _) = inspect(db, env, &mut inspector)?; @@ -604,7 +604,7 @@ where R: Send + 'static, { self.spawn_with_state_at_block(at, move |state| { - let db = SubState::new(State::new(state)); + let db = SubState::new(StateProviderDatabase::new(state)); let mut inspector = TracingInspector::new(config); let (res, _, db) = inspect_and_return_db(db, env, &mut inspector)?; @@ -662,7 +662,7 @@ where let block_txs = block.body; self.spawn_with_state_at_block(parent_block.into(), move |state| { - let mut db = SubState::new(State::new(state)); + let mut db = SubState::new(StateProviderDatabase::new(state)); // replay all transactions prior to the targeted transaction replay_transactions_until(&mut db, cfg.clone(), block_env.clone(), block_txs, tx.hash)?; diff --git a/crates/rpc/rpc/src/eth/cache/mod.rs b/crates/rpc/rpc/src/eth/cache/mod.rs index 61424c1b1b25..dfea9821dc68 100644 --- a/crates/rpc/rpc/src/eth/cache/mod.rs +++ b/crates/rpc/rpc/src/eth/cache/mod.rs @@ -434,7 +434,9 @@ where for block_receipts in receipts { this.on_new_receipts( block_receipts.block_hash, - Ok(Some(block_receipts.receipts)), + Ok(Some( + block_receipts.receipts.into_iter().flatten().collect(), + )), ); } } @@ -460,7 +462,7 @@ enum CacheAction { struct BlockReceipts { block_hash: H256, - receipts: Vec, + receipts: Vec>, } /// Awaits for new chain events and directly inserts them into the cache so they're available @@ -481,7 +483,7 @@ where for block in &blocks { let block_receipts = BlockReceipts { block_hash: block.hash, - receipts: state.receipts(block.number).to_vec(), + receipts: state.receipts_by_block(block.number).to_vec(), }; receipts.push(block_receipts); } diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index 6062b00100e6..29b5d3aac35f 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -278,6 +278,9 @@ pub enum RpcInvalidTransactionError { /// The transaction is before Spurious Dragon and has a chain ID #[error("Transactions before Spurious Dragon should not have a chain ID.")] OldLegacyChainId, + /// The transitions is before Berlin and has access list + #[error("Transactions before Berlin should not have access list")] + AccessListNotSupported, } impl RpcInvalidTransactionError { @@ -350,7 +353,7 @@ impl From for RpcInvalidTransactionError { RpcInvalidTransactionError::GasTooHigh } InvalidTransaction::RejectCallerWithCode => RpcInvalidTransactionError::SenderNoEOA, - InvalidTransaction::LackOfFundForGasLimit { .. } => { + InvalidTransaction::LackOfFundForMaxFee { .. } => { RpcInvalidTransactionError::InsufficientFunds } InvalidTransaction::OverflowPaymentInTransaction => { @@ -364,6 +367,9 @@ impl From for RpcInvalidTransactionError { } InvalidTransaction::NonceTooHigh { .. } => RpcInvalidTransactionError::NonceTooHigh, InvalidTransaction::NonceTooLow { .. 
} => RpcInvalidTransactionError::NonceTooLow, + InvalidTransaction::AccessListNotSupported => { + RpcInvalidTransactionError::AccessListNotSupported + } } } } diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index ed67c66c6f7d..260bb61965d4 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -16,7 +16,7 @@ use reth_provider::{ BlockReader, ChainSpecProvider, EvmEnvProvider, StateProviderBox, StateProviderFactory, }; use reth_revm::{ - database::{State, SubState}, + database::{StateProviderDatabase, SubState}, env::tx_env_with_recovered, tracing::{ parity::populate_account_balance_nonce_diffs, TracingInspector, TracingInspectorConfig, @@ -145,7 +145,7 @@ where .eth_api .spawn_with_state_at_block(at, move |state| { let mut results = Vec::with_capacity(calls.len()); - let mut db = SubState::new(State::new(state)); + let mut db = SubState::new(StateProviderDatabase::new(state)); let mut calls = calls.into_iter().peekable(); @@ -278,8 +278,8 @@ where /// 2. configures the EVM evn /// 3. loops over all transactions and executes them /// 4. calls the callback with the transaction info, the execution result, the changed state - /// _after_ the transaction [State] and the database that points to the state right _before_ the - /// transaction. + /// _after_ the transaction [StateProviderDatabase] and the database that points to the state + /// right _before_ the transaction. async fn trace_block_with( &self, block_id: BlockId, @@ -293,7 +293,7 @@ where TracingInspector, ExecutionResult, &'a revm_primitives::State, - &'a CacheDB>>, + &'a CacheDB>>, ) -> EthResult + Send + 'static, @@ -321,7 +321,7 @@ where .eth_api .spawn_with_state_at_block(state_at.into(), move |state| { let mut results = Vec::with_capacity(transactions.len()); - let mut db = SubState::new(State::new(state)); + let mut db = SubState::new(StateProviderDatabase::new(state)); let mut transactions = transactions.into_iter().enumerate().peekable(); @@ -404,8 +404,10 @@ where RewardAction { author: block.header.beneficiary, reward_type: RewardType::Uncle, - value: block_reward(base_block_reward, block.ommers.len()) - - U256::from(base_block_reward), + value: U256::from( + block_reward(base_block_reward, block.ommers.len()) - + base_block_reward, + ), }, )); } diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index a1770fa76c32..6f8ef3482338 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -23,6 +23,9 @@ reth-codecs = { path = "../storage/codecs" } reth-provider.workspace = true reth-trie = { path = "../trie" } +# revm +revm.workspace = true + # async tokio = { workspace = true, features = ["sync"] } tokio-stream.workspace = true diff --git a/crates/stages/src/pipeline/mod.rs b/crates/stages/src/pipeline/mod.rs index d15fefacf679..8b617a178a57 100644 --- a/crates/stages/src/pipeline/mod.rs +++ b/crates/stages/src/pipeline/mod.rs @@ -424,7 +424,7 @@ where .max(1); Ok(ControlFlow::Unwind { target: unwind_to, bad_block: local_head }) } else if let StageError::Validation { block, error } = err { - warn!( + error!( target: "sync::pipeline", stage = %stage_id, bad_block = %block.number, @@ -456,7 +456,7 @@ where error: BlockExecutionError::Validation(error), } = err { - warn!( + error!( target: "sync::pipeline", stage = %stage_id, bad_block = %block.number, diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index ef9e1ce49014..e2e57810de09 100644 --- a/crates/stages/src/stages/execution.rs +++ 
b/crates/stages/src/stages/execution.rs @@ -18,10 +18,13 @@ use reth_primitives::{ BlockNumber, Header, PruneModes, U256, }; use reth_provider::{ - post_state::PostState, BlockExecutor, BlockReader, DatabaseProviderRW, ExecutorFactory, - HeaderProvider, LatestStateProviderRef, ProviderError, + BlockReader, DatabaseProviderRW, ExecutorFactory, HeaderProvider, LatestStateProviderRef, + OriginalValuesKnown, ProviderError, +}; +use std::{ + ops::RangeInclusive, + time::{Duration, Instant}, }; -use std::{ops::RangeInclusive, time::Instant}; use tracing::*; /// The execution stage executes all transactions and @@ -120,18 +123,24 @@ impl ExecutionStage { // Build executor let mut executor = - self.executor_factory.with_sp(LatestStateProviderRef::new(provider.tx_ref())); + self.executor_factory.with_state(LatestStateProviderRef::new(provider.tx_ref())); + executor.set_prune_modes(prune_modes); + executor.set_tip(max_block); // Progress tracking let mut stage_progress = start_block; let mut stage_checkpoint = execution_checkpoint(provider, start_block, max_block, input.checkpoint())?; + let mut fetch_block_duration = Duration::default(); + let mut execution_duration = Duration::default(); + debug!(target: "sync::stages::execution", start = start_block, end = max_block, "Executing range"); // Execute block range - let mut state = PostState::default(); - state.add_prune_modes(prune_modes); + + let mut cumulative_gas = 0; for block_number in start_block..=max_block { + let time = Instant::now(); let td = provider .header_td_by_number(block_number)? .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))?; @@ -139,17 +148,21 @@ impl ExecutionStage { .block_with_senders(block_number)? .ok_or_else(|| ProviderError::BlockNotFound(block_number.into()))?; + fetch_block_duration += time.elapsed(); + + cumulative_gas += block.gas_used; + // Configure the executor to use the current state. 
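             // (Editorial note: `cumulative_gas`, accumulated above from each block's
             // gas_used, feeds the `is_end_of_batch` check below, so a run of gas-heavy
             // blocks can end a batch well before `max_blocks` is reached.)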
 trace!(target: "sync::stages::execution", number = block_number, txs = block.body.len(), "Executing block");
+            let time = Instant::now();
 
             // Execute the block
             let (block, senders) = block.into_components();
-            let block_state = executor
-                .execute_and_verify_receipt(&block, td, Some(senders))
-                .map_err(|error| StageError::ExecutionError {
-                    block: block.header.clone().seal_slow(),
-                    error,
-                })?;
+            executor.execute_and_verify_receipt(&block, td, Some(senders)).map_err(|error| {
+                StageError::ExecutionError { block: block.header.clone().seal_slow(), error }
+            })?;
+
+            execution_duration += time.elapsed();
 
             // Gas metrics
             if let Some(metrics_tx) = &mut self.metrics_tx {
@@ -157,23 +170,32 @@
                 metrics_tx.send(MetricEvent::ExecutionStageGas { gas: block.header.gas_used });
             }
 
-            // Merge state changes
-            state.extend(block_state);
             stage_progress = block_number;
+
             stage_checkpoint.progress.processed += block.gas_used;
 
             // Check if we should commit now
-            if self.thresholds.is_end_of_batch(block_number - start_block, state.size_hint() as u64)
-            {
+            let bundle_size_hint = executor.size_hint().unwrap_or_default() as u64;
+            if self.thresholds.is_end_of_batch(
+                block_number - start_block,
+                bundle_size_hint,
+                cumulative_gas,
+            ) {
                 break
             }
         }
+        let time = Instant::now();
+        let state = executor.take_output_state();
+        let write_preparation_duration = time.elapsed();
 
-        // Write remaining changes
-        trace!(target: "sync::stages::execution", accounts = state.accounts().len(), "Writing updated state to database");
-        let start = Instant::now();
-        state.write_to_db(provider.tx_ref(), max_block)?;
-        trace!(target: "sync::stages::execution", took = ?start.elapsed(), "Wrote state");
+        let time = Instant::now();
+        // write output
+        state.write_to_db(provider.tx_ref(), OriginalValuesKnown::Yes)?;
+        let db_write_duration = time.elapsed();
+        info!(target: "sync::stages::execution", block_fetch=?fetch_block_duration, execution=?execution_duration,
+            write_preparation=?write_preparation_duration, write=?db_write_duration, "Execution duration.");
+
+        executor.stats().log_info();
 
         let done = stage_progress == max_block;
         Ok(ExecOutput {
@@ -442,26 +464,42 @@ impl Stage for ExecutionStage {
 /// 
 /// If either of the thresholds (`max_blocks` and `max_changes`) is hit, then the execution stage
 /// commits all pending changes to the database.
-#[derive(Debug)]
+///
+/// A third threshold, `max_cumulative_gas`, can be set to also commit based on the total gas
+/// used by the batch, which frees up memory.
+#[derive(Debug, Clone)]
 pub struct ExecutionStageThresholds {
     /// The maximum number of blocks to process before the execution stage commits.
     pub max_blocks: Option<u64>,
     /// The maximum amount of state changes to keep in memory before the execution stage commits.
     pub max_changes: Option<u64>,
+    /// The maximum amount of cumulative gas used in the batch.
+    pub max_cumulative_gas: Option<u64>,
 }
 
 impl Default for ExecutionStageThresholds {
     fn default() -> Self {
-        Self { max_blocks: Some(500_000), max_changes: Some(5_000_000) }
+        Self {
+            max_blocks: Some(500_000),
+            max_changes: Some(5_000_000),
+            // 30M gas per block over 50k blocks
+            max_cumulative_gas: Some(30_000_000 * 50_000),
+        }
     }
 }
 
 impl ExecutionStageThresholds {
     /// Check if the batch thresholds have been hit.
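    /// Illustrative check (values assumed): with the defaults above, a batch of 400_000
    /// blocks averaging 4M gas each accumulates 1.6T cumulative gas and ends the batch
    /// via `max_cumulative_gas` (1.5T) before the 500_000-block `max_blocks` limit.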
#[inline] - pub fn is_end_of_batch(&self, blocks_processed: u64, changes_processed: u64) -> bool { + pub fn is_end_of_batch( + &self, + blocks_processed: u64, + changes_processed: u64, + cumulative_gas_used: u64, + ) -> bool { blocks_processed >= self.max_blocks.unwrap_or(u64::MAX) || - changes_processed >= self.max_changes.unwrap_or(u64::MAX) + changes_processed >= self.max_changes.unwrap_or(u64::MAX) || + cumulative_gas_used >= self.max_cumulative_gas.unwrap_or(u64::MAX) } } @@ -485,7 +523,11 @@ mod tests { Factory::new(Arc::new(ChainSpecBuilder::mainnet().berlin_activated().build())); ExecutionStage::new( factory, - ExecutionStageThresholds { max_blocks: Some(100), max_changes: None }, + ExecutionStageThresholds { + max_blocks: Some(100), + max_changes: None, + max_cumulative_gas: None, + }, MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, PruneModes::none(), ) diff --git a/crates/stages/src/stages/mod.rs b/crates/stages/src/stages/mod.rs index 70ce03c5038b..deb25b3c0618 100644 --- a/crates/stages/src/stages/mod.rs +++ b/crates/stages/src/stages/mod.rs @@ -66,6 +66,7 @@ mod tests { use std::sync::Arc; #[tokio::test] + #[ignore] async fn test_prune() { let test_tx = TestTransaction::default(); let factory = Arc::new(ProviderFactory::new(test_tx.tx.as_ref(), MAINNET.clone())); @@ -129,7 +130,11 @@ mod tests { // configuration let mut execution_stage = ExecutionStage::new( Factory::new(Arc::new(ChainSpecBuilder::mainnet().berlin_activated().build())), - ExecutionStageThresholds { max_blocks: Some(100), max_changes: None }, + ExecutionStageThresholds { + max_blocks: Some(100), + max_changes: None, + max_cumulative_gas: None, + }, MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, prune_modes.clone(), ); diff --git a/crates/stages/src/stages/tx_lookup.rs b/crates/stages/src/stages/tx_lookup.rs index 43700ab3d171..d0bdacb6ea64 100644 --- a/crates/stages/src/stages/tx_lookup.rs +++ b/crates/stages/src/stages/tx_lookup.rs @@ -120,7 +120,6 @@ impl Stage for TransactionLookupStage { } }); } - let mut tx_list = Vec::with_capacity(transaction_count); // Iterate over channels and append the tx hashes to be sorted out later @@ -147,7 +146,6 @@ impl Stage for TransactionLookupStage { .unwrap_or_default(); // if txhash_cursor.last() is None we will do insert. `zip` would return none if any item is // none. if it is some and if first is smaller than last, we will do append. 
- for (tx_hash, id) in tx_list { if insert { txhash_cursor.insert(tx_hash, id)?; diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 45382391b567..32732f57472f 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -27,16 +27,19 @@ tracing.workspace = true auto_impl = "1.0" itertools.workspace = true pin-project.workspace = true -derive_more = "0.99" parking_lot.workspace = true # test-utils reth-rlp = { workspace = true, optional = true } +# parallel utils +rayon = "1.7" + [dev-dependencies] reth-db = { path = "../db", features = ["test-utils"] } reth-primitives = { workspace = true, features = ["arbitrary", "test-utils"] } reth-rlp.workspace = true +revm.workspace = true reth-trie = { path = "../../trie", features = ["test-utils"] } reth-interfaces = { workspace = true, features = ["test-utils"] } parking_lot.workspace = true diff --git a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs new file mode 100644 index 000000000000..5936cb940109 --- /dev/null +++ b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs @@ -0,0 +1,1198 @@ +use reth_db::{ + cursor::{DbCursorRO, DbCursorRW}, + tables, + transaction::{DbTx, DbTxMut}, +}; +use reth_interfaces::db::DatabaseError; +use reth_primitives::{ + bloom::logs_bloom, keccak256, proofs::calculate_receipt_root_ref, Account, Address, + BlockNumber, Bloom, Bytecode, Log, Receipt, StorageEntry, H256, U256, +}; +use reth_revm_primitives::{ + db::states::BundleState, into_reth_acc, into_revm_acc, primitives::AccountInfo, +}; +use reth_trie::{ + hashed_cursor::{HashedPostState, HashedPostStateCursorFactory, HashedStorage}, + StateRoot, StateRootError, +}; +use std::collections::HashMap; + +pub use reth_revm_primitives::db::states::OriginalValuesKnown; + +use crate::{StateChanges, StateReverts}; + +/// Bundle state of post execution changes and reverts +#[derive(Default, Debug, Clone, PartialEq, Eq)] +pub struct BundleStateWithReceipts { + /// Bundle state with reverts. + bundle: BundleState, + /// The collection of receipts. + /// Outer vector stores receipts for each block sequentially. + /// The inner vector stores receipts ordered by transaction number. + /// + /// If receipt is None it means it is pruned. + receipts: Vec>>, + /// First block of bundle state. + first_block: BlockNumber, +} + +/// Type used to initialize revms bundle state. +pub type BundleStateInit = + HashMap, Option, HashMap)>; + +/// Types used inside RevertsInit to initialize revms reverts. +pub type AccountRevertInit = (Option>, Vec); + +/// Type used to initialize revms reverts. +pub type RevertsInit = HashMap>; + +impl BundleStateWithReceipts { + /// Create Bundle State. + pub fn new( + bundle: BundleState, + receipts: Vec>>, + first_block: BlockNumber, + ) -> Self { + Self { bundle, receipts, first_block } + } + + /// Create new bundle state with receipts. 
+ pub fn new_init( + state_init: BundleStateInit, + revert_init: RevertsInit, + contracts_init: Vec<(H256, Bytecode)>, + receipts: Vec>>, + first_block: BlockNumber, + ) -> Self { + // sort reverts by block number + let mut reverts = revert_init.into_iter().collect::>(); + reverts.sort_unstable_by_key(|a| a.0); + + // initialize revm bundle + let bundle = BundleState::new( + state_init.into_iter().map(|(address, (original, present, storage))| { + ( + address, + original.map(into_revm_acc), + present.map(into_revm_acc), + storage.into_iter().map(|(k, s)| (k.into(), s)).collect(), + ) + }), + reverts.into_iter().map(|(_, reverts)| { + // does not needs to be sorted, it is done when taking reverts. + reverts.into_iter().map(|(address, (original, storage))| { + ( + address, + original.map(|i| i.map(into_revm_acc)), + storage.into_iter().map(|entry| (entry.key.into(), entry.value)), + ) + }) + }), + contracts_init.into_iter().map(|(code_hash, bytecode)| (code_hash, bytecode.0)), + ); + + Self { bundle, receipts, first_block } + } + + /// Return revm bundle state. + pub fn state(&self) -> &BundleState { + &self.bundle + } + + /// Set first block. + pub fn set_first_block(&mut self, first_block: BlockNumber) { + self.first_block = first_block; + } + + /// Return iterator over all accounts + pub fn accounts_iter(&self) -> impl Iterator)> { + self.bundle.state().iter().map(|(a, acc)| (*a, acc.info.as_ref())) + } + + /// Get account if account is known. + pub fn account(&self, address: &Address) -> Option> { + self.bundle.account(address).map(|a| a.info.clone().map(into_reth_acc)) + } + + /// Get storage if value is known. + /// + /// This means that depending on status we can potentially return U256::ZERO. + pub fn storage(&self, address: &Address, storage_key: U256) -> Option { + self.bundle.account(address).and_then(|a| a.storage_slot(storage_key)) + } + + /// Return bytecode if known. + pub fn bytecode(&self, code_hash: &H256) -> Option { + self.bundle.bytecode(code_hash).map(Bytecode) + } + + /// Hash all changed accounts and storage entries that are currently stored in the post state. + /// + /// # Returns + /// + /// The hashed post state. + pub fn hash_state_slow(&self) -> HashedPostState { + //let mut storages = BTreeMap::default(); + let mut hashed_state = HashedPostState::default(); + + for (address, account) in self.bundle.state() { + let hashed_address = keccak256(address); + if let Some(account) = &account.info { + hashed_state.insert_account(hashed_address, into_reth_acc(account.clone())) + } else { + hashed_state.insert_cleared_account(hashed_address); + } + + // insert storage. + let mut hashed_storage = HashedStorage::new(account.status.was_destroyed()); + + for (key, value) in account.storage.iter() { + let hashed_key = keccak256(H256(key.to_be_bytes())); + if value.present_value == U256::ZERO { + hashed_storage.insert_zero_valued_slot(hashed_key); + } else { + hashed_storage.insert_non_zero_valued_storage(hashed_key, value.present_value); + } + } + hashed_state.insert_hashed_storage(hashed_address, hashed_storage) + } + hashed_state.sorted() + } + + /// Calculate the state root for this [BundleState]. + /// Internally, function calls [Self::hash_state_slow] to obtain the [HashedPostState]. + /// Afterwards, it retrieves the prefixsets from the [HashedPostState] and uses them to + /// calculate the incremental state root. 
+ /// + /// # Example + /// + /// ``` + /// use reth_primitives::{Account, U256}; + /// use reth_provider::BundleStateWithReceipts; + /// use reth_db::{test_utils::create_test_rw_db, database::Database}; + /// use std::collections::HashMap; + /// + /// // Initialize the database + /// let db = create_test_rw_db(); + /// + /// // Initialize the bundle state + /// let bundle = BundleStateWithReceipts::new_init( + /// HashMap::from([( + /// [0x11;20].into(), + /// ( + /// None, + /// Some(Account { nonce: 1, balance: U256::from(10), bytecode_hash: None }), + /// HashMap::from([]), + /// ), + /// )]), + /// HashMap::from([]), + /// vec![], + /// vec![], + /// 0, + /// ); + /// + /// // Calculate the state root + /// let tx = db.tx().expect("failed to create transaction"); + /// let state_root = bundle.state_root_slow(&tx); + /// ``` + /// + /// # Returns + /// + /// The state root for this [BundleState]. + pub fn state_root_slow<'a, 'tx, TX: DbTx<'tx>>( + &self, + tx: &'a TX, + ) -> Result { + let hashed_post_state = self.hash_state_slow(); + let (account_prefix_set, storage_prefix_set) = hashed_post_state.construct_prefix_sets(); + let hashed_cursor_factory = HashedPostStateCursorFactory::new(tx, &hashed_post_state); + StateRoot::new(tx) + .with_hashed_cursor_factory(&hashed_cursor_factory) + .with_changed_account_prefixes(account_prefix_set) + .with_changed_storage_prefixes(storage_prefix_set) + .root() + } + + /// Transform block number to the index of block. + fn block_number_to_index(&self, block_number: BlockNumber) -> Option { + if self.first_block > block_number { + return None + } + let index = block_number - self.first_block; + if index >= self.receipts.len() as u64 { + return None + } + Some(index as usize) + } + + /// Returns an iterator over all block logs. + pub fn logs(&self, block_number: BlockNumber) -> Option> { + let index = self.block_number_to_index(block_number)?; + Some(self.receipts[index].iter().filter_map(|r| Some(r.as_ref()?.logs.iter())).flatten()) + } + + /// Return blocks logs bloom + pub fn block_logs_bloom(&self, block_number: BlockNumber) -> Option { + Some(logs_bloom(self.logs(block_number)?)) + } + + /// Returns the receipt root for all recorded receipts. + /// Note: this function calculated Bloom filters for every receipt and created merkle trees + /// of receipt. This is a expensive operation. + pub fn receipts_root_slow(&self, block_number: BlockNumber) -> Option { + let index = self.block_number_to_index(block_number)?; + let block_receipts = + self.receipts[index].iter().map(Option::as_ref).collect::>>()?; + Some(calculate_receipt_root_ref(&block_receipts)) + } + + /// Return reference to receipts. + pub fn receipts(&self) -> &Vec>> { + &self.receipts + } + + /// Return all block receipts + pub fn receipts_by_block(&self, block_number: BlockNumber) -> &[Option] { + let Some(index) = self.block_number_to_index(block_number) else { return &[] }; + &self.receipts[index] + } + + /// Is bundle state empty of blocks. + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Number of blocks in bundle state. + pub fn len(&self) -> usize { + self.receipts.len() + } + + /// Return first block of the bundle + pub fn first_block(&self) -> BlockNumber { + self.first_block + } + + /// Return last block of the bundle. + pub fn last_block(&self) -> BlockNumber { + self.first_block + self.len() as BlockNumber + } + + /// Revert to given block number. 
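+    ///
+    /// For example (values assumed): a bundle covering blocks 10..=14 that is reverted to
+    /// block 12 drops the receipts and state reverts of blocks 13 and 14, while block 12
+    /// itself stays inside the bundle.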
+    /// Revert the state to the given block number.
+    ///
+    /// Returns false if the block number is outside the bundle's range (in the future or before
+    /// the bundle's first block); nothing is reverted in that case.
+    ///
+    /// Note: The given block number itself will stay inside the bundle state.
+    pub fn revert_to(&mut self, block_number: BlockNumber) -> bool {
+        let Some(index) = self.block_number_to_index(block_number) else { return false };
+
+        // +1 is the number of blocks we keep, as the block at `index` is included.
+        let new_len = index + 1;
+        let rm_trx: usize = self.len() - new_len;
+
+        // remove receipts
+        self.receipts.truncate(new_len);
+        // Revert the last `rm_trx` block transitions.
+        self.bundle.revert(rm_trx);
+
+        true
+    }
+
+    /// Detach the lower part of the chain and return it.
+    /// The specified block number will be included in the detached part.
+    ///
+    /// The plain state of the detached part will contain some additional information that is an
+    /// artifact of the lower part of the state.
+    ///
+    /// If the block number is in the future, None is returned.
+    pub fn split_at(&mut self, block_number: BlockNumber) -> Option<Self> {
+        let last_block = self.last_block();
+        let first_block = self.first_block;
+        if block_number >= last_block {
+            return None
+        }
+        if block_number < first_block {
+            return Some(Self::default())
+        }
+
+        // The detached block number should be included, so we add +1.
+        // For example, if the block number equals first_block, the number of
+        // detached blocks should be 1.
+        let num_of_detached_block = (block_number - first_block) + 1;
+
+        let mut detached_bundle_state: BundleStateWithReceipts = self.clone();
+        detached_bundle_state.revert_to(block_number);
+
+        // split is done as [0, num) and [num, len]
+        let (_, this) = self.receipts.split_at(num_of_detached_block as usize);
+
+        self.receipts = this.to_vec().clone();
+        self.bundle.take_n_reverts(num_of_detached_block as usize);
+
+        self.first_block = block_number + 1;
+
+        Some(detached_bundle_state)
+    }
+
+    /// Extend one state from another.
+    ///
+    /// For state, this is a very sensitive operation and should only be used when
+    /// we know that the other state was built on top of this one.
+    /// In most cases that is true.
+    pub fn extend(&mut self, other: Self) {
+        self.bundle.extend(other.bundle);
+        self.receipts.extend(other.receipts);
+    }
+
+    /// Write bundle state to database.
+    ///
+    /// `is_value_known` should be set to [OriginalValuesKnown::No] if the bundle has some of its
+    /// data detached, since that leaves some original values unknown.
+    pub fn write_to_db<'a, TX: DbTxMut<'a> + DbTx<'a>>(
+        self,
+        tx: &TX,
+        is_value_known: OriginalValuesKnown,
+    ) -> Result<(), DatabaseError> {
+        let (plain_state, reverts) = self.bundle.into_plain_state_and_reverts(is_value_known);
+
+        StateReverts(reverts).write_to_db(tx, self.first_block)?;
+
+        // write receipts
+        let mut bodies_cursor = tx.cursor_read::<tables::BlockBodyIndices>()?;
+        let mut receipts_cursor = tx.cursor_write::<tables::Receipts>()?;
+
+        for (idx, receipts) in self.receipts.into_iter().enumerate() {
+            if !receipts.is_empty() {
+                let (_, body_indices) = bodies_cursor
+                    .seek_exact(self.first_block + idx as u64)?
+ .expect("body indices exist"); + + let first_tx_index = body_indices.first_tx_num(); + for (tx_idx, receipt) in receipts.into_iter().enumerate() { + if let Some(receipt) = receipt { + receipts_cursor.append(first_tx_index + tx_idx as u64, receipt)?; + } + } + } + } + + StateChanges(plain_state).write_to_db(tx)?; + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::{StateChanges, StateReverts}; + use crate::{AccountReader, BundleStateWithReceipts, ProviderFactory}; + use reth_db::{ + cursor::{DbCursorRO, DbDupCursorRO}, + models::{AccountBeforeTx, BlockNumberAddress}, + tables, + test_utils::create_test_rw_db, + transaction::DbTx, + DatabaseEnv, + }; + use reth_primitives::{Address, Receipt, StorageEntry, H256, MAINNET, U256}; + use reth_revm_primitives::{into_reth_acc, primitives::HashMap}; + use revm::{ + db::{ + states::{ + bundle_state::{BundleRetention, OriginalValuesKnown}, + changes::PlainStorageRevert, + PlainStorageChangeset, + }, + BundleState, + }, + primitives::{Account, AccountInfo as RevmAccountInfo, AccountStatus, StorageSlot}, + CacheState, DatabaseCommit, State, + }; + use std::sync::Arc; + + #[test] + fn write_to_db_account_info() { + let db: Arc = create_test_rw_db(); + let factory = ProviderFactory::new(db, MAINNET.clone()); + let provider = factory.provider_rw().unwrap(); + + let address_a = Address::zero(); + let address_b = Address::repeat_byte(0xff); + + let account_a = RevmAccountInfo { balance: U256::from(1), nonce: 1, ..Default::default() }; + let account_b = RevmAccountInfo { balance: U256::from(2), nonce: 2, ..Default::default() }; + let account_b_changed = + RevmAccountInfo { balance: U256::from(3), nonce: 3, ..Default::default() }; + + let mut cache_state = CacheState::new(true); + cache_state.insert_not_existing(address_a); + cache_state.insert_account(address_b, account_b.clone()); + let mut state = + State::builder().with_cached_prestate(cache_state).with_bundle_update().build(); + + // 0x00.. is created + state.commit(HashMap::from([( + address_a, + Account { + info: account_a.clone(), + status: AccountStatus::Touched | AccountStatus::Created, + storage: HashMap::default(), + }, + )])); + + // 0xff.. is changed (balance + 1, nonce + 1) + state.commit(HashMap::from([( + address_b, + Account { + info: account_b_changed.clone(), + status: AccountStatus::Touched, + storage: HashMap::default(), + }, + )])); + + state.merge_transitions(BundleRetention::Reverts); + let mut revm_bundle_state = state.take_bundle(); + + // Write plain state and reverts separately. 
+        let reverts = revm_bundle_state.take_all_reverts().into_plain_state_reverts();
+        let plain_state = revm_bundle_state.into_plain_state(OriginalValuesKnown::Yes);
+        assert!(plain_state.storage.is_empty());
+        assert!(plain_state.contracts.is_empty());
+        StateChanges(plain_state)
+            .write_to_db(provider.tx_ref())
+            .expect("Could not write plain state to DB");
+
+        assert_eq!(reverts.storage, [[]]);
+        StateReverts(reverts)
+            .write_to_db(provider.tx_ref(), 1)
+            .expect("Could not write reverts to DB");
+
+        let reth_account_a = into_reth_acc(account_a);
+        let reth_account_b = into_reth_acc(account_b);
+        let reth_account_b_changed = into_reth_acc(account_b_changed.clone());
+
+        // Check plain state
+        assert_eq!(
+            provider.basic_account(address_a).expect("Could not read account state"),
+            Some(reth_account_a),
+            "Account A state is wrong"
+        );
+        assert_eq!(
+            provider.basic_account(address_b).expect("Could not read account state"),
+            Some(reth_account_b_changed),
+            "Account B state is wrong"
+        );
+
+        // Check change set
+        let mut changeset_cursor = provider
+            .tx_ref()
+            .cursor_dup_read::<tables::AccountChangeSet>()
+            .expect("Could not open changeset cursor");
+        assert_eq!(
+            changeset_cursor.seek_exact(1).expect("Could not read account change set"),
+            Some((1, AccountBeforeTx { address: address_a, info: None })),
+            "Account A changeset is wrong"
+        );
+        assert_eq!(
+            changeset_cursor.next_dup().expect("Changeset table is malformed"),
+            Some((1, AccountBeforeTx { address: address_b, info: Some(reth_account_b) })),
+            "Account B changeset is wrong"
+        );
+
+        let mut cache_state = CacheState::new(true);
+        cache_state.insert_account(address_b, account_b_changed.clone());
+        let mut state =
+            State::builder().with_cached_prestate(cache_state).with_bundle_update().build();
+
+        // 0xff.. is destroyed
+        state.commit(HashMap::from([(
+            address_b,
+            Account {
+                status: AccountStatus::Touched | AccountStatus::SelfDestructed,
+                info: account_b_changed,
+                storage: HashMap::default(),
+            },
+        )]));
+
+        state.merge_transitions(BundleRetention::Reverts);
+        let mut revm_bundle_state = state.take_bundle();
+
+        // Write plain state and reverts separately.
+        let reverts = revm_bundle_state.take_all_reverts().into_plain_state_reverts();
+        let plain_state = revm_bundle_state.into_plain_state(OriginalValuesKnown::Yes);
+        // Account B selfdestructed, so the flag for it should be present.
+        assert_eq!(
+            plain_state.storage,
+            [PlainStorageChangeset { address: address_b, wipe_storage: true, storage: vec![] }]
+        );
+        assert!(plain_state.contracts.is_empty());
+        StateChanges(plain_state)
+            .write_to_db(provider.tx_ref())
+            .expect("Could not write plain state to DB");
+
+        assert_eq!(
+            reverts.storage,
+            [[PlainStorageRevert { address: address_b, wiped: true, storage_revert: vec![] }]]
+        );
+        StateReverts(reverts)
+            .write_to_db(provider.tx_ref(), 2)
+            .expect("Could not write reverts to DB");
+
+        // Check new plain state for account B
+        assert_eq!(
+            provider.basic_account(address_b).expect("Could not read account state"),
+            None,
+            "Account B should be deleted"
+        );
+
+        // Check change set
+        assert_eq!(
+            changeset_cursor.seek_exact(2).expect("Could not read account change set"),
+            Some((2, AccountBeforeTx { address: address_b, info: Some(reth_account_b_changed) })),
+            "Account B changeset is wrong after deletion"
+        );
+    }
+
+    #[test]
+    fn write_to_db_storage() {
+        let db: Arc<DatabaseEnv> = create_test_rw_db();
+        let factory = ProviderFactory::new(db, MAINNET.clone());
+        let provider = factory.provider_rw().unwrap();
+
+        let address_a = Address::zero();
+        let address_b = Address::repeat_byte(0xff);
+
+        let account_b = RevmAccountInfo { balance: U256::from(2), nonce: 2, ..Default::default() };
+
+        let mut cache_state = CacheState::new(true);
+        cache_state.insert_not_existing(address_a);
+        cache_state.insert_account_with_storage(
+            address_b,
+            account_b.clone(),
+            HashMap::from([(U256::from(1), U256::from(1))]),
+        );
+        let mut state =
+            State::builder().with_cached_prestate(cache_state).with_bundle_update().build();
+
+        state.commit(HashMap::from([
+            (
+                address_a,
+                Account {
+                    status: AccountStatus::Touched | AccountStatus::Created,
+                    info: RevmAccountInfo::default(),
+                    // 0x00 => 0 => 1
+                    // 0x01 => 0 => 2
+                    storage: HashMap::from([
+                        (
+                            U256::from(0),
+                            StorageSlot { present_value: U256::from(1), ..Default::default() },
+                        ),
+                        (
+                            U256::from(1),
+                            StorageSlot { present_value: U256::from(2), ..Default::default() },
+                        ),
+                    ]),
+                },
+            ),
+            (
+                address_b,
+                Account {
+                    status: AccountStatus::Touched,
+                    info: account_b,
+                    // 0x01 => 1 => 2
+                    storage: HashMap::from([(
+                        U256::from(1),
+                        StorageSlot {
+                            present_value: U256::from(2),
+                            previous_or_original_value: U256::from(1),
+                        },
+                    )]),
+                },
+            ),
+        ]));
+
+        state.merge_transitions(BundleRetention::Reverts);
+
+        BundleStateWithReceipts::new(state.take_bundle(), Vec::new(), 1)
+            .write_to_db(provider.tx_ref(), OriginalValuesKnown::Yes)
+            .expect("Could not write bundle state to DB");
+
+        // Check plain storage state
+        let mut storage_cursor = provider
+            .tx_ref()
+            .cursor_dup_read::<tables::PlainStorageState>()
+            .expect("Could not open plain storage state cursor");
+
+        assert_eq!(
+            storage_cursor.seek_exact(address_a).unwrap(),
+            Some((address_a, StorageEntry { key: H256::zero(), value: U256::from(1) })),
+            "Slot 0 for account A should be 1"
+        );
+        assert_eq!(
+            storage_cursor.next_dup().unwrap(),
+            Some((
+                address_a,
+                StorageEntry { key: H256::from(U256::from(1).to_be_bytes()), value: U256::from(2) }
+            )),
+            "Slot 1 for account A should be 2"
+        );
+        assert_eq!(
+            storage_cursor.next_dup().unwrap(),
+            None,
+            "Account A should only have 2 storage slots"
+        );
+
+        assert_eq!(
+            storage_cursor.seek_exact(address_b).unwrap(),
+            Some((
+                address_b,
+                StorageEntry { key: H256::from(U256::from(1).to_be_bytes()), value: U256::from(2) }
+            )),
+            "Slot 1 for account B should be 2"
+        );
+        assert_eq!(
+            storage_cursor.next_dup().unwrap(),
+            None,
+            "Account B should only have 1 storage slot"
+        );
+
+        // Check change set
+        let mut changeset_cursor = provider
+            .tx_ref()
+            .cursor_dup_read::<tables::StorageChangeSet>()
+            .expect("Could not open storage changeset cursor");
+        assert_eq!(
+            changeset_cursor.seek_exact(BlockNumberAddress((1, address_a))).unwrap(),
+            Some((
+                BlockNumberAddress((1, address_a)),
+                StorageEntry { key: H256::zero(), value: U256::from(0) }
+            )),
+            "Slot 0 for account A should have changed from 0"
+        );
+        assert_eq!(
+            changeset_cursor.next_dup().unwrap(),
+            Some((
+                BlockNumberAddress((1, address_a)),
+                StorageEntry { key: H256::from(U256::from(1).to_be_bytes()), value: U256::from(0) }
+            )),
+            "Slot 1 for account A should have changed from 0"
+        );
+        assert_eq!(
+            changeset_cursor.next_dup().unwrap(),
+            None,
+            "Account A should only be in the changeset 2 times"
+        );
+
+        assert_eq!(
+            changeset_cursor.seek_exact(BlockNumberAddress((1, address_b))).unwrap(),
+            Some((
+                BlockNumberAddress((1, address_b)),
+                StorageEntry { key: H256::from(U256::from(1).to_be_bytes()), value: U256::from(1) }
+            )),
+            "Slot 1 for account B should have changed from 1"
+        );
+        assert_eq!(
+            changeset_cursor.next_dup().unwrap(),
+            None,
+            "Account B should only be in the changeset 1 time"
+        );
+
+        // Delete account A
+        let mut cache_state = CacheState::new(true);
+        cache_state.insert_account(address_a, RevmAccountInfo::default());
+        let mut state =
+            State::builder().with_cached_prestate(cache_state).with_bundle_update().build();
+
+        state.commit(HashMap::from([(
+            address_a,
+            Account {
+                status: AccountStatus::Touched | AccountStatus::SelfDestructed,
+                info: RevmAccountInfo::default(),
+                storage: HashMap::default(),
+            },
+        )]));
+
+        state.merge_transitions(BundleRetention::Reverts);
+        BundleStateWithReceipts::new(state.take_bundle(), Vec::new(), 2)
+            .write_to_db(provider.tx_ref(), OriginalValuesKnown::Yes)
+            .expect("Could not write bundle state to DB");
+
+        assert_eq!(
+            storage_cursor.seek_exact(address_a).unwrap(),
+            None,
+            "Account A should have no storage slots after deletion"
+        );
+
+        assert_eq!(
+            changeset_cursor.seek_exact(BlockNumberAddress((2, address_a))).unwrap(),
+            Some((
+                BlockNumberAddress((2, address_a)),
+                StorageEntry { key: H256::zero(), value: U256::from(1) }
+            )),
+            "Slot 0 for account A should have changed from 1 on deletion"
+        );
+        assert_eq!(
+            changeset_cursor.next_dup().unwrap(),
+            Some((
+                BlockNumberAddress((2, address_a)),
+                StorageEntry { key: H256::from(U256::from(1).to_be_bytes()), value: U256::from(2) }
+            )),
+            "Slot 1 for account A should have changed from 2 on deletion"
+        );
+        assert_eq!(
+            changeset_cursor.next_dup().unwrap(),
+            None,
+            "Account A should only be in the changeset 2 times on deletion"
+        );
+    }
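Between these two tests it is worth spelling out the changeset convention they both rely on; a hypothetical sketch using the same cursor API (the cursor and values are illustrative only, not part of the patch):

// Hypothetical sketch: StorageChangeSet rows are keyed by (block, address)
// and store the value a slot held *before* that block, so a changeset walk
// is enough to reconstruct historical state.
let key = BlockNumberAddress((1, address_a));
if let Some((_, entry)) = changeset_cursor.seek_exact(key).unwrap() {
    // entry.value == the slot's value prior to block 1 (zero for fresh slots).
    println!("slot {:?} was {:?} before block 1", entry.key, entry.value);
}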
+    #[test]
+    fn write_to_db_multiple_selfdestructs() {
+        let db: Arc<DatabaseEnv> = create_test_rw_db();
+        let factory = ProviderFactory::new(db, MAINNET.clone());
+        let provider = factory.provider_rw().unwrap();
+
+        let address1 = Address::random();
+        let account_info = RevmAccountInfo { nonce: 1, ..Default::default() };
+
+        // Block #0: initial state.
+ let mut cache_state = CacheState::new(true); + cache_state.insert_not_existing(address1); + let mut init_state = + State::builder().with_cached_prestate(cache_state).with_bundle_update().build(); + init_state.commit(HashMap::from([( + address1, + Account { + info: account_info.clone(), + status: AccountStatus::Touched | AccountStatus::Created, + // 0x00 => 0 => 1 + // 0x01 => 0 => 2 + storage: HashMap::from([ + ( + U256::ZERO, + StorageSlot { present_value: U256::from(1), ..Default::default() }, + ), + ( + U256::from(1), + StorageSlot { present_value: U256::from(2), ..Default::default() }, + ), + ]), + }, + )])); + init_state.merge_transitions(BundleRetention::Reverts); + BundleStateWithReceipts::new(init_state.take_bundle(), Vec::new(), 0) + .write_to_db(provider.tx_ref(), OriginalValuesKnown::Yes) + .expect("Could not write init bundle state to DB"); + + let mut cache_state = CacheState::new(true); + cache_state.insert_account_with_storage( + address1, + account_info.clone(), + HashMap::from([(U256::ZERO, U256::from(1)), (U256::from(1), U256::from(2))]), + ); + let mut state = + State::builder().with_cached_prestate(cache_state).with_bundle_update().build(); + + // Block #1: change storage. + state.commit(HashMap::from([( + address1, + Account { + status: AccountStatus::Touched, + info: account_info.clone(), + // 0x00 => 1 => 2 + storage: HashMap::from([( + U256::ZERO, + StorageSlot { + previous_or_original_value: U256::from(1), + present_value: U256::from(2), + }, + )]), + }, + )])); + state.merge_transitions(BundleRetention::Reverts); + + // Block #2: destroy account. + state.commit(HashMap::from([( + address1, + Account { + status: AccountStatus::Touched | AccountStatus::SelfDestructed, + info: account_info.clone(), + storage: HashMap::default(), + }, + )])); + state.merge_transitions(BundleRetention::Reverts); + + // Block #3: re-create account and change storage. + state.commit(HashMap::from([( + address1, + Account { + status: AccountStatus::Touched | AccountStatus::Created, + info: account_info.clone(), + storage: HashMap::default(), + }, + )])); + state.merge_transitions(BundleRetention::Reverts); + + // Block #4: change storage. + state.commit(HashMap::from([( + address1, + Account { + status: AccountStatus::Touched, + info: account_info.clone(), + // 0x00 => 0 => 2 + // 0x02 => 0 => 4 + // 0x06 => 0 => 6 + storage: HashMap::from([ + ( + U256::ZERO, + StorageSlot { present_value: U256::from(2), ..Default::default() }, + ), + ( + U256::from(2), + StorageSlot { present_value: U256::from(4), ..Default::default() }, + ), + ( + U256::from(6), + StorageSlot { present_value: U256::from(6), ..Default::default() }, + ), + ]), + }, + )])); + state.merge_transitions(BundleRetention::Reverts); + + // Block #5: Destroy account again. + state.commit(HashMap::from([( + address1, + Account { + status: AccountStatus::Touched | AccountStatus::SelfDestructed, + info: account_info.clone(), + storage: HashMap::default(), + }, + )])); + state.merge_transitions(BundleRetention::Reverts); + + // Block #6: Create, change, destroy and re-create in the same block. 
+        state.commit(HashMap::from([(
+            address1,
+            Account {
+                status: AccountStatus::Touched | AccountStatus::Created,
+                info: account_info.clone(),
+                storage: HashMap::default(),
+            },
+        )]));
+        state.commit(HashMap::from([(
+            address1,
+            Account {
+                status: AccountStatus::Touched,
+                info: account_info.clone(),
+                // 0x00 => 0 => 2
+                storage: HashMap::from([(
+                    U256::ZERO,
+                    StorageSlot { present_value: U256::from(2), ..Default::default() },
+                )]),
+            },
+        )]));
+        state.commit(HashMap::from([(
+            address1,
+            Account {
+                status: AccountStatus::Touched | AccountStatus::SelfDestructed,
+                info: account_info.clone(),
+                storage: HashMap::default(),
+            },
+        )]));
+        state.commit(HashMap::from([(
+            address1,
+            Account {
+                status: AccountStatus::Touched | AccountStatus::Created,
+                info: account_info.clone(),
+                storage: HashMap::default(),
+            },
+        )]));
+        state.merge_transitions(BundleRetention::Reverts);
+
+        // Block #7: Change storage.
+        state.commit(HashMap::from([(
+            address1,
+            Account {
+                status: AccountStatus::Touched,
+                info: account_info.clone(),
+                // 0x00 => 0 => 9
+                storage: HashMap::from([(
+                    U256::ZERO,
+                    StorageSlot { present_value: U256::from(9), ..Default::default() },
+                )]),
+            },
+        )]));
+        state.merge_transitions(BundleRetention::Reverts);
+
+        let bundle = state.take_bundle();
+
+        BundleStateWithReceipts::new(bundle, Vec::new(), 1)
+            .write_to_db(provider.tx_ref(), OriginalValuesKnown::Yes)
+            .expect("Could not write bundle state to DB");
+
+        let mut storage_changeset_cursor = provider
+            .tx_ref()
+            .cursor_dup_read::<tables::StorageChangeSet>()
+            .expect("Could not open storage changeset cursor");
+        let mut storage_changes = storage_changeset_cursor.walk_range(..).unwrap();
+
+        // Iterate through all storage changes
+
+        // Block <number>
+        // <slot>: <value before the change>
+        // ...
+
+        // Block #0
+        // 0x00: 0
+        // 0x01: 0
+        assert_eq!(
+            storage_changes.next(),
+            Some(Ok((
+                BlockNumberAddress((0, address1)),
+                StorageEntry { key: H256::from_low_u64_be(0), value: U256::ZERO }
+            )))
+        );
+        assert_eq!(
+            storage_changes.next(),
+            Some(Ok((
+                BlockNumberAddress((0, address1)),
+                StorageEntry { key: H256::from_low_u64_be(1), value: U256::ZERO }
+            )))
+        );
+
+        // Block #1
+        // 0x00: 1
+        assert_eq!(
+            storage_changes.next(),
+            Some(Ok((
+                BlockNumberAddress((1, address1)),
+                StorageEntry { key: H256::from_low_u64_be(0), value: U256::from(1) }
+            )))
+        );
+
+        // Block #2 (destroyed)
+        // 0x00: 2
+        // 0x01: 2
+        assert_eq!(
+            storage_changes.next(),
+            Some(Ok((
+                BlockNumberAddress((2, address1)),
+                StorageEntry { key: H256::from_low_u64_be(0), value: U256::from(2) }
+            )))
+        );
+        assert_eq!(
+            storage_changes.next(),
+            Some(Ok((
+                BlockNumberAddress((2, address1)),
+                StorageEntry { key: H256::from_low_u64_be(1), value: U256::from(2) }
+            )))
+        );
+
+        // Block #3
+        // no storage changes
+
+        // Block #4
+        // 0x00: 0
+        // 0x02: 0
+        // 0x06: 0
+        assert_eq!(
+            storage_changes.next(),
+            Some(Ok((
+                BlockNumberAddress((4, address1)),
+                StorageEntry { key: H256::from_low_u64_be(0), value: U256::ZERO }
+            )))
+        );
+        assert_eq!(
+            storage_changes.next(),
+            Some(Ok((
+                BlockNumberAddress((4, address1)),
+                StorageEntry { key: H256::from_low_u64_be(2), value: U256::ZERO }
+            )))
+        );
+        assert_eq!(
+            storage_changes.next(),
+            Some(Ok((
+                BlockNumberAddress((4, address1)),
+                StorageEntry { key: H256::from_low_u64_be(6), value: U256::ZERO }
+            )))
+        );
+
+        // Block #5 (destroyed)
+        // 0x00: 2
+        // 0x02: 4
+        // 0x06: 6
+        assert_eq!(
+            storage_changes.next(),
+            Some(Ok((
+                BlockNumberAddress((5, address1)),
+                StorageEntry { key: H256::from_low_u64_be(0), value: U256::from(2) }
+            )))
+        );
+        assert_eq!(
+            storage_changes.next(),
+            Some(Ok((
+                BlockNumberAddress((5, address1)),
+                StorageEntry { key: H256::from_low_u64_be(2), value: U256::from(4) }
+            )))
+        );
+        assert_eq!(
+            storage_changes.next(),
+            Some(Ok((
+                BlockNumberAddress((5, address1)),
+                StorageEntry { key: H256::from_low_u64_be(6), value: U256::from(6) }
+            )))
+        );
+
+        // Block #6
+        // no storage changes (only intra-block changes, which cancel out)
+
+        // Block #7
+        // 0x00: 0
+        assert_eq!(
+            storage_changes.next(),
+            Some(Ok((
+                BlockNumberAddress((7, address1)),
+                StorageEntry { key: H256::from_low_u64_be(0), value: U256::ZERO }
+            )))
+        );
+        assert_eq!(storage_changes.next(), None);
+    }
+
+    #[test]
+    fn storage_change_after_selfdestruct_within_block() {
+        let db: Arc<DatabaseEnv> = create_test_rw_db();
+        let factory = ProviderFactory::new(db, MAINNET.clone());
+        let provider = factory.provider_rw().unwrap();
+
+        let address1 = Address::random();
+        let account1 = RevmAccountInfo { nonce: 1, ..Default::default() };
+
+        // Block #0: initial state.
+        let mut cache_state = CacheState::new(true);
+        cache_state.insert_not_existing(address1);
+        let mut init_state =
+            State::builder().with_cached_prestate(cache_state).with_bundle_update().build();
+        init_state.commit(HashMap::from([(
+            address1,
+            Account {
+                info: account1.clone(),
+                status: AccountStatus::Touched | AccountStatus::Created,
+                // 0x00 => 0 => 1
+                // 0x01 => 0 => 2
+                storage: HashMap::from([
+                    (
+                        U256::ZERO,
+                        StorageSlot { present_value: U256::from(1), ..Default::default() },
+                    ),
+                    (
+                        U256::from(1),
+                        StorageSlot { present_value: U256::from(2), ..Default::default() },
+                    ),
+                ]),
+            },
+        )]));
+        init_state.merge_transitions(BundleRetention::Reverts);
+        BundleStateWithReceipts::new(init_state.take_bundle(), Vec::new(), 0)
+            .write_to_db(provider.tx_ref(), OriginalValuesKnown::Yes)
+            .expect("Could not write init bundle state to DB");
+
+        let mut cache_state = CacheState::new(true);
+        cache_state.insert_account_with_storage(
+            address1,
+            account1.clone(),
+            HashMap::from([(U256::ZERO, U256::from(1)), (U256::from(1), U256::from(2))]),
+        );
+        let mut state =
+            State::builder().with_cached_prestate(cache_state).with_bundle_update().build();
+
+        // Block #1: Destroy, re-create, change storage.
+        state.commit(HashMap::from([(
+            address1,
+            Account {
+                status: AccountStatus::Touched | AccountStatus::SelfDestructed,
+                info: account1.clone(),
+                storage: HashMap::default(),
+            },
+        )]));
+
+        state.commit(HashMap::from([(
+            address1,
+            Account {
+                status: AccountStatus::Touched | AccountStatus::Created,
+                info: account1.clone(),
+                storage: HashMap::default(),
+            },
+        )]));
+
+        state.commit(HashMap::from([(
+            address1,
+            Account {
+                status: AccountStatus::Touched,
+                info: account1.clone(),
+                // 0x01 => 0 => 5
+                storage: HashMap::from([(
+                    U256::from(1),
+                    StorageSlot { present_value: U256::from(5), ..Default::default() },
+                )]),
+            },
+        )]));
+
+        // Commit block #1 changes to the database.
+        state.merge_transitions(BundleRetention::Reverts);
+        BundleStateWithReceipts::new(state.take_bundle(), Vec::new(), 1)
+            .write_to_db(provider.tx_ref(), OriginalValuesKnown::Yes)
+            .expect("Could not write bundle state to DB");
+
+        let mut storage_changeset_cursor = provider
+            .tx_ref()
+            .cursor_dup_read::<tables::StorageChangeSet>()
+            .expect("Could not open storage changeset cursor");
+        let range = BlockNumberAddress::range(1..=1);
+        let mut storage_changes = storage_changeset_cursor.walk_range(range).unwrap();
+
+        assert_eq!(
+            storage_changes.next(),
+            Some(Ok((
+                BlockNumberAddress((1, address1)),
+                StorageEntry { key: H256::from_low_u64_be(0), value: U256::from(1) }
+            )))
+        );
+        assert_eq!(
+            storage_changes.next(),
+            Some(Ok((
+                BlockNumberAddress((1, address1)),
+                StorageEntry { key: H256::from_low_u64_be(1), value: U256::from(2) }
+            )))
+        );
+        assert_eq!(storage_changes.next(), None);
+    }
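The selfdestruct tests above all reduce to inspecting this one table; a hypothetical debugging helper (not in the patch, imports and lifetimes assumed to match the surrounding code) built from the same cursor calls:

// Hypothetical helper: dump every storage changeset row, oldest block first.
fn dump_storage_changesets<'a, TX: DbTx<'a>>(tx: &TX) -> Result<(), DatabaseError> {
    let mut cursor = tx.cursor_dup_read::<tables::StorageChangeSet>()?;
    for row in cursor.walk_range(..)? {
        let (BlockNumberAddress((block, address)), entry) = row?;
        // entry.value is the slot's pre-state for `block`.
        println!("block {block}: {address:?} slot {:?} was {}", entry.key, entry.value);
    }
    Ok(())
}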
+
+    #[test]
+    fn revert_to_indices() {
+        let base = BundleStateWithReceipts {
+            bundle: BundleState::default(),
+            receipts: vec![vec![Some(Receipt::default()); 2]; 7],
+            first_block: 10,
+        };
+
+        let mut this = base.clone();
+        assert!(this.revert_to(10));
+        assert_eq!(this.receipts.len(), 1);
+
+        let mut this = base.clone();
+        assert!(!this.revert_to(9));
+        assert_eq!(this.receipts.len(), 7);
+
+        let mut this = base.clone();
+        assert!(this.revert_to(15));
+        assert_eq!(this.receipts.len(), 6);
+
+        let mut this = base.clone();
+        assert!(this.revert_to(16));
+        assert_eq!(this.receipts.len(), 7);
+
+        let mut this = base.clone();
+        assert!(!this.revert_to(17));
+        assert_eq!(this.receipts.len(), 7);
+    }
+}
diff --git a/crates/storage/provider/src/bundle_state/mod.rs b/crates/storage/provider/src/bundle_state/mod.rs
new file mode 100644
index 000000000000..88b17ad563f0
--- /dev/null
+++ b/crates/storage/provider/src/bundle_state/mod.rs
@@ -0,0 +1,11 @@
+//! Bundle state module.
+//! This module contains all the logic related to bundle state.
+mod bundle_state_with_receipts;
+mod state_changes;
+mod state_reverts;
+
+pub use bundle_state_with_receipts::{
+    AccountRevertInit, BundleStateInit, BundleStateWithReceipts, OriginalValuesKnown, RevertsInit,
+};
+pub use state_changes::StateChanges;
+pub use state_reverts::StateReverts;
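Before the two helper files, a hypothetical sketch of how the exports compose end to end (the transaction `tx` and the populated `bundle` are assumed values, not from the patch):

// Hypothetical sketch of the write path.
//
// `bundle.write_to_db(tx, OriginalValuesKnown::Yes)` internally performs:
//   1. StateReverts(reverts).write_to_db(tx, first_block)  // changesets/history
//   2. per-block receipt appends                           // tables::Receipts
//   3. StateChanges(plain_state).write_to_db(tx)           // latest plain state
bundle.write_to_db(&tx, OriginalValuesKnown::Yes)?;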
diff --git a/crates/storage/provider/src/bundle_state/state_changes.rs b/crates/storage/provider/src/bundle_state/state_changes.rs
new file mode 100644
index 000000000000..fddfbdaac1a1
--- /dev/null
+++ b/crates/storage/provider/src/bundle_state/state_changes.rs
@@ -0,0 +1,88 @@
+use rayon::slice::ParallelSliceMut;
+use reth_db::{
+    cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW},
+    tables,
+    transaction::{DbTx, DbTxMut},
+};
+use reth_interfaces::db::DatabaseError;
+use reth_primitives::{Bytecode, StorageEntry, U256};
+use reth_revm_primitives::{
+    db::states::{PlainStorageChangeset, StateChangeset},
+    into_reth_acc,
+};
+
+/// A change to the state of the world.
+#[derive(Default)]
+pub struct StateChanges(pub StateChangeset);
+
+impl From<StateChangeset> for StateChanges {
+    fn from(revm: StateChangeset) -> Self {
+        Self(revm)
+    }
+}
+
+impl StateChanges {
+    /// Write the post state to the database.
+    pub fn write_to_db<'a, TX: DbTxMut<'a> + DbTx<'a>>(
+        mut self,
+        tx: &TX,
+    ) -> Result<(), DatabaseError> {
+        // Sort all entries so they can be written to the database in a more performant way,
+        // and so they take a smaller memory footprint.
+        self.0.accounts.par_sort_by_key(|a| a.0);
+        self.0.storage.par_sort_by_key(|a| a.address);
+        self.0.contracts.par_sort_by_key(|a| a.0);
+
+        // Write new account state
+        tracing::trace!(target: "provider::post_state", len = self.0.accounts.len(), "Writing new account state");
+        let mut accounts_cursor = tx.cursor_write::<tables::PlainAccountState>()?;
+        // write account to database.
+        for (address, account) in self.0.accounts.into_iter() {
+            if let Some(account) = account {
+                tracing::trace!(target: "provider::post_state", ?address, "Updating plain state account");
+                accounts_cursor.upsert(address, into_reth_acc(account))?;
+            } else if accounts_cursor.seek_exact(address)?.is_some() {
+                tracing::trace!(target: "provider::post_state", ?address, "Deleting plain state account");
+                accounts_cursor.delete_current()?;
+            }
+        }
+
+        // Write bytecode
+        tracing::trace!(target: "provider::post_state", len = self.0.contracts.len(), "Writing bytecodes");
+        let mut bytecodes_cursor = tx.cursor_write::<tables::Bytecodes>()?;
+        for (hash, bytecode) in self.0.contracts.into_iter() {
+            bytecodes_cursor.upsert(hash, Bytecode(bytecode))?;
+        }
+
+        // Write new storage state and wipe storage if needed.
+        tracing::trace!(target: "provider::post_state", len = self.0.storage.len(), "Writing new storage state");
+        let mut storages_cursor = tx.cursor_dup_write::<tables::PlainStorageState>()?;
+        for PlainStorageChangeset { address, wipe_storage, storage } in self.0.storage.into_iter() {
+            // Wiping of storage.
+            if wipe_storage && storages_cursor.seek_exact(address)?.is_some() {
+                storages_cursor.delete_current_duplicates()?;
+            }
+            // cast storage slot keys to H256.
+            let mut storage = storage
+                .into_iter()
+                .map(|(k, value)| StorageEntry { key: k.into(), value })
+                .collect::<Vec<_>>();
+            // sort storage slots by key.
+            storage.par_sort_unstable_by_key(|a| a.key);
+
+            for entry in storage.into_iter() {
+                tracing::trace!(target: "provider::post_state", ?address, ?entry.key, "Updating plain state storage");
+                if let Some(db_entry) = storages_cursor.seek_by_key_subkey(address, entry.key)? {
+                    if db_entry.key == entry.key {
+                        storages_cursor.delete_current()?;
+                    }
+                }
+
+                if entry.value != U256::ZERO {
+                    storages_cursor.upsert(address, entry)?;
+                }
+            }
+        }
+        Ok(())
+    }
+}
diff --git a/crates/storage/provider/src/bundle_state/state_reverts.rs b/crates/storage/provider/src/bundle_state/state_reverts.rs
new file mode 100644
index 000000000000..e3a178ca0eec
--- /dev/null
+++ b/crates/storage/provider/src/bundle_state/state_reverts.rs
@@ -0,0 +1,167 @@
+use rayon::slice::ParallelSliceMut;
+use reth_db::{
+    cursor::{DbCursorRO, DbDupCursorRO, DbDupCursorRW},
+    models::{AccountBeforeTx, BlockNumberAddress},
+    tables,
+    transaction::{DbTx, DbTxMut},
+};
+use reth_interfaces::db::DatabaseError;
+use reth_primitives::{BlockNumber, StorageEntry, H256, U256};
+use reth_revm_primitives::{
+    db::states::{PlainStateReverts, PlainStorageRevert, RevertToSlot},
+    into_reth_acc,
+};
+use std::iter::Peekable;
+
+/// Revert of the state.
+#[derive(Default)]
+pub struct StateReverts(pub PlainStateReverts);
+
+impl From<PlainStateReverts> for StateReverts {
+    fn from(revm: PlainStateReverts) -> Self {
+        Self(revm)
+    }
+}
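One contract of the impl that follows deserves emphasis: the revert vectors are positional, so entry `i` belongs to block `first_block + i`. A hypothetical sketch (the `plain_reverts` and `tx` values are assumed):

// Hypothetical sketch: reverts produced for blocks 100 and 101.
// self.0.accounts[0] and self.0.storage[0] describe block 100 (= first_block),
// self.0.accounts[1] and self.0.storage[1] describe block 101, and so on.
let reverts: StateReverts = plain_reverts.into(); // via the From impl above
// reverts.write_to_db(&tx, 100)?; // anchors index 0 at block 100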
+impl StateReverts {
+    /// Write reverts to database.
+    ///
+    /// Note: Reverts will delete all wiped storage from plain state.
+    pub fn write_to_db<'a, TX: DbTxMut<'a> + DbTx<'a>>(
+        self,
+        tx: &TX,
+        first_block: BlockNumber,
+    ) -> Result<(), DatabaseError> {
+        // Write storage changes
+        tracing::trace!(target: "provider::reverts", "Writing storage changes");
+        let mut storages_cursor = tx.cursor_dup_write::<tables::PlainStorageState>()?;
+        let mut storage_changeset_cursor = tx.cursor_dup_write::<tables::StorageChangeSet>()?;
+        for (block_index, mut storage_changes) in self.0.storage.into_iter().enumerate() {
+            let block_number = first_block + block_index as BlockNumber;
+
+            tracing::trace!(target: "provider::reverts", block_number, "Writing block change");
+            // sort changes by address.
+            storage_changes.par_sort_unstable_by_key(|a| a.address);
+            for PlainStorageRevert { address, wiped, storage_revert } in storage_changes.into_iter()
+            {
+                let storage_id = BlockNumberAddress((block_number, address));
+
+                let mut storage = storage_revert
+                    .into_iter()
+                    .map(|(k, v)| (H256(k.to_be_bytes()), v))
+                    .collect::<Vec<_>>();
+                // sort storage slots by key.
+                storage.par_sort_unstable_by_key(|a| a.0);
+
+                // If we are writing the primary storage wipe transition, the pre-existing plain
+                // storage state has to be taken from the database and written to storage history.
+                // See [StorageWipe::Primary] for more details.
+                let mut wiped_storage = Vec::new();
+                if wiped {
+                    tracing::trace!(target: "provider::reverts", ?address, "Wiping storage");
+                    if let Some((_, entry)) = storages_cursor.seek_exact(address)? {
+                        wiped_storage.push((entry.key, entry.value));
+                        while let Some(entry) = storages_cursor.next_dup_val()? {
+                            wiped_storage.push((entry.key, entry.value))
+                        }
+                    }
+                }
+
+                tracing::trace!(target: "provider::reverts", ?address, ?storage, "Writing storage reverts");
+                for (key, value) in StorageRevertsIter::new(storage, wiped_storage) {
+                    storage_changeset_cursor.append_dup(storage_id, StorageEntry { key, value })?;
+                }
+            }
+        }
+
+        // Write account changes
+        tracing::trace!(target: "provider::reverts", "Writing account changes");
+        let mut account_changeset_cursor = tx.cursor_dup_write::<tables::AccountChangeSet>()?;
+        for (block_index, mut account_block_reverts) in self.0.accounts.into_iter().enumerate() {
+            let block_number = first_block + block_index as BlockNumber;
+            // Sort accounts by address.
+            account_block_reverts.par_sort_by_key(|a| a.0);
+            for (address, info) in account_block_reverts {
+                account_changeset_cursor.append_dup(
+                    block_number,
+                    AccountBeforeTx { address, info: info.map(into_reth_acc) },
+                )?;
+            }
+        }
+
+        Ok(())
+    }
+}
+
+/// Iterator over storage reverts.
+/// See [StorageRevertsIter::next] for more details.
+struct StorageRevertsIter<R: Iterator, W: Iterator> {
+    reverts: Peekable<R>,
+    wiped: Peekable<W>,
+}
+
+impl<R, W> StorageRevertsIter<R, W>
+where
+    R: Iterator<Item = (H256, RevertToSlot)>,
+    W: Iterator<Item = (H256, U256)>,
+{
+    fn new(
+        reverts: impl IntoIterator<IntoIter = R>,
+        wiped: impl IntoIterator<IntoIter = W>,
+    ) -> Self {
+        Self { reverts: reverts.into_iter().peekable(), wiped: wiped.into_iter().peekable() }
+    }
+
+    /// Consume next revert and return it.
+    fn next_revert(&mut self) -> Option<(H256, U256)> {
+        self.reverts.next().map(|(key, revert)| (key, revert.to_previous_value()))
+    }
+
+    /// Consume next wiped storage and return it.
+    fn next_wiped(&mut self) -> Option<(H256, U256)> {
+        self.wiped.next()
+    }
+}
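The `Iterator` impl that follows is essentially the merge step of a merge sort with a tie-break rule; a hypothetical standalone illustration, with plain vectors standing in for the two key-sorted streams:

// Hypothetical illustration of the merge semantics.
let reverts = vec![
    (H256::from_low_u64_be(1), RevertToSlot::Some(U256::from(7))),
    (H256::from_low_u64_be(3), RevertToSlot::Destroyed),
];
let wiped = vec![
    (H256::from_low_u64_be(2), U256::from(9)),
    (H256::from_low_u64_be(3), U256::from(4)),
];
// Yields slots 1, 2, 3 in key order: slot 1 comes from the reverts, slot 2
// from the wiped storage, and the slot 3 tie resolves to the database value
// (4) because the revert marks the slot as destroyed.
let merged: Vec<(H256, U256)> = StorageRevertsIter::new(reverts, wiped).collect();
assert_eq!(merged[2], (H256::from_low_u64_be(3), U256::from(4)));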
+impl<R, W> Iterator for StorageRevertsIter<R, W>
+where
+    R: Iterator<Item = (H256, RevertToSlot)>,
+    W: Iterator<Item = (H256, U256)>,
+{
+    type Item = (H256, U256);
+
+    /// Iterate over storage reverts and wiped entries and return items in sorted order.
+    /// NOTE: The implementation assumes that the inner iterators are already sorted.
+    fn next(&mut self) -> Option<Self::Item> {
+        match (self.reverts.peek(), self.wiped.peek()) {
+            (Some(revert), Some(wiped)) => {
+                // Compare the keys and return the lesser.
+                use std::cmp::Ordering;
+                match revert.0.cmp(&wiped.0) {
+                    Ordering::Less => self.next_revert(),
+                    Ordering::Greater => self.next_wiped(),
+                    Ordering::Equal => {
+                        // Keys are the same, decide which one to return.
+                        let (key, revert_to) = *revert;
+
+                        let value = match revert_to {
+                            // If the slot is some, prefer the revert value.
+                            RevertToSlot::Some(value) => value,
+                            // If the slot was destroyed, prefer the database value.
+                            RevertToSlot::Destroyed => wiped.1,
+                        };
+
+                        // Consume both values from inner iterators.
+                        self.next_revert();
+                        self.next_wiped();
+
+                        Some((key, value))
+                    }
+                }
+            }
+            (Some(_revert), None) => self.next_revert(),
+            (None, Some(_wiped)) => self.next_wiped(),
+            (None, None) => None,
+        }
+    }
+}
diff --git a/crates/storage/provider/src/chain.rs b/crates/storage/provider/src/chain.rs
index 1633d360f065..fc81b3b496d9 100644
--- a/crates/storage/provider/src/chain.rs
+++ b/crates/storage/provider/src/chain.rs
@@ -1,6 +1,6 @@
 //! Contains [Chain], a chain of blocks and their final state.
 
-use crate::PostState;
+use crate::bundle_state::BundleStateWithReceipts;
 use reth_interfaces::{executor::BlockExecutionError, Error};
 use reth_primitives::{
     BlockHash, BlockNumHash, BlockNumber, ForkBlock, Receipt, SealedBlock, SealedBlockWithSenders,
@@ -20,7 +20,7 @@ pub struct Chain {
     /// [Chain::first] to [Chain::tip], inclusive.
     ///
     /// This state also contains the individual changes that lead to the current state.
-    pub state: PostState,
+    pub state: BundleStateWithReceipts,
     /// All blocks in this chain.
     pub blocks: BTreeMap<BlockNumber, SealedBlockWithSenders>,
 }
@@ -42,7 +42,7 @@ impl Chain {
     }
 
     /// Get post state of this chain
-    pub fn state(&self) -> &PostState {
+    pub fn state(&self) -> &BundleStateWithReceipts {
         &self.state
     }
 
@@ -64,7 +64,7 @@ impl Chain {
     }
 
     /// Return post state of the block at the `block_number` or None if block is not known
-    pub fn state_at_block(&self, block_number: BlockNumber) -> Option<PostState> {
+    pub fn state_at_block(&self, block_number: BlockNumber) -> Option<BundleStateWithReceipts> {
         if self.tip().number == block_number {
             return Some(self.state.clone())
         }
@@ -79,13 +79,13 @@ impl Chain {
 
     /// Destructure the chain into its inner components, the blocks and the state at the tip of the
     /// chain.
-    pub fn into_inner(self) -> (ChainBlocks<'static>, PostState) {
+    pub fn into_inner(self) -> (ChainBlocks<'static>, BundleStateWithReceipts) {
         (ChainBlocks { blocks: Cow::Owned(self.blocks) }, self.state)
     }
 
     /// Destructure the chain into its inner components, the blocks and the state at the tip of the
    /// chain.
-    pub fn inner(&self) -> (ChainBlocks<'_>, &PostState) {
+    pub fn inner(&self) -> (ChainBlocks<'_>, &BundleStateWithReceipts) {
         (ChainBlocks { blocks: Cow::Borrowed(&self.blocks) }, &self.state)
     }
 
@@ -125,15 +125,8 @@ impl Chain {
     }
 
     /// Create new chain with given blocks and post state.
-    pub fn new(blocks: Vec<(SealedBlockWithSenders, PostState)>) -> Self {
-        let mut state = PostState::default();
-        let mut block_num_hash = BTreeMap::new();
-        for (block, block_state) in blocks.into_iter() {
-            state.extend(block_state);
-            block_num_hash.insert(block.number, block);
-        }
-
-        Self { state, blocks: block_num_hash }
+    pub fn new(blocks: Vec<SealedBlockWithSenders>, state: BundleStateWithReceipts) -> Self {
+        Self { state, blocks: blocks.into_iter().map(|b| (b.number, b)).collect() }
     }
 
     /// Returns length of the chain.
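The constructor change above shifts state merging to the caller; a hypothetical call site (block and state values assumed), mirroring the updated tests later in this patch:

// Hypothetical sketch: callers now fold per-block states together first.
let mut state = block_state1;      // BundleStateWithReceipts for block 1
state.extend(block_state2);        // overlay block 2 on top
let chain = Chain::new(vec![block1, block2], state);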
@@ -142,9 +135,9 @@ impl Chain {
     }
 
     /// Get all receipts for the given block.
-    pub fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option<&[Receipt]> {
+    pub fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option<Vec<&Receipt>> {
         let num = self.block_number(block_hash)?;
-        Some(self.state.receipts(num))
+        self.state.receipts_by_block(num).iter().map(Option::as_ref).collect()
     }
 
     /// Get all receipts with attachment.
@@ -152,13 +145,14 @@ impl Chain {
     /// Attachment includes block number, block hash, transaction hash and transaction index.
     pub fn receipts_with_attachment(&self) -> Vec<BlockReceipts> {
         let mut receipt_attch = Vec::new();
-        for (block_num, block) in self.blocks().iter() {
-            let mut receipts = self.state.receipts(*block_num).iter();
+        for ((block_num, block), receipts) in self.blocks().iter().zip(self.state.receipts().iter())
+        {
             let mut tx_receipts = Vec::new();
-            for tx in block.body.iter() {
-                if let Some(receipt) = receipts.next() {
-                    tx_receipts.push((tx.hash(), receipt.clone()));
-                }
+            for (tx, receipt) in block.body.iter().zip(receipts.iter()) {
+                tx_receipts.push((
+                    tx.hash(),
+                    receipt.as_ref().expect("receipts have not been pruned").clone(),
+                ));
             }
             let block_num_hash = BlockNumHash::new(*block_num, block.hash());
             receipt_attch.push(BlockReceipts { block: block_num_hash, tx_receipts });
@@ -188,7 +182,7 @@ impl Chain {
 
     /// Split this chain at the given block.
     ///
-    /// The given block will be the first block in the first returned chain.
+    /// The given block will be the last block in the first returned chain.
     ///
     /// If the given block is not found, [`ChainSplit::NoSplitPending`] is returned.
     /// Split chain at the number or hash, block with given number will be included at first chain.
@@ -196,7 +190,7 @@ impl Chain {
     ///
     /// # Note
    ///
-    /// The block number to transition ID mapping is only found in the second chain, making it
+    /// The plain state is only found in the second chain, making it
     /// impossible to perform any state reverts on the first chain.
/// /// The second chain only contains the changes that were reverted on the first chain; however, @@ -229,13 +223,13 @@ impl Chain { let higher_number_blocks = self.blocks.split_off(&(block_number + 1)); - let mut canonical_state = std::mem::take(&mut self.state); - let new_state = canonical_state.split_at(block_number); - self.state = new_state; + let mut state = std::mem::take(&mut self.state); + let canonical_state = + state.split_at(block_number).expect("Detach block number to be in range"); ChainSplit::Split { canonical: Chain { state: canonical_state, blocks: self.blocks }, - pending: Chain { state: self.state, blocks: higher_number_blocks }, + pending: Chain { state, blocks: higher_number_blocks }, } } } @@ -365,7 +359,11 @@ pub enum ChainSplit { #[cfg(test)] mod tests { use super::*; - use reth_primitives::{Account, H160, H256}; + use reth_primitives::{H160, H256}; + use reth_revm_primitives::{ + db::BundleState, + primitives::{AccountInfo, HashMap}, + }; #[test] fn chain_append() { @@ -401,15 +399,25 @@ mod tests { #[test] fn test_number_split() { - let mut base_state = PostState::default(); - let account = Account { nonce: 10, ..Default::default() }; - base_state.create_account(1, H160([1; 20]), account); - - let mut block_state1 = PostState::default(); - block_state1.create_account(2, H160([2; 20]), Account::default()); + let block_state1 = BundleStateWithReceipts::new( + BundleState::new( + vec![(H160([2; 20]), None, Some(AccountInfo::default()), HashMap::default())], + vec![vec![(H160([2; 20]), None, vec![])]], + vec![], + ), + vec![vec![]], + 1, + ); - let mut block_state2 = PostState::default(); - block_state2.create_account(3, H160([3; 20]), Account::default()); + let block_state2 = BundleStateWithReceipts::new( + BundleState::new( + vec![(H160([3; 20]), None, Some(AccountInfo::default()), HashMap::default())], + vec![vec![(H160([3; 20]), None, vec![])]], + vec![], + ), + vec![vec![]], + 2, + ); let mut block1 = SealedBlockWithSenders::default(); let block1_hash = H256([15; 32]); @@ -423,13 +431,13 @@ mod tests { block2.hash = block2_hash; block2.senders.push(H160([4; 20])); - let chain = Chain::new(vec![ - (block1.clone(), block_state1.clone()), - (block2.clone(), block_state2.clone()), - ]); + let mut block_state_extended = block_state1.clone(); + block_state_extended.extend(block_state2.clone()); + + let chain = Chain::new(vec![block1.clone(), block2.clone()], block_state_extended); - let mut split1_state = chain.state.clone(); - let split2_state = split1_state.split_at(1); + let mut split2_state = chain.state.clone(); + let split1_state = split2_state.split_at(1).unwrap(); let chain_split1 = Chain { state: split1_state, blocks: BTreeMap::from([(1, block1.clone())]) }; diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index 4e7048cd4621..310f663d18ce 100644 --- a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -21,12 +21,12 @@ /// Various provider traits. 
 mod traits;
 pub use traits::{
-    AccountExtReader, AccountReader, BlockExecutionWriter, BlockExecutor, BlockHashReader,
-    BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, BlockWriter,
-    BlockchainTreePendingStateProvider, CanonChainTracker, CanonStateNotification,
-    CanonStateNotificationSender, CanonStateNotifications, CanonStateSubscriptions,
-    ChainSpecProvider, ChangeSetReader, EvmEnvProvider, ExecutorFactory, HashingWriter,
-    HeaderProvider, HistoryWriter, PostStateDataProvider, PruneCheckpointReader,
+    AccountExtReader, AccountReader, BlockExecutionWriter, BlockExecutor, BlockExecutorStats,
+    BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource,
+    BlockWriter, BlockchainTreePendingStateProvider, BundleStateDataProvider, CanonChainTracker,
+    CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications,
+    CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, ExecutorFactory,
+    HashingWriter, HeaderProvider, HistoryWriter, PrunableBlockExecutor, PruneCheckpointReader,
     PruneCheckpointWriter, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader,
     StageCheckpointWriter, StateProvider, StateProviderBox, StateProviderFactory,
     StateRootProvider, StorageReader, TransactionsProvider, WithdrawalsProvider,
@@ -39,10 +39,6 @@ pub use providers::{
     HistoricalStateProviderRef, LatestStateProvider, LatestStateProviderRef, ProviderFactory,
 };
 
-/// Execution result
-pub mod post_state;
-pub use post_state::PostState;
-
 #[cfg(any(test, feature = "test-utils"))]
 /// Common test helpers for mocking the Provider.
 pub mod test_utils;
@@ -52,3 +48,6 @@ pub use reth_interfaces::provider::ProviderError;
 
 pub mod chain;
 pub use chain::{Chain, DisplayBlocksChain};
+
+pub mod bundle_state;
+pub use bundle_state::{BundleStateWithReceipts, OriginalValuesKnown, StateChanges, StateReverts};
diff --git a/crates/storage/provider/src/post_state/account.rs b/crates/storage/provider/src/post_state/account.rs
deleted file mode 100644
index 8451dc75ab01..000000000000
--- a/crates/storage/provider/src/post_state/account.rs
+++ /dev/null
@@ -1,89 +0,0 @@
-use derive_more::Deref;
-use reth_primitives::{Account, Address, BlockNumber};
-use std::collections::{btree_map::Entry, BTreeMap};
-
-/// A mapping of `block -> address -> account` that represents what accounts were changed, and what
-/// their state was prior to that change.
-///
-/// If the prior state was `None`, then the account is new.
-#[derive(Default, Clone, Eq, PartialEq, Debug, Deref)]
-pub struct AccountChanges {
-    /// The inner mapping of block changes.
-    #[deref]
-    pub inner: BTreeMap<BlockNumber, BTreeMap<Address, Option<Account>>>,
-    /// Hand tracked change size.
-    pub size: usize,
-}
-
-impl AccountChanges {
-    /// Insert account change at specified block number. The value is **not** updated if it already
-    /// exists.
-    pub fn insert(
-        &mut self,
-        block: BlockNumber,
-        address: Address,
-        old: Option<Account>,
-        new: Option<Account>,
-    ) {
-        match self.inner.entry(block).or_default().entry(address) {
-            Entry::Vacant(entry) => {
-                self.size += 1;
-                entry.insert(old);
-            }
-            Entry::Occupied(entry) => {
-                // If the account state is the same before and after this block, collapse the state
-                // changes.
-                if entry.get() == &new {
-                    entry.remove();
-                    self.size -= 1;
-                }
-            }
-        }
-    }
-
-    /// Insert account changes at specified block number. The values are **not** updated if they
-    /// already exist.
-    pub fn insert_for_block(
-        &mut self,
-        block: BlockNumber,
-        changes: BTreeMap<Address, Option<Account>>,
-    ) {
-        let block_entry = self.inner.entry(block).or_default();
-        for (address, account) in changes {
-            if let Entry::Vacant(entry) = block_entry.entry(address) {
-                entry.insert(account);
-                self.size += 1;
-            }
-        }
-    }
-
-    /// Drain and return any entries above the target block number.
-    pub fn drain_above(
-        &mut self,
-        target_block: BlockNumber,
-    ) -> BTreeMap<BlockNumber, BTreeMap<Address, Option<Account>>> {
-        let mut evicted = BTreeMap::new();
-        self.inner.retain(|block_number, accounts| {
-            if *block_number > target_block {
-                self.size -= accounts.len();
-                evicted.insert(*block_number, accounts.clone());
-                false
-            } else {
-                true
-            }
-        });
-        evicted
-    }
-
-    /// Retain entries only above specified block number.
-    pub fn retain_above(&mut self, target_block: BlockNumber) {
-        self.inner.retain(|block_number, accounts| {
-            if *block_number > target_block {
-                true
-            } else {
-                self.size -= accounts.len();
-                false
-            }
-        });
-    }
-}
diff --git a/crates/storage/provider/src/post_state/mod.rs b/crates/storage/provider/src/post_state/mod.rs
deleted file mode 100644
index 069ad602a401..000000000000
--- a/crates/storage/provider/src/post_state/mod.rs
+++ /dev/null
@@ -1,2076 +0,0 @@
-//! Output of execution.
-use reth_db::{
-    cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW},
-    models::{AccountBeforeTx, BlockNumberAddress},
-    tables,
-    transaction::{DbTx, DbTxMut},
-    DatabaseError as DbError,
-};
-use reth_interfaces::Error;
-use reth_primitives::{
-    bloom::logs_bloom, keccak256, proofs::calculate_receipt_root_ref, Account, Address,
-    BlockNumber, Bloom, Bytecode, Log, PruneMode, PruneModes, Receipt, StorageEntry, H256,
-    MINIMUM_PRUNING_DISTANCE, U256,
-};
-use reth_trie::{
-    hashed_cursor::{HashedPostState, HashedPostStateCursorFactory, HashedStorage},
-    StateRoot, StateRootError,
-};
-use std::collections::{BTreeMap, BTreeSet};
-
-mod account;
-pub use account::AccountChanges;
-
-mod storage;
-pub use storage::{Storage, StorageChanges, StorageChangeset, StorageTransition, StorageWipe};
-
-// todo: rewrite all the docs for this
-/// The state of accounts after execution of one or more transactions, including receipts and new
-/// bytecode.
-///
-/// The latest state can be found in `accounts`, `storage`, and `bytecode`. The receipts for the
-/// transactions that lead to these changes can be found in `receipts`, and each change leading to
-/// this state can be found in `changes`.
-///
-/// # Wiped Storage
-///
-/// The [Storage] type has a field, `wiped` which denotes whether the pre-existing storage in the
-/// database should be cleared or not.
-///
-/// If `wiped` is true, then the account was selfdestructed at some point, and the values contained
-/// in `storage` should be the only values written to the database.
-///
-/// # Transitions
-///
-/// The block level transition includes:
-///
-/// - Block rewards
-/// - Ommer rewards
-/// - Withdrawals
-/// - The irregular state change for the DAO hardfork
-///
-/// For multi-block [PostState]s it is not possible to figure out what transition ID maps on to a
-/// transaction or a block.
-///
-/// # Shaving Allocations
-///
-/// Since most [PostState]s in reth are for multiple blocks it is better to pre-allocate capacity
-/// for receipts and changes, which [PostState::new] does, and thus it (or
-/// [PostState::with_tx_capacity]) should be preferred to using the [Default] implementation.
-#[derive(Debug, Clone, Default, Eq, PartialEq)]
-pub struct PostState {
-    /// The state of all modified accounts after execution.
-    ///
-    /// If the value contained is `None`, then the account should be deleted.
-    accounts: BTreeMap<Address, Option<Account>>,
-    /// The state of all modified storage after execution
-    ///
-    /// If the contained [Storage] is marked as wiped, then all storage values should be cleared
-    /// from the database.
-    storage: BTreeMap<Address, Storage>,
-    /// The state of accounts before they were changed in the given block.
-    ///
-    /// If the value is `None`, then the account is new, otherwise it is a change.
-    account_changes: AccountChanges,
-    /// The state of account storage before it was changed in the given block.
-    ///
-    /// This map only contains old values for storage slots.
-    storage_changes: StorageChanges,
-    /// New code created during the execution
-    bytecode: BTreeMap<H256, Bytecode>,
-    /// The receipt(s) of the executed transaction(s).
-    receipts: BTreeMap<BlockNumber, Vec<Receipt>>,
-    /// Pruning configuration.
-    prune_modes: PruneModes,
-}
-
-impl PostState {
-    /// Create an empty [PostState].
-    pub fn new() -> Self {
-        Self::default()
-    }
-
-    /// Create an empty [PostState] with pre-allocated space for a certain amount of transactions.
-    pub fn with_tx_capacity(block: BlockNumber, txs: usize) -> Self {
-        Self { receipts: BTreeMap::from([(block, Vec::with_capacity(txs))]), ..Default::default() }
-    }
-
-    /// Add a pruning configuration.
-    pub fn add_prune_modes(&mut self, prune_modes: PruneModes) {
-        self.prune_modes = prune_modes;
-    }
-
-    /// Return the current size of the poststate.
-    ///
-    /// Size is the sum of individual changes to accounts, storage, bytecode and receipts.
-    pub fn size_hint(&self) -> usize {
-        // The amount of plain state account entries to update.
-        self.accounts.len()
-        // The approximate amount of plain state storage entries to update.
-        // NOTE: This can be improved by manually keeping track of the storage size for each account.
-        + self.storage.len()
-        // The amount of bytecodes to insert.
-        + self.bytecode.len()
-        // The approximate amount of receipts.
-        // NOTE: This can be improved by manually keeping track of the receipt size for each block number.
-        + self.receipts.len()
-        // The approximate amount of changesets to update.
-        + self.changeset_size_hint()
-    }
-
-    /// Return the current size of history changes in the poststate.
-    pub fn changeset_size_hint(&self) -> usize {
-        // The amount of account changesets to insert.
-        self.account_changes.size
-        // The approximate amount of storage changes to insert.
-        // NOTE: This does not include the entries for primary storage wipes,
-        // which need to be read from plain state.
-        + self.storage_changes.size
-    }
-
-    /// Get the latest state of all changed accounts.
-    pub fn accounts(&self) -> &BTreeMap<Address, Option<Account>> {
-        &self.accounts
-    }
-
-    /// Get a reference to all the account changes
-    pub fn account_changes(&self) -> &AccountChanges {
-        &self.account_changes
-    }
-
-    /// Get a reference to all the storage changes
-    pub fn storage_changes(&self) -> &StorageChanges {
-        &self.storage_changes
-    }
-
-    /// Get the latest state for a specific account.
-    ///
-    /// # Returns
-    ///
-    /// - `None` if the account does not exist
-    /// - `Some(&None)` if the account existed, but has since been deleted.
-    /// - `Some(..)` if the account currently exists
-    pub fn account(&self, address: &Address) -> Option<&Option<Account>> {
-        self.accounts.get(address)
-    }
-
-    /// Get the latest state of storage.
-    pub fn storage(&self) -> &BTreeMap<Address, Storage> {
-        &self.storage
-    }
-
-    /// Get the storage for an account.
-    pub fn account_storage(&self, address: &Address) -> Option<&Storage> {
-        self.storage.get(address)
-    }
-
-    /// Get the newly created bytecodes
-    pub fn bytecodes(&self) -> &BTreeMap<H256, Bytecode> {
-        &self.bytecode
-    }
-
-    /// Get a bytecode in the post-state.
-    pub fn bytecode(&self, code_hash: &H256) -> Option<&Bytecode> {
-        self.bytecode.get(code_hash)
-    }
-
-    /// Get the receipts for the transactions executed to form this [PostState].
-    pub fn receipts(&self, block: BlockNumber) -> &[Receipt] {
-        self.receipts.get(&block).map(Vec::as_slice).unwrap_or(&[])
-    }
-
-    /// Returns an iterator over all logs in this [PostState].
-    pub fn logs(&self, block: BlockNumber) -> impl Iterator<Item = &Log> {
-        self.receipts(block).iter().flat_map(|r| r.logs.iter())
-    }
-
-    /// Returns the logs bloom for all recorded logs.
-    pub fn logs_bloom(&self, block: BlockNumber) -> Bloom {
-        logs_bloom(self.logs(block))
-    }
-
-    /// Returns the receipt root for all recorded receipts.
-    /// TODO: This function hides an expensive operation (bloom). We should probably make it more
-    /// explicit.
-    pub fn receipts_root(&self, block: BlockNumber) -> H256 {
-        calculate_receipt_root_ref(self.receipts(block))
-    }
-
-    /// Hash all changed accounts and storage entries that are currently stored in the post state.
-    ///
-    /// # Returns
-    ///
-    /// The hashed post state.
-    pub fn hash_state_slow(&self) -> HashedPostState {
-        let mut hashed_post_state = HashedPostState::default();
-
-        // Insert accounts with hashed keys from account changes.
-        for (address, account) in self.accounts() {
-            let hashed_address = keccak256(address);
-            if let Some(account) = account {
-                hashed_post_state.insert_account(hashed_address, *account);
-            } else {
-                hashed_post_state.insert_cleared_account(hashed_address);
-            }
-        }
-
-        // Insert accounts and storages with hashed keys from storage changes.
-        for (address, storage) in self.storage() {
-            let mut hashed_storage = HashedStorage::new(storage.wiped());
-            for (slot, value) in &storage.storage {
-                let hashed_slot = keccak256(H256(slot.to_be_bytes()));
-                if *value == U256::ZERO {
-                    hashed_storage.insert_zero_valued_slot(hashed_slot);
-                } else {
-                    hashed_storage.insert_non_zero_valued_storage(hashed_slot, *value);
-                }
-            }
-
-            hashed_post_state.insert_hashed_storage(keccak256(address), hashed_storage);
-        }
-
-        hashed_post_state
-    }
-
-    /// Calculate the state root for this [PostState].
-    /// Internally, function calls [Self::hash_state_slow] to obtain the [HashedPostState].
-    /// Afterwards, it retrieves the [PrefixSets](reth_trie::prefix_set::PrefixSet) of changed keys
-    /// from the [HashedPostState] and uses them to calculate the incremental state root.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use reth_primitives::{Address, Account};
-    /// use reth_provider::PostState;
-    /// use reth_db::{test_utils::create_test_rw_db, database::Database};
-    ///
-    /// // Initialize the database
-    /// let db = create_test_rw_db();
-    ///
-    /// // Initialize the post state
-    /// let mut post_state = PostState::new();
-    ///
-    /// // Create an account
-    /// let block_number = 1;
-    /// let address = Address::random();
-    /// post_state.create_account(1, address, Account { nonce: 1, ..Default::default() });
-    ///
-    /// // Calculate the state root
-    /// let tx = db.tx().expect("failed to create transaction");
-    /// let state_root = post_state.state_root_slow(&tx);
-    /// ```
-    ///
-    /// # Returns
-    ///
-    /// The state root for this [PostState].
-    pub fn state_root_slow<'a, 'tx, TX: DbTx<'tx>>(
-        &self,
-        tx: &'a TX,
-    ) -> Result<H256, StateRootError> {
-        let hashed_post_state = self.hash_state_slow().sorted();
-        let (account_prefix_set, storage_prefix_set) = hashed_post_state.construct_prefix_sets();
-        let hashed_cursor_factory = HashedPostStateCursorFactory::new(tx, &hashed_post_state);
-        StateRoot::new(tx)
-            .with_hashed_cursor_factory(&hashed_cursor_factory)
-            .with_changed_account_prefixes(account_prefix_set)
-            .with_changed_storage_prefixes(storage_prefix_set)
-            .root()
-    }
-
-    // todo: note overwrite behavior, i.e. changes in `other` take precedent
-    /// Extend this [PostState] with the changes in another [PostState].
-    pub fn extend(&mut self, mut other: PostState) {
-        // Insert storage change sets
-        for (block_number, storage_changes) in std::mem::take(&mut other.storage_changes).inner {
-            for (address, their_storage_transition) in storage_changes {
-                let our_storage = self.storage.entry(address).or_default();
-                let (wipe, storage) = if their_storage_transition.wipe.is_wiped() {
-                    // Check existing storage change.
-                    match self.storage_changes.get(&block_number).and_then(|ch| ch.get(&address)) {
-                        Some(change) if change.wipe.is_wiped() => (), // already counted
-                        _ => {
-                            our_storage.times_wiped += 1;
-                        }
-                    };
-                    // Check if this is the first wipe.
-                    let wipe = if our_storage.times_wiped == 1 {
-                        StorageWipe::Primary
-                    } else {
-                        // Even if the wipe in other poststate was primary before, demote it to
-                        // secondary.
-                        StorageWipe::Secondary
-                    };
-                    let mut wiped_storage = std::mem::take(&mut our_storage.storage);
-                    wiped_storage.extend(their_storage_transition.storage);
-                    (wipe, wiped_storage)
-                } else {
-                    (StorageWipe::None, their_storage_transition.storage)
-                };
-                self.storage_changes.insert_for_block_and_address(
-                    block_number,
-                    address,
-                    wipe,
-                    storage.into_iter(),
-                );
-            }
-        }
-
-        // Insert account change sets
-        for (block_number, account_changes) in std::mem::take(&mut other.account_changes).inner {
-            self.account_changes.insert_for_block(block_number, account_changes);
-        }
-
-        // Update plain state
-        self.accounts.extend(other.accounts);
-        for (address, their_storage) in other.storage {
-            let our_storage = self.storage.entry(address).or_default();
-            our_storage.storage.extend(their_storage.storage);
-        }
-
-        self.receipts.extend(other.receipts);
-
-        self.bytecode.extend(other.bytecode);
-    }
-
-    /// Reverts each change up to the `target_block_number` (excluding).
-    ///
-    /// The reverted changes are removed from this post-state, and their effects are reverted.
- pub fn revert_to(&mut self, target_block_number: BlockNumber) { - // Revert account state & changes - let removed_account_changes = self.account_changes.drain_above(target_block_number); - let changed_accounts = self - .account_changes - .iter() - .flat_map(|(_, account_changes)| account_changes.iter().map(|(address, _)| *address)) - .collect::>(); - let mut account_state: BTreeMap> = BTreeMap::default(); - for address in changed_accounts { - let info = removed_account_changes - .iter() - .find_map(|(_, changes)| { - changes.iter().find_map(|ch| (ch.0 == &address).then_some(*ch.1)) - }) - .unwrap_or(*self.accounts.get(&address).expect("exists")); - account_state.insert(address, info); - } - self.accounts = account_state; - - // Revert changes and recreate the storage state - let removed_storage_changes = self.storage_changes.drain_above(target_block_number); - let mut storage_state: BTreeMap = BTreeMap::default(); - for (_, storage_changes) in self.storage_changes.iter() { - for (address, storage_change) in storage_changes { - let entry = storage_state.entry(*address).or_default(); - if storage_change.wipe.is_wiped() { - entry.times_wiped += 1; - } - for (slot, _) in storage_change.storage.iter() { - let value = removed_storage_changes - .iter() - .find_map(|(_, changes)| { - changes.iter().find_map(|ch| { - if ch.0 == address { - match ch.1.storage.iter().find_map(|(changed_slot, value)| { - (slot == changed_slot).then_some(*value) - }) { - value @ Some(_) => Some(value), - None if ch.1.wipe.is_wiped() => Some(None), - None => None, - } - } else { - None - } - }) - }) - .unwrap_or_else(|| { - self.storage.get(address).and_then(|s| s.storage.get(slot).copied()) - }); - if let Some(value) = value { - entry.storage.insert(*slot, value); - } - } - } - } - self.storage = storage_state; - - // Revert receipts - self.receipts.retain(|block_number, _| *block_number <= target_block_number); - } - - /// Reverts each change up to and including any change that is part of `transition_id`. - /// - /// The reverted changes are removed from this post-state, and their effects are reverted. - /// - /// A new post-state containing the pre-revert state, as well as the reverted changes *only* is - /// returned. - /// - /// This effectively splits the post state in two: - /// - /// 1. This post-state has the changes reverted - /// 2. The returned post-state does *not* have the changes reverted, but only contains the - /// descriptions of the changes that were reverted in the first post-state. - pub fn split_at(&mut self, revert_to_block: BlockNumber) -> Self { - // Clone ourselves - let mut non_reverted_state = self.clone(); - - // Revert the desired changes - self.revert_to(revert_to_block); - - // Remove all changes in the returned post-state that were not reverted - non_reverted_state.account_changes.retain_above(revert_to_block); - let updated_times_wiped = non_reverted_state.storage_changes.retain_above(revert_to_block); - // Update or reset the number of times the account was wiped. - for (address, storage) in non_reverted_state.storage.iter_mut() { - storage.times_wiped = updated_times_wiped.get(address).cloned().unwrap_or_default(); - } - // Remove receipts - non_reverted_state.receipts.retain(|block_number, _| *block_number > revert_to_block); - - non_reverted_state - } - - /// Add a newly created account to the post-state. 
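`revert_to` above walks the changesets recorded for blocks past the target and restores, for each touched account, the oldest pre-state found in the removed range. A minimal sketch of the same idea using plain `BTreeMap`s, with `u8` addresses and `u64` account values standing in for the real types:

```rust
use std::collections::{BTreeMap, BTreeSet};

type Addr = u8;
// block number -> (address -> account value *before* that block's change)
type Changes = BTreeMap<u64, BTreeMap<Addr, Option<u64>>>;

// Drop every change above `target` and roll each touched account back to
// the oldest pre-state recorded in the removed range.
fn revert_to(accounts: &mut BTreeMap<Addr, Option<u64>>, changes: &mut Changes, target: u64) {
    // split_off keeps ..=target in `changes` and returns everything above.
    let removed = changes.split_off(&(target + 1));
    let mut reverted = BTreeSet::new();
    for (_block, block_changes) in removed {
        for (addr, old) in block_changes {
            // The first removed change for an address (lowest block) holds
            // its value as of `target`, so later entries are ignored.
            if reverted.insert(addr) {
                accounts.insert(addr, old);
            }
        }
    }
}

fn main() {
    let mut accounts = BTreeMap::from([(1u8, Some(30u64))]);
    let mut changes: Changes = BTreeMap::from([
        (1, BTreeMap::from([(1u8, None)])),     // created in block 1
        (2, BTreeMap::from([(1u8, Some(10))])), // 10 -> 20 in block 2
        (3, BTreeMap::from([(1u8, Some(20))])), // 20 -> 30 in block 3
    ]);
    revert_to(&mut accounts, &mut changes, 1);
    // Blocks 2 and 3 are undone: the account is back at its block 1 value.
    assert_eq!(accounts[&1], Some(10));
    assert_eq!(changes.len(), 1);
}
```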
- pub fn create_account( - &mut self, - block_number: BlockNumber, - address: Address, - account: Account, - ) { - self.accounts.insert(address, Some(account)); - self.account_changes.insert(block_number, address, None, Some(account)); - } - - /// Add a changed account to the post-state. - /// - /// If the account also has changed storage values, [PostState::change_storage] should also be - /// called. - pub fn change_account( - &mut self, - block_number: BlockNumber, - address: Address, - old: Account, - new: Account, - ) { - self.accounts.insert(address, Some(new)); - self.account_changes.insert(block_number, address, Some(old), Some(new)); - } - - /// Mark an account as destroyed. - pub fn destroy_account( - &mut self, - block_number: BlockNumber, - address: Address, - account: Account, - ) { - self.accounts.insert(address, None); - self.account_changes.insert(block_number, address, Some(account), None); - - let storage = self.storage.entry(address).or_default(); - storage.times_wiped += 1; - let wipe = - if storage.times_wiped == 1 { StorageWipe::Primary } else { StorageWipe::Secondary }; - - let wiped_storage = std::mem::take(&mut storage.storage); - self.storage_changes.insert_for_block_and_address( - block_number, - address, - wipe, - wiped_storage.into_iter(), - ); - } - - /// Add changed storage values to the post-state. - pub fn change_storage( - &mut self, - block_number: BlockNumber, - address: Address, - changeset: StorageChangeset, - ) { - self.storage - .entry(address) - .or_default() - .storage - .extend(changeset.iter().map(|(slot, (_, new))| (*slot, *new))); - self.storage_changes.insert_for_block_and_address( - block_number, - address, - StorageWipe::None, - changeset.into_iter().map(|(slot, (old, _))| (slot, old)), - ); - } - - /// Add new bytecode to the post-state. - pub fn add_bytecode(&mut self, code_hash: H256, bytecode: Bytecode) { - // Assumption: `insert` will override the value if present, but since the code hash for a - // given bytecode will always be the same, we are overriding with the same value. - // - // In other words: if this entry already exists, replacing the bytecode will replace with - // the same value, which is wasteful. - self.bytecode.entry(code_hash).or_insert(bytecode); - } - - /// Add a transaction receipt to the post-state. - /// - /// Transactions should always include their receipts in the post-state. - pub fn add_receipt(&mut self, block: BlockNumber, receipt: Receipt) { - self.receipts.entry(block).or_default().push(receipt); - } - - /// Write changeset history to the database. 
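The `add_bytecode` comment above hinges on `entry(..).or_insert(..)` only writing on the first insertion; because a code hash uniquely determines its bytecode, skipping later writes is safe. Illustrated with string stand-ins for `H256` and `Bytecode`:

```rust
use std::collections::BTreeMap;

fn main() {
    // String stand-ins for the real H256 code hash and Bytecode value.
    let mut bytecode: BTreeMap<&str, &str> = BTreeMap::new();

    // `or_insert` only writes when the hash is absent. Since a code hash
    // uniquely identifies its bytecode, a second insert could only write
    // the exact same bytes, so skipping it avoids pointless work.
    bytecode.entry("0xabc").or_insert("code");
    bytecode.entry("0xabc").or_insert("code");

    assert_eq!(bytecode.len(), 1);
}
```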
- pub fn write_history_to_db<'a, TX: DbTxMut<'a> + DbTx<'a>>( - &mut self, - tx: &TX, - tip: BlockNumber, - ) -> Result<(), DbError> { - // Write storage changes - tracing::trace!(target: "provider::post_state", "Writing storage changes"); - let mut storages_cursor = tx.cursor_dup_write::()?; - let mut storage_changeset_cursor = tx.cursor_dup_write::()?; - for (block_number, storage_changes) in - std::mem::take(&mut self.storage_changes).inner.into_iter() - { - if self.prune_modes.should_prune_storage_history(block_number, tip) { - continue - } - - for (address, mut storage) in storage_changes.into_iter() { - let storage_id = BlockNumberAddress((block_number, address)); - - // If the account was created and wiped at the same block, skip all storage changes - if storage.wipe.is_wiped() && - self.account_changes - .get(&block_number) - .and_then(|changes| changes.get(&address).map(|info| info.is_none())) - // No account info available, fallback to `false` - .unwrap_or_default() - { - continue - } - - // If we are writing the primary storage wipe transition, the pre-existing plain - // storage state has to be taken from the database and written to storage history. - // See [StorageWipe::Primary] for more details. - if storage.wipe.is_primary() { - if let Some((_, entry)) = storages_cursor.seek_exact(address)? { - tracing::trace!(target: "provider::post_state", ?storage_id, key = ?entry.key, "Storage wiped"); - let key = U256::from_be_bytes(entry.key.to_fixed_bytes()); - if !storage.storage.contains_key(&key) { - storage.storage.insert(entry.key.into(), entry.value); - } - - while let Some(entry) = storages_cursor.next_dup_val()? { - let key = U256::from_be_bytes(entry.key.to_fixed_bytes()); - if !storage.storage.contains_key(&key) { - storage.storage.insert(entry.key.into(), entry.value); - } - } - } - } - - for (slot, old_value) in storage.storage { - tracing::trace!(target: "provider::post_state", ?storage_id, ?slot, ?old_value, "Storage changed"); - storage_changeset_cursor.append_dup( - storage_id, - StorageEntry { key: H256(slot.to_be_bytes()), value: old_value }, - )?; - } - } - } - - // Write account changes - tracing::trace!(target: "provider::post_state", "Writing account changes"); - let mut account_changeset_cursor = tx.cursor_dup_write::()?; - for (block_number, account_changes) in - std::mem::take(&mut self.account_changes).inner.into_iter() - { - if self.prune_modes.should_prune_account_history(block_number, tip) { - continue - } - - for (address, info) in account_changes.into_iter() { - tracing::trace!(target: "provider::post_state", block_number, ?address, old = ?info, "Account changed"); - account_changeset_cursor - .append_dup(block_number, AccountBeforeTx { address, info })?; - } - } - - Ok(()) - } - - /// Write the post state to the database. - pub fn write_to_db<'a, TX: DbTxMut<'a> + DbTx<'a>>( - mut self, - tx: &TX, - tip: BlockNumber, - ) -> Result<(), Error> { - self.write_history_to_db(tx, tip)?; - - // Write new storage state - tracing::trace!(target: "provider::post_state", len = self.storage.len(), "Writing new storage state"); - let mut storages_cursor = tx.cursor_dup_write::()?; - for (address, storage) in self.storage.into_iter() { - // If the storage was wiped at least once, remove all previous entries from the - // database. 
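The primary-wipe branch above has one non-obvious job: before the plain storage is cleared, every pre-existing slot that the post state did not touch must be pulled from the database into the changeset, or the history tables would lose it. A simplified sketch with `u64` slots and values:

```rust
use std::collections::BTreeMap;

// On a primary wipe, the changeset must also absorb every slot already in
// the database that this post state never touched; otherwise clearing the
// plain state would erase those values from history.
fn absorb_db_storage(changeset: &mut BTreeMap<u64, u64>, db: &BTreeMap<u64, u64>) {
    for (slot, value) in db {
        // Slots already recorded keep their (older) changeset value.
        changeset.entry(*slot).or_insert(*value);
    }
}

fn main() {
    // The post state changed slot 1 (old value 10); the DB also holds slot 2.
    let mut changeset = BTreeMap::from([(1u64, 10u64)]);
    let db = BTreeMap::from([(1u64, 99u64), (2u64, 20u64)]);
    absorb_db_storage(&mut changeset, &db);
    // Slot 1 keeps its recorded old value; slot 2 is pulled from the DB.
    assert_eq!(changeset, BTreeMap::from([(1, 10), (2, 20)]));
}
```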
- if storage.wiped() { - tracing::trace!(target: "provider::post_state", ?address, "Wiping storage from plain state"); - if storages_cursor.seek_exact(address)?.is_some() { - storages_cursor.delete_current_duplicates()?; - } - } - - for (key, value) in storage.storage { - tracing::trace!(target: "provider::post_state", ?address, ?key, "Updating plain state storage"); - let key: H256 = key.into(); - if let Some(entry) = storages_cursor.seek_by_key_subkey(address, key)? { - if entry.key == key { - storages_cursor.delete_current()?; - } - } - - if value != U256::ZERO { - storages_cursor.upsert(address, StorageEntry { key, value })?; - } - } - } - - // Write new account state - tracing::trace!(target: "provider::post_state", len = self.accounts.len(), "Writing new account state"); - let mut accounts_cursor = tx.cursor_write::()?; - for (address, account) in self.accounts.into_iter() { - if let Some(account) = account { - tracing::trace!(target: "provider::post_state", ?address, "Updating plain state account"); - accounts_cursor.upsert(address, account)?; - } else if accounts_cursor.seek_exact(address)?.is_some() { - tracing::trace!(target: "provider::post_state", ?address, "Deleting plain state account"); - accounts_cursor.delete_current()?; - } - } - - // Write bytecode - tracing::trace!(target: "provider::post_state", len = self.bytecode.len(), "Writing bytecodes"); - let mut bytecodes_cursor = tx.cursor_write::()?; - for (hash, bytecode) in self.bytecode.into_iter() { - bytecodes_cursor.upsert(hash, bytecode)?; - } - - // Write the receipts of the transactions if not pruned - tracing::trace!(target: "provider::post_state", len = self.receipts.len(), "Writing receipts"); - if !self.receipts.is_empty() && self.prune_modes.receipts != Some(PruneMode::Full) { - let mut bodies_cursor = tx.cursor_read::()?; - let mut receipts_cursor = tx.cursor_write::()?; - - let contract_log_pruner = self - .prune_modes - .receipts_log_filter - .group_by_block(tip, None) - .map_err(|e| Error::Custom(e.to_string()))?; - - // Empty implies that there is going to be - // addresses to include in the filter in a future block. None means there isn't any kind - // of configuration. - let mut address_filter: Option<(u64, Vec<&Address>)> = None; - - for (block, receipts) in self.receipts { - // [`PrunePart::Receipts`] takes priority over [`PrunePart::ContractLogs`] - if receipts.is_empty() || self.prune_modes.should_prune_receipts(block, tip) { - continue - } - - // All receipts from the last 128 blocks are required for blockchain tree, even with - // [`PrunePart::ContractLogs`]. 
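The receipt-pruning loop above grows an address filter incrementally: for each block it scans only the filter configuration between the previously checked block and the current one. A small sketch of that cursor pattern, with hypothetical addresses standing in for the real `receipts_log_filter` output:

```rust
use std::collections::BTreeMap;

fn main() {
    // Hypothetical filter config, mirroring what group_by_block returns:
    // block -> contract addresses whose logs must be kept from then on.
    let by_block: BTreeMap<u64, Vec<&str>> =
        BTreeMap::from([(5, vec!["0xaaaa"]), (10, vec!["0xbbbb"])]);

    let mut prev_block = 0u64;
    let mut filter: Vec<&str> = Vec::new();

    for block in [6u64, 12] {
        // Extend the filter with every address configured between the last
        // checked block and the current one, then advance the cursor so the
        // next block only scans the new part of the range.
        for (_, addrs) in by_block.range(prev_block..=block) {
            filter.extend_from_slice(addrs);
        }
        prev_block = block;
        println!("block {block}: keep logs from {filter:?}");
    }
    assert_eq!(filter, ["0xaaaa", "0xbbbb"]);
}
```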
- let prunable_receipts = - PruneMode::Distance(MINIMUM_PRUNING_DISTANCE).should_prune(block, tip); - - if prunable_receipts && !contract_log_pruner.is_empty() { - if address_filter.is_none() { - address_filter = Some((0, vec![])); - } - - // Get all addresses higher than the previous checked block up to the current - // one - if let Some((prev_block, filter)) = &mut address_filter { - for (_, addresses) in contract_log_pruner.range(*prev_block..=block) { - filter.extend_from_slice(addresses.as_slice()) - } - - *prev_block = block; - } - } - - let (_, body_indices) = - bodies_cursor.seek_exact(block)?.expect("body indices exist"); - let tx_range = body_indices.tx_num_range(); - assert_eq!(receipts.len(), tx_range.clone().count(), "Receipt length mismatch"); - - for (tx_num, receipt) in tx_range.zip(receipts) { - if prunable_receipts { - // If there is an address_filter, and it does not contain any of the - // contract addresses, then skip writing this - // receipt. - if let Some((_, filter)) = &address_filter { - if !receipt.logs.iter().any(|log| filter.contains(&&log.address)) { - continue - } - } - } - receipts_cursor.append(tx_num, receipt)?; - } - } - } - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{AccountReader, ProviderFactory}; - use reth_db::{ - database::Database, test_utils::create_test_rw_db, transaction::DbTx, DatabaseEnv, - }; - use reth_primitives::{proofs::EMPTY_ROOT, MAINNET}; - use reth_trie::test_utils::state_root; - use std::sync::Arc; - - // Ensure that the transition id is not incremented if postate is extended by another empty - // poststate. - #[test] - fn extend_empty() { - let mut a = PostState::new(); - - // Extend empty poststate with another empty poststate - a.extend(PostState::new()); - - // Add single transition and extend with empty poststate - a.create_account(1, Address::zero(), Account::default()); - a.extend(PostState::new()); - assert_eq!(a.account_changes.iter().fold(0, |len, (_, changes)| len + changes.len()), 1); - } - - #[test] - fn extend() { - let mut a = PostState::new(); - a.create_account(1, Address::zero(), Account::default()); - a.destroy_account(1, Address::zero(), Account::default()); - - assert_eq!(a.account_changes.iter().fold(0, |len, (_, changes)| len + changes.len()), 0); - - let mut b = PostState::new(); - b.create_account(2, Address::repeat_byte(0xff), Account::default()); - - assert_eq!(b.account_changes.iter().fold(0, |len, (_, changes)| len + changes.len()), 1); - - let mut c = a.clone(); - c.extend(b.clone()); - - assert_eq!(c.account_changes.iter().fold(0, |len, (_, changes)| len + changes.len()), 1); - - let mut d = PostState::new(); - d.create_account(3, Address::zero(), Account::default()); - d.destroy_account(3, Address::zero(), Account::default()); - c.extend(d); - assert_eq!(c.account_storage(&Address::zero()).unwrap().times_wiped, 2); - // Primary wipe occurred at block #1. - assert_eq!( - c.storage_changes.get(&1).unwrap().get(&Address::zero()).unwrap().wipe, - StorageWipe::Primary - ); - // Primary wipe occurred at block #3. 
- assert_eq!( - c.storage_changes.get(&3).unwrap().get(&Address::zero()).unwrap().wipe, - StorageWipe::Secondary - ); - } - - #[test] - fn revert_to() { - let mut state = PostState::new(); - let address1 = Address::repeat_byte(0); - let account1 = Account { nonce: 1, balance: U256::from(1), bytecode_hash: None }; - state.create_account(1, address1, account1); - state.create_account( - 2, - Address::repeat_byte(0xff), - Account { nonce: 2, balance: U256::from(2), bytecode_hash: None }, - ); - assert_eq!( - state.account_changes.iter().fold(0, |len, (_, changes)| len + changes.len()), - 2 - ); - - let revert_to = 1; - state.revert_to(revert_to); - assert_eq!(state.accounts, BTreeMap::from([(address1, Some(account1))])); - assert_eq!( - state.account_changes.iter().fold(0, |len, (_, changes)| len + changes.len()), - 1 - ); - } - - #[test] - fn wiped_revert() { - let address = Address::random(); - - let init_block_number = 0; - let init_account = Account { balance: U256::from(3), ..Default::default() }; - let init_slot = U256::from(1); - - // Create init state for demonstration purposes - // Block 0 - // Account: exists - // Storage: 0x01: 1 - let mut init_state = PostState::new(); - init_state.create_account(init_block_number, address, init_account); - init_state.change_storage( - init_block_number, - address, - BTreeMap::from([(init_slot, (U256::ZERO, U256::from(1)))]), - ); - assert_eq!( - init_state.storage.get(&address), - Some(&Storage { - storage: BTreeMap::from([(init_slot, U256::from(1))]), - times_wiped: 0 - }) - ); - - let mut post_state = PostState::new(); - // Block 1 - // - - // Block 2 - // Account: destroyed - // Storage: wiped - post_state.destroy_account(2, address, init_account); - assert!(post_state.storage.get(&address).unwrap().wiped()); - - // Block 3 - // Account: recreated - // Storage: wiped, then 0x01: 2 - let recreated_account = Account { balance: U256::from(4), ..Default::default() }; - post_state.create_account(3, address, recreated_account); - post_state.change_storage( - 3, - address, - BTreeMap::from([(init_slot, (U256::ZERO, U256::from(2)))]), - ); - assert!(post_state.storage.get(&address).unwrap().wiped()); - - // Revert to block 2 - post_state.revert_to(2); - assert!(post_state.storage.get(&address).unwrap().wiped()); - assert_eq!( - post_state.storage.get(&address).unwrap(), - &Storage { times_wiped: 1, storage: BTreeMap::default() } - ); - - // Revert to block 1 - post_state.revert_to(1); - assert_eq!(post_state.storage.get(&address), None); - } - - #[test] - fn split_at() { - let address1 = Address::random(); - let address2 = Address::random(); - let slot1 = U256::from(1); - let slot2 = U256::from(2); - - let mut state = PostState::new(); - // Block #1 - // Create account 1 and change its storage - // Assume account 2 already exists in the database and change storage for it - state.create_account(1, address1, Account::default()); - state.change_storage(1, address1, BTreeMap::from([(slot1, (U256::ZERO, U256::from(1)))])); - state.change_storage(1, address1, BTreeMap::from([(slot2, (U256::ZERO, U256::from(1)))])); - state.change_storage(1, address2, BTreeMap::from([(slot2, (U256::ZERO, U256::from(2)))])); - let block1_account_changes = (1, BTreeMap::from([(address1, None)])); - let block1_storage_changes = ( - 1, - BTreeMap::from([ - ( - address1, - StorageTransition { - storage: BTreeMap::from([(slot1, U256::ZERO), (slot2, U256::ZERO)]), - wipe: StorageWipe::None, - }, - ), - ( - address2, - StorageTransition { - storage: BTreeMap::from([(slot2, 
U256::ZERO)]), - wipe: StorageWipe::None, - }, - ), - ]), - ); - assert_eq!( - state.account_changes, - AccountChanges { inner: BTreeMap::from([block1_account_changes.clone()]), size: 1 } - ); - assert_eq!( - state.storage_changes, - StorageChanges { inner: BTreeMap::from([block1_storage_changes.clone()]), size: 3 } - ); - - // Block #2 - // Destroy account 1 - // Change storage for account 2 - state.destroy_account(2, address1, Account::default()); - state.change_storage( - 2, - address2, - BTreeMap::from([(slot2, (U256::from(2), U256::from(4)))]), - ); - let account_state_after_block_2 = state.accounts.clone(); - let storage_state_after_block_2 = state.storage.clone(); - let block2_account_changes = (2, BTreeMap::from([(address1, Some(Account::default()))])); - let block2_storage_changes = ( - 2, - BTreeMap::from([ - ( - address1, - StorageTransition { - storage: BTreeMap::from([(slot1, U256::from(1)), (slot2, U256::from(1))]), - wipe: StorageWipe::Primary, - }, - ), - ( - address2, - StorageTransition { - storage: BTreeMap::from([(slot2, U256::from(2))]), - wipe: StorageWipe::None, - }, - ), - ]), - ); - assert_eq!( - state.account_changes, - AccountChanges { - inner: BTreeMap::from([ - block1_account_changes.clone(), - block2_account_changes.clone() - ]), - size: 2 - } - ); - assert_eq!( - state.storage_changes, - StorageChanges { - inner: BTreeMap::from([ - block1_storage_changes.clone(), - block2_storage_changes.clone() - ]), - size: 6, - } - ); - - // Block #3 - // Recreate account 1 - // Destroy account 2 - state.create_account(3, address1, Account::default()); - state.change_storage( - 3, - address2, - BTreeMap::from([(slot2, (U256::from(4), U256::from(1)))]), - ); - state.destroy_account(3, address2, Account::default()); - let block3_account_changes = - (3, BTreeMap::from([(address1, None), (address2, Some(Account::default()))])); - let block3_storage_changes = ( - 3, - BTreeMap::from([( - address2, - StorageTransition { - storage: BTreeMap::from([(slot2, U256::from(4))]), - wipe: StorageWipe::Primary, - }, - )]), - ); - assert_eq!( - state.account_changes, - AccountChanges { - inner: BTreeMap::from([ - block1_account_changes.clone(), - block2_account_changes.clone(), - block3_account_changes.clone() - ]), - size: 4 - } - ); - assert_eq!( - state.storage_changes, - StorageChanges { - inner: BTreeMap::from([ - block1_storage_changes.clone(), - block2_storage_changes.clone(), - block3_storage_changes.clone() - ]), - size: 7, - } - ); - - // Block #4 - // Destroy account 1 again - state.destroy_account(4, address1, Account::default()); - let account_state_after_block_4 = state.accounts.clone(); - let storage_state_after_block_4 = state.storage.clone(); - let block4_account_changes = (4, BTreeMap::from([(address1, Some(Account::default()))])); - let block4_storage_changes = ( - 4, - BTreeMap::from([( - address1, - StorageTransition { storage: BTreeMap::default(), wipe: StorageWipe::Secondary }, - )]), - ); - - // Blocks #1-4 - // Account 1. Info: . Storage: . Times Wiped: 2. - // Account 2. Info: . Storage: . Times Wiped: 1. 
- assert_eq!(state.accounts, BTreeMap::from([(address1, None), (address2, None)])); - assert_eq!( - state.storage, - BTreeMap::from([ - (address1, Storage { times_wiped: 2, storage: BTreeMap::default() }), - (address2, Storage { times_wiped: 1, storage: BTreeMap::default() }) - ]) - ); - assert_eq!( - state.account_changes, - AccountChanges { - inner: BTreeMap::from([ - block1_account_changes.clone(), - block2_account_changes.clone(), - block3_account_changes.clone(), - block4_account_changes.clone(), - ]), - size: 5 - } - ); - assert_eq!( - state.storage_changes, - StorageChanges { - inner: BTreeMap::from([ - block1_storage_changes.clone(), - block2_storage_changes.clone(), - block3_storage_changes.clone(), - block4_storage_changes, - ]), - size: 7, - } - ); - - // Split state at block #2 - let mut state_1_2 = state.clone(); - let state_3_4 = state_1_2.split_at(2); - - // Blocks #1-2 - // Account 1. Info: . Storage: . - // Account 2. Info: exists. Storage: slot2 - 4. - assert_eq!(state_1_2.accounts, account_state_after_block_2); - assert_eq!(state_1_2.storage, storage_state_after_block_2); - assert_eq!( - state_1_2.account_changes, - AccountChanges { - inner: BTreeMap::from([block1_account_changes, block2_account_changes]), - size: 2 - } - ); - assert_eq!( - state_1_2.storage_changes, - StorageChanges { - inner: BTreeMap::from([block1_storage_changes, block2_storage_changes]), - size: 6, - } - ); - - // Plain state for blocks #3-4 should match plain state from blocks #1-4 - // Account 1. Info: . Storage: . - // Account 2. Info: exists. Storage: slot2 - 4. - assert_eq!(state_3_4.accounts, account_state_after_block_4); - // Not equal because the `times_wiped` value is different. - assert_ne!(state_3_4.storage, storage_state_after_block_4); - assert_eq!( - state_3_4.storage, - BTreeMap::from([ - (address1, Storage { times_wiped: 1, storage: BTreeMap::default() }), - (address2, Storage { times_wiped: 1, storage: BTreeMap::default() }) - ]) - ); - - // Account changes should match - assert_eq!( - state_3_4.account_changes, - AccountChanges { - inner: BTreeMap::from([block3_account_changes, block4_account_changes,]), - size: 3 - } - ); - // Storage changes should match except for the wipe flag being promoted to primary - assert_eq!( - state_3_4.storage_changes, - StorageChanges { - inner: BTreeMap::from([ - block3_storage_changes, - // Block #4. 
Wipe flag must be promoted to primary - ( - 4, - BTreeMap::from([( - address1, - StorageTransition { - storage: BTreeMap::default(), - wipe: StorageWipe::Primary - }, - )]), - ), - ]), - size: 1, - } - ) - } - - #[test] - fn receipts_split_at() { - let mut state = PostState::new(); - (1..=4).for_each(|block| { - state.add_receipt(block, Receipt::default()); - }); - let state2 = state.split_at(2); - assert_eq!( - state.receipts, - BTreeMap::from([(1, vec![Receipt::default()]), (2, vec![Receipt::default()])]) - ); - assert_eq!( - state2.receipts, - BTreeMap::from([(3, vec![Receipt::default()]), (4, vec![Receipt::default()])]) - ); - } - - #[test] - fn write_to_db_account_info() { - let db: Arc = create_test_rw_db(); - let factory = ProviderFactory::new(db, MAINNET.clone()); - let provider = factory.provider_rw().unwrap(); - - let mut post_state = PostState::new(); - - let address_a = Address::zero(); - let address_b = Address::repeat_byte(0xff); - - let account_a = Account { balance: U256::from(1), nonce: 1, bytecode_hash: None }; - let account_b = Account { balance: U256::from(2), nonce: 2, bytecode_hash: None }; - let account_b_changed = Account { balance: U256::from(3), nonce: 3, bytecode_hash: None }; - - // 0x00.. is created - post_state.create_account(1, address_a, account_a); - // 0x11.. is changed (balance + 1, nonce + 1) - post_state.change_account(1, address_b, account_b, account_b_changed); - post_state.write_to_db(provider.tx_ref(), 0).expect("Could not write post state to DB"); - - // Check plain state - assert_eq!( - provider.basic_account(address_a).expect("Could not read account state"), - Some(account_a), - "Account A state is wrong" - ); - assert_eq!( - provider.basic_account(address_b).expect("Could not read account state"), - Some(account_b_changed), - "Account B state is wrong" - ); - - // Check change set - let mut changeset_cursor = provider - .tx_ref() - .cursor_dup_read::() - .expect("Could not open changeset cursor"); - assert_eq!( - changeset_cursor.seek_exact(1).expect("Could not read account change set"), - Some((1, AccountBeforeTx { address: address_a, info: None })), - "Account A changeset is wrong" - ); - assert_eq!( - changeset_cursor.next_dup().expect("Changeset table is malformed"), - Some((1, AccountBeforeTx { address: address_b, info: Some(account_b) })), - "Account B changeset is wrong" - ); - - let mut post_state = PostState::new(); - // 0x11.. 
is destroyed - post_state.destroy_account(2, address_b, account_b_changed); - post_state - .write_to_db(provider.tx_ref(), 0) - .expect("Could not write second post state to DB"); - - // Check new plain state for account B - assert_eq!( - provider.basic_account(address_b).expect("Could not read account state"), - None, - "Account B should be deleted" - ); - - // Check change set - assert_eq!( - changeset_cursor.seek_exact(2).expect("Could not read account change set"), - Some((2, AccountBeforeTx { address: address_b, info: Some(account_b_changed) })), - "Account B changeset is wrong after deletion" - ); - } - - #[test] - fn write_to_db_storage() { - let db: Arc = create_test_rw_db(); - let tx = db.tx_mut().expect("Could not get database tx"); - - let mut post_state = PostState::new(); - - let address_a = Address::zero(); - let address_b = Address::repeat_byte(0xff); - - // 0x00 => 0 => 1 - // 0x01 => 0 => 2 - let storage_a_changeset = BTreeMap::from([ - (U256::from(0), (U256::from(0), U256::from(1))), - (U256::from(1), (U256::from(0), U256::from(2))), - ]); - - // 0x01 => 1 => 2 - let storage_b_changeset = BTreeMap::from([(U256::from(1), (U256::from(1), U256::from(2)))]); - - post_state.change_storage(1, address_a, storage_a_changeset); - post_state.change_storage(1, address_b, storage_b_changeset); - post_state.write_to_db(&tx, 0).expect("Could not write post state to DB"); - - // Check plain storage state - let mut storage_cursor = tx - .cursor_dup_read::() - .expect("Could not open plain storage state cursor"); - - assert_eq!( - storage_cursor.seek_exact(address_a).unwrap(), - Some((address_a, StorageEntry { key: H256::zero(), value: U256::from(1) })), - "Slot 0 for account A should be 1" - ); - assert_eq!( - storage_cursor.next_dup().unwrap(), - Some(( - address_a, - StorageEntry { key: H256::from(U256::from(1).to_be_bytes()), value: U256::from(2) } - )), - "Slot 1 for account A should be 2" - ); - assert_eq!( - storage_cursor.next_dup().unwrap(), - None, - "Account A should only have 2 storage slots" - ); - - assert_eq!( - storage_cursor.seek_exact(address_b).unwrap(), - Some(( - address_b, - StorageEntry { key: H256::from(U256::from(1).to_be_bytes()), value: U256::from(2) } - )), - "Slot 1 for account B should be 2" - ); - assert_eq!( - storage_cursor.next_dup().unwrap(), - None, - "Account B should only have 1 storage slot" - ); - - // Check change set - let mut changeset_cursor = tx - .cursor_dup_read::() - .expect("Could not open storage changeset cursor"); - assert_eq!( - changeset_cursor.seek_exact(BlockNumberAddress((1, address_a))).unwrap(), - Some(( - BlockNumberAddress((1, address_a)), - StorageEntry { key: H256::zero(), value: U256::from(0) } - )), - "Slot 0 for account A should have changed from 0" - ); - assert_eq!( - changeset_cursor.next_dup().unwrap(), - Some(( - BlockNumberAddress((1, address_a)), - StorageEntry { key: H256::from(U256::from(1).to_be_bytes()), value: U256::from(0) } - )), - "Slot 1 for account A should have changed from 0" - ); - assert_eq!( - changeset_cursor.next_dup().unwrap(), - None, - "Account A should only be in the changeset 2 times" - ); - - assert_eq!( - changeset_cursor.seek_exact(BlockNumberAddress((1, address_b))).unwrap(), - Some(( - BlockNumberAddress((1, address_b)), - StorageEntry { key: H256::from(U256::from(1).to_be_bytes()), value: U256::from(1) } - )), - "Slot 1 for account B should have changed from 1" - ); - assert_eq!( - changeset_cursor.next_dup().unwrap(), - None, - "Account B should only be in the changeset 1 time" - ); - - 
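As the assertions above show, the two table families record opposite sides of every storage change: plain state keeps the new value, while the changeset keeps the old value needed for unwinding. A compact sketch of that split using `(old, new)` pairs per slot:

```rust
use std::collections::BTreeMap;

fn main() {
    // (old, new) per slot, as recorded by change_storage above.
    let changeset: BTreeMap<u64, (u64, u64)> = BTreeMap::from([(0, (0, 1)), (1, (0, 2))]);

    // Plain state keeps the new values...
    let plain: BTreeMap<u64, u64> = changeset.iter().map(|(s, (_, new))| (*s, *new)).collect();
    // ...while the changeset tables keep the old values, for unwinding.
    let history: BTreeMap<u64, u64> = changeset.iter().map(|(s, (old, _))| (*s, *old)).collect();

    assert_eq!(plain, BTreeMap::from([(0, 1), (1, 2)]));
    assert_eq!(history, BTreeMap::from([(0, 0), (1, 0)]));
}
```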
// Delete account A - let mut post_state = PostState::new(); - post_state.destroy_account(2, address_a, Account::default()); - post_state.write_to_db(&tx, 0).expect("Could not write post state to DB"); - - assert_eq!( - storage_cursor.seek_exact(address_a).unwrap(), - None, - "Account A should have no storage slots after deletion" - ); - - assert_eq!( - changeset_cursor.seek_exact(BlockNumberAddress((2, address_a))).unwrap(), - Some(( - BlockNumberAddress((2, address_a)), - StorageEntry { key: H256::zero(), value: U256::from(1) } - )), - "Slot 0 for account A should have changed from 1 on deletion" - ); - assert_eq!( - changeset_cursor.next_dup().unwrap(), - Some(( - BlockNumberAddress((2, address_a)), - StorageEntry { key: H256::from(U256::from(1).to_be_bytes()), value: U256::from(2) } - )), - "Slot 1 for account A should have changed from 2 on deletion" - ); - assert_eq!( - changeset_cursor.next_dup().unwrap(), - None, - "Account A should only be in the changeset 2 times on deletion" - ); - } - - #[test] - fn write_to_db_multiple_selfdestructs() { - let db: Arc = create_test_rw_db(); - let tx = db.tx_mut().expect("Could not get database tx"); - - let address1 = Address::random(); - - let mut init_state = PostState::new(); - init_state.create_account(0, address1, Account::default()); - init_state.change_storage( - 0, - address1, - // 0x00 => 0 => 1 - // 0x01 => 0 => 2 - BTreeMap::from([ - (U256::from(0), (U256::ZERO, U256::from(1))), - (U256::from(1), (U256::ZERO, U256::from(2))), - ]), - ); - init_state.write_to_db(&tx, 0).expect("Could not write init state to DB"); - - let mut post_state = PostState::new(); - post_state.change_storage( - 1, - address1, - // 0x00 => 1 => 2 - BTreeMap::from([(U256::from(0), (U256::from(1), U256::from(2)))]), - ); - post_state.destroy_account(2, address1, Account::default()); - post_state.create_account(3, address1, Account::default()); - post_state.change_storage( - 4, - address1, - // 0x00 => 0 => 2 - // 0x02 => 0 => 4 - // 0x06 => 0 => 6 - BTreeMap::from([ - (U256::from(0), (U256::ZERO, U256::from(2))), - (U256::from(2), (U256::ZERO, U256::from(4))), - (U256::from(6), (U256::ZERO, U256::from(6))), - ]), - ); - post_state.destroy_account(5, address1, Account::default()); - - // Create, change, destroy and recreate in the same block. - post_state.create_account(6, address1, Account::default()); - post_state.change_storage( - 6, - address1, - // 0x00 => 0 => 2 - BTreeMap::from([(U256::from(0), (U256::ZERO, U256::from(2)))]), - ); - post_state.destroy_account(6, address1, Account::default()); - post_state.create_account(6, address1, Account::default()); - - post_state.change_storage( - 7, - address1, - // 0x00 => 0 => 9 - BTreeMap::from([(U256::from(0), (U256::ZERO, U256::from(9)))]), - ); - - post_state.write_to_db(&tx, 0).expect("Could not write post state to DB"); - - let mut storage_changeset_cursor = tx - .cursor_dup_read::() - .expect("Could not open plain storage state cursor"); - let mut storage_changes = storage_changeset_cursor.walk_range(..).unwrap(); - - // Iterate through all storage changes - - // Block - // : - // ... 
- - // Block #0 - // 0x00: 0 - // 0x01: 0 - assert_eq!( - storage_changes.next(), - Some(Ok(( - BlockNumberAddress((0, address1)), - StorageEntry { key: H256::from_low_u64_be(0), value: U256::ZERO } - ))) - ); - assert_eq!( - storage_changes.next(), - Some(Ok(( - BlockNumberAddress((0, address1)), - StorageEntry { key: H256::from_low_u64_be(1), value: U256::ZERO } - ))) - ); - - // Block #1 - // 0x00: 1 - assert_eq!( - storage_changes.next(), - Some(Ok(( - BlockNumberAddress((1, address1)), - StorageEntry { key: H256::from_low_u64_be(0), value: U256::from(1) } - ))) - ); - - // Block #2 (destroyed) - // 0x00: 2 - // 0x01: 2 - assert_eq!( - storage_changes.next(), - Some(Ok(( - BlockNumberAddress((2, address1)), - StorageEntry { key: H256::from_low_u64_be(0), value: U256::from(2) } - ))) - ); - assert_eq!( - storage_changes.next(), - Some(Ok(( - BlockNumberAddress((2, address1)), - StorageEntry { key: H256::from_low_u64_be(1), value: U256::from(2) } - ))) - ); - - // Block #3 - // no storage changes - - // Block #4 - // 0x00: 0 - // 0x02: 0 - // 0x06: 0 - assert_eq!( - storage_changes.next(), - Some(Ok(( - BlockNumberAddress((4, address1)), - StorageEntry { key: H256::from_low_u64_be(0), value: U256::ZERO } - ))) - ); - assert_eq!( - storage_changes.next(), - Some(Ok(( - BlockNumberAddress((4, address1)), - StorageEntry { key: H256::from_low_u64_be(2), value: U256::ZERO } - ))) - ); - assert_eq!( - storage_changes.next(), - Some(Ok(( - BlockNumberAddress((4, address1)), - StorageEntry { key: H256::from_low_u64_be(6), value: U256::ZERO } - ))) - ); - - // Block #5 (destroyed) - // 0x00: 2 - // 0x02: 4 - // 0x06: 6 - assert_eq!( - storage_changes.next(), - Some(Ok(( - BlockNumberAddress((5, address1)), - StorageEntry { key: H256::from_low_u64_be(0), value: U256::from(2) } - ))) - ); - assert_eq!( - storage_changes.next(), - Some(Ok(( - BlockNumberAddress((5, address1)), - StorageEntry { key: H256::from_low_u64_be(2), value: U256::from(4) } - ))) - ); - assert_eq!( - storage_changes.next(), - Some(Ok(( - BlockNumberAddress((5, address1)), - StorageEntry { key: H256::from_low_u64_be(6), value: U256::from(6) } - ))) - ); - - // Block #6 - // no storage changes (only inter block changes) - - // Block #7 - // 0x00: 0 - assert_eq!( - storage_changes.next(), - Some(Ok(( - BlockNumberAddress((7, address1)), - StorageEntry { key: H256::from_low_u64_be(0), value: U256::ZERO } - ))) - ); - assert_eq!(storage_changes.next(), None); - } - - #[test] - fn reuse_selfdestructed_account() { - let address_a = Address::zero(); - - // 0x00 => 0 => 1 - // 0x01 => 0 => 2 - // 0x03 => 0 => 3 - let storage_changeset_one = BTreeMap::from([ - (U256::from(0), (U256::from(0), U256::from(1))), - (U256::from(1), (U256::from(0), U256::from(2))), - (U256::from(3), (U256::from(0), U256::from(3))), - ]); - // 0x00 => 0 => 3 - // 0x01 => 0 => 4 - let storage_changeset_two = BTreeMap::from([ - (U256::from(0), (U256::from(0), U256::from(3))), - (U256::from(2), (U256::from(0), U256::from(4))), - ]); - - let mut state = PostState::new(); - - // Create some storage for account A (simulates a contract deployment) - state.change_storage(1, address_a, storage_changeset_one); - // Next transition destroys the account (selfdestruct) - state.destroy_account(2, address_a, Account::default()); - // Next transition recreates account A with some storage (simulates a contract deployment) - state.change_storage(3, address_a, storage_changeset_two); - - // All the storage of account A has to be deleted in the database (wiped) - assert!( - 
state.account_storage(&address_a).expect("Account A should have some storage").wiped(), - "The wiped flag should be set to discard all pre-existing storage from the database" - ); - // Then, we must ensure that *only* the storage from the last transition will be written - assert_eq!( - state.account_storage(&address_a).expect("Account A should have some storage").storage, - BTreeMap::from([(U256::from(0), U256::from(3)), (U256::from(2), U256::from(4))]), - "Account A's storage should only have slots 0 and 2, and they should have values 3 and 4, respectively." - ); - } - - /// Checks that if an account is touched multiple times in the same block, - /// then the old value from the first change is kept and not overwritten. - /// - /// This is important because post states from different transactions in the same block may see - /// different states of the same account as the old value, but the changeset should reflect the - /// state of the account before the block. - #[test] - fn account_changesets_keep_old_values() { - let mut state = PostState::new(); - let block = 1; - let address = Address::repeat_byte(0); - - // A transaction in block 1 creates the account - state.create_account( - block, - address, - Account { nonce: 1, balance: U256::from(1), bytecode_hash: None }, - ); - - // A transaction in block 1 then changes the same account - state.change_account( - block, - address, - Account { nonce: 1, balance: U256::from(1), bytecode_hash: None }, - Account { nonce: 1, balance: U256::from(2), bytecode_hash: None }, - ); - - // The value in the changeset for the account should be `None` since this was an account - // creation - assert_eq!( - state.account_changes().inner, - BTreeMap::from([(block, BTreeMap::from([(address, None)]))]), - "The changeset for the account is incorrect" - ); - - // The latest state of the account should be: nonce = 1, balance = 2, bytecode hash = None - assert_eq!( - state.accounts.get(&address).unwrap(), - &Some(Account { nonce: 1, balance: U256::from(2), bytecode_hash: None }), - "The latest state of the account is incorrect" - ); - - // Another transaction in block 1 then changes the account yet again - state.change_account( - block, - address, - Account { nonce: 1, balance: U256::from(2), bytecode_hash: None }, - Account { nonce: 2, balance: U256::from(1), bytecode_hash: None }, - ); - - // The value in the changeset for the account should still be `None` - assert_eq!( - state.account_changes().inner, - BTreeMap::from([(block, BTreeMap::from([(address, None)]))]), - "The changeset for the account is incorrect" - ); - - // The latest state of the account should be: nonce = 2, balance = 1, bytecode hash = None - assert_eq!( - state.accounts.get(&address).unwrap(), - &Some(Account { nonce: 2, balance: U256::from(1), bytecode_hash: None }), - "The latest state of the account is incorrect" - ); - } - - /// Checks that if a storage slot is touched multiple times in the same block, - /// then the old value from the first change is kept and not overwritten. - /// - /// This is important because post states from different transactions in the same block may see - /// different states of the same account as the old value, but the changeset should reflect the - /// state of the account before the block. 
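The invariant described above boils down to first-write-wins per block: once a changeset entry exists for an address, later transactions in the same block must not overwrite it. A minimal sketch with a vacant-entry guard:

```rust
use std::collections::{btree_map::Entry, BTreeMap};

fn main() {
    // Per-block changeset: address -> account value before the block.
    let mut changeset: BTreeMap<&str, Option<u64>> = BTreeMap::new();

    // Tx 1 creates the account, so the pre-block value is None.
    if let Entry::Vacant(e) = changeset.entry("0xaa") {
        e.insert(None);
    }
    // Tx 2 changes it again (pre-tx value Some(1)), but the block-level
    // entry already exists, so the original None is preserved.
    if let Entry::Vacant(e) = changeset.entry("0xaa") {
        e.insert(Some(1));
    }

    assert_eq!(changeset["0xaa"], None);
}
```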
- #[test] - fn storage_changesets_keep_old_values() { - let mut state = PostState::new(); - let block = 1; - let address = Address::repeat_byte(0); - - // A transaction in block 1 changes: - // - // Slot 0: 0 -> 1 - // Slot 1: 3 -> 4 - state.change_storage( - block, - address, - BTreeMap::from([ - (U256::from(0), (U256::from(0), U256::from(1))), - (U256::from(1), (U256::from(3), U256::from(4))), - ]), - ); - - // A transaction in block 1 changes: - // - // Slot 0: 1 -> 2 - // Slot 1: 4 -> 5 - state.change_storage( - block, - address, - BTreeMap::from([ - (U256::from(0), (U256::from(1), U256::from(2))), - (U256::from(1), (U256::from(4), U256::from(5))), - ]), - ); - - // The storage changeset for the account in block 1 should now be: - // - // Slot 0: 0 (the value before the first tx in the block) - // Slot 1: 3 - assert_eq!( - state.storage_changes().inner, - BTreeMap::from([( - block, - BTreeMap::from([( - address, - StorageTransition { - storage: BTreeMap::from([ - (U256::from(0), U256::from(0)), - (U256::from(1), U256::from(3)) - ]), - wipe: StorageWipe::None, - } - )]) - )]), - "The changeset for the storage is incorrect" - ); - - // The latest state of the storage should be: - // - // Slot 0: 2 - // Slot 1: 5 - assert_eq!( - state.storage(), - &BTreeMap::from([( - address, - Storage { - storage: BTreeMap::from([ - (U256::from(0), U256::from(2)), - (U256::from(1), U256::from(5)) - ]), - times_wiped: 0, - } - )]), - "The latest state of the storage is incorrect" - ); - } - - /// Tests that the oldest value for changesets is kept when extending a post state from another - /// post state. - /// - /// In other words, this tests the same cases as `account_changesets_keep_old_values` and - /// `storage_changesets_keep_old_values`, but in the case where accounts/slots are changed in - /// different post states that are then merged. 
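When two post states for the same block are merged, the same rule applies across states: entries already present in the receiving changeset are older and win. A sketch of that merge, assuming string addresses and `Option<u64>` pre-state values:

```rust
use std::collections::{btree_map::Entry, BTreeMap};

type Changeset = BTreeMap<&'static str, Option<u64>>;

// Merge block-level changesets from two post states: entries already in
// `ours` were recorded earlier in the block, so they win.
fn extend(ours: &mut Changeset, theirs: Changeset) {
    for (addr, old) in theirs {
        if let Entry::Vacant(e) = ours.entry(addr) {
            e.insert(old);
        }
    }
}

fn main() {
    let mut a: Changeset = BTreeMap::from([("0xaa", None)]); // creation
    let b: Changeset = BTreeMap::from([("0xaa", Some(1)), ("0xbb", Some(7))]);
    extend(&mut a, b);
    // 0xaa keeps the older creation entry; 0xbb is simply carried over.
    assert_eq!(a, BTreeMap::from([("0xaa", None), ("0xbb", Some(7))]));
}
```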
- #[test] - fn extending_preserves_changesets() { - let mut a = PostState::new(); - let mut b = PostState::new(); - let block = 1; - let address = Address::repeat_byte(0); - - // The first state (a) represents a transaction that creates an account with some storage - // slots - // - // Expected changeset state: - // - Account: None - // - Storage: Slot 0: 0 - a.create_account( - block, - address, - Account { nonce: 1, balance: U256::from(1), bytecode_hash: None }, - ); - a.change_storage( - block, - address, - BTreeMap::from([(U256::from(0), (U256::from(0), U256::from(1)))]), - ); - assert_eq!( - a.account_changes().inner, - BTreeMap::from([(block, BTreeMap::from([(address, None)]))]), - "The changeset for the account is incorrect in state A" - ); - assert_eq!( - a.storage_changes().inner, - BTreeMap::from([( - block, - BTreeMap::from([( - address, - StorageTransition { - storage: BTreeMap::from([(U256::from(0), U256::from(0)),]), - wipe: StorageWipe::None, - } - )]) - )]), - "The changeset for the storage is incorrect in state A" - ); - - // The second state (b) represents a transaction that changes some slots and account info - // for the same account - // - // Expected changeset state is the same, i.e.: - // - Account: None - // - Storage: Slot 0: 0 - b.change_account( - block, - address, - Account { nonce: 1, balance: U256::from(1), bytecode_hash: None }, - Account { nonce: 1, balance: U256::from(10), bytecode_hash: None }, - ); - b.change_storage( - block, - address, - BTreeMap::from([(U256::from(0), (U256::from(1), U256::from(2)))]), - ); - assert_eq!( - b.account_changes().inner, - BTreeMap::from([( - block, - BTreeMap::from([( - address, - Some(Account { nonce: 1, balance: U256::from(1), bytecode_hash: None }) - )]) - )]), - "The changeset for the account is incorrect in state B" - ); - assert_eq!( - b.storage_changes().inner, - BTreeMap::from([( - block, - BTreeMap::from([( - address, - StorageTransition { - storage: BTreeMap::from([(U256::from(0), U256::from(1)),]), - wipe: StorageWipe::None, - } - )]) - )]), - "The changeset for the storage is incorrect in state B" - ); - - // Now we merge the states - a.extend(b); - - // The expected state is: - // - // Changesets: - // - Account: None - // - Storage: Slot 0: 0 - // - // Accounts: - // - Nonce 1, balance 10, bytecode hash None - // - // Storage: - // - Slot 0: 2 - assert_eq!( - a.account_changes().inner, - BTreeMap::from([(block, BTreeMap::from([(address, None)]))]), - "The changeset for the account is incorrect in the merged state" - ); - assert_eq!( - a.storage_changes().inner, - BTreeMap::from([( - block, - BTreeMap::from([( - address, - StorageTransition { - storage: BTreeMap::from([(U256::from(0), U256::from(0)),]), - wipe: StorageWipe::None, - } - )]) - )]), - "The changeset for the storage is incorrect in the merged state" - ); - assert_eq!( - a.accounts(), - &BTreeMap::from([( - address, - Some(Account { nonce: 1, balance: U256::from(10), bytecode_hash: None }) - )]), - "The state of accounts in the merged state is incorrect" - ); - assert_eq!( - a.storage(), - &BTreeMap::from([( - address, - Storage { - storage: BTreeMap::from([(U256::from(0), U256::from(2)),]), - times_wiped: 0, - } - )]), - "The latest state of the storage is incorrect in the merged state" - ); - } - - #[test] - fn collapsible_account_changes() { - let address = Address::random(); - let mut post_state = PostState::default(); - - // Create account on block #1 - let account_at_block_1 = Account { nonce: 1, ..Default::default() }; - 
post_state.create_account(1, address, account_at_block_1); - - // Modify account on block #2 and return it to original state. - post_state.change_account( - 2, - address, - Account { nonce: 1, ..Default::default() }, - Account { nonce: 1, balance: U256::from(1), ..Default::default() }, - ); - post_state.change_account( - 2, - address, - Account { nonce: 1, balance: U256::from(1), ..Default::default() }, - Account { nonce: 1, ..Default::default() }, - ); - - assert_eq!(post_state.account_changes().get(&2).and_then(|ch| ch.get(&address)), None); - } - - #[test] - fn empty_post_state_state_root() { - let db: Arc = create_test_rw_db(); - let tx = db.tx().unwrap(); - - let post_state = PostState::new(); - let state_root = post_state.state_root_slow(&tx).expect("Could not get state root"); - assert_eq!(state_root, EMPTY_ROOT); - } - - #[test] - fn post_state_state_root() { - let mut state: BTreeMap)> = (0..10) - .map(|key| { - let account = Account { nonce: 1, balance: U256::from(key), bytecode_hash: None }; - let storage = - (1..11).map(|key| (H256::from_low_u64_be(key), U256::from(key))).collect(); - (Address::from_low_u64_be(key), (account, storage)) - }) - .collect(); - - let db: Arc = create_test_rw_db(); - - // insert initial state to the database - db.update(|tx| { - for (address, (account, storage)) in state.iter() { - let hashed_address = keccak256(address); - tx.put::(hashed_address, *account).unwrap(); - for (slot, value) in storage { - tx.put::( - hashed_address, - StorageEntry { key: keccak256(slot), value: *value }, - ) - .unwrap(); - } - } - - let (_, updates) = StateRoot::new(tx).root_with_updates().unwrap(); - updates.flush(tx).unwrap(); - }) - .unwrap(); - - let block_number = 1; - let tx = db.tx().unwrap(); - let mut post_state = PostState::new(); - - // database only state root is correct - assert_eq!( - post_state.state_root_slow(&tx).unwrap(), - state_root( - state - .clone() - .into_iter() - .map(|(address, (account, storage))| (address, (account, storage.into_iter()))) - ) - ); - - // destroy account 1 - let address_1 = Address::from_low_u64_be(1); - let account_1_old = state.remove(&address_1).unwrap(); - post_state.destroy_account(block_number, address_1, account_1_old.0); - assert_eq!( - post_state.state_root_slow(&tx).unwrap(), - state_root( - state - .clone() - .into_iter() - .map(|(address, (account, storage))| (address, (account, storage.into_iter()))) - ) - ); - - // change slot 2 in account 2 - let address_2 = Address::from_low_u64_be(2); - let slot_2 = U256::from(2); - let slot_2_key = H256(slot_2.to_be_bytes()); - let address_2_slot_2_old_value = - *state.get(&address_2).unwrap().1.get(&slot_2_key).unwrap(); - let address_2_slot_2_new_value = U256::from(100); - state.get_mut(&address_2).unwrap().1.insert(slot_2_key, address_2_slot_2_new_value); - post_state.change_storage( - block_number, - address_2, - BTreeMap::from([(slot_2, (address_2_slot_2_old_value, address_2_slot_2_new_value))]), - ); - assert_eq!( - post_state.state_root_slow(&tx).unwrap(), - state_root( - state - .clone() - .into_iter() - .map(|(address, (account, storage))| (address, (account, storage.into_iter()))) - ) - ); - - // change balance of account 3 - let address_3 = Address::from_low_u64_be(3); - let address_3_account_old = state.get(&address_3).unwrap().0; - let address_3_account_new = Account { balance: U256::from(24), ..address_3_account_old }; - state.get_mut(&address_3).unwrap().0.balance = address_3_account_new.balance; - post_state.change_account( - block_number, - address_3, - 
address_3_account_old, - address_3_account_new, - ); - assert_eq!( - post_state.state_root_slow(&tx).unwrap(), - state_root( - state - .clone() - .into_iter() - .map(|(address, (account, storage))| (address, (account, storage.into_iter()))) - ) - ); - - // change nonce of account 4 - let address_4 = Address::from_low_u64_be(4); - let address_4_account_old = state.get(&address_4).unwrap().0; - let address_4_account_new = Account { nonce: 128, ..address_4_account_old }; - state.get_mut(&address_4).unwrap().0.nonce = address_4_account_new.nonce; - post_state.change_account( - block_number, - address_4, - address_4_account_old, - address_4_account_new, - ); - assert_eq!( - post_state.state_root_slow(&tx).unwrap(), - state_root( - state - .clone() - .into_iter() - .map(|(address, (account, storage))| (address, (account, storage.into_iter()))) - ) - ); - - // recreate account 1 - let account_1_new = - Account { nonce: 56, balance: U256::from(123), bytecode_hash: Some(H256::random()) }; - state.insert(address_1, (account_1_new, BTreeMap::default())); - post_state.create_account(block_number, address_1, account_1_new); - assert_eq!( - post_state.state_root_slow(&tx).unwrap(), - state_root( - state - .clone() - .into_iter() - .map(|(address, (account, storage))| (address, (account, storage.into_iter()))) - ) - ); - - // update storage for account 1 - let slot_20 = U256::from(20); - let slot_20_key = H256(slot_20.to_be_bytes()); - let account_1_slot_20_value = U256::from(12345); - state.get_mut(&address_1).unwrap().1.insert(slot_20_key, account_1_slot_20_value); - post_state.change_storage( - block_number, - address_1, - BTreeMap::from([(slot_20, (U256::from(0), account_1_slot_20_value))]), - ); - assert_eq!( - post_state.state_root_slow(&tx).unwrap(), - state_root( - state - .clone() - .into_iter() - .map(|(address, (account, storage))| (address, (account, storage.into_iter()))) - ) - ); - } -} diff --git a/crates/storage/provider/src/post_state/storage.rs b/crates/storage/provider/src/post_state/storage.rs deleted file mode 100644 index 08e5629b21de..000000000000 --- a/crates/storage/provider/src/post_state/storage.rs +++ /dev/null @@ -1,156 +0,0 @@ -use derive_more::Deref; -use reth_primitives::{Address, BlockNumber, U256}; -use std::collections::{btree_map::Entry, BTreeMap}; - -/// Storage for an account with the old and new values for each slot: (slot -> (old, new)). -pub type StorageChangeset = BTreeMap; - -/// The storage state of the account before the state transition. -#[derive(Debug, Default, Clone, Eq, PartialEq)] -pub struct StorageTransition { - /// The indicator of the storage wipe. - pub wipe: StorageWipe, - /// The storage slots. - pub storage: BTreeMap, -} - -/// The indicator of the storage wipe. -#[derive(Debug, Default, Clone, Eq, PartialEq)] -pub enum StorageWipe { - /// The storage was not wiped at this change. - #[default] - None, - /// The storage was wiped for the first time in the current in-memory state. - /// - /// When writing history to the database, on the primary storage wipe the pre-existing storage - /// will be inserted as the storage state before this transition. - Primary, - /// The storage had been already wiped before. - Secondary, -} - -impl StorageWipe { - /// Returns `true` if the wipe occurred at this transition. - pub fn is_wiped(&self) -> bool { - matches!(self, Self::Primary | Self::Secondary) - } - - /// Returns `true` if the primary wiped occurred at this transition. - /// See [StorageWipe::Primary] for more details. 
- pub fn is_primary(&self) -> bool { - matches!(self, Self::Primary) - } -} - -/// Latest storage state for the account. -/// -/// # Wiped Storage -/// -/// The `times_wiped` field indicates the number of times the storage was wiped in this poststate. -/// -/// If `times_wiped` is greater than 0, then the account was selfdestructed at some point, and the -/// values contained in `storage` should be the only values written to the database. -#[derive(Debug, Default, Clone, Eq, PartialEq)] -pub struct Storage { - /// The number of times the storage was wiped. - pub times_wiped: u64, - /// The storage slots. - pub storage: BTreeMap, -} - -impl Storage { - /// Returns `true` if the storage was wiped at any point. - pub fn wiped(&self) -> bool { - self.times_wiped > 0 - } -} - -/// A mapping of `block -> account -> slot -> old value` that represents what slots were changed, -/// and what their values were prior to that change. -#[derive(Default, Clone, Eq, PartialEq, Debug, Deref)] -pub struct StorageChanges { - /// The inner mapping of block changes. - #[deref] - pub inner: BTreeMap>, - /// Hand tracked change size. - pub size: usize, -} - -impl StorageChanges { - /// Insert storage entries for specified block number and address. - pub fn insert_for_block_and_address( - &mut self, - block: BlockNumber, - address: Address, - wipe: StorageWipe, - storage: I, - ) where - I: Iterator, - { - let block_entry = self.inner.entry(block).or_default(); - let storage_entry = block_entry.entry(address).or_default(); - if wipe.is_wiped() { - storage_entry.wipe = wipe; - } - for (slot, value) in storage { - if let Entry::Vacant(entry) = storage_entry.storage.entry(slot) { - entry.insert(value); - self.size += 1; - } - } - } - - /// Drain and return any entries above the target block number. - pub fn drain_above( - &mut self, - target_block: BlockNumber, - ) -> BTreeMap> { - let mut evicted = BTreeMap::new(); - self.inner.retain(|block_number, storages| { - if *block_number > target_block { - // This is fine, because it's called only on post state splits - self.size -= - storages.iter().fold(0, |acc, (_, storage)| acc + storage.storage.len()); - evicted.insert(*block_number, storages.clone()); - false - } else { - true - } - }); - evicted - } - - /// Retain entries only above specified block number. - /// - /// # Returns - /// - /// The update mapping of address to the number of times it was wiped. - pub fn retain_above(&mut self, target_block: BlockNumber) -> BTreeMap { - let mut updated_times_wiped: BTreeMap = BTreeMap::default(); - self.inner.retain(|block_number, storages| { - if *block_number > target_block { - for (address, storage) in storages.iter_mut() { - if storage.wipe.is_wiped() { - let times_wiped_entry = updated_times_wiped.entry(*address).or_default(); - storage.wipe = if *times_wiped_entry == 0 { - // No wipe was observed, promote the wipe to primary even if it was - // secondary before. 
- StorageWipe::Primary - } else { - // We already observed the storage wipe for this address - StorageWipe::Secondary - }; - *times_wiped_entry += 1; - } - } - true - } else { - // This is fine, because it's called only on post state splits - self.size -= - storages.iter().fold(0, |acc, (_, storage)| acc + storage.storage.len()); - false - } - }); - updated_times_wiped - } -} diff --git a/crates/storage/provider/src/providers/post_state_provider.rs b/crates/storage/provider/src/providers/bundle_state_provider.rs similarity index 59% rename from crates/storage/provider/src/providers/post_state_provider.rs rename to crates/storage/provider/src/providers/bundle_state_provider.rs index 517ff50af114..2fa536742c25 100644 --- a/crates/storage/provider/src/providers/post_state_provider.rs +++ b/crates/storage/provider/src/providers/bundle_state_provider.rs @@ -1,30 +1,30 @@ use crate::{ - AccountReader, BlockHashReader, PostState, PostStateDataProvider, StateProvider, - StateRootProvider, + bundle_state::BundleStateWithReceipts, AccountReader, BlockHashReader, BundleStateDataProvider, + StateProvider, StateRootProvider, }; use reth_interfaces::{provider::ProviderError, Result}; -use reth_primitives::{Account, Address, BlockNumber, Bytecode, Bytes, H256, U256}; +use reth_primitives::{Account, Address, BlockNumber, Bytecode, Bytes, H256}; -/// A state provider that either resolves to data in a wrapped [`crate::PostState`], or an -/// underlying state provider. -pub struct PostStateProvider { +/// A state provider that either resolves to data in a wrapped [`crate::BundleStateWithReceipts`], +/// or an underlying state provider. +pub struct BundleStateProvider { /// The inner state provider. pub(crate) state_provider: SP, /// Post state data, - pub(crate) post_state_data_provider: PSDP, + pub(crate) post_state_data_provider: BSDP, } -impl PostStateProvider { +impl BundleStateProvider { /// Create new post-state provider - pub fn new(state_provider: SP, post_state_data_provider: PSDP) -> Self { + pub fn new(state_provider: SP, post_state_data_provider: BSDP) -> Self { Self { state_provider, post_state_data_provider } } } /* Implement StateProvider traits */ -impl BlockHashReader - for PostStateProvider +impl BlockHashReader + for BundleStateProvider { fn block_hash(&self, block_number: BlockNumber) -> Result> { let block_hash = self.post_state_data_provider.block_hash(block_number); @@ -39,48 +39,48 @@ impl BlockHashReader } } -impl AccountReader for PostStateProvider { +impl AccountReader + for BundleStateProvider +{ fn basic_account(&self, address: Address) -> Result> { if let Some(account) = self.post_state_data_provider.state().account(&address) { - Ok(*account) + Ok(account) } else { self.state_provider.basic_account(address) } } } -impl StateRootProvider - for PostStateProvider +impl StateRootProvider + for BundleStateProvider { - fn state_root(&self, post_state: PostState) -> Result { + fn state_root(&self, post_state: BundleStateWithReceipts) -> Result { let mut state = self.post_state_data_provider.state().clone(); state.extend(post_state); self.state_provider.state_root(state) } } -impl StateProvider for PostStateProvider { +impl StateProvider + for BundleStateProvider +{ fn storage( &self, account: Address, storage_key: reth_primitives::StorageKey, ) -> Result> { - if let Some(storage) = self.post_state_data_provider.state().account_storage(&account) { - if let Some(value) = - storage.storage.get(&U256::from_be_bytes(storage_key.to_fixed_bytes())) - { - return Ok(Some(*value)) - } else if 
storage.wiped() {
-                return Ok(Some(U256::ZERO))
-            }
+        let u256_storage_key = storage_key.into();
+        if let Some(value) =
+            self.post_state_data_provider.state().storage(&account, u256_storage_key)
+        {
+            return Ok(Some(value))
         }
 
         self.state_provider.storage(account, storage_key)
     }
 
     fn bytecode_by_hash(&self, code_hash: H256) -> Result<Option<Bytecode>> {
-        if let Some(bytecode) = self.post_state_data_provider.state().bytecode(&code_hash).cloned()
-        {
+        if let Some(bytecode) = self.post_state_data_provider.state().bytecode(&code_hash) {
             return Ok(Some(bytecode))
         }
 
diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs
index a599ee47f6ff..fb1632cee5a1 100644
--- a/crates/storage/provider/src/providers/database/provider.rs
+++ b/crates/storage/provider/src/providers/database/provider.rs
@@ -1,12 +1,12 @@
 use crate::{
-    post_state::StorageChangeset,
+    bundle_state::{BundleStateInit, BundleStateWithReceipts, RevertsInit},
     traits::{
         AccountExtReader, BlockSource, ChangeSetReader, ReceiptProvider, StageCheckpointWriter,
     },
     AccountReader, BlockExecutionWriter, BlockHashReader, BlockNumReader, BlockReader, BlockWriter,
-    EvmEnvProvider, HashingWriter, HeaderProvider, HistoryWriter, PostState, ProviderError,
-    PruneCheckpointReader, PruneCheckpointWriter, StageCheckpointReader, StorageReader,
-    TransactionsProvider, WithdrawalsProvider,
+    Chain, EvmEnvProvider, HashingWriter, HeaderProvider, HistoryWriter, OriginalValuesKnown,
+    ProviderError, PruneCheckpointReader, PruneCheckpointWriter, StageCheckpointReader,
+    StorageReader, TransactionsProvider, WithdrawalsProvider,
 };
 use itertools::{izip, Itertools};
 use reth_db::{
@@ -43,7 +43,7 @@ use reth_revm_primitives::{
 };
 use reth_trie::{prefix_set::PrefixSetMut, StateRoot};
 use std::{
-    collections::{btree_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet},
+    collections::{hash_map, BTreeMap, BTreeSet, HashMap, HashSet},
     fmt::Debug,
     ops::{Deref, DerefMut, Range, RangeBounds, RangeInclusive},
     sync::Arc,
@@ -197,8 +197,12 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> DatabaseProvider<'this, TX> {
 
     // TODO(joshie) TEMPORARY should be moved to trait providers
 
-    /// Traverse over changesets and plain state and recreate the [`PostState`]s for the given range
-    /// of blocks.
+    /// Unwind or peek at the last N blocks of state, recreating the [`BundleStateWithReceipts`].
+    ///
+    /// If UNWIND is set to true, the tip and latest state will be unwound
+    /// and returned back with all the blocks.
+    ///
+    /// If UNWIND is false, we will just read the state/blocks and return them.
     ///
     /// 1. Iterate over the [BlockBodyIndices][tables::BlockBodyIndices] table to get all
     /// the transaction ids.
@@ -217,16 +221,14 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> DatabaseProvider<'this, TX> {
     ///     1. Take the old value from the changeset
     ///     2. Take the new value from the local state
     ///     3. Set the local state to the value in the changeset
-    ///
-    /// If `TAKE` is `true`, the local state will be written to the plain state tables.
-    /// 5. Get all receipts from table
-    fn get_take_block_execution_result_range<const TAKE: bool>(
+    fn unwind_or_peek_state<const UNWIND: bool>(
         &self,
         range: RangeInclusive<BlockNumber>,
-    ) -> Result<Vec<PostState>> {
+    ) -> Result<BundleStateWithReceipts> {
         if range.is_empty() {
-            return Ok(Vec::new())
+            return Ok(BundleStateWithReceipts::default())
         }
+        let start_block_number = *range.start();
 
         // We are not removing block meta as it is used to get block changesets.
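        // A minimal sketch of how the two const-generic modes might be driven
        // (illustrative only: `provider` is an assumed `DatabaseProvider`, and
        // the block range is arbitrary):
        //
        //     // Peek: recreate the bundle state for blocks 10..=20 without
        //     // touching the plain state tables.
        //     let peeked = provider.unwind_or_peek_state::<false>(10..=20)?;
        //     // Unwind: additionally revert the plain state tables to their
        //     // values before block 10 and take the changes.
        //     let unwound = provider.unwind_or_peek_state::<true>(10..=20)?;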
        let block_bodies = self.get_or_take::<tables::BlockBodyIndices, UNWIND>(range.clone())?;
@@ -236,146 +238,139 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> DatabaseProvider<'this, TX> {
             block_bodies.first().expect("already checked if there are blocks").1.first_tx_num();
         let to_transaction_num =
             block_bodies.last().expect("already checked if there are blocks").1.last_tx_num();
-        let receipts =
-            self.get_or_take::<tables::Receipts, TAKE>(from_transaction_num..=to_transaction_num)?;
 
         let storage_range = BlockNumberAddress::range(range.clone());
 
         let storage_changeset =
-            self.get_or_take::<tables::StorageChangeSet, TAKE>(storage_range)?;
-        let account_changeset = self.get_or_take::<tables::AccountChangeSet, TAKE>(range)?;
+            self.get_or_take::<tables::StorageChangeSet, UNWIND>(storage_range)?;
+        let account_changeset = self.get_or_take::<tables::AccountChangeSet, UNWIND>(range)?;
 
         // iterate previous value and get plain state value to create changeset
         // Double option around Account represents whether the Account state is known (first
         // option) and whether the account is removed (second option)
-        type LocalPlainState = BTreeMap<Address, (Option<Option<Account>>, BTreeMap<H256, U256>)>;
-        let mut local_plain_state: LocalPlainState = BTreeMap::new();
-
-        // iterate in reverse and get plain state.
-
-        // Bundle execution changeset to its particular transaction and block
-        let mut block_states =
-            BTreeMap::from_iter(block_bodies.iter().map(|(num, _)| (*num, PostState::default())));
+        let mut state: BundleStateInit = HashMap::new();
 
+        // This does not work for blocks that are not at the tip, as the plain state is not the
+        // last state of the end range. We should rename the functions or add support to access
+        // history state. Accessing history state can be tricky but we are not gaining
+        // anything.
         let mut plain_accounts_cursor = self.tx.cursor_write::<tables::PlainAccountState>()?;
         let mut plain_storage_cursor = self.tx.cursor_dup_write::<tables::PlainStorageState>()?;
 
+        let mut reverts: RevertsInit = HashMap::new();
 
+        // add account changeset changes
         for (block_number, account_before) in account_changeset.into_iter().rev() {
             let AccountBeforeTx { info: old_info, address } = account_before;
-            let new_info = match local_plain_state.entry(address) {
-                Entry::Vacant(entry) => {
-                    let new_account = plain_accounts_cursor.seek_exact(address)?.map(|kv| kv.1);
-                    entry.insert((Some(old_info), BTreeMap::new()));
-                    new_account
+            match state.entry(address) {
+                hash_map::Entry::Vacant(entry) => {
+                    let new_info = plain_accounts_cursor.seek_exact(address)?.map(|kv| kv.1);
+                    entry.insert((old_info, new_info, HashMap::new()));
                 }
-                Entry::Occupied(mut entry) => {
-                    let new_account = std::mem::replace(&mut entry.get_mut().0, Some(old_info));
-                    new_account.expect("As we are stacking account first, account would always be Some(Some) or Some(None)")
+                hash_map::Entry::Occupied(mut entry) => {
+                    // overwrite old account state.
+                    entry.get_mut().0 = old_info;
                 }
-            };
+            }
+            // insert old info into reverts.
+            reverts.entry(block_number).or_default().entry(address).or_default().0 = Some(old_info);
+        }
 
-            let post_state = block_states.entry(block_number).or_default();
-            match (old_info, new_info) {
-                (Some(old), Some(new)) => {
-                    if new != old {
-                        post_state.change_account(block_number, address, old, new);
-                    } else {
-                        unreachable!("Junk data in database: an account changeset did not represent any change");
-                    }
+        // add storage changeset changes
+        for (block_and_address, old_storage) in storage_changeset.into_iter().rev() {
+            let BlockNumberAddress((block_number, address)) = block_and_address;
+            // get account state or insert from plain state.
+ let account_state = match state.entry(address) { + hash_map::Entry::Vacant(entry) => { + let present_info = plain_accounts_cursor.seek_exact(address)?.map(|kv| kv.1); + entry.insert((present_info, present_info, HashMap::new())) } - (None, Some(account)) => post_state.create_account(block_number, address, account), - (Some(old), None) => - post_state.destroy_account(block_number, address, old), - (None, None) => unreachable!("Junk data in database: an account changeset transitioned from no account to no account"), + hash_map::Entry::Occupied(entry) => entry.into_mut(), }; - } - // add storage changeset changes - let mut storage_changes: BTreeMap = BTreeMap::new(); - for (block_and_address, storage_entry) in storage_changeset.into_iter().rev() { - let BlockNumberAddress((_, address)) = block_and_address; - let new_storage = - match local_plain_state.entry(address).or_default().1.entry(storage_entry.key) { - Entry::Vacant(entry) => { - let new_storage = plain_storage_cursor - .seek_by_key_subkey(address, storage_entry.key)? - .filter(|storage| storage.key == storage_entry.key) - .unwrap_or_default(); - entry.insert(storage_entry.value); - new_storage.value - } - Entry::Occupied(mut entry) => { - std::mem::replace(entry.get_mut(), storage_entry.value) - } - }; - storage_changes.entry(block_and_address).or_default().insert( - U256::from_be_bytes(storage_entry.key.0), - (storage_entry.value, new_storage), - ); - } + // match storage. + match account_state.2.entry(old_storage.key) { + hash_map::Entry::Vacant(entry) => { + let new_storage = plain_storage_cursor + .seek_by_key_subkey(address, old_storage.key)? + .filter(|storage| storage.key == old_storage.key) + .unwrap_or_default(); + entry.insert((old_storage.value, new_storage.value)); + } + hash_map::Entry::Occupied(mut entry) => { + entry.get_mut().0 = old_storage.value; + } + }; - for (BlockNumberAddress((block_number, address)), storage_changeset) in - storage_changes.into_iter() - { - block_states.entry(block_number).or_default().change_storage( - block_number, - address, - storage_changeset, - ); + reverts + .entry(block_number) + .or_default() + .entry(address) + .or_default() + .1 + .push(old_storage); } - if TAKE { + if UNWIND { // iterate over local plain state remove all account and all storages. - for (address, (account, storage)) in local_plain_state.into_iter() { - // revert account - if let Some(account) = account { - let existing_entry = plain_accounts_cursor.seek_exact(address)?; - if let Some(account) = account { - plain_accounts_cursor.upsert(address, account)?; + for (address, (old_account, new_account, storage)) in state.iter() { + // revert account if needed. + if old_account != new_account { + let existing_entry = plain_accounts_cursor.seek_exact(*address)?; + if let Some(account) = old_account { + plain_accounts_cursor.upsert(*address, *account)?; } else if existing_entry.is_some() { plain_accounts_cursor.delete_current()?; } } // revert storages - for (storage_key, storage_value) in storage.into_iter() { - let storage_entry = StorageEntry { key: storage_key, value: storage_value }; + for (storage_key, (old_storage_value, _new_storage_value)) in storage { + let storage_entry = + StorageEntry { key: *storage_key, value: *old_storage_value }; // delete previous value // TODO: This does not use dupsort features if plain_storage_cursor - .seek_by_key_subkey(address, storage_key)? - .filter(|s| s.key == storage_key) + .seek_by_key_subkey(*address, *storage_key)? 
+ .filter(|s| s.key == *storage_key) .is_some() { plain_storage_cursor.delete_current()? } - // TODO: This does not use dupsort features // insert value if needed - if storage_value != U256::ZERO { - plain_storage_cursor.upsert(address, storage_entry)?; + if *old_storage_value != U256::ZERO { + plain_storage_cursor.upsert(*address, storage_entry)?; } } } } // iterate over block body and create ExecutionResult - let mut receipt_iter = receipts.into_iter(); + let mut receipt_iter = self + .get_or_take::(from_transaction_num..=to_transaction_num)? + .into_iter(); + let mut receipts = Vec::new(); // loop break if we are at the end of the blocks. - for (block_number, block_body) in block_bodies.into_iter() { + for (_, block_body) in block_bodies.into_iter() { + let mut block_receipts = Vec::with_capacity(block_body.tx_count as usize); for _ in block_body.tx_num_range() { if let Some((_, receipt)) = receipt_iter.next() { - block_states - .entry(block_number) - .or_default() - .add_receipt(block_number, receipt); + block_receipts.push(Some(receipt)); } } + receipts.push(block_receipts); } - Ok(block_states.into_values().collect()) + + Ok(BundleStateWithReceipts::new_init( + state, + reverts, + Vec::new(), + receipts, + start_block_number, + )) } /// Return list of entries from table @@ -1826,11 +1821,12 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> HistoryWriter for DatabaseProvider } impl<'this, TX: DbTxMut<'this> + DbTx<'this>> BlockExecutionWriter for DatabaseProvider<'this, TX> { + /// Return range of blocks and its execution result fn get_or_take_block_and_execution_range( &self, chain_spec: &ChainSpec, range: RangeInclusive, - ) -> Result> { + ) -> Result { if TAKE { let storage_range = BlockNumberAddress::range(range.clone()); @@ -1905,9 +1901,7 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> BlockExecutionWriter for DatabaseP let blocks = self.get_take_block_range::(chain_spec, range.clone())?; let unwind_to = blocks.first().map(|b| b.number.saturating_sub(1)); // get execution res - let execution_res = self.get_take_block_execution_result_range::(range.clone())?; - // combine them - let blocks_with_exec_result: Vec<_> = blocks.into_iter().zip(execution_res).collect(); + let execution_state = self.unwind_or_peek_state::(range.clone())?; // remove block bodies it is needed for both get block range and get block execution results // that is why it is deleted afterwards. @@ -1921,8 +1915,7 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> BlockExecutionWriter for DatabaseP } } - // return them - Ok(blocks_with_exec_result) + Ok(Chain::new(blocks, execution_state)) } } @@ -2021,10 +2014,10 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> BlockWriter for DatabaseProvider<' Ok(block_indices) } - fn append_blocks_with_post_state( + fn append_blocks_with_bundle_state( &self, blocks: Vec, - state: PostState, + state: BundleStateWithReceipts, prune_modes: Option<&PruneModes>, ) -> Result<()> { if blocks.is_empty() { @@ -2048,7 +2041,7 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> BlockWriter for DatabaseProvider<' // Write state and changesets to the database. // Must be written after blocks because of the receipt lookup. 
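        // A sketch of the renamed writer's call pattern, mirroring the test
        // utilities further below (the `provider`, `block1`, and `bundle_state`
        // values are assumed):
        //
        //     provider.append_blocks_with_bundle_state(
        //         vec![block1],    // Vec<SealedBlockWithSenders>
        //         bundle_state,    // BundleStateWithReceipts
        //         None,            // Option<&PruneModes>: no pruning in this sketch
        //     )?;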
- state.write_to_db(self.tx_ref(), new_tip_number)?; + state.write_to_db(self.tx_ref(), OriginalValuesKnown::No)?; self.insert_hashes(first_number..=last_block_number, last_block_hash, expected_state_root)?; diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 812606a5d397..9b46aa2d6e96 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -1,8 +1,8 @@ use crate::{ BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, - BlockchainTreePendingStateProvider, CanonChainTracker, CanonStateNotifications, - CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, HeaderProvider, - PostStateDataProvider, ProviderError, PruneCheckpointReader, ReceiptProvider, + BlockchainTreePendingStateProvider, BundleStateDataProvider, CanonChainTracker, + CanonStateNotifications, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, + EvmEnvProvider, HeaderProvider, ProviderError, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProviderBox, StateProviderFactory, TransactionsProvider, WithdrawalsProvider, }; @@ -32,13 +32,13 @@ use std::{ }; use tracing::trace; +mod bundle_state_provider; mod chain_info; mod database; -mod post_state_provider; mod state; use crate::{providers::chain_info::ChainInfoTracker, traits::BlockSource}; +pub use bundle_state_provider::BundleStateProvider; pub use database::*; -pub use post_state_provider::PostStateProvider; use reth_db::models::AccountBeforeTx; use reth_interfaces::blockchain_tree::{ error::InsertBlockError, CanonicalOutcome, InsertPayloadOk, @@ -515,13 +515,13 @@ where fn pending_with_provider( &self, - post_state_data: Box, + post_state_data: Box, ) -> Result> { let canonical_fork = post_state_data.canonical_fork(); trace!(target: "providers::blockchain", ?canonical_fork, "Returning post state provider"); let state_provider = self.history_by_block_hash(canonical_fork.hash)?; - let post_state_provider = PostStateProvider::new(state_provider, post_state_data); + let post_state_provider = BundleStateProvider::new(state_provider, post_state_data); Ok(Box::new(post_state_provider)) } } @@ -754,7 +754,7 @@ where fn find_pending_state_provider( &self, block_hash: BlockHash, - ) -> Option> { + ) -> Option> { self.tree.find_pending_state_provider(block_hash) } } diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 01c9ea8b61fb..dc53d15f80f8 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -1,6 +1,6 @@ use crate::{ - providers::state::macros::delegate_provider_impls, AccountReader, BlockHashReader, PostState, - ProviderError, StateProvider, StateRootProvider, + providers::state::macros::delegate_provider_impls, AccountReader, BlockHashReader, + BundleStateWithReceipts, ProviderError, StateProvider, StateRootProvider, }; use reth_db::{ cursor::{DbCursorRO, DbDupCursorRO}, @@ -201,7 +201,7 @@ impl<'a, 'b, TX: DbTx<'a>> BlockHashReader for HistoricalStateProviderRef<'a, 'b } impl<'a, 'b, TX: DbTx<'a>> StateRootProvider for HistoricalStateProviderRef<'a, 'b, TX> { - fn state_root(&self, _post_state: PostState) -> Result { + fn state_root(&self, _post_state: BundleStateWithReceipts) -> Result { Err(ProviderError::StateRootNotAvailableForHistoricalBlock.into()) } } diff --git 
a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index 77a8692b51d7..7e4cfeea8b1d 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -1,6 +1,6 @@ use crate::{ - providers::state::macros::delegate_provider_impls, AccountReader, BlockHashReader, PostState, - StateProvider, StateRootProvider, + providers::state::macros::delegate_provider_impls, AccountReader, BlockHashReader, + BundleStateWithReceipts, StateProvider, StateRootProvider, }; use reth_db::{ cursor::{DbCursorRO, DbDupCursorRO}, @@ -56,8 +56,8 @@ impl<'a, 'b, TX: DbTx<'a>> BlockHashReader for LatestStateProviderRef<'a, 'b, TX } impl<'a, 'b, TX: DbTx<'a>> StateRootProvider for LatestStateProviderRef<'a, 'b, TX> { - fn state_root(&self, post_state: PostState) -> Result { - post_state + fn state_root(&self, bundle_state: BundleStateWithReceipts) -> Result { + bundle_state .state_root_slow(self.db) .map_err(|err| reth_interfaces::Error::Database(err.into())) } diff --git a/crates/storage/provider/src/providers/state/macros.rs b/crates/storage/provider/src/providers/state/macros.rs index 8c8c5f709d58..301719e31be7 100644 --- a/crates/storage/provider/src/providers/state/macros.rs +++ b/crates/storage/provider/src/providers/state/macros.rs @@ -31,7 +31,7 @@ macro_rules! delegate_provider_impls { $crate::providers::state::macros::delegate_impls_to_as_ref!( for $target => StateRootProvider $(where [$($generics)*])? { - fn state_root(&self, state: crate::PostState) -> reth_interfaces::Result; + fn state_root(&self, state: crate::BundleStateWithReceipts) -> reth_interfaces::Result; } AccountReader $(where [$($generics)*])? { fn basic_account(&self, address: reth_primitives::Address) -> reth_interfaces::Result>; diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 7df40d45e00c..55982b010870 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -1,13 +1,13 @@ //! 
Dummy blocks and data for tests -use crate::{post_state::PostState, DatabaseProviderRW}; +use crate::{BundleStateWithReceipts, DatabaseProviderRW}; use reth_db::{database::Database, models::StoredBlockBodyIndices, tables}; use reth_primitives::{ hex_literal::hex, Account, BlockNumber, Bytes, Header, Log, Receipt, SealedBlock, - SealedBlockWithSenders, TxType, Withdrawal, H160, H256, U256, + SealedBlockWithSenders, StorageEntry, TxType, Withdrawal, H160, H256, U256, }; use reth_rlp::Decodable; -use std::collections::BTreeMap; +use std::collections::HashMap; /// Assert genesis block pub fn assert_genesis_block(provider: &DatabaseProviderRW<'_, DB>, g: SealedBlock) { @@ -53,7 +53,7 @@ pub struct BlockChainTestData { /// Genesis pub genesis: SealedBlock, /// Blocks with its execution result - pub blocks: Vec<(SealedBlockWithSenders, PostState)>, + pub blocks: Vec<(SealedBlockWithSenders, BundleStateWithReceipts)>, } impl BlockChainTestData { @@ -85,7 +85,7 @@ pub fn genesis() -> SealedBlock { } /// Block one that points to genesis -fn block1(number: BlockNumber) -> (SealedBlockWithSenders, PostState) { +fn block1(number: BlockNumber) -> (SealedBlockWithSenders, BundleStateWithReceipts) { let mut block_rlp = hex!("f9025ff901f7a0c86e8cc0310ae7c531c758678ddbfd16fc51c8cef8cec650b032de9869e8b94fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa050554882fbbda2c2fd93fdc466db9946ea262a67f7a76cc169e714f105ab583da00967f09ef1dfed20c0eacfaa94d5cd4002eda3242ac47eae68972d07b106d192a0e3c8b47fbfc94667ef4cceb17e5cc21e3b1eebd442cebb27f07562b33836290db90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001830f42408238108203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f862f860800a83061a8094095e7baea6a6c7c4c2dfeb977efac326af552d8780801ba072ed817487b84ba367d15d2f039b5fc5f087d0a8882fbdf73e8cb49357e1ce30a0403d800545b8fc544f92ce8124e2255f8c3c6af93f28243a120585d4c4c6a2a3c0").as_slice(); let mut block = SealedBlock::decode(&mut block_rlp).unwrap(); block.withdrawals = Some(vec![Withdrawal::default()]); @@ -96,27 +96,39 @@ fn block1(number: BlockNumber) -> (SealedBlockWithSenders, PostState) { header.parent_hash = H256::zero(); block.header = header.seal_slow(); - let mut post_state = PostState::default(); - // Transaction changes - post_state.create_account( - number, - H160([0x61; 20]), - Account { nonce: 1, balance: U256::from(10), bytecode_hash: None }, - ); - post_state.create_account( - number, - H160([0x60; 20]), - Account { nonce: 1, balance: U256::from(10), bytecode_hash: None }, - ); - post_state.change_storage( - number, - H160([0x60; 20]), - BTreeMap::from([(U256::from(5), (U256::ZERO, U256::from(10)))]), - ); + // block changes + let account1: H160 = [0x60; 20].into(); + let account2: H160 = [0x61; 20].into(); + let slot: H256 = H256::from_low_u64_be(5); - post_state.add_receipt( - number, - Receipt { + let bundle = BundleStateWithReceipts::new_init( + HashMap::from([ + ( + account1, + ( + None, + Some(Account { nonce: 1, balance: 
U256::from(10), bytecode_hash: None }), + HashMap::from([(slot, (U256::from(0), U256::from(10)))]), + ), + ), + ( + account2, + ( + None, + Some(Account { nonce: 1, balance: U256::from(10), bytecode_hash: None }), + HashMap::from([]), + ), + ), + ]), + HashMap::from([( + number, + HashMap::from([ + (account1, (Some(None), vec![StorageEntry::new(slot, U256::from(0))])), + (account2, (Some(None), vec![])), + ]), + )]), + vec![], + vec![vec![Some(Receipt { tx_type: TxType::EIP2930, success: true, cumulative_gas_used: 300, @@ -125,14 +137,18 @@ fn block1(number: BlockNumber) -> (SealedBlockWithSenders, PostState) { topics: vec![H256::from_low_u64_be(1), H256::from_low_u64_be(2)], data: Bytes::default(), }], - }, + })]], + number, ); - (SealedBlockWithSenders { block, senders: vec![H160([0x30; 20])] }, post_state) + (SealedBlockWithSenders { block, senders: vec![H160([0x30; 20])] }, bundle) } /// Block two that points to block 1 -fn block2(number: BlockNumber, parent_hash: H256) -> (SealedBlockWithSenders, PostState) { +fn block2( + number: BlockNumber, + parent_hash: H256, +) -> (SealedBlockWithSenders, BundleStateWithReceipts) { let mut block_rlp = hex!("f9025ff901f7a0c86e8cc0310ae7c531c758678ddbfd16fc51c8cef8cec650b032de9869e8b94fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa050554882fbbda2c2fd93fdc466db9946ea262a67f7a76cc169e714f105ab583da00967f09ef1dfed20c0eacfaa94d5cd4002eda3242ac47eae68972d07b106d192a0e3c8b47fbfc94667ef4cceb17e5cc21e3b1eebd442cebb27f07562b33836290db90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001830f42408238108203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f862f860800a83061a8094095e7baea6a6c7c4c2dfeb977efac326af552d8780801ba072ed817487b84ba367d15d2f039b5fc5f087d0a8882fbdf73e8cb49357e1ce30a0403d800545b8fc544f92ce8124e2255f8c3c6af93f28243a120585d4c4c6a2a3c0").as_slice(); let mut block = SealedBlock::decode(&mut block_rlp).unwrap(); block.withdrawals = Some(vec![Withdrawal::default()]); @@ -144,22 +160,31 @@ fn block2(number: BlockNumber, parent_hash: H256) -> (SealedBlockWithSenders, Po header.parent_hash = parent_hash; block.header = header.seal_slow(); - let mut post_state = PostState::default(); // block changes - post_state.change_account( - number, - H160([0x60; 20]), - Account { nonce: 1, balance: U256::from(10), bytecode_hash: None }, - Account { nonce: 3, balance: U256::from(20), bytecode_hash: None }, - ); - post_state.change_storage( - number, - H160([0x60; 20]), - BTreeMap::from([(U256::from(5), (U256::from(10), U256::from(15)))]), - ); - post_state.add_receipt( - number, - Receipt { + let account: H160 = [0x60; 20].into(); + let slot: H256 = H256::from_low_u64_be(5); + + let bundle = BundleStateWithReceipts::new_init( + HashMap::from([( + account, + ( + None, + Some(Account { nonce: 3, balance: U256::from(20), bytecode_hash: None }), + HashMap::from([(slot, (U256::from(0), U256::from(15)))]), + ), + )]), + HashMap::from([( + number, + HashMap::from([( + account, + ( + Some(Some(Account 
{ nonce: 1, balance: U256::from(10), bytecode_hash: None })), + vec![StorageEntry::new(slot, U256::from(10))], + ), + )]), + )]), + vec![], + vec![vec![Some(Receipt { tx_type: TxType::EIP1559, success: false, cumulative_gas_used: 400, @@ -168,8 +193,8 @@ fn block2(number: BlockNumber, parent_hash: H256) -> (SealedBlockWithSenders, Po topics: vec![H256::from_low_u64_be(3), H256::from_low_u64_be(4)], data: Bytes::default(), }], - }, + })]], + number, ); - - (SealedBlockWithSenders { block, senders: vec![H160([0x31; 20])] }, post_state) + (SealedBlockWithSenders { block, senders: vec![H160([0x31; 20])] }, bundle) } diff --git a/crates/storage/provider/src/test_utils/executor.rs b/crates/storage/provider/src/test_utils/executor.rs index 58bca889e5a7..5ed366dfb3cb 100644 --- a/crates/storage/provider/src/test_utils/executor.rs +++ b/crates/storage/provider/src/test_utils/executor.rs @@ -1,19 +1,25 @@ -use crate::{post_state::PostState, BlockExecutor, ExecutorFactory, StateProvider}; +use crate::{ + bundle_state::BundleStateWithReceipts, BlockExecutor, BlockExecutorStats, ExecutorFactory, + PrunableBlockExecutor, StateProvider, +}; use parking_lot::Mutex; use reth_interfaces::executor::BlockExecutionError; -use reth_primitives::{Address, Block, ChainSpec, U256}; +use reth_primitives::{Address, Block, BlockNumber, ChainSpec, PruneModes, U256}; use std::sync::Arc; /// Test executor with mocked result. -pub struct TestExecutor(pub Option); +pub struct TestExecutor(pub Option); -impl BlockExecutor for TestExecutor { +impl BlockExecutor for TestExecutor { fn execute( &mut self, _block: &Block, _total_difficulty: U256, _senders: Option>, - ) -> Result { - self.0.clone().ok_or(BlockExecutionError::UnavailableForTest) + ) -> Result<(), BlockExecutionError> { + if self.0.is_none() { + return Err(BlockExecutionError::UnavailableForTest) + } + Ok(()) } fn execute_and_verify_receipt( @@ -21,15 +27,36 @@ impl BlockExecutor for TestExecutor { _block: &Block, _total_difficulty: U256, _senders: Option>, - ) -> Result { - self.0.clone().ok_or(BlockExecutionError::UnavailableForTest) + ) -> Result<(), BlockExecutionError> { + if self.0.is_none() { + return Err(BlockExecutionError::UnavailableForTest) + } + Ok(()) + } + + fn take_output_state(&mut self) -> BundleStateWithReceipts { + self.0.clone().unwrap_or_default() + } + + fn stats(&self) -> BlockExecutorStats { + BlockExecutorStats::default() + } + + fn size_hint(&self) -> Option { + None } } +impl PrunableBlockExecutor for TestExecutor { + fn set_tip(&mut self, _tip: BlockNumber) {} + + fn set_prune_modes(&mut self, _prune_modes: PruneModes) {} +} + /// Executor factory with pre-set execution results. 
#[derive(Clone, Debug)] pub struct TestExecutorFactory { - exec_results: Arc>>, + exec_results: Arc>>, chain_spec: Arc, } @@ -40,17 +67,18 @@ impl TestExecutorFactory { } /// Extend the mocked execution results - pub fn extend(&self, results: Vec) { + pub fn extend(&self, results: Vec) { self.exec_results.lock().extend(results); } } impl ExecutorFactory for TestExecutorFactory { - type Executor = TestExecutor; - - fn with_sp(&self, _sp: SP) -> Self::Executor { + fn with_state<'a, SP: StateProvider + 'a>( + &'a self, + _sp: SP, + ) -> Box { let exec_res = self.exec_results.lock().pop(); - TestExecutor(exec_res) + Box::new(TestExecutor(exec_res)) } fn chain_spec(&self) -> &ChainSpec { diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index ccb39f6c97b9..11e690715601 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -1,7 +1,8 @@ use crate::{ + bundle_state::BundleStateWithReceipts, traits::{BlockSource, ReceiptProvider}, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, - ChainSpecProvider, EvmEnvProvider, HeaderProvider, PostState, PostStateDataProvider, + BundleStateDataProvider, ChainSpecProvider, EvmEnvProvider, HeaderProvider, ReceiptProviderIdExt, StateProvider, StateProviderBox, StateProviderFactory, StateRootProvider, TransactionsProvider, WithdrawalsProvider, }; @@ -399,7 +400,7 @@ impl AccountReader for MockEthProvider { } impl StateRootProvider for MockEthProvider { - fn state_root(&self, _post_state: PostState) -> Result { + fn state_root(&self, _state: BundleStateWithReceipts) -> Result { todo!() } } @@ -498,7 +499,7 @@ impl StateProviderFactory for MockEthProvider { fn pending_with_provider<'a>( &'a self, - _post_state_data: Box, + _post_state_data: Box, ) -> Result> { todo!() } @@ -531,7 +532,7 @@ impl StateProviderFactory for Arc { fn pending_with_provider<'a>( &'a self, - _post_state_data: Box, + _post_state_data: Box, ) -> Result> { todo!() } diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 593b338343f2..530f0cbbd7fa 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -1,10 +1,10 @@ use crate::{ + bundle_state::BundleStateWithReceipts, traits::{BlockSource, ReceiptProvider}, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, - ChainSpecProvider, ChangeSetReader, EvmEnvProvider, HeaderProvider, PostState, - PruneCheckpointReader, ReceiptProviderIdExt, StageCheckpointReader, StateProvider, - StateProviderBox, StateProviderFactory, StateRootProvider, TransactionsProvider, - WithdrawalsProvider, + ChainSpecProvider, ChangeSetReader, EvmEnvProvider, HeaderProvider, PruneCheckpointReader, + ReceiptProviderIdExt, StageCheckpointReader, StateProvider, StateProviderBox, + StateProviderFactory, StateRootProvider, TransactionsProvider, WithdrawalsProvider, }; use reth_db::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_interfaces::Result; @@ -243,7 +243,7 @@ impl ChangeSetReader for NoopProvider { } impl StateRootProvider for NoopProvider { - fn state_root(&self, _post_state: PostState) -> Result { + fn state_root(&self, _state: BundleStateWithReceipts) -> Result { todo!() } } @@ -333,7 +333,7 @@ impl StateProviderFactory for NoopProvider { fn pending_with_provider<'a>( &'a self, - _post_state_data: Box, + _post_state_data: Box, ) -> 
Result> { Ok(Box::new(*self)) } diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index c780701619e8..a094bd2f81bb 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -1,5 +1,5 @@ use crate::{ - BlockIdReader, BlockNumReader, HeaderProvider, PostState, ReceiptProvider, + BlockIdReader, BlockNumReader, BundleStateWithReceipts, Chain, HeaderProvider, ReceiptProvider, ReceiptProviderIdExt, TransactionsProvider, WithdrawalsProvider, }; use auto_impl::auto_impl; @@ -210,7 +210,7 @@ pub trait BlockExecutionWriter: BlockWriter + BlockReader + Send + Sync { &self, chain_spec: &ChainSpec, range: RangeInclusive, - ) -> Result> { + ) -> Result { self.get_or_take_block_and_execution_range::(chain_spec, range) } @@ -219,7 +219,7 @@ pub trait BlockExecutionWriter: BlockWriter + BlockReader + Send + Sync { &self, chain_spec: &ChainSpec, range: RangeInclusive, - ) -> Result> { + ) -> Result { self.get_or_take_block_and_execution_range::(chain_spec, range) } @@ -228,7 +228,7 @@ pub trait BlockExecutionWriter: BlockWriter + BlockReader + Send + Sync { &self, chain_spec: &ChainSpec, range: RangeInclusive, - ) -> Result>; + ) -> Result; } /// Block Writer @@ -250,7 +250,7 @@ pub trait BlockWriter: Send + Sync { /// updates the post-state. /// /// Inserts the blocks into the database and updates the state with - /// provided `PostState`. + /// provided `BundleState`. /// /// # Parameters /// @@ -261,11 +261,10 @@ pub trait BlockWriter: Send + Sync { /// # Returns /// /// Returns `Ok(())` on success, or an error if any operation fails. - - fn append_blocks_with_post_state( + fn append_blocks_with_bundle_state( &self, blocks: Vec, - state: PostState, + state: BundleStateWithReceipts, prune_modes: Option<&PruneModes>, ) -> Result<()>; } diff --git a/crates/storage/provider/src/traits/chain.rs b/crates/storage/provider/src/traits/chain.rs index 93f5a61a4b78..c87f17573377 100644 --- a/crates/storage/provider/src/traits/chain.rs +++ b/crates/storage/provider/src/traits/chain.rs @@ -59,8 +59,8 @@ impl Stream for CanonStateNotificationStream { } /// Chain action that is triggered when a new block is imported or old block is reverted. -/// and will return all [`crate::PostState`] and [`reth_primitives::SealedBlockWithSenders`] of both -/// reverted and committed blocks. +/// and will return all [`crate::BundleStateWithReceipts`] and +/// [`reth_primitives::SealedBlockWithSenders`] of both reverted and committed blocks. #[derive(Clone, Debug)] #[allow(missing_docs)] pub enum CanonStateNotification { diff --git a/crates/storage/provider/src/traits/executor.rs b/crates/storage/provider/src/traits/executor.rs index 2ff648d43e87..e0c1805fa11a 100644 --- a/crates/storage/provider/src/traits/executor.rs +++ b/crates/storage/provider/src/traits/executor.rs @@ -1,25 +1,27 @@ //! Executor Factory -use crate::{post_state::PostState, StateProvider}; +use crate::{bundle_state::BundleStateWithReceipts, StateProvider}; use reth_interfaces::executor::BlockExecutionError; -use reth_primitives::{Address, Block, ChainSpec, U256}; +use reth_primitives::{Address, Block, BlockNumber, ChainSpec, PruneModes, U256}; +use std::time::Duration; +use tracing::info; /// Executor factory that would create the EVM with particular state provider. /// /// It can be used to mock executor. 
 pub trait ExecutorFactory: Send + Sync + 'static {
-    /// The executor produced by the factory
-    type Executor<SP: StateProvider>: BlockExecutor<SP>;
-
     /// Executor with [`StateProvider`]
-    fn with_sp<SP: StateProvider>(&self, sp: SP) -> Self::Executor<SP>;
+    fn with_state<'a, SP: StateProvider + 'a>(
+        &'a self,
+        _sp: SP,
+    ) -> Box<dyn PrunableBlockExecutor + 'a>;
 
     /// Return internal chainspec
     fn chain_spec(&self) -> &ChainSpec;
 }
 
 /// An executor capable of executing a block.
-pub trait BlockExecutor<SP: StateProvider> {
+pub trait BlockExecutor {
     /// Execute a block.
     ///
     /// The number of `senders` should be equal to the number of transactions in the block.
@@ -33,13 +35,63 @@ pub trait BlockExecutor {
         block: &Block,
         total_difficulty: U256,
         senders: Option<Vec<Address>>,
-    ) -> Result<PostState, BlockExecutionError>;
+    ) -> Result<(), BlockExecutionError>;
 
-    /// Executes the block and checks receipts
+    /// Executes the block and checks receipts.
     fn execute_and_verify_receipt(
         &mut self,
         block: &Block,
         total_difficulty: U256,
         senders: Option<Vec<Address>>,
-    ) -> Result<PostState, BlockExecutionError>;
+    ) -> Result<(), BlockExecutionError>;
+
+    /// Return the bundle state. This is the output of the executed blocks.
+    fn take_output_state(&mut self) -> BundleStateWithReceipts;
+
+    /// Internal statistics of execution.
+    fn stats(&self) -> BlockExecutorStats;
+
+    /// Returns the size hint of current in-memory changes.
+    fn size_hint(&self) -> Option<usize>;
+}
+
+/// A [BlockExecutor] capable of in-memory pruning of the data that will be written to the database.
+pub trait PrunableBlockExecutor: BlockExecutor {
+    /// Set tip - highest known block number.
+    fn set_tip(&mut self, tip: BlockNumber);
+
+    /// Set prune modes.
+    fn set_prune_modes(&mut self, prune_modes: PruneModes);
+}
+
+/// Block execution statistics. Contains the duration of each step of block execution.
+#[derive(Clone, Debug, Default)]
+pub struct BlockExecutorStats {
+    /// Execution duration.
+    pub execution_duration: Duration,
+    /// Time needed to apply output of revm execution to revm cached state.
+    pub apply_state_duration: Duration,
+    /// Time needed to apply post execution state changes.
+    pub apply_post_execution_state_changes_duration: Duration,
+    /// Time needed to merge transitions and create reverts.
+    /// At this time, transitions are applied to the revm bundle state.
+    pub merge_transitions_duration: Duration,
+    /// Time needed to calculate receipt roots.
+    pub receipt_root_duration: Duration,
+    /// Time needed to recover senders.
+    pub sender_recovery_duration: Duration,
+}
+
+impl BlockExecutorStats {
+    /// Log durations at info level.
+    pub fn log_info(&self) {
+        info!(target: "evm",
+            evm_transact = ?self.execution_duration,
+            apply_state = ?self.apply_state_duration,
+            apply_post_state = ?self.apply_post_execution_state_changes_duration,
+            merge_transitions = ?self.merge_transitions_duration,
+            receipt_root = ?self.receipt_root_duration,
+            sender_recovery = ?self.sender_recovery_duration,
+            "Execution time");
+    }
+}
diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs
index 3c2e06d21142..474549577185 100644
--- a/crates/storage/provider/src/traits/mod.rs
+++ b/crates/storage/provider/src/traits/mod.rs
@@ -29,7 +29,7 @@ pub use receipts::{ReceiptProvider, ReceiptProviderIdExt};
 
 mod state;
 pub use state::{
-    BlockchainTreePendingStateProvider, PostStateDataProvider, StateProvider, StateProviderBox,
+    BlockchainTreePendingStateProvider, BundleStateDataProvider, StateProvider, StateProviderBox,
     StateProviderFactory, StateRootProvider,
 };
 
@@ -40,7 +40,7 @@ mod withdrawals;
 pub use withdrawals::WithdrawalsProvider;
 
 mod executor;
-pub use executor::{BlockExecutor, ExecutorFactory};
+pub use executor::{BlockExecutor, BlockExecutorStats, ExecutorFactory, PrunableBlockExecutor};
 
 mod chain;
 pub use chain::{
diff --git a/crates/storage/provider/src/traits/state.rs b/crates/storage/provider/src/traits/state.rs
index 66b66bb9eb72..bea4bc207f22 100644
--- a/crates/storage/provider/src/traits/state.rs
+++ b/crates/storage/provider/src/traits/state.rs
@@ -1,5 +1,5 @@
 use super::AccountReader;
-use crate::{post_state::PostState, BlockHashReader, BlockIdReader};
+use crate::{BlockHashReader, BlockIdReader, BundleStateWithReceipts};
 use auto_impl::auto_impl;
 use reth_interfaces::{provider::ProviderError, Result};
 use reth_primitives::{
@@ -177,7 +177,7 @@ pub trait StateProviderFactory: BlockIdReader + Send + Sync {
     /// Used to inspect or execute transaction on the pending state.
     fn pending_with_provider(
         &self,
-        post_state_data: Box<dyn PostStateDataProvider>,
+        post_state_data: Box<dyn BundleStateDataProvider>,
     ) -> Result<StateProviderBox<'_>>;
 }
 
@@ -191,7 +191,7 @@ pub trait BlockchainTreePendingStateProvider: Send + Sync {
     fn pending_state_provider(
         &self,
         block_hash: BlockHash,
-    ) -> Result<Box<dyn PostStateDataProvider>> {
+    ) -> Result<Box<dyn BundleStateDataProvider>> {
         Ok(self
             .find_pending_state_provider(block_hash)
             .ok_or(ProviderError::StateForHashNotFound(block_hash))?)
@@ -201,20 +201,20 @@ pub trait BlockchainTreePendingStateProvider: Send + Sync {
     fn find_pending_state_provider(
         &self,
         block_hash: BlockHash,
-    ) -> Option<Box<dyn PostStateDataProvider>>;
+    ) -> Option<Box<dyn BundleStateDataProvider>>;
 }
 
 /// Post state data needed for execution on it.
 /// This trait is used to create a state provider over pending state.
 ///
 /// Pending state contains:
-/// * [`PostState`] contains all changes of accounts and storage of the pending chain
+/// * [`BundleStateWithReceipts`] contains all changes of accounts and storage of the pending chain
 /// * block hashes of pending chain and canonical blocks.
 /// * canonical fork, the block from which the pending chain was forked.
 #[auto_impl[Box,&]]
-pub trait PostStateDataProvider: Send + Sync {
+pub trait BundleStateDataProvider: Send + Sync {
     /// Return post state
-    fn state(&self) -> &PostState;
+    fn state(&self) -> &BundleStateWithReceipts;
     /// Return block hash by block number of pending or canonical chain.
     fn block_hash(&self, block_number: BlockNumber) -> Option<BlockHash>;
     /// Return the canonical fork, the block from which the post state was forked.
@@ -226,7 +226,6 @@
 
 /// A type that can compute the state root of a given post state.
#[auto_impl[Box,&, Arc]] pub trait StateRootProvider: Send + Sync { - /// Returns the state root of the PostState on top of the current state. - /// See [PostState::state_root_slow] for more info. - fn state_root(&self, post_state: PostState) -> Result; + /// Returns the state root of the BundleState on top of the current state. + fn state_root(&self, post_state: BundleStateWithReceipts) -> Result; } diff --git a/crates/storage/provider/src/transaction.rs b/crates/storage/provider/src/transaction.rs deleted file mode 100644 index 2ce2643c3b24..000000000000 --- a/crates/storage/provider/src/transaction.rs +++ /dev/null @@ -1,197 +0,0 @@ -use reth_interfaces::{db::DatabaseError as DbError, provider::ProviderError}; -use reth_primitives::{BlockHash, BlockNumber, H256}; -use reth_trie::StateRootError; -use std::fmt::Debug; - -#[cfg(test)] -mod test { - use crate::{test_utils::blocks::*, ProviderFactory, TransactionsProvider}; - use reth_db::{ - models::{storage_sharded_key::StorageShardedKey, ShardedKey}, - tables, - test_utils::create_test_rw_db, - }; - use reth_primitives::{ChainSpecBuilder, IntegerList, H160, MAINNET, U256}; - use std::sync::Arc; - - #[test] - fn insert_block_and_hashes_get_take() { - let db = create_test_rw_db(); - - // setup - let chain_spec = ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .shanghai_activated() - .build(); - - let factory = ProviderFactory::new(db.as_ref(), Arc::new(chain_spec.clone())); - let provider = factory.provider_rw().unwrap(); - - let data = BlockChainTestData::default(); - let genesis = data.genesis.clone(); - let (block1, exec_res1) = data.blocks[0].clone(); - let (block2, exec_res2) = data.blocks[1].clone(); - - let acc1_shard_key = ShardedKey::new(H160([0x60; 20]), u64::MAX); - let acc2_shard_key = ShardedKey::new(H160([0x61; 20]), u64::MAX); - let storage1_shard_key = - StorageShardedKey::new(H160([0x60; 20]), U256::from(5).into(), u64::MAX); - - provider.insert_block(data.genesis.clone(), None).unwrap(); - - assert_genesis_block(&provider, data.genesis); - - provider.append_blocks_with_post_state(vec![block1.clone()], exec_res1.clone()).unwrap(); - - assert_eq!( - provider.table::().unwrap(), - vec![ - (acc1_shard_key.clone(), IntegerList::new(vec![1]).unwrap()), - (acc2_shard_key.clone(), IntegerList::new(vec![1]).unwrap()) - ] - ); - assert_eq!( - provider.table::().unwrap(), - vec![(storage1_shard_key.clone(), IntegerList::new(vec![1]).unwrap())] - ); - - // get one block - let get = provider.get_block_and_execution_range(&chain_spec, 1..=1).unwrap(); - let get_block = get[0].0.clone(); - let get_state = get[0].1.clone(); - assert_eq!(get_block, block1); - assert_eq!(get_state, exec_res1); - - // take one block - let take = provider.take_block_and_execution_range(&chain_spec, 1..=1).unwrap(); - assert_eq!(take, vec![(block1.clone(), exec_res1.clone())]); - assert_genesis_block(&provider, genesis.clone()); - - // check if history is empty. 
- assert_eq!(provider.table::().unwrap(), vec![]); - assert_eq!(provider.table::().unwrap(), vec![]); - - provider.append_blocks_with_post_state(vec![block1.clone()], exec_res1.clone()).unwrap(); - provider.append_blocks_with_post_state(vec![block2.clone()], exec_res2.clone()).unwrap(); - - // check history of two blocks - assert_eq!( - provider.table::().unwrap(), - vec![ - (acc1_shard_key, IntegerList::new(vec![1, 2]).unwrap()), - (acc2_shard_key, IntegerList::new(vec![1]).unwrap()) - ] - ); - assert_eq!( - provider.table::().unwrap(), - vec![(storage1_shard_key, IntegerList::new(vec![1, 2]).unwrap())] - ); - provider.commit().unwrap(); - - // Check that transactions map onto blocks correctly. - { - let provider = factory.provider_rw().unwrap(); - assert_eq!( - provider.transaction_block(0).unwrap(), - Some(1), - "Transaction 0 should be in block 1" - ); - assert_eq!( - provider.transaction_block(1).unwrap(), - Some(2), - "Transaction 1 should be in block 2" - ); - assert_eq!( - provider.transaction_block(2).unwrap(), - None, - "Transaction 0 should not exist" - ); - } - - let provider = factory.provider_rw().unwrap(); - // get second block - let get = provider.get_block_and_execution_range(&chain_spec, 2..=2).unwrap(); - assert_eq!(get, vec![(block2.clone(), exec_res2.clone())]); - - // get two blocks - let get = provider.get_block_and_execution_range(&chain_spec, 1..=2).unwrap(); - assert_eq!(get[0].0, block1); - assert_eq!(get[1].0, block2); - assert_eq!(get[0].1, exec_res1); - assert_eq!(get[1].1, exec_res2); - - // take two blocks - let get = provider.take_block_and_execution_range(&chain_spec, 1..=2).unwrap(); - assert_eq!(get, vec![(block1, exec_res1), (block2, exec_res2)]); - - // assert genesis state - assert_genesis_block(&provider, genesis); - } - - #[test] - fn insert_get_take_multiblocks() { - let db = create_test_rw_db(); - - // setup - - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .shanghai_activated() - .build(), - ); - - let factory = ProviderFactory::new(db.as_ref(), chain_spec.clone()); - let provider = factory.provider_rw().unwrap(); - - let data = BlockChainTestData::default(); - let genesis = data.genesis.clone(); - let (block1, exec_res1) = data.blocks[0].clone(); - let (block2, exec_res2) = data.blocks[1].clone(); - - provider.insert_block(data.genesis.clone(), None).unwrap(); - - assert_genesis_block(&provider, data.genesis); - - provider.append_blocks_with_post_state(vec![block1.clone()], exec_res1.clone()).unwrap(); - - // get one block - let get = provider.get_block_and_execution_range(&chain_spec, 1..=1).unwrap(); - assert_eq!(get, vec![(block1.clone(), exec_res1.clone())]); - - // take one block - let take = provider.take_block_and_execution_range(&chain_spec, 1..=1).unwrap(); - assert_eq!(take, vec![(block1.clone(), exec_res1.clone())]); - assert_genesis_block(&provider, genesis.clone()); - - // insert two blocks - let mut merged_state = exec_res1.clone(); - merged_state.extend(exec_res2.clone()); - provider - .append_blocks_with_post_state( - vec![block1.clone(), block2.clone()], - merged_state.clone(), - ) - .unwrap(); - - // get second block - let get = provider.get_block_and_execution_range(&chain_spec, 2..=2).unwrap(); - assert_eq!(get, vec![(block2.clone(), exec_res2.clone())]); - - // get two blocks - let get = provider.get_block_and_execution_range(&chain_spec, 1..=2).unwrap(); - assert_eq!( - get, - vec![(block1.clone(), exec_res1.clone()), (block2.clone(), 
exec_res2.clone())] - ); - - // take two blocks - let get = provider.take_block_and_execution_range(&chain_spec, 1..=2).unwrap(); - assert_eq!(get, vec![(block1, exec_res1), (block2, exec_res2)]); - - // assert genesis state - assert_genesis_block(&provider, genesis); - } -} diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 257c389e69e5..16f3a9ebc7ff 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -14,7 +14,8 @@ use reth_primitives::{ Address, BlockHash, BlockNumber, BlockNumberOrTag, FromRecoveredTransaction, }; use reth_provider::{ - BlockReaderIdExt, CanonStateNotification, ChainSpecProvider, PostState, StateProviderFactory, + BlockReaderIdExt, BundleStateWithReceipts, CanonStateNotification, ChainSpecProvider, + StateProviderFactory, }; use reth_tasks::TaskSpawner; use std::{ @@ -247,9 +248,8 @@ pub async fn maintain_transaction_pool( // find all accounts that were changed in the old chain but _not_ in the new chain let missing_changed_acc = old_state - .accounts() - .keys() - .copied() + .accounts_iter() + .map(|(a, _)| a) .filter(|addr| !new_changed_accounts.contains(addr)); // for these we need to fetch the nonce+balance from the db at the new tip @@ -353,7 +353,7 @@ pub async fn maintain_transaction_pool( continue } - let mut changed_accounts = Vec::with_capacity(state.accounts().len()); + let mut changed_accounts = Vec::with_capacity(state.state().len()); for acc in changed_accounts_iter(state) { // we can always clear the dirty flag for this account dirty_addresses.remove(&acc.address); @@ -497,15 +497,14 @@ where Ok(res) } -/// Extracts all changed accounts from the PostState -fn changed_accounts_iter(state: &PostState) -> impl Iterator + '_ { - state.accounts().iter().filter_map(|(addr, acc)| acc.map(|acc| (addr, acc))).map( - |(address, acc)| ChangedAccount { - address: *address, - nonce: acc.nonce, - balance: acc.balance, - }, - ) +/// Extracts all changed accounts from the BundleState +fn changed_accounts_iter( + state: &BundleStateWithReceipts, +) -> impl Iterator + '_ { + state + .accounts_iter() + .filter_map(|(addr, acc)| acc.map(|acc| (addr, acc))) + .map(|(address, acc)| ChangedAccount { address, nonce: acc.nonce, balance: acc.balance }) } #[cfg(test)] diff --git a/crates/transaction-pool/src/pool/state.rs b/crates/transaction-pool/src/pool/state.rs index 05c8fd4c6a57..055ecd272287 100644 --- a/crates/transaction-pool/src/pool/state.rs +++ b/crates/transaction-pool/src/pool/state.rs @@ -38,7 +38,7 @@ impl TxState { /// - enough fee cap #[inline] pub(crate) fn is_pending(&self) -> bool { - *self >= TxState::PENDING_POOL_BITS + self.bits() >= TxState::PENDING_POOL_BITS.bits() } /// Returns `true` if the transaction has a nonce gap. 
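For context on the two `.bits()` comparisons introduced in this file: newer `bitflags` releases (the 2.x series) no longer come with ordering impls such as `PartialOrd` on the generated flag types by default, so ordering-based subpool checks have to compare the raw representations explicitly. A minimal, self-contained sketch of the pattern (the toy flag layout below is illustrative, not reth's actual `TxState`):

    use bitflags::bitflags;

    bitflags! {
        #[derive(Debug, Clone, Copy, PartialEq, Eq)]
        struct DemoState: u8 {
            const ENOUGH_FEE_CAP = 0b001;
            const ENOUGH_BALANCE = 0b010;
            const NO_NONCE_GAPS  = 0b100;
            // "Pending" requires every condition above.
            const PENDING_BITS   = 0b111;
        }
    }

    fn is_pending(state: DemoState) -> bool {
        // Compare the raw integers, as the patch does via `.bits()`.
        state.bits() >= DemoState::PENDING_BITS.bits()
    }

    fn main() {
        let ready = DemoState::ENOUGH_FEE_CAP | DemoState::ENOUGH_BALANCE | DemoState::NO_NONCE_GAPS;
        assert!(is_pending(ready));
        assert!(!is_pending(DemoState::ENOUGH_BALANCE));
    }

The numeric comparison is meaningful because the flags are laid out so that higher-priority conditions occupy the higher bits; in this sketch a value can only reach `PENDING_BITS` numerically once all of those bits are set.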
@@ -95,7 +95,7 @@ impl From for SubPool { if value.is_pending() { return SubPool::Pending } - if value < TxState::BASE_FEE_POOL_BITS { + if value.bits() < TxState::BASE_FEE_POOL_BITS.bits() { return SubPool::Queued } SubPool::BaseFee From ae0d5241f217b7ec6d22f498410cbaaec80f136a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 16 Sep 2023 17:56:07 +0200 Subject: [PATCH 687/722] chore(deps): bump jsonrpsee 0.20.1 (#4624) --- Cargo.lock | 45 +++++++++++++++++++++++---------------------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 352b0ebc2b13..1f4ff43b5a72 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2758,9 +2758,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "gloo-net" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a66b4e3c7d9ed8d315fd6b97c8b1f74a7c6ecbbc2320e65ae7ed38b7068cc620" +checksum = "8ac9e8288ae2c632fa9f8657ac70bfe38a1530f345282d7ba66a1f70b72b7dc4" dependencies = [ "futures-channel", "futures-core", @@ -2791,9 +2791,9 @@ dependencies = [ [[package]] name = "gloo-utils" -version = "0.1.7" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "037fcb07216cb3a30f7292bd0176b050b7b9a052ba830ef7d5d65f6dc64ba58e" +checksum = "0b5555354113b18c547c1d3a98fbf7fb32a9ff4f6fa112ce823a21641a0ba3aa" dependencies = [ "js-sys", "serde", @@ -3522,9 +3522,9 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8002beb64691edce321fc16cdba91916b10d798f9d480a05467b0ee98463c03b" +checksum = "9ad9b31183a8bcbe843e32ca8554ad2936633548d95a7bb6a8e14c767dea6b05" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -3534,14 +3534,15 @@ dependencies = [ "jsonrpsee-types", "jsonrpsee-wasm-client", "jsonrpsee-ws-client", + "tokio", "tracing", ] [[package]] name = "jsonrpsee-client-transport" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "310f9566a32ec8db214805127c4f17e7e8e91015e4a1407fc1d0e84df0086a73" +checksum = "97f2743cad51cc86b0dbfe316309eeb87a9d96a3d7f4dd7a99767c4b5f065335" dependencies = [ "futures-channel", "futures-util", @@ -3562,9 +3563,9 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4278372ecb78ebb522c36a242209a29162f4af0997a41158c8b60450b081baf1" +checksum = "35dc957af59ce98373bcdde0c1698060ca6c2d2e9ae357b459c7158b6df33330" dependencies = [ "anyhow", "async-lock", @@ -3588,9 +3589,9 @@ dependencies = [ [[package]] name = "jsonrpsee-http-client" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2393386c97ce214851a9677568c5a38223ae4eada833617cb16d8464d1128f1b" +checksum = "0dd865d0072764cb937b0110a92b5f53e995f7101cb346beca03d93a2dea79de" dependencies = [ "async-trait", "hyper", @@ -3608,9 +3609,9 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "985d4a3753a08aaf120429924567795b2764c5c691489316a7fd076178e708b4" +checksum = "cef91b1017a4edb63f65239381c18de39f88d0e0760ab626d806e196f7f51477" dependencies = [ "heck", "proc-macro-crate", @@ -3621,9 +3622,9 @@ dependencies = 
[ [[package]] name = "jsonrpsee-server" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc6357836b1d7b1367fe6d9a9b8d6e5488d1f1db985dfca4cb4ceaa9f37679e" +checksum = "24f4e2f3d223d810e363fb8b5616ec4c6254243ee7f452d05ac281cdc9cf76b2" dependencies = [ "futures-util", "http", @@ -3644,9 +3645,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbea61f2d95b9592491228db0c4d2b1e43ea1154ed9713bb666169cf3919ea7d" +checksum = "fa9e25aec855b2a7d3ed90fded6c41e8c3fb72b63f071e1be3f0004eba19b625" dependencies = [ "anyhow", "beef", @@ -3658,9 +3659,9 @@ dependencies = [ [[package]] name = "jsonrpsee-wasm-client" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "051742038473f3aaada8fc1eb19c76a5354e37e886999d60061f1f303cfc45e8" +checksum = "010306151579898dc1000bab239ef7a73a73f04cb8ef267ee28b9a000267e813" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -3669,9 +3670,9 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9590173f77867bc96b5127e4a862e2edcb7f603c83616e9302d68aab983bc023" +checksum = "d88e35e9dfa89248ae3e92f689c1f0a190ce12d377eba7d2d08e5a7f6cc5694a" dependencies = [ "http", "jsonrpsee-client-transport", From f7b28e5925d0f3b660c6367a837985c604e654a0 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Sat, 16 Sep 2023 15:53:10 -0400 Subject: [PATCH 688/722] fix: apply FCU on invalid PayloadAttributes (#4591) --- crates/rpc/rpc-engine-api/src/engine_api.rs | 68 +++++++++++++++++---- 1 file changed, 55 insertions(+), 13 deletions(-) diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index f76d883bc72e..b7aa7c6a6889 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -118,10 +118,8 @@ where state: ForkchoiceState, payload_attrs: Option, ) -> EngineApiResult { - if let Some(ref attrs) = payload_attrs { - self.validate_version_specific_fields(EngineApiMessageVersion::V1, &attrs.into())?; - } - Ok(self.inner.beacon_consensus.fork_choice_updated(state, payload_attrs).await?) + self.validate_and_execute_forkchoice(EngineApiMessageVersion::V1, state, payload_attrs) + .await } /// Sends a message to the beacon consensus engine to update the fork choice _with_ withdrawals, @@ -133,10 +131,8 @@ where state: ForkchoiceState, payload_attrs: Option, ) -> EngineApiResult { - if let Some(ref attrs) = payload_attrs { - self.validate_version_specific_fields(EngineApiMessageVersion::V2, &attrs.into())?; - } - Ok(self.inner.beacon_consensus.fork_choice_updated(state, payload_attrs).await?) + self.validate_and_execute_forkchoice(EngineApiMessageVersion::V2, state, payload_attrs) + .await } /// Sends a message to the beacon consensus engine to update the fork choice _with_ withdrawals, @@ -148,11 +144,8 @@ where state: ForkchoiceState, payload_attrs: Option, ) -> EngineApiResult { - if let Some(ref attrs) = payload_attrs { - self.validate_version_specific_fields(EngineApiMessageVersion::V3, &attrs.into())?; - } - - Ok(self.inner.beacon_consensus.fork_choice_updated(state, payload_attrs).await?) 
+ self.validate_and_execute_forkchoice(EngineApiMessageVersion::V3, state, payload_attrs)
+ .await
 }
 
 /// Returns the most recent version of the payload that is available in the corresponding
@@ -534,6 +527,55 @@ where
 payload_or_attrs.parent_beacon_block_root().is_some(),
 )
 }
+
+ /// Validates the `engine_forkchoiceUpdated` payload attributes and executes the forkchoice
+ /// update.
+ ///
+ /// The payload attributes will be validated according to the engine API rules for the given
+ /// message version:
+ /// * If the version is [EngineApiMessageVersion::V1], then the payload attributes will be
+ /// validated according to the Paris rules.
+ /// * If the version is [EngineApiMessageVersion::V2], then the payload attributes will be
+ /// validated according to the Shanghai rules, as well as the validity changes from Cancun:
+ /// 
+ ///
+ /// * If the version is [EngineApiMessageVersion::V3], then the payload attributes will be
+ /// validated according to the Cancun rules.
+ async fn validate_and_execute_forkchoice(
+ &self,
+ version: EngineApiMessageVersion,
+ state: ForkchoiceState,
+ payload_attrs: Option<PayloadAttributes>,
+ ) -> EngineApiResult<ForkchoiceUpdated> {
+ if let Some(ref attrs) = payload_attrs {
+ let attr_validation_res = self.validate_version_specific_fields(version, &attrs.into());
+
+ // From the engine API spec:
+ //
+ // Client software MUST ensure that payloadAttributes.timestamp is greater than
+ // timestamp of a block referenced by forkchoiceState.headBlockHash. If this condition
+ // isn't held client software MUST respond with -38003: Invalid payload attributes and
+ // MUST NOT begin a payload build process. In such an event, the forkchoiceState
+ // update MUST NOT be rolled back.
+ //
+ // NOTE: This also applies to the validation result for the Cancun- or
+ // Shanghai-specific fields provided in the payload attributes.
+ //
+ // To do this, we set the payload attrs to `None` if attribute validation failed, but
+ // we still apply the forkchoice update.
+ if let Err(err) = attr_validation_res {
+ let fcu_res = self.inner.beacon_consensus.fork_choice_updated(state, None).await?;
+ // TODO: decide if we want this branch - the FCU INVALID response might be more
+ // useful than the payload attributes INVALID response
+ if fcu_res.is_invalid() {
+ return Ok(fcu_res)
+ }
+ return Err(err)
+ }
+ }
+
+ Ok(self.inner.beacon_consensus.fork_choice_updated(state, payload_attrs).await?)
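+
+ // NOTE: reaching the call above means that the attributes, if any, passed validation,
+ // so the consensus engine may start a payload build process on top of the updated head.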
+ } } #[async_trait] From a55f48cf28b1646436315d52ecb244691f6e9b82 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 17 Sep 2023 18:52:59 +0200 Subject: [PATCH 689/722] chore(deps): weekly `cargo update` (#4627) Co-authored-by: github-merge-queue Co-authored-by: Matthias Seitz --- Cargo.lock | 288 ++++++++++++++------------ crates/payload/builder/src/payload.rs | 2 + crates/rpc/rpc/src/eth/error.rs | 27 +++ crates/rpc/rpc/src/eth/revm_utils.rs | 4 + 4 files changed, 186 insertions(+), 135 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1f4ff43b5a72..0845433a00f9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -195,9 +195,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15c4c2c83f81532e5845a733998b6971faca23490340a418e9b72a3ec9de12ea" +checksum = "b84bf0a05bbb2a83e5eb6fa36bb6e87baa08193c35ff52bbf6b38d8af2890e46" [[package]] name = "anstyle-parse" @@ -403,9 +403,9 @@ checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" [[package]] name = "async-compression" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d495b6dc0184693324491a5ac05f559acc97bf937ab31d7a1c33dd0016be6d2b" +checksum = "bb42b2197bf15ccb092b62c74515dbd8b86d0effd934795f6687c93b6e679a2c" dependencies = [ "brotli", "flate2", @@ -434,7 +434,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] @@ -537,9 +537,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.3" +version = "0.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "414dcefbc63d77c526a76b3afcf6fbb9b5e2791c19c3aa2297733208750c6e53" +checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" [[package]] name = "base64ct" @@ -619,7 +619,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.29", + "syn 2.0.36", "which", ] @@ -640,7 +640,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] @@ -828,7 +828,7 @@ checksum = "ca3de43b7806061fccfba716fef51eea462d636de36803b62d10f902608ffef4" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.36", "synstructure 0.13.0", ] @@ -861,9 +861,9 @@ checksum = "a24f6aa1ecc56e797506437b1f9a172e4a5f207894e74196c682cb656d2c2d60" [[package]] name = "boyer-moore-magiclen" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c77eb6b3a37f71fcd40e49b56c028ea8795c0e550afd8021e3e6a2369653035" +checksum = "116d76fee857b03ecdd95d5f9555e46aa0cd34e5bb348a520e9445d151182a7e" dependencies = [ "debug-helper", ] @@ -912,9 +912,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" +checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" [[package]] name = "byte-slice-cast" @@ -924,9 +924,9 @@ checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "bytemuck" -version = "1.13.1" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "17febce684fd15d89027105661fec94afb475cb995fbc59d2865198446ba2eea" +checksum = "374d28ec25809ee0e23827c2ab573d729e293f281dfe393500e7ad618baa61c6" [[package]] name = "byteorder" @@ -957,6 +957,19 @@ dependencies = [ "serde", ] +[[package]] +name = "c-kzg" +version = "0.1.0" +source = "git+https://github.com/ethereum/c-kzg-4844#fbef59a3f9e8fa998bdb5069d212daf83d586aa5" +dependencies = [ + "bindgen 0.66.1", + "blst", + "cc", + "glob", + "hex", + "libc", +] + [[package]] name = "camino" version = "1.1.6" @@ -1042,9 +1055,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.28" +version = "0.4.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95ed24df0632f708f5f6d8082675bef2596f7084dee3dd55f632290bf35bfe0f" +checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" dependencies = [ "android-tzdata", "iana-time-zone", @@ -1112,9 +1125,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.2" +version = "4.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a13b88d2c62ff462f88e4a121f17a82c1af05693a2f192b5c38d14de73c19f6" +checksum = "84ed82781cea27b43c9b106a979fe450a13a31aab0500595fb3fc06616de08e6" dependencies = [ "clap_builder", "clap_derive", @@ -1141,7 +1154,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] @@ -1165,7 +1178,7 @@ dependencies = [ "proc-macro2", "quote", "serde", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] @@ -1206,7 +1219,7 @@ version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5286a0843c21f8367f7be734f89df9b822e0321d8bcce8d6e735aadff7d74979" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "bech32", "bs58", "digest 0.10.7", @@ -1510,9 +1523,9 @@ checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] name = "crypto-bigint" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4c2f4e1afd912bc40bfd6fed5d9dc1f288e0ba01bfcc835cc5bc3eb13efe15" +checksum = "740fe28e594155f10cfc383984cbefd529d7396050557148f79cb0f621204124" dependencies = [ "generic-array", "rand_core 0.6.4", @@ -1550,9 +1563,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.0.0" +version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f711ade317dd348950a9910f81c5947e3d8907ebd2b83f76203ff1807e6a2bc2" +checksum = "622178105f911d937a42cdb140730ba4a3ed2becd8ae6ce39c7d28b5d75d4588" dependencies = [ "cfg-if", "cpufeatures", @@ -1573,7 +1586,7 @@ checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] @@ -1621,7 +1634,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] @@ -1643,7 +1656,7 @@ checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ "darling_core 0.20.3", "quote", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] @@ -1728,7 +1741,7 @@ checksum = "53e0efad4403bfc52dc201159c4b842a246a14b98c64b55dfd0f2d89729dfeb8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] @@ -1902,7 +1915,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] 
@@ -1975,9 +1988,9 @@ dependencies = [ [[package]] name = "educe" -version = "0.4.22" +version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "079044df30bb07de7d846d41a184c4b00e66ebdac93ee459253474f3a47e50ae" +checksum = "0f0042ff8246a363dbe77d2ceedb073339e85a804b9a47636c6e016a9a32c05f" dependencies = [ "enum-ordinalize", "proc-macro2", @@ -2055,7 +2068,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0be7b2ac146c1f99fe245c02d16af0696450d8e06c135db75e10eeb9e642c20d" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "bytes", "ed25519-dalek", "hex", @@ -2091,7 +2104,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] @@ -2104,7 +2117,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] @@ -2115,7 +2128,7 @@ checksum = "c2ad8cef1d801a4686bfd8919f0b30eac4c8e48968c437a6405ded4fb5272d2b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] @@ -2262,8 +2275,8 @@ dependencies = [ "regex", "serde", "serde_json", - "syn 2.0.29", - "toml 0.7.6", + "syn 2.0.36", + "toml 0.7.8", "walkdir", ] @@ -2280,7 +2293,7 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] @@ -2306,7 +2319,7 @@ dependencies = [ "serde", "serde_json", "strum 0.25.0", - "syn 2.0.29", + "syn 2.0.36", "tempfile", "thiserror", "tiny-keccak", @@ -2363,7 +2376,7 @@ checksum = "6838fa110e57d572336178b7c79e94ff88ef976306852d8cb87d9e5b1fc7c0b5" dependencies = [ "async-trait", "auto_impl", - "base64 0.21.3", + "base64 0.21.4", "bytes", "const-hex", "enr", @@ -2509,9 +2522,9 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.1.20" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e825f6987101665dea6ec934c09ec6d721de7bc1bf92248e1d5810c8cd636b77" +checksum = "d0870c84016d4b481be5c9f323c24f65e31e901ae618f0e80f4308fb00de1d2d" [[package]] name = "findshlibs" @@ -2649,7 +2662,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] @@ -3040,9 +3053,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "human_bytes" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27e2b089f28ad15597b48d8c0a8fe94eeb1c1cb26ca99b6f66ac9582ae10c5e6" +checksum = "91f255a4535024abf7640cb288260811fc14794f62b063652ed349f9a6c2348e" [[package]] name = "humantime" @@ -3362,9 +3375,9 @@ dependencies = [ [[package]] name = "inferno" -version = "0.11.16" +version = "0.11.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73c0fefcb6d409a6587c07515951495d482006f89a21daa0f2f783aa4fd5e027" +checksum = "c50453ec3a6555fad17b1cd1a80d16af5bc7cb35094f64e429fd46549018c6a3" dependencies = [ "ahash 0.8.3", "indexmap 2.0.0", @@ -3414,7 +3427,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.3", + "socket2 0.5.4", "widestring", "windows-sys 0.48.0", "winreg", @@ -3443,7 +3456,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi", - 
"rustix 0.38.11", + "rustix 0.38.13", "windows-sys 0.48.0", ] @@ -3687,7 +3700,7 @@ version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "pem", "ring", "serde", @@ -3735,9 +3748,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.147" +version = "0.2.148" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" +checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b" [[package]] name = "libloading" @@ -3798,9 +3811,9 @@ checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" [[package]] name = "linux-raw-sys" -version = "0.4.5" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503" +checksum = "1a9bad9f94746442c783ca431b22403b519cd7fbeed0533fdd6328b2f2212128" [[package]] name = "litemap" @@ -3928,7 +3941,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a4964177ddfdab1e3a2b37aec7cf320e14169abb0ed73999f558136409178d5" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "hyper", "indexmap 1.9.3", "ipnet", @@ -3948,7 +3961,7 @@ checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] @@ -4259,7 +4272,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] @@ -4271,7 +4284,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] @@ -4285,9 +4298,9 @@ dependencies = [ [[package]] name = "object" -version = "0.32.0" +version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ac5bbd07aea88c60a577a1ce218075ffd59208b2d7ca97adf9bfc5aeb21ebe" +checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" dependencies = [ "memchr", ] @@ -4562,7 +4575,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] @@ -4591,7 +4604,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] @@ -4770,12 +4783,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.12" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62" +checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" dependencies = [ "proc-macro2", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] @@ -4828,9 +4841,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.66" +version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" +checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" dependencies = [ "unicode-ident", ] @@ -5156,7 +5169,7 @@ version = "0.11.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" dependencies = [ - 
"base64 0.21.3", + "base64 0.21.4", "bytes", "encoding_rs", "futures-core", @@ -5261,7 +5274,7 @@ dependencies = [ "tempfile", "thiserror", "tokio", - "toml 0.7.6", + "toml 0.7.8", "tracing", "tui", "vergen", @@ -5384,7 +5397,7 @@ dependencies = [ "serde", "serde_json", "tempfile", - "toml 0.7.6", + "toml 0.7.8", ] [[package]] @@ -5676,7 +5689,7 @@ dependencies = [ "quote", "regex", "serial_test", - "syn 2.0.29", + "syn 2.0.36", "trybuild", ] @@ -5796,7 +5809,7 @@ dependencies = [ "arbitrary", "assert_matches", "bytes", - "c-kzg", + "c-kzg 0.1.0 (git+https://github.com/ethereum/c-kzg-4844?rev=f5f6f863d475847876a2bd5ee252058d37c3a15d)", "crc", "criterion", "crunchy", @@ -5834,7 +5847,7 @@ dependencies = [ "tiny-keccak", "tokio", "tokio-stream", - "toml 0.7.6", + "toml 0.7.8", "tracing", "triehash", "url", @@ -5927,7 +5940,7 @@ dependencies = [ "arrayvec", "auto_impl", "bytes", - "c-kzg", + "c-kzg 0.1.0 (git+https://github.com/ethereum/c-kzg-4844?rev=f5f6f863d475847876a2bd5ee252058d37c3a15d)", "criterion", "ethereum-types", "ethnum", @@ -5945,7 +5958,7 @@ version = "0.1.0-alpha.8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] @@ -6216,7 +6229,7 @@ dependencies = [ [[package]] name = "revm" version = "3.3.0" -source = "git+https://github.com/bluealloy/revm#9f00e3796e165efb2c76feb2c335d1d7468a9b3d" +source = "git+https://github.com/bluealloy/revm#fa13feac3b6623a81bb06b325d869050f381d464" dependencies = [ "auto_impl", "revm-interpreter", @@ -6226,7 +6239,7 @@ dependencies = [ [[package]] name = "revm-interpreter" version = "1.1.2" -source = "git+https://github.com/bluealloy/revm#9f00e3796e165efb2c76feb2c335d1d7468a9b3d" +source = "git+https://github.com/bluealloy/revm#fa13feac3b6623a81bb06b325d869050f381d464" dependencies = [ "derive_more", "enumn", @@ -6237,8 +6250,10 @@ dependencies = [ [[package]] name = "revm-precompile" version = "2.0.3" -source = "git+https://github.com/bluealloy/revm#9f00e3796e165efb2c76feb2c335d1d7468a9b3d" +source = "git+https://github.com/bluealloy/revm#fa13feac3b6623a81bb06b325d869050f381d464" dependencies = [ + "c-kzg 0.1.0 (git+https://github.com/ethereum/c-kzg-4844)", + "hex", "k256", "num", "once_cell", @@ -6253,19 +6268,21 @@ dependencies = [ [[package]] name = "revm-primitives" version = "1.1.2" -source = "git+https://github.com/bluealloy/revm#9f00e3796e165efb2c76feb2c335d1d7468a9b3d" +source = "git+https://github.com/bluealloy/revm#fa13feac3b6623a81bb06b325d869050f381d464" dependencies = [ "arbitrary", "auto_impl", "bitflags 2.4.0", "bitvec", "bytes", + "c-kzg 0.1.0 (git+https://github.com/ethereum/c-kzg-4844)", "derive_more", "enumn", "fixed-hash", "hashbrown 0.14.0", "hex", "hex-literal", + "once_cell", "primitive-types", "proptest", "proptest-derive", @@ -6448,14 +6465,14 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.11" +version = "0.38.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0c3dde1fc030af041adc40e79c0e7fbcf431dd24870053d187d7c66e4b87453" +checksum = "d7db8590df6dfcd144d22afd1b83b36c21a18d7cbc1dc4bb5295a8712e9eb662" dependencies = [ "bitflags 2.4.0", "errno 0.3.3", "libc", - "linux-raw-sys 0.4.5", + "linux-raw-sys 0.4.7", "windows-sys 0.48.0", ] @@ -6489,14 +6506,14 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", ] [[package]] name = "rustls-webpki" -version = "0.101.4" +version = 
"0.101.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d93931baf2d282fff8d3a532bbfd7653f734643161b87e3e01e59a04439bf0d" +checksum = "45a27e3b59326c16e23d30aeb7a36a24cc0d29e71d68ff611cdfb4a01d013bed" dependencies = [ "ring", "untrusted", @@ -6746,14 +6763,14 @@ checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] name = "serde_json" -version = "1.0.105" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693151e1ac27563d6dbcec9dee9fbd5da8539b20fa14ad3752b2e6d363ace360" +checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" dependencies = [ "itoa", "ryu", @@ -6787,7 +6804,7 @@ version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ca3b16a3d82c4088f343b7480a93550b3eabe1a358569c2dfe38bbcead07237" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "chrono", "hex", "indexmap 1.9.3", @@ -6807,7 +6824,7 @@ dependencies = [ "darling 0.20.3", "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] @@ -6832,7 +6849,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] @@ -6911,9 +6928,9 @@ dependencies = [ [[package]] name = "shlex" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" +checksum = "a7cee0529a6d40f580e7a5e6c495c8fbfe21b7b52795ed4bb5e62cdf92bc6380" [[package]] name = "signal-hook" @@ -7050,9 +7067,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" +checksum = "4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e" dependencies = [ "libc", "windows-sys 0.48.0", @@ -7173,7 +7190,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] @@ -7216,9 +7233,9 @@ dependencies = [ [[package]] name = "symbolic-common" -version = "12.3.0" +version = "12.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "167a4ffd7c35c143fd1030aa3c2caf76ba42220bd5a6b5f4781896434723b8c3" +checksum = "9e0e9bc48b3852f36a84f8d0da275d50cb3c2b88b59b9ec35fdd8b7fa239e37d" dependencies = [ "debugid", "memmap2", @@ -7228,9 +7245,9 @@ dependencies = [ [[package]] name = "symbolic-demangle" -version = "12.3.0" +version = "12.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e378c50e80686c1c5c205674e1f86a2858bec3d2a7dfdd690331a8a19330f293" +checksum = "691e53bdc0702aba3a5abc2cffff89346fcbd4050748883c7e2f714b33a69045" dependencies = [ "cpp_demangle", "rustc-demangle", @@ -7250,9 +7267,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.29" +version = "2.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c324c494eba9d92503e6f1ef2e6df781e78f6a7705a0202d9801b198807d518a" +checksum = "91e02e55d62894af2a08aca894c6577281f76769ba47c94d5756bec8ac6e7373" dependencies = [ "proc-macro2", "quote", @@ -7279,7 +7296,7 @@ checksum = "285ba80e733fac80aa4270fbcdf83772a79b80aa35c97075320abfee4a915b06" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.36", "unicode-xid", ] @@ 
-7298,7 +7315,7 @@ dependencies = [ "cfg-if", "fastrand 2.0.0", "redox_syscall 0.3.5", - "rustix 0.38.11", + "rustix 0.38.13", "windows-sys 0.48.0", ] @@ -7355,7 +7372,7 @@ dependencies = [ "proc-macro2", "quote", "subprocess", - "syn 2.0.29", + "syn 2.0.36", "test-fuzz-internal", "toolchain_find", ] @@ -7397,7 +7414,7 @@ checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] @@ -7507,7 +7524,7 @@ dependencies = [ "parking_lot 0.12.1", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.3", + "socket2 0.5.4", "tokio-macros", "windows-sys 0.48.0", ] @@ -7520,7 +7537,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] @@ -7584,9 +7601,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.7.6" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17e963a819c331dcacd7ab957d80bc2b9a9c1e71c804826d2f283dd65306542" +checksum = "dd79e69d3b627db300ff956027cc6c3798cef26d22526befdfcd12feeb6d2257" dependencies = [ "serde", "serde_spanned", @@ -7605,9 +7622,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.19.14" +version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a" +checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ "indexmap 2.0.0", "serde", @@ -7657,7 +7674,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" dependencies = [ "async-compression", - "base64 0.21.3", + "base64 0.21.4", "bitflags 2.4.0", "bytes", "futures-core", @@ -7724,7 +7741,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] @@ -7898,9 +7915,9 @@ checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "trybuild" -version = "1.0.83" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6df60d81823ed9c520ee897489573da4b1d79ffbe006b8134f46de1a1aa03555" +checksum = "196a58260a906cedb9bf6d8034b6379d0c11f552416960452f267402ceeddff1" dependencies = [ "basic-toml", "glob", @@ -7945,9 +7962,9 @@ dependencies = [ [[package]] name = "typenum" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "ucd-trie" @@ -7990,9 +8007,9 @@ checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" @@ -8099,9 +8116,9 @@ checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "vergen" -version = "8.2.4" +version = "8.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bbc5ad0d9d26b2c49a5ab7da76c3e79d3ee37e7821799f8223fcb8f2f391a2e7" +checksum = "85e7dc29b3c54a2ea67ef4f953d5ec0c4085035c0ae2d325be1c0d2144bd9f16" dependencies = [ "anyhow", "rustversion", @@ -8125,9 +8142,9 @@ dependencies = [ [[package]] name = "walkdir" -version = "2.3.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" +checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" dependencies = [ "same-file", "winapi-util", @@ -8175,7 +8192,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.36", "wasm-bindgen-shared", ] @@ -8209,7 +8226,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.36", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8238,13 +8255,14 @@ checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" [[package]] name = "which" -version = "4.4.0" +version = "4.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" dependencies = [ "either", - "libc", + "home", "once_cell", + "rustix 0.38.13", ] [[package]] @@ -8511,9 +8529,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.16" +version = "0.8.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47430998a7b5d499ccee752b41567bc3afc57e1327dc855b1a2aa44ce29b5fa1" +checksum = "bab77e97b50aee93da431f2cee7cd0f43b4d1da3c408042f2d7d164187774f0a" [[package]] name = "xmltree" @@ -8592,7 +8610,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.36", ] [[package]] diff --git a/crates/payload/builder/src/payload.rs b/crates/payload/builder/src/payload.rs index 9170bc5343a6..8ae7dec636a2 100644 --- a/crates/payload/builder/src/payload.rs +++ b/crates/payload/builder/src/payload.rs @@ -175,6 +175,8 @@ impl PayloadBuilderAttributes { basefee: U256::from( parent.next_block_base_fee(chain_spec.base_fee_params).unwrap_or_default(), ), + // calculate excess gas based on parent block's blob gas usage + excess_blob_gas: parent.next_block_blob_fee().map(|fee| fee.saturating_to()), }; (cfg, block_env) diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index 29b5d3aac35f..374b25d3ff7c 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -43,6 +43,9 @@ pub enum EthApiError { /// An internal error where prevrandao is not set in the evm's environment #[error("Prevrandao not in th EVM's environment after merge")] PrevrandaoNotSet, + /// Excess_blob_gas is not set for Cancun and above. 
+ #[error("Excess blob gas missing th EVM's environment after Cancun")] + ExcessBlobGasNotSet, /// Thrown when a call or transaction request (`eth_call`, `eth_estimateGas`, /// `eth_sendTransaction`) contains conflicting fields (legacy, EIP-1559) #[error("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")] @@ -110,6 +113,7 @@ impl From for ErrorObject<'static> { EthApiError::InvalidTransaction(err) => err.into(), EthApiError::PoolError(err) => err.into(), EthApiError::PrevrandaoNotSet | + EthApiError::ExcessBlobGasNotSet | EthApiError::InvalidBlockData(_) | EthApiError::Internal(_) | EthApiError::TransactionNotFound => internal_rpc_err(error.to_string()), @@ -184,7 +188,11 @@ where match err { EVMError::Transaction(err) => RpcInvalidTransactionError::from(err).into(), EVMError::PrevrandaoNotSet => EthApiError::PrevrandaoNotSet, + EVMError::ExcessBlobGasNotSet => EthApiError::ExcessBlobGasNotSet, EVMError::Database(err) => err.into(), + _ => { + unreachable!() + } } } } @@ -281,6 +289,16 @@ pub enum RpcInvalidTransactionError { /// The transitions is before Berlin and has access list #[error("Transactions before Berlin should not have access list")] AccessListNotSupported, + /// `max_fee_per_blob_gas` is not supported for blocks before the Cancun hardfork. + #[error("max_fee_per_blob_gas is not supported for blocks before the Cancun hardfork.")] + MaxFeePerBlobGasNotSupported, + /// `blob_hashes`/`blob_versioned_hashes` is not supported for blocks before the Cancun + /// hardfork. + #[error("blob_versioned_hashes is not supported for blocks before the Cancun hardfork.")] + BlobVersionedHashesNotSupported, + /// Block `blob_gas_price` is greater than tx-specified `max_fee_per_blob_gas` after Cancun. + #[error("max fee per blob gas less than block blob gas fee")] + BlobFeeCapTooLow, } impl RpcInvalidTransactionError { @@ -370,6 +388,15 @@ impl From for RpcInvalidTransactionError { InvalidTransaction::AccessListNotSupported => { RpcInvalidTransactionError::AccessListNotSupported } + InvalidTransaction::MaxFeePerBlobGasNotSupported => { + RpcInvalidTransactionError::MaxFeePerBlobGasNotSupported + } + InvalidTransaction::BlobVersionedHashesNotSupported => { + RpcInvalidTransactionError::BlobVersionedHashesNotSupported + } + InvalidTransaction::BlobGasPriceGreaterThanMax => { + RpcInvalidTransactionError::BlobFeeCapTooLow + } } } } diff --git a/crates/rpc/rpc/src/eth/revm_utils.rs b/crates/rpc/rpc/src/eth/revm_utils.rs index 1dc65db8e2ba..5107d3a515dc 100644 --- a/crates/rpc/rpc/src/eth/revm_utils.rs +++ b/crates/rpc/rpc/src/eth/revm_utils.rs @@ -311,6 +311,10 @@ pub(crate) fn create_txn_env(block_env: &BlockEnv, request: CallRequest) -> EthR data: input.try_into_unique_input()?.map(|data| data.0).unwrap_or_default(), chain_id: chain_id.map(|c| c.as_u64()), access_list: access_list.map(AccessList::flattened).unwrap_or_default(), + + // EIP-4844 fields + blob_hashes: Default::default(), + max_fee_per_blob_gas: None, }; Ok(env) From 16072b9a94f34d800d6648d1ec6205e03dc961f7 Mon Sep 17 00:00:00 2001 From: "Supernovahs.eth" <91280922+supernovahs@users.noreply.github.com> Date: Mon, 18 Sep 2023 14:05:03 +0530 Subject: [PATCH 690/722] 4844 blob txs (#4631) Co-authored-by: Matthias Seitz --- crates/rpc/rpc-types/src/eth/call.rs | 9 +++++++-- crates/rpc/rpc/src/eth/api/transactions.rs | 2 ++ crates/rpc/rpc/src/eth/revm_utils.rs | 7 ++++--- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/crates/rpc/rpc-types/src/eth/call.rs b/crates/rpc/rpc-types/src/eth/call.rs index 
19db85b77e75..e065cdb39665 100644 --- a/crates/rpc/rpc-types/src/eth/call.rs +++ b/crates/rpc/rpc-types/src/eth/call.rs @@ -1,4 +1,4 @@ -use reth_primitives::{AccessList, Address, BlockId, Bytes, U256, U64, U8}; +use reth_primitives::{AccessList, Address, BlockId, Bytes, H256, U256, U64, U8}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use crate::BlockOverrides; @@ -88,7 +88,7 @@ impl<'de> Deserialize<'de> for TransactionIndex { } } -/// Call request +/// Call request for `eth_call` and adjacent methods. #[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize)] #[serde(default, rename_all = "camelCase")] pub struct CallRequest { @@ -115,6 +115,11 @@ pub struct CallRequest { pub chain_id: Option, /// AccessList pub access_list: Option, + /// Max Fee per Blob gas for EIP-4844 transactions + pub max_fee_per_blob_gas: Option, + /// Blob Versioned Hashes for EIP-4844 transactions + #[serde(default)] + pub blob_versioned_hashes: Vec, /// EIP-2718 type #[serde(rename = "type")] pub transaction_type: Option, diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index d28a8a960a0d..308d5f392e8b 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -469,6 +469,8 @@ where access_list: request.access_list.clone(), max_priority_fee_per_gas: Some(U256::from(max_fee_per_gas)), transaction_type: None, + blob_versioned_hashes: Vec::new(), + max_fee_per_blob_gas: None, }, BlockId::Number(BlockNumberOrTag::Pending), ) diff --git a/crates/rpc/rpc/src/eth/revm_utils.rs b/crates/rpc/rpc/src/eth/revm_utils.rs index 5107d3a515dc..751627d52be5 100644 --- a/crates/rpc/rpc/src/eth/revm_utils.rs +++ b/crates/rpc/rpc/src/eth/revm_utils.rs @@ -286,6 +286,8 @@ pub(crate) fn create_txn_env(block_env: &BlockEnv, request: CallRequest) -> EthR nonce, access_list, chain_id, + blob_versioned_hashes, + max_fee_per_blob_gas, .. } = request; @@ -311,10 +313,9 @@ pub(crate) fn create_txn_env(block_env: &BlockEnv, request: CallRequest) -> EthR data: input.try_into_unique_input()?.map(|data| data.0).unwrap_or_default(), chain_id: chain_id.map(|c| c.as_u64()), access_list: access_list.map(AccessList::flattened).unwrap_or_default(), - // EIP-4844 fields - blob_hashes: Default::default(), - max_fee_per_blob_gas: None, + blob_hashes: blob_versioned_hashes, + max_fee_per_blob_gas, }; Ok(env) From 8baf2344d41658bb3fb5fdaf0b93bb87f72bef1e Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Mon, 18 Sep 2023 11:38:45 +0300 Subject: [PATCH 691/722] chore: demote session established log level (#4564) Co-authored-by: Matthias Seitz --- crates/net/network/src/manager.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index a6ff002e1e73..d094150a6a35 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -55,7 +55,7 @@ use std::{ }; use tokio::sync::mpsc::{self, error::TrySendError}; use tokio_stream::wrappers::UnboundedReceiverStream; -use tracing::{debug, error, info, trace, warn}; +use tracing::{debug, error, trace, warn}; /// Manages the _entire_ state of the network. 
 ///
@@ -676,15 +676,16 @@
 let total_active = this.num_active_peers.fetch_add(1, Ordering::Relaxed) + 1;
 this.metrics.connected_peers.set(total_active as f64);
- info!(
- target : "net",
+ debug!(
+ target: "net",
 ?remote_addr,
 %client_version,
 ?peer_id,
 ?total_active,
+ kind=%direction,
+ peer_enode=%NodeRecord::new(remote_addr, peer_id),
 "Session established"
 );
- debug!(target: "net", kind=%direction, peer_enode=%NodeRecord::new(remote_addr, peer_id), "Established peer enode");
 
 if direction.is_incoming() {
 this.swarm

From cbb74346bd77fbc7f5d2c1f122dc5c5aed721ea6 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Mon, 18 Sep 2023 15:02:20 +0200
Subject: [PATCH 692/722] test: add assert_invariants (#4623)

---
 crates/transaction-pool/src/pool/blob.rs | 6 ++++
 crates/transaction-pool/src/pool/parked.rs | 6 ++++
 crates/transaction-pool/src/pool/pending.rs | 10 +++++++
 crates/transaction-pool/src/pool/txpool.rs | 32 +++++++++++++++++++++
 4 files changed, 54 insertions(+)

diff --git a/crates/transaction-pool/src/pool/blob.rs b/crates/transaction-pool/src/pool/blob.rs
index 4bda855e5d5e..f0c14e95d261 100644
--- a/crates/transaction-pool/src/pool/blob.rs
+++ b/crates/transaction-pool/src/pool/blob.rs
@@ -98,6 +98,12 @@ impl<T: PoolTransaction> BlobTransactions<T> {
 pub(crate) fn contains(&self, id: &TransactionId) -> bool {
 self.by_id.contains_key(id)
 }
+
+ /// Asserts that the bijection between `by_id` and `all` is valid.
+ #[cfg(any(test, feature = "test-utils"))]
+ pub(crate) fn assert_invariants(&self) {
+ assert_eq!(self.by_id.len(), self.all.len(), "by_id.len() != all.len()");
+ }
 }
 
 impl<T: PoolTransaction> Default for BlobTransactions<T> {
diff --git a/crates/transaction-pool/src/pool/parked.rs b/crates/transaction-pool/src/pool/parked.rs
index 075f244f0edb..633e526f7d24 100644
--- a/crates/transaction-pool/src/pool/parked.rs
+++ b/crates/transaction-pool/src/pool/parked.rs
@@ -113,6 +113,12 @@ impl<T: ParkedOrd> ParkedPool<T> {
 pub(crate) fn contains(&self, id: &TransactionId) -> bool {
 self.by_id.contains_key(id)
 }
+
+ /// Asserts that the bijection between `by_id` and `best` is valid.
+ #[cfg(any(test, feature = "test-utils"))]
+ pub(crate) fn assert_invariants(&self) {
+ assert_eq!(self.by_id.len(), self.best.len(), "by_id.len() != best.len()");
+ }
 }
 
 impl<T: PoolTransaction> ParkedPool<BasefeeOrd<T>> {
diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs
index 8ccd96efa807..b4e71484f5c2 100644
--- a/crates/transaction-pool/src/pool/pending.rs
+++ b/crates/transaction-pool/src/pool/pending.rs
@@ -307,6 +307,16 @@ impl<T: TransactionOrdering> PendingPool<T> {
 pub(crate) fn contains(&self, id: &TransactionId) -> bool {
 self.by_id.contains_key(id)
 }
+
+ /// Asserts that the bijection between `by_id` and `all` is valid.
+ #[cfg(any(test, feature = "test-utils"))]
+ pub(crate) fn assert_invariants(&self) {
+ assert_eq!(self.by_id.len(), self.all.len(), "by_id.len() != all.len()");
+ assert!(
+ self.independent_transactions.len() <= self.all.len(),
+ "independent.len() > all.len()"
+ );
+ }
 }
 
 /// A transaction that is ready to be included in a block.
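The pattern behind all of these new checks is the same: each sub-pool keeps a primary map of transactions plus a secondary index, and the two must never drift apart. A minimal, self-contained sketch of such a bijection check — `Pool` and its fields are illustrative stand-ins, not reth's actual pool internals — together with the test-only `Drop` hook the patch wires up for `TxPool` in the next diff:

```rust
// Sketch of the bijection invariant the sub-pools assert: two indexes over the
// same set of transactions must always have the same cardinality.
use std::collections::{BTreeMap, HashMap};

#[derive(Default)]
struct Pool {
    by_id: BTreeMap<u64, String>,  // primary index, ordered by id
    by_hash: HashMap<String, u64>, // secondary index, keyed by hash
}

impl Pool {
    /// Panics if the two indexes no longer track the same set of entries.
    fn assert_invariants(&self) {
        assert_eq!(self.by_id.len(), self.by_hash.len(), "by_id.len() != by_hash.len()");
    }
}

#[cfg(test)]
impl Drop for Pool {
    // Runs the consistency check whenever a pool is torn down in tests.
    fn drop(&mut self) {
        self.assert_invariants();
    }
}

fn main() {
    let mut pool = Pool::default();
    pool.by_id.insert(1, "0xabc".into());
    pool.by_hash.insert("0xabc".into(), 1);
    pool.assert_invariants(); // holds while inserts and removals stay paired
}
```

A pure length check cannot catch two offsetting bookkeeping errors, but it is cheap enough to run on every teardown, which is why the patch puts it behind a test-only `Drop` impl instead of repeating it in individual test cases.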
diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs
index 51e6d7e54564..0ba78359772b 100644
--- a/crates/transaction-pool/src/pool/txpool.rs
+++ b/crates/transaction-pool/src/pool/txpool.rs
@@ -693,6 +693,25 @@ impl<T: TransactionOrdering> TxPool<T> {
 pub(crate) fn is_empty(&self) -> bool {
 self.all_transactions.is_empty()
 }
+
+ /// Asserts all invariants of the pool:
+ ///
+ /// - All maps are bijections (`by_id`, `by_hash`)
+ /// - Total size is equal to the sum of all sub-pools
+ ///
+ /// # Panics
+ /// If any invariant is violated.
+ #[cfg(any(test, feature = "test-utils"))]
+ pub fn assert_invariants(&self) {
+ let size = self.size();
+ let actual = size.basefee + size.pending + size.queued;
+ assert_eq!(size.total, actual, "total size must be equal to the sum of all sub-pools, basefee:{}, pending:{}, queued:{}", size.basefee, size.pending, size.queued);
+ self.all_transactions.assert_invariants();
+ self.pending_pool.assert_invariants();
+ self.basefee_pool.assert_invariants();
+ self.queued_pool.assert_invariants();
+ self.blob_transactions.assert_invariants();
+ }
 }
 
 #[cfg(any(test, feature = "test-utils"))]
@@ -703,6 +722,13 @@ impl<T: TransactionOrdering> TxPool<T> {
 }
 }
 
+#[cfg(test)]
+impl<T: TransactionOrdering> Drop for TxPool<T> {
+ fn drop(&mut self) {
+ self.assert_invariants();
+ }
+}
+
 // Additional test impls
 #[cfg(any(test, feature = "test-utils"))]
 #[allow(missing_docs)]
@@ -1462,6 +1488,12 @@ impl<T: PoolTransaction> AllTransactions<T> {
 pub(crate) fn is_empty(&self) -> bool {
 self.txs.is_empty()
 }
+
+ /// Asserts that the bijection between `by_hash` and `txs` is valid.
+ #[cfg(any(test, feature = "test-utils"))]
+ pub(crate) fn assert_invariants(&self) {
+ assert_eq!(self.by_hash.len(), self.txs.len(), "by_hash.len() != txs.len()");
+ }
 }
 
 #[cfg(test)]

From aeb37aab5332118fbd362dcaac7ae762668f557e Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Mon, 18 Sep 2023 15:52:58 +0200
Subject: [PATCH 693/722] fix: fill missing eip4844 settings (#4633)

---
 crates/revm/revm-primitives/src/env.rs | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/crates/revm/revm-primitives/src/env.rs b/crates/revm/revm-primitives/src/env.rs
index 2ddea714dd2a..3efa2b19c6c6 100644
--- a/crates/revm/revm-primitives/src/env.rs
+++ b/crates/revm/revm-primitives/src/env.rs
@@ -220,8 +220,8 @@ where
 to,
 value,
 access_list,
- blob_versioned_hashes: _,
- max_fee_per_blob_gas: _,
+ blob_versioned_hashes,
+ max_fee_per_blob_gas,
 input,
 }) => {
 tx_env.gas_limit = *gas_limit;
@@ -248,6 +248,8 @@ where
 )
 })
 .collect();
+ tx_env.blob_hashes = blob_versioned_hashes.clone();
+ tx_env.max_fee_per_blob_gas = Some(U256::from(*max_fee_per_blob_gas));
 }
 }
 }

From 66589209a066b0d9bef6f3f07bfa7dc8ef7acacb Mon Sep 17 00:00:00 2001
From: Bjerg
Date: Mon, 18 Sep 2023 16:08:18 +0200
Subject: [PATCH 694/722] feat: enable size-limited file logs by default (#4192)

Co-authored-by: Matthias Seitz
---
 Cargo.lock | 12 ++++++++++++
 bin/reth/src/cli/mod.rs | 32 ++++++++++++++++++++++----------
 crates/tracing/Cargo.toml | 1 +
 crates/tracing/src/lib.rs | 20 ++++++++++++++++++--
 4 files changed, 53 insertions(+), 12 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 0845433a00f9..37fad638345e 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1061,8 +1061,10 @@ checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38"
 dependencies = [
 "android-tzdata",
 "iana-time-zone",
+ "js-sys",
 "num-traits",
 "serde",
+ "wasm-bindgen",
 "windows-targets 0.48.5",
 ]
 
@@ -6169,6 +6171,7 @@ dependencies = [
 name = "reth-tracing"
 version = "0.1.0-alpha.8"
 dependencies = [
+ "rolling-file",
 "tracing",
 "tracing-appender",
 "tracing-journald",
@@ -6366,6 +6369,15 @@ dependencies = [
 "syn 1.0.109",
 ]
 
+[[package]]
+name = "rolling-file"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8395b4f860856b740f20a296ea2cd4d823e81a2658cf05ef61be22916026a906"
+dependencies = [
+ "chrono",
+]
+
 [[package]]
 name = "route-recognizer"
 version = "0.3.1"
diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs
index 79b0ab00a6a3..440a45d10062 100644
--- a/bin/reth/src/cli/mod.rs
+++ b/bin/reth/src/cli/mod.rs
@@ -158,10 +158,6 @@ pub enum Commands {
 #[derive(Debug, Args)]
 #[command(next_help_heading = "Logging")]
 pub struct Logs {
- /// The flag to enable persistent logs.
- #[arg(long = "log.persistent", global = true, conflicts_with = "journald")]
- persistent: bool,
-
 /// The path to put log files in.
 #[arg(
 long = "log.directory",
@@ -172,6 +168,15 @@ pub struct Logs {
 )]
 log_directory: PlatformPath,
 
+ /// The maximum size (in MB) of log files.
+ #[arg(long = "log.max-size", value_name = "SIZE", global = true, default_value_t = 200)]
+ log_max_size: u64,
+
+ /// The maximum number of log files that will be stored. If set to 0, background file logging
+ /// is disabled.
+ #[arg(long = "log.max-files", value_name = "COUNT", global = true, default_value_t = 5)]
+ log_max_files: usize,
+
 /// Log events to journald.
 #[arg(long = "log.journald", global = true, conflicts_with = "log_directory")]
 journald: bool,
@@ -191,6 +196,9 @@ pub struct Logs {
 color: ColorMode,
 }
 
+/// Constant to convert megabytes to bytes
+const MB_TO_BYTES: u64 = 1024 * 1024;
+
 impl Logs {
 /// Builds a tracing layer from the current log options.
 pub fn layer(&self) -> eyre::Result, Option)>>
 where
@@ -202,8 +210,14 @@ impl Logs {
 
 if self.journald {
 Ok(Some((reth_tracing::journald(filter).expect("Could not connect to journald"), None)))
- } else if self.persistent {
- let (layer, guard) = reth_tracing::file(filter, &self.log_directory, "reth.log");
+ } else if self.log_max_files > 0 {
+ let (layer, guard) = reth_tracing::file(
+ filter,
+ &self.log_directory,
+ "reth.log",
+ self.log_max_size * MB_TO_BYTES,
+ self.log_max_files,
+ );
 Ok(Some((layer, Some(guard))))
 } else {
 Ok(None)
@@ -305,14 +319,12 @@ mod tests {
 /// name
 #[test]
 fn parse_logs_path() {
- let mut reth = Cli::<()>::try_parse_from(["reth", "node", "--log.persistent"]).unwrap();
+ let mut reth = Cli::<()>::try_parse_from(["reth", "node"]).unwrap();
 reth.logs.log_directory = reth.logs.log_directory.join(reth.chain.chain.to_string());
 let log_dir = reth.logs.log_directory;
 assert!(log_dir.as_ref().ends_with("reth/logs/mainnet"), "{:?}", log_dir);
 
- let mut reth =
- Cli::<()>::try_parse_from(["reth", "node", "--chain", "sepolia", "--log.persistent"])
- .unwrap();
+ let mut reth = Cli::<()>::try_parse_from(["reth", "node", "--chain", "sepolia"]).unwrap();
 reth.logs.log_directory = reth.logs.log_directory.join(reth.chain.chain.to_string());
 let log_dir = reth.logs.log_directory;
 assert!(log_dir.as_ref().ends_with("reth/logs/sepolia"), "{:?}", log_dir);
diff --git a/crates/tracing/Cargo.toml b/crates/tracing/Cargo.toml
index 8f652da2592a..424545b98a55 100644
--- a/crates/tracing/Cargo.toml
+++ b/crates/tracing/Cargo.toml
@@ -13,3 +13,4 @@ tracing.workspace = true
 tracing-subscriber = { version = "0.3", default-features = false, features = ["env-filter", "fmt"] }
 tracing-appender.workspace = true
 tracing-journald = "0.3"
+rolling-file = "0.2.0"
diff --git a/crates/tracing/src/lib.rs b/crates/tracing/src/lib.rs
index e6c3caacc468..8ad0d70e1c7c 100644
--- a/crates/tracing/src/lib.rs
+++ b/crates/tracing/src/lib.rs
@@ -19,6 +19,7 @@
 //! - [`journald()`]
 //!
 //! As well as a simple way to initialize a subscriber: [`init`].
+use rolling_file::{RollingConditionBasic, RollingFileAppender};
 use std::path::Path;
 use tracing::Subscriber;
 use tracing_subscriber::{
@@ -74,13 +75,28 @@ pub fn file<S>(
 filter: EnvFilter,
 dir: impl AsRef<Path>,
 file_name: impl AsRef<Path>,
+ max_size_bytes: u64,
+ max_files: usize,
 ) -> (BoxedLayer<S>, tracing_appender::non_blocking::WorkerGuard)
 where
 S: Subscriber,
 for<'a> S: LookupSpan<'a>,
 {
- let (writer, guard) =
- tracing_appender::non_blocking(tracing_appender::rolling::never(dir, file_name));
+ // Create log dir if it doesn't exist (RFA doesn't do this for us)
+ let log_dir = dir.as_ref();
+ if !log_dir.exists() {
+ std::fs::create_dir_all(log_dir).expect("Could not create log directory");
+ }
+
+ // Create layer
+ let (writer, guard) = tracing_appender::non_blocking(
+ RollingFileAppender::new(
+ log_dir.join(file_name.as_ref()),
+ RollingConditionBasic::new().max_size(max_size_bytes),
+ max_files,
+ )
+ .expect("Could not initialize file logging"),
+ );
 let layer = tracing_subscriber::fmt::layer()
 .with_ansi(false)
 .with_writer(writer)

From 93ccf4155068383b99b09874754db8971d91ff80 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Mon, 18 Sep 2023 16:10:32 +0200
Subject: [PATCH 695/722] perf(rpc): fetch range of blocks and return empty if unchanged (#4592)

---
 crates/rpc/rpc/src/eth/filter.rs | 27 ++++++++++++-------
 .../storage/provider/src/traits/block_hash.rs | 4 +++
 2 files changed, 21 insertions(+), 10 deletions(-)

diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs
index d7ab17c3adc4..56d6c1b58457 100644
--- a/crates/rpc/rpc/src/eth/filter.rs
+++ b/crates/rpc/rpc/src/eth/filter.rs
@@ -71,13 +71,20 @@ where
 let info = self.inner.provider.chain_info()?;
 let best_number = info.best_number;
 
+ // start_block is the block from which we should start fetching changes: the next block
+ // after the last poll, i.e. the best block at the last poll + 1
 let (start_block, kind) = {
 let mut filters = self.inner.active_filters.inner.lock().await;
 let filter = filters.get_mut(&id).ok_or(FilterError::FilterNotFound(id))?;
 
+ if filter.block > best_number {
+ // no new blocks since the last poll
+ return Ok(FilterChanges::Empty)
+ }
+
 // update filter
 // we fetch all changes from [filter.block..best_block], so we advance the filter's
- // block to `best_block +1`
+ // block to `best_block + 1`, the block from which the next poll starts fetching changes
 let mut block = best_number + 1;
 std::mem::swap(&mut filter.block, &mut block);
 filter.last_poll_timestamp = Instant::now();
@@ -90,15 +97,14 @@ where
 Err(EthApiError::Unsupported("pending transaction filter not supported").into())
 }
 FilterKind::Block => {
- let mut block_hashes = Vec::new();
- for block_num in start_block..best_number {
- let block_hash = self
- .inner
- .provider
- .block_hash(block_num)?
- .ok_or(EthApiError::UnknownBlockNumber)?; - block_hashes.push(block_hash); - } + // Note: we need to fetch the block hashes from inclusive range + // [start_block..best_block] + let end_block = best_number + 1; + let block_hashes = self + .inner + .provider + .canonical_hashes_range(start_block, end_block) + .map_err(|_| EthApiError::UnknownBlockNumber)?; Ok(FilterChanges::Hashes(block_hashes)) } FilterKind::Log(filter) => { @@ -117,6 +123,7 @@ where FilterBlockOption::AtBlockHash(_) => { // blockHash is equivalent to fromBlock = toBlock = the block number with // hash blockHash + // get_logs_in_block_range is inclusive (start_block, best_number) } }; diff --git a/crates/storage/provider/src/traits/block_hash.rs b/crates/storage/provider/src/traits/block_hash.rs index 24cf932841af..cda554f7d7a3 100644 --- a/crates/storage/provider/src/traits/block_hash.rs +++ b/crates/storage/provider/src/traits/block_hash.rs @@ -19,5 +19,9 @@ pub trait BlockHashReader: Send + Sync { } /// Get headers in range of block hashes or numbers + /// + /// Returns the available hashes of that range. + /// + /// Note: The range is `start..end`, so the expected result is `[start..end)` fn canonical_hashes_range(&self, start: BlockNumber, end: BlockNumber) -> Result>; } From 69045fd665fa729f67d477d04c06d6622093e990 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 18 Sep 2023 18:49:59 +0200 Subject: [PATCH 696/722] chore: get rid of unreachable (#4638) --- Cargo.lock | 8 ++++---- crates/rpc/rpc/src/eth/error.rs | 3 --- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 37fad638345e..18ccf93cfcc9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6232,7 +6232,7 @@ dependencies = [ [[package]] name = "revm" version = "3.3.0" -source = "git+https://github.com/bluealloy/revm#fa13feac3b6623a81bb06b325d869050f381d464" +source = "git+https://github.com/bluealloy/revm#70cf969a25a45e3bb4e503926297d61a90c7eec5" dependencies = [ "auto_impl", "revm-interpreter", @@ -6242,7 +6242,7 @@ dependencies = [ [[package]] name = "revm-interpreter" version = "1.1.2" -source = "git+https://github.com/bluealloy/revm#fa13feac3b6623a81bb06b325d869050f381d464" +source = "git+https://github.com/bluealloy/revm#70cf969a25a45e3bb4e503926297d61a90c7eec5" dependencies = [ "derive_more", "enumn", @@ -6253,7 +6253,7 @@ dependencies = [ [[package]] name = "revm-precompile" version = "2.0.3" -source = "git+https://github.com/bluealloy/revm#fa13feac3b6623a81bb06b325d869050f381d464" +source = "git+https://github.com/bluealloy/revm#70cf969a25a45e3bb4e503926297d61a90c7eec5" dependencies = [ "c-kzg 0.1.0 (git+https://github.com/ethereum/c-kzg-4844)", "hex", @@ -6271,7 +6271,7 @@ dependencies = [ [[package]] name = "revm-primitives" version = "1.1.2" -source = "git+https://github.com/bluealloy/revm#fa13feac3b6623a81bb06b325d869050f381d464" +source = "git+https://github.com/bluealloy/revm#70cf969a25a45e3bb4e503926297d61a90c7eec5" dependencies = [ "arbitrary", "auto_impl", diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index 374b25d3ff7c..2aff42e161e5 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -190,9 +190,6 @@ where EVMError::PrevrandaoNotSet => EthApiError::PrevrandaoNotSet, EVMError::ExcessBlobGasNotSet => EthApiError::ExcessBlobGasNotSet, EVMError::Database(err) => err.into(), - _ => { - unreachable!() - } } } } From 20455d055025608819336960b39ea4e5b57c7859 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 18 Sep 2023 18:50:11 +0200 
Subject: [PATCH 697/722] refactor: use revm blob fee calc (#4637) --- crates/payload/builder/src/payload.rs | 2 +- crates/primitives/src/constants/eip4844.rs | 53 +--------------------- crates/primitives/src/eip4844.rs | 3 ++ crates/primitives/src/header.rs | 14 +++--- crates/rpc/rpc/src/eth/api/transactions.rs | 4 +- crates/transaction-pool/src/maintain.rs | 8 ++-- 6 files changed, 16 insertions(+), 68 deletions(-) diff --git a/crates/payload/builder/src/payload.rs b/crates/payload/builder/src/payload.rs index 8ae7dec636a2..cbc30a0d20fd 100644 --- a/crates/payload/builder/src/payload.rs +++ b/crates/payload/builder/src/payload.rs @@ -176,7 +176,7 @@ impl PayloadBuilderAttributes { parent.next_block_base_fee(chain_spec.base_fee_params).unwrap_or_default(), ), // calculate excess gas based on parent block's blob gas usage - excess_blob_gas: parent.next_block_blob_fee().map(|fee| fee.saturating_to()), + excess_blob_gas: parent.next_block_blob_fee(), }; (cfg, block_env) diff --git a/crates/primitives/src/constants/eip4844.rs b/crates/primitives/src/constants/eip4844.rs index 94d56a1f629c..ae0cdc34a033 100644 --- a/crates/primitives/src/constants/eip4844.rs +++ b/crates/primitives/src/constants/eip4844.rs @@ -1,6 +1,6 @@ //! [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#parameters) protocol constants and utils for shard Blob Transactions. -use crate::{kzg::KzgSettings, U128}; +use crate::kzg::KzgSettings; use once_cell::sync::Lazy; use std::{io::Write, sync::Arc}; @@ -66,32 +66,6 @@ pub enum LoadKzgSettingsError { KzgError(c_kzg::Error), } -/// Calculates the blob fee for the given excess blob gas. -pub fn blob_fee(excess_blob_gas: u64) -> U128 { - fake_exponential( - U128::from(BLOB_TX_MIN_BLOB_GASPRICE), - U128::from(excess_blob_gas), - U128::from(BLOB_GASPRICE_UPDATE_FRACTION), - ) -} - -/// Approximates factor * e ** (numerator / denominator) using Taylor expansion. -/// -/// This is used to calculate the blob price. 
-/// -/// See also -pub fn fake_exponential(factor: U128, numerator: U128, denominator: U128) -> U128 { - let mut output = U128::ZERO; - let mut numerator_accum = factor.saturating_mul(denominator); - let mut i = U128::from(1u64); - while numerator_accum > U128::ZERO { - output += numerator_accum; - numerator_accum = numerator_accum * numerator / (denominator * i); - i += U128::from(1u64); - } - output / denominator -} - #[cfg(test)] mod tests { use super::*; @@ -100,29 +74,4 @@ mod tests { fn ensure_load_kzg_settings() { let _settings = Arc::clone(&MAINNET_KZG_TRUSTED_SETUP); } - - #[test] - fn test_fake_exp() { - // - for (factor, num, denom, expected) in &[ - (1u64, 0u64, 1u64, 1u64), - (38493, 0, 1000, 38493), - (0, 1234, 2345, 0), - (1, 2, 1, 6), // approximate 7.389 - (1, 4, 2, 6), - (1, 3, 1, 16), // approximate 20.09 - (1, 6, 2, 18), - (1, 4, 1, 49), // approximate 54.60 - (1, 8, 2, 50), - (10, 8, 2, 542), // approximate 540.598 - (11, 8, 2, 596), // approximate 600.58 - (1, 5, 1, 136), // approximate 148.4 - (1, 5, 2, 11), // approximate 12.18 - (2, 5, 2, 23), // approximate 24.36 - (1, 50000000, 2225652, 5709098764), - ] { - let res = fake_exponential(U128::from(*factor), U128::from(*num), U128::from(*denom)); - assert_eq!(res, U128::from(*expected)); - } - } } diff --git a/crates/primitives/src/eip4844.rs b/crates/primitives/src/eip4844.rs index 01d954373066..f7d136d6ac66 100644 --- a/crates/primitives/src/eip4844.rs +++ b/crates/primitives/src/eip4844.rs @@ -6,6 +6,9 @@ use crate::{ }; use sha2::{Digest, Sha256}; +// re-exports from revm for calculating blob fee +pub use revm_primitives::calc_blob_fee; + /// Calculates the versioned hash for a KzgCommitment /// /// Specified in [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#header-extension) diff --git a/crates/primitives/src/header.rs b/crates/primitives/src/header.rs index 3aa72842402e..0a409787bb38 100644 --- a/crates/primitives/src/header.rs +++ b/crates/primitives/src/header.rs @@ -1,14 +1,12 @@ use crate::{ basefee::calculate_next_block_base_fee, - eip4844::calculate_excess_blob_gas, + eip4844::{calc_blob_fee, calculate_excess_blob_gas}, keccak256, proofs::{EMPTY_LIST_HASH, EMPTY_ROOT}, BaseFeeParams, BlockBodyRoots, BlockHash, BlockNumHash, BlockNumber, Bloom, Bytes, H160, H256, - H64, U128, U256, + H64, U256, }; use bytes::{Buf, BufMut, BytesMut}; - -use crate::constants::eip4844::blob_fee; use reth_codecs::{add_arbitrary_tests, derive_arbitrary, main_codec, Compact}; use reth_rlp::{length_of_length, Decodable, Encodable, EMPTY_LIST_CODE, EMPTY_STRING_CODE}; use serde::{Deserialize, Serialize}; @@ -187,8 +185,8 @@ impl Header { /// Returns the blob fee for _this_ block according to the EIP-4844 spec. /// /// Returns `None` if `excess_blob_gas` is None - pub fn blob_fee(&self) -> Option { - self.excess_blob_gas.map(blob_fee) + pub fn blob_fee(&self) -> Option { + self.excess_blob_gas.map(calc_blob_fee) } /// Returns the blob fee for the next block according to the EIP-4844 spec. @@ -196,8 +194,8 @@ impl Header { /// Returns `None` if `excess_blob_gas` is None. /// /// See also [Self::next_block_excess_blob_gas] - pub fn next_block_blob_fee(&self) -> Option { - self.next_block_excess_blob_gas().map(blob_fee) + pub fn next_block_blob_fee(&self) -> Option { + self.next_block_excess_blob_gas().map(calc_blob_fee) } /// Calculate base fee for next block according to the EIP-1559 spec. 
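For reference, the helper deleted above implemented the integer approximation of `factor * e**(numerator / denominator)` from the EIP-4844 pseudocode, which is what revm's `calc_blob_fee` re-export now computes. A standalone sketch on plain `u128` (reth's version used `U128`; the expected values below come from the removed test table and the EIP's constants):

```rust
/// Taylor-series integer approximation of factor * e^(numerator / denominator);
/// per EIP-4844, the blob base fee is
/// fake_exponential(MIN_BLOB_GASPRICE, excess_blob_gas, BLOB_GASPRICE_UPDATE_FRACTION).
fn fake_exponential(factor: u128, numerator: u128, denominator: u128) -> u128 {
    let mut output: u128 = 0;
    let mut numerator_accum = factor * denominator;
    let mut i: u128 = 1;
    while numerator_accum > 0 {
        output += numerator_accum;
        numerator_accum = (numerator_accum * numerator) / (denominator * i);
        i += 1;
    }
    output / denominator
}

fn main() {
    // MIN_BLOB_GASPRICE = 1, BLOB_GASPRICE_UPDATE_FRACTION = 3338477 (EIP-4844)
    assert_eq!(fake_exponential(1, 0, 3338477), 1); // zero excess blob gas -> 1 wei
    assert_eq!(fake_exponential(1, 2, 1), 6); // truncated approximation of e^2 ~ 7.39
    assert_eq!(fake_exponential(10, 8, 2), 542); // matches the removed test vector
}
```

Integer division truncates each Taylor term, so the result approximates the true exponential from below; fixing this exact algorithm in the EIP keeps every client's blob fee computation bit-identical.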
diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 308d5f392e8b..d3b4e16770c7 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -14,7 +14,7 @@ use crate::{ use async_trait::async_trait; use reth_network_api::NetworkInfo; use reth_primitives::{ - constants::eip4844::blob_fee, + eip4844::calc_blob_fee, Address, BlockId, BlockNumberOrTag, Bytes, FromRecoveredPooledTransaction, Header, IntoRecoveredTransaction, Receipt, SealedBlock, TransactionKind::{Call, Create}, @@ -887,7 +887,7 @@ pub(crate) fn build_transaction_receipt_with_block_receipts( status_code: if receipt.success { Some(U64::from(1)) } else { Some(U64::from(0)) }, // EIP-4844 fields - blob_gas_price: meta.excess_blob_gas.map(blob_fee), + blob_gas_price: meta.excess_blob_gas.map(calc_blob_fee).map(U128::from), blob_gas_used: transaction.transaction.blob_gas_used().map(U128::from), }; diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 16f3a9ebc7ff..7cd290183e8c 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -93,7 +93,7 @@ pub async fn maintain_transaction_pool( pending_basefee: latest .next_block_base_fee(chain_spec.base_fee_params) .unwrap_or_default(), - pending_blob_fee: latest.next_block_blob_fee().map(|fee| fee.saturating_to()), + pending_blob_fee: latest.next_block_blob_fee(), }; pool.set_block_info(info); } @@ -239,8 +239,7 @@ pub async fn maintain_transaction_pool( // fees for the next block: `new_tip+1` let pending_block_base_fee = new_tip.next_block_base_fee(chain_spec.base_fee_params).unwrap_or_default(); - let pending_block_blob_fee = - new_tip.next_block_blob_fee().map(|fee| fee.saturating_to()); + let pending_block_blob_fee = new_tip.next_block_blob_fee(); // we know all changed account in the new chain let new_changed_accounts: HashSet<_> = @@ -321,8 +320,7 @@ pub async fn maintain_transaction_pool( // fees for the next block: `tip+1` let pending_block_base_fee = tip.next_block_base_fee(chain_spec.base_fee_params).unwrap_or_default(); - let pending_block_blob_fee = - tip.next_block_blob_fee().map(|fee| fee.saturating_to()); + let pending_block_blob_fee = tip.next_block_blob_fee(); let first_block = blocks.first(); trace!( From cabb5bee24e746e0d06c7c4a722611ee77b1cafc Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 18 Sep 2023 13:02:28 -0400 Subject: [PATCH 698/722] fix: use proper type for engine_newPayloadV2 (#4630) --- crates/rpc/rpc-api/src/engine.rs | 6 +- crates/rpc/rpc-engine-api/src/engine_api.rs | 10 +- .../rpc/rpc-types/src/eth/engine/payload.rs | 102 ++++++++++++++++++ 3 files changed, 110 insertions(+), 8 deletions(-) diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index 6d22fc89f9ef..2c8075ebab63 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -3,7 +3,7 @@ use reth_primitives::{Address, BlockHash, BlockId, BlockNumberOrTag, Bytes, H256 use reth_rpc_types::{ engine::{ ExecutionPayloadBodiesV1, ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, - ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, ForkchoiceState, + ExecutionPayloadInputV2, ExecutionPayloadV1, ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, PayloadAttributes, PayloadId, PayloadStatus, TransitionConfiguration, }, state::StateOverride, @@ -18,9 +18,9 @@ pub trait EngineApi { #[method(name = 
"newPayloadV1")] async fn new_payload_v1(&self, payload: ExecutionPayloadV1) -> RpcResult; - /// See also + /// See also #[method(name = "newPayloadV2")] - async fn new_payload_v2(&self, payload: ExecutionPayloadV2) -> RpcResult; + async fn new_payload_v2(&self, payload: ExecutionPayloadInputV2) -> RpcResult; /// Post Cancun payload handler /// diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index b7aa7c6a6889..8f05f5d4aab4 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -11,7 +11,7 @@ use reth_provider::{BlockReader, EvmEnvProvider, HeaderProvider, StateProviderFa use reth_rpc_api::EngineApiServer; use reth_rpc_types::engine::{ CancunPayloadFields, ExecutionPayload, ExecutionPayloadBodiesV1, ExecutionPayloadEnvelopeV2, - ExecutionPayloadEnvelopeV3, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, + ExecutionPayloadEnvelopeV3, ExecutionPayloadInputV2, ExecutionPayloadV1, ExecutionPayloadV3, ForkchoiceUpdated, PayloadAttributes, PayloadId, PayloadStatus, TransitionConfiguration, CAPABILITIES, }; @@ -79,10 +79,10 @@ where Ok(self.inner.beacon_consensus.new_payload(payload, None).await?) } - /// See also + /// See also pub async fn new_payload_v2( &self, - payload: ExecutionPayloadV2, + payload: ExecutionPayloadInputV2, ) -> EngineApiResult { let payload = ExecutionPayload::from(payload); let payload_or_attrs = PayloadOrAttributes::from_execution_payload(&payload, None); @@ -592,8 +592,8 @@ where } /// Handler for `engine_newPayloadV2` - /// See also - async fn new_payload_v2(&self, payload: ExecutionPayloadV2) -> RpcResult { + /// See also + async fn new_payload_v2(&self, payload: ExecutionPayloadInputV2) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_newPayloadV2"); Ok(EngineApi::new_payload_v2(self, payload).await?) } diff --git a/crates/rpc/rpc-types/src/eth/engine/payload.rs b/crates/rpc/rpc-types/src/eth/engine/payload.rs index 6b53c4e84cd2..9b6dcea07209 100644 --- a/crates/rpc/rpc-types/src/eth/engine/payload.rs +++ b/crates/rpc/rpc-types/src/eth/engine/payload.rs @@ -71,6 +71,48 @@ impl From for ExecutionPayloadFieldV2 { } } +impl From for ExecutionPayload { + fn from(value: ExecutionPayloadFieldV2) -> Self { + match value { + ExecutionPayloadFieldV2::V1(payload) => ExecutionPayload::V1(payload), + ExecutionPayloadFieldV2::V2(payload) => ExecutionPayload::V2(payload), + } + } +} + +/// This is the input to `engine_newPayloadV2`, which may or may not have a withdrawals field. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ExecutionPayloadInputV2 { + /// The V1 execution payload + #[serde(flatten)] + pub execution_payload: ExecutionPayloadV1, + /// The payload withdrawals + #[serde(default, skip_serializing_if = "Option::is_none")] + pub withdrawals: Option>, +} + +impl From for ExecutionPayload { + fn from(value: ExecutionPayloadInputV2) -> Self { + match value.withdrawals { + Some(withdrawals) => ExecutionPayload::V2(ExecutionPayloadV2 { + payload_inner: value.execution_payload, + withdrawals, + }), + None => ExecutionPayload::V1(value.execution_payload), + } + } +} + +impl From for ExecutionPayloadInputV2 { + fn from(value: SealedBlock) -> Self { + ExecutionPayloadInputV2 { + withdrawals: value.withdrawals.clone(), + execution_payload: value.into(), + } + } +} + /// This structure maps for the return value of `engine_getPayload` of the beacon chain spec, for /// V2. 
/// @@ -898,4 +940,64 @@ mod tests { let envelope: ExecutionPayloadEnvelopeV3 = serde_json::from_str(response).unwrap(); assert_eq!(serde_json::to_string(&envelope).unwrap(), response); } + + #[test] + fn serde_deserialize_execution_payload_input_v2() { + let response = r#" +{ + "baseFeePerGas": "0x173b30b3", + "blockHash": "0x99d486755fd046ad0bbb60457bac93d4856aa42fa00629cc7e4a28b65b5f8164", + "blockNumber": "0xb", + "extraData": "0xd883010d01846765746888676f312e32302e33856c696e7578", + "feeRecipient": "0x0000000000000000000000000000000000000000", + "gasLimit": "0x405829", + "gasUsed": "0x3f0ca0", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "parentHash": "0xfe34aaa2b869c66a727783ee5ad3e3983b6ef22baf24a1e502add94e7bcac67a", + "prevRandao": "0x74132c32fe3ab9a470a8352544514d21b6969e7749f97742b53c18a1b22b396c", + "receiptsRoot": "0x6a5c41dc55a1bd3e74e7f6accc799efb08b00c36c15265058433fcea6323e95f", + "stateRoot": "0xde3b357f5f099e4c33d0343c9e9d204d663d7bd9c65020a38e5d0b2a9ace78a2", + "timestamp": "0x6507d6b4", + "transactions": [ + "0xf86d0a8458b20efd825208946177843db3138ae69679a54b95cf345ed759450d8806f3e8d87878800080820a95a0f8bddb1dcc4558b532ff747760a6f547dd275afdbe7bdecc90680e71de105757a014f34ba38c180913c0543b0ac2eccfb77cc3f801a535008dc50e533fbe435f53", + "0xf86d0b8458b20efd82520894687704db07e902e9a8b3754031d168d46e3d586e8806f3e8d87878800080820a95a0e3108f710902be662d5c978af16109961ffaf2ac4f88522407d40949a9574276a0205719ed21889b42ab5c1026d40b759a507c12d92db0d100fa69e1ac79137caa", + "0xf86d0c8458b20efd8252089415e6a5a2e131dd5467fa1ff3acd104f45ee5940b8806f3e8d87878800080820a96a0af556ba9cda1d686239e08c24e169dece7afa7b85e0948eaa8d457c0561277fca029da03d3af0978322e54ac7e8e654da23934e0dd839804cb0430f8aaafd732dc", + "0xf8521784565adcb7830186a0808080820a96a0ec782872a673a9fe4eff028a5bdb30d6b8b7711f58a187bf55d3aec9757cb18ea001796d373da76f2b0aeda72183cce0ad070a4f03aa3e6fee4c757a9444245206", + "0xf8521284565adcb7830186a0808080820a95a08a0ea89028eff02596b385a10e0bd6ae098f3b281be2c95a9feb1685065d7384a06239d48a72e4be767bd12f317dd54202f5623a33e71e25a87cb25dd781aa2fc8", + "0xf8521384565adcb7830186a0808080820a95a0784dbd311a82f822184a46f1677a428cbe3a2b88a798fb8ad1370cdbc06429e8a07a7f6a0efd428e3d822d1de9a050b8a883938b632185c254944dd3e40180eb79" + ], + "withdrawals": [] +} + "#; + let payload: ExecutionPayloadInputV2 = serde_json::from_str(response).unwrap(); + assert_eq!(payload.withdrawals, Some(vec![])); + + let response = r#" +{ + "baseFeePerGas": "0x173b30b3", + "blockHash": "0x99d486755fd046ad0bbb60457bac93d4856aa42fa00629cc7e4a28b65b5f8164", + "blockNumber": "0xb", + "extraData": "0xd883010d01846765746888676f312e32302e33856c696e7578", + "feeRecipient": "0x0000000000000000000000000000000000000000", + "gasLimit": "0x405829", + "gasUsed": "0x3f0ca0", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "parentHash": "0xfe34aaa2b869c66a727783ee5ad3e3983b6ef22baf24a1e502add94e7bcac67a", + "prevRandao": "0x74132c32fe3ab9a470a8352544514d21b6969e7749f97742b53c18a1b22b396c", + "receiptsRoot": "0x6a5c41dc55a1bd3e74e7f6accc799efb08b00c36c15265058433fcea6323e95f", + "stateRoot": "0xde3b357f5f099e4c33d0343c9e9d204d663d7bd9c65020a38e5d0b2a9ace78a2", + "timestamp": "0x6507d6b4", + "transactions": [ + "0xf86d0a8458b20efd825208946177843db3138ae69679a54b95cf345ed759450d8806f3e8d87878800080820a95a0f8bddb1dcc4558b532ff747760a6f547dd275afdbe7bdecc90680e71de105757a014f34ba38c180913c0543b0ac2eccfb77cc3f801a535008dc50e533fbe435f53", + "0xf86d0b8458b20efd82520894687704db07e902e9a8b3754031d168d46e3d586e8806f3e8d87878800080820a95a0e3108f710902be662d5c978af16109961ffaf2ac4f88522407d40949a9574276a0205719ed21889b42ab5c1026d40b759a507c12d92db0d100fa69e1ac79137caa", + "0xf86d0c8458b20efd8252089415e6a5a2e131dd5467fa1ff3acd104f45ee5940b8806f3e8d87878800080820a96a0af556ba9cda1d686239e08c24e169dece7afa7b85e0948eaa8d457c0561277fca029da03d3af0978322e54ac7e8e654da23934e0dd839804cb0430f8aaafd732dc", + "0xf8521784565adcb7830186a0808080820a96a0ec782872a673a9fe4eff028a5bdb30d6b8b7711f58a187bf55d3aec9757cb18ea001796d373da76f2b0aeda72183cce0ad070a4f03aa3e6fee4c757a9444245206", + "0xf8521284565adcb7830186a0808080820a95a08a0ea89028eff02596b385a10e0bd6ae098f3b281be2c95a9feb1685065d7384a06239d48a72e4be767bd12f317dd54202f5623a33e71e25a87cb25dd781aa2fc8", + "0xf8521384565adcb7830186a0808080820a95a0784dbd311a82f822184a46f1677a428cbe3a2b88a798fb8ad1370cdbc06429e8a07a7f6a0efd428e3d822d1de9a050b8a883938b632185c254944dd3e40180eb79" + ] +} + "#; + let payload: ExecutionPayloadInputV2 = serde_json::from_str(response).unwrap(); + assert_eq!(payload.withdrawals, None); + } } From 4aa3ebdbdddb31484547f8c08985b15e29124772 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 18 Sep 2023 18:08:35 +0100 Subject: [PATCH 699/722] chore: fix doc lints (#4639) --- bin/reth/src/args/secret_key.rs | 2 +- crates/net/dns/src/resolver.rs | 2 +- crates/net/downloaders/src/bodies/bodies.rs | 3 +-- .../src/headers/reverse_headers.rs | 3 +-- .../downloaders/src/test_utils/file_client.rs | 4 ++-- crates/net/eth-wire/src/builder.rs | 13 ++++++------- crates/net/eth-wire/src/disconnect.rs | 8 ++++---- crates/net/eth-wire/src/p2pstream.rs | 19 ++++++++++--------- crates/net/eth-wire/src/types/status.rs | 3 +-- crates/net/network/src/peers/manager.rs | 2 +- crates/net/network/src/transactions.rs | 4 ++-- crates/primitives/src/block.rs | 2 +- crates/primitives/src/chain/spec.rs | 2 +- crates/primitives/src/serde_helper/num.rs | 4 ++-- crates/revm/revm-inspectors/src/stack/mod.rs | 2 +- crates/revm/src/lib.rs | 2 +- crates/rpc/rpc/src/layers/auth_layer.rs | 8 +++----- crates/rpc/rpc/src/layers/jwt_secret.rs | 7 +++---- crates/rpc/rpc/src/layers/mod.rs | 8 +++----- crates/stages/src/pipeline/builder.rs | 2 +- crates/storage/libmdbx-rs/src/transaction.rs | 4 ++-- crates/transaction-pool/src/lib.rs | 7 +++---- crates/transaction-pool/src/pool/blob.rs | 2 
+- 23 files changed, 52 insertions(+), 61 deletions(-) diff --git a/bin/reth/src/args/secret_key.rs b/bin/reth/src/args/secret_key.rs index 644513c3439f..9dbb83078f15 100644 --- a/bin/reth/src/args/secret_key.rs +++ b/bin/reth/src/args/secret_key.rs @@ -8,7 +8,7 @@ use std::{ }; use thiserror::Error; -/// Errors returned by loading a [`SecretKey`][secp256k1::SecretKey], including IO errors. +/// Errors returned by loading a [`SecretKey`], including IO errors. #[derive(Error, Debug)] #[allow(missing_docs)] pub enum SecretKeyError { diff --git a/crates/net/dns/src/resolver.rs b/crates/net/dns/src/resolver.rs index 907a3b054a56..d8b22831b4ab 100644 --- a/crates/net/dns/src/resolver.rs +++ b/crates/net/dns/src/resolver.rs @@ -36,7 +36,7 @@ impl Resolver for AsyncResolver

{ /// An asynchronous DNS resolver /// -/// See also [TokioAsyncResolver](trust_dns_resolver::TokioAsyncResolver) +/// See also [TokioAsyncResolver] /// /// ``` /// # fn t() { diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index e615520e33eb..e2f51d0c2429 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -297,8 +297,7 @@ where self.into_task_with(&TokioTaskExecutor::default()) } - /// Convert the downloader into a [`TaskDownloader`](super::task::TaskDownloader) by spawning - /// it via the given spawner. + /// Convert the downloader into a [`TaskDownloader`] by spawning it via the given spawner. pub fn into_task_with(self, spawner: &S) -> TaskDownloader where S: TaskSpawner, diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index 17a9232278c4..bcee0549cb47 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -643,8 +643,7 @@ where self.into_task_with(&TokioTaskExecutor::default()) } - /// Convert the downloader into a [`TaskDownloader`](super::task::TaskDownloader) by spawning - /// it via the given `spawner`. + /// Convert the downloader into a [`TaskDownloader`] by spawning it via the given `spawner`. pub fn into_task_with(self, spawner: &S) -> TaskDownloader where S: TaskSpawner, diff --git a/crates/net/downloaders/src/test_utils/file_client.rs b/crates/net/downloaders/src/test_utils/file_client.rs index 64aee95cd9bf..11135264cd6e 100644 --- a/crates/net/downloaders/src/test_utils/file_client.rs +++ b/crates/net/downloaders/src/test_utils/file_client.rs @@ -56,7 +56,7 @@ pub struct FileClient { bodies: HashMap, } -/// An error that can occur when constructing and using a [`FileClient`](FileClient). +/// An error that can occur when constructing and using a [`FileClient`]. #[derive(Debug, Error)] pub enum FileClientError { /// An error occurred when opening or reading the file. @@ -75,7 +75,7 @@ impl FileClient { FileClient::from_file(file).await } - /// Initialize the [`FileClient`](FileClient) with a file directly. + /// Initialize the [`FileClient`] with a file directly. pub(crate) async fn from_file(mut file: File) -> Result { // get file len from metadata before reading let metadata = file.metadata().await?; diff --git a/crates/net/eth-wire/src/builder.rs b/crates/net/eth-wire/src/builder.rs index 6b271f72df1d..b0ab46a49214 100644 --- a/crates/net/eth-wire/src/builder.rs +++ b/crates/net/eth-wire/src/builder.rs @@ -1,5 +1,4 @@ -//! Builder structs for [`Status`](crate::types::Status) and -//! [`HelloMessage`](crate::HelloMessage) messages. +//! Builder structs for [`Status`] and [`HelloMessage`] messages. use crate::{ capability::Capability, hello::HelloMessage, p2pstream::ProtocolVersion, EthVersion, Status, @@ -7,7 +6,7 @@ use crate::{ use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_primitives::{Chain, ForkId, PeerId, H256, U256}; -/// Builder for [`Status`](crate::types::Status) messages. +/// Builder for [`Status`] messages. /// /// # Example /// ``` @@ -43,7 +42,7 @@ pub struct StatusBuilder { } impl StatusBuilder { - /// Consumes the type and creates the actual [`Status`](crate::types::Status) message. + /// Consumes the type and creates the actual [`Status`] message. 
pub fn build(self) -> Status { self.status } @@ -85,14 +84,14 @@ impl StatusBuilder { } } -/// Builder for [`HelloMessage`](crate::HelloMessage) messages. +/// Builder for [`HelloMessage`] messages. pub struct HelloBuilder { hello: HelloMessage, } impl HelloBuilder { - /// Creates a new [`HelloBuilder`](crate::builder::HelloBuilder) with default [`HelloMessage`] - /// values, and a `PeerId` corresponding to the given pubkey. + /// Creates a new [`HelloBuilder`] with default [`HelloMessage`] values, and a `PeerId` + /// corresponding to the given pubkey. pub fn new(pubkey: PeerId) -> Self { Self { hello: HelloMessage { diff --git a/crates/net/eth-wire/src/disconnect.rs b/crates/net/eth-wire/src/disconnect.rs index b72d7bf9a2bf..a3d0b252a316 100644 --- a/crates/net/eth-wire/src/disconnect.rs +++ b/crates/net/eth-wire/src/disconnect.rs @@ -106,8 +106,8 @@ impl TryFrom for DisconnectReason { } } -/// The [`Encodable`](reth_rlp::Encodable) implementation for [`DisconnectReason`] encodes the -/// disconnect reason in a single-element RLP list. +/// The [`Encodable`] implementation for [`DisconnectReason`] encodes the disconnect reason in a +/// single-element RLP list. impl Encodable for DisconnectReason { fn encode(&self, out: &mut dyn BufMut) { vec![*self as u8].encode(out); @@ -117,8 +117,8 @@ impl Encodable for DisconnectReason { } } -/// The [`Decodable`](reth_rlp::Decodable) implementation for [`DisconnectReason`] supports either -/// a disconnect reason encoded a single byte or a RLP list containing the disconnect reason. +/// The [`Decodable`] implementation for [`DisconnectReason`] supports either a disconnect reason +/// encoded a single byte or a RLP list containing the disconnect reason. impl Decodable for DisconnectReason { fn decode(buf: &mut &[u8]) -> Result { if buf.is_empty() { diff --git a/crates/net/eth-wire/src/p2pstream.rs b/crates/net/eth-wire/src/p2pstream.rs index 081662db09bc..41781f10207a 100644 --- a/crates/net/eth-wire/src/p2pstream.rs +++ b/crates/net/eth-wire/src/p2pstream.rs @@ -687,10 +687,10 @@ impl P2PMessage { } } -/// The [`Encodable`](reth_rlp::Encodable) implementation for [`P2PMessage::Ping`] and -/// [`P2PMessage::Pong`] encodes the message as RLP, and prepends a snappy header to the RLP bytes -/// for all variants except the [`P2PMessage::Hello`] variant, because the hello message is never -/// compressed in the `p2p` subprotocol. +/// The [`Encodable`] implementation for [`P2PMessage::Ping`] and [`P2PMessage::Pong`] encodes the +/// message as RLP, and prepends a snappy header to the RLP bytes for all variants except the +/// [`P2PMessage::Hello`] variant, because the hello message is never compressed in the `p2p` +/// subprotocol. impl Encodable for P2PMessage { fn encode(&self, out: &mut dyn BufMut) { (self.message_id() as u8).encode(out); @@ -724,11 +724,12 @@ impl Encodable for P2PMessage { } } -/// The [`Decodable`](reth_rlp::Decodable) implementation for [`P2PMessage`] assumes that each of -/// the message variants are snappy compressed, except for the [`P2PMessage::Hello`] variant since -/// the hello message is never compressed in the `p2p` subprotocol. -/// The [`Decodable`] implementation for [`P2PMessage::Ping`] and -/// [`P2PMessage::Pong`] expects a snappy encoded payload, see [`Encodable`] implementation. 
+/// The [`Decodable`] implementation for [`P2PMessage`] assumes that each of the message variants +/// are snappy compressed, except for the [`P2PMessage::Hello`] variant since the hello message is +/// never compressed in the `p2p` subprotocol. +/// +/// The [`Decodable`] implementation for [`P2PMessage::Ping`] and [`P2PMessage::Pong`] expects a +/// snappy encoded payload, see [`Encodable`] implementation. impl Decodable for P2PMessage { fn decode(buf: &mut &[u8]) -> Result { /// Removes the snappy prefix from the Ping/Pong buffer diff --git a/crates/net/eth-wire/src/types/status.rs b/crates/net/eth-wire/src/types/status.rs index a29af1894147..cbb8ca0c16a4 100644 --- a/crates/net/eth-wire/src/types/status.rs +++ b/crates/net/eth-wire/src/types/status.rs @@ -67,8 +67,7 @@ impl Status { Default::default() } - /// Create a [`StatusBuilder`] from the given [`ChainSpec`](reth_primitives::ChainSpec) and - /// head block. + /// Create a [`StatusBuilder`] from the given [`ChainSpec`] and head block. /// /// Sets the `chain` and `genesis`, `blockhash`, and `forkid` fields based on the [`ChainSpec`] /// and head. diff --git a/crates/net/network/src/peers/manager.rs b/crates/net/network/src/peers/manager.rs index a9a7e48bdb3d..2188647b3909 100644 --- a/crates/net/network/src/peers/manager.rs +++ b/crates/net/network/src/peers/manager.rs @@ -1217,7 +1217,7 @@ impl PeersConfig { /// The durations to use when a backoff should be applied to a peer. /// -/// See also [`BackoffKind`](BackoffKind). +/// See also [`BackoffKind`]. #[derive(Debug, Clone, Copy, PartialEq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct PeerBackoffDurations { diff --git a/crates/net/network/src/transactions.rs b/crates/net/network/src/transactions.rs index 80d9a89bde1a..d765436c5ce5 100644 --- a/crates/net/network/src/transactions.rs +++ b/crates/net/network/src/transactions.rs @@ -236,7 +236,7 @@ where /// Propagate the transactions to all connected peers either as full objects or hashes /// /// The message for new pooled hashes depends on the negotiated version of the stream. - /// See [NewPooledTransactionHashes](NewPooledTransactionHashes) + /// See [NewPooledTransactionHashes] /// /// Note: EIP-4844 are disallowed from being broadcast in full and are only ever sent as hashes, see also . fn propagate_transactions( @@ -827,7 +827,7 @@ struct Peer { client_version: Arc, } -/// Commands to send to the [`TransactionsManager`](crate::transactions::TransactionsManager) +/// Commands to send to the [`TransactionsManager`] enum TransactionsCommand { PropagateHash(H256), } diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 01a477482bc1..a6696ccefa77 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -810,7 +810,7 @@ pub struct BlockBody { } impl BlockBody { - /// Create a [`Block`](Block) from the body and its header. + /// Create a [`Block`] from the body and its header. pub fn create_block(&self, header: Header) -> Block { Block { header, diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index 41a5410968bd..7cda28e689be 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -463,7 +463,7 @@ impl ChainSpec { .unwrap_or_else(|| self.is_fork_active_at_timestamp(Hardfork::Cancun, timestamp)) } - /// Creates a [`ForkFilter`](crate::ForkFilter) for the block described by [Head]. + /// Creates a [`ForkFilter`] for the block described by [Head]. 
pub fn fork_filter(&self, head: Head) -> ForkFilter { let forks = self.forks_iter().filter_map(|(_, condition)| { // We filter out TTD-based forks w/o a pre-known block since those do not show up in the diff --git a/crates/primitives/src/serde_helper/num.rs b/crates/primitives/src/serde_helper/num.rs index 820174985138..02544ee0f065 100644 --- a/crates/primitives/src/serde_helper/num.rs +++ b/crates/primitives/src/serde_helper/num.rs @@ -69,7 +69,7 @@ impl<'de> Deserialize<'de> for U64HexOrNumber { } } -/// serde functions for handling primitive `u64` as [U64](crate::U64) +/// serde functions for handling primitive `u64` as [U64] pub mod u64_hex_or_decimal { use crate::serde_helper::num::U64HexOrNumber; use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -89,7 +89,7 @@ pub mod u64_hex_or_decimal { } } -/// serde functions for handling primitive optional `u64` as [U64](crate::U64) +/// serde functions for handling primitive optional `u64` as [U64] pub mod u64_hex_or_decimal_opt { use crate::serde_helper::num::U64HexOrNumber; use serde::{Deserialize, Deserializer, Serialize, Serializer}; diff --git a/crates/revm/revm-inspectors/src/stack/mod.rs b/crates/revm/revm-inspectors/src/stack/mod.rs index 482e09d4ce95..fac15905bbfe 100644 --- a/crates/revm/revm-inspectors/src/stack/mod.rs +++ b/crates/revm/revm-inspectors/src/stack/mod.rs @@ -8,7 +8,7 @@ use revm::{ Database, EVMData, Inspector, }; -/// A wrapped [Inspector](revm::Inspector) that can be reused in the stack +/// A wrapped [Inspector] that can be reused in the stack mod maybe_owned; pub use maybe_owned::MaybeOwnedInspector; diff --git a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs index ac5da0b58bbf..2db43b57ad2b 100644 --- a/crates/revm/src/lib.rs +++ b/crates/revm/src/lib.rs @@ -13,7 +13,7 @@ //! revm utils and implementations specific to reth. -/// Contains glue code for integrating reth database into revm's [Database](revm::Database). +/// Contains glue code for integrating reth database into revm's [Database]. pub mod database; /// revm implementation of reth block and transaction executors. diff --git a/crates/rpc/rpc/src/layers/auth_layer.rs b/crates/rpc/rpc/src/layers/auth_layer.rs index 0548320eb113..838fb678dd7b 100644 --- a/crates/rpc/rpc/src/layers/auth_layer.rs +++ b/crates/rpc/rpc/src/layers/auth_layer.rs @@ -48,7 +48,7 @@ where V: AuthValidator, V::ResponseBody: Body, { - /// Creates an instance of [`AuthLayer`][crate::layers::AuthLayer]. + /// Creates an instance of [`AuthLayer`]. /// `validator` is a generic trait able to validate requests (see [`AuthValidator`]). pub fn new(validator: V) -> Self { Self { validator } @@ -66,10 +66,8 @@ where } } -/// This type is the actual implementation of -/// the middleware. It follows the [`Service`](tower::Service) -/// specification to correctly proxy Http requests -/// to its inner service after headers validation. +/// This type is the actual implementation of the middleware. It follows the [`Service`] +/// specification to correctly proxy Http requests to its inner service after headers validation. 
#[allow(missing_debug_implementations)] pub struct AuthService { /// Performs auth validation logics diff --git a/crates/rpc/rpc/src/layers/jwt_secret.rs b/crates/rpc/rpc/src/layers/jwt_secret.rs index 0af6251438cb..9be36c34cb16 100644 --- a/crates/rpc/rpc/src/layers/jwt_secret.rs +++ b/crates/rpc/rpc/src/layers/jwt_secret.rs @@ -9,7 +9,7 @@ use std::{ }; use thiserror::Error; -/// Errors returned by the [`JwtSecret`][crate::layers::JwtSecret] +/// Errors returned by the [`JwtSecret`] #[derive(Error, Debug)] #[allow(missing_docs)] pub enum JwtError { @@ -56,7 +56,7 @@ const JWT_SIGNATURE_ALGO: Algorithm = Algorithm::HS256; pub struct JwtSecret([u8; 32]); impl JwtSecret { - /// Creates an instance of [`JwtSecret`][crate::layers::JwtSecret]. + /// Creates an instance of [`JwtSecret`]. /// /// Returns an error if one of the following applies: /// - `hex` is not a valid hexadecimal string @@ -138,8 +138,7 @@ impl JwtSecret { Ok(()) } - /// Generates a random [`JwtSecret`][crate::layers::JwtSecret] - /// containing a hex-encoded 256 bit secret key. + /// Generates a random [`JwtSecret`] containing a hex-encoded 256 bit secret key. pub fn random() -> Self { let random_bytes: [u8; 32] = rand::thread_rng().gen(); let secret = hex_encode(random_bytes); diff --git a/crates/rpc/rpc/src/layers/mod.rs b/crates/rpc/rpc/src/layers/mod.rs index b8a3cf2e47c6..ff021a37250b 100644 --- a/crates/rpc/rpc/src/layers/mod.rs +++ b/crates/rpc/rpc/src/layers/mod.rs @@ -7,15 +7,13 @@ pub use auth_layer::AuthLayer; pub use jwt_secret::{Claims, JwtError, JwtSecret}; pub use jwt_validator::JwtAuthValidator; -/// General purpose trait to validate Http Authorization -/// headers. It's supposed to be integrated as a validator -/// trait into an [`AuthLayer`][crate::layers::AuthLayer]. +/// General purpose trait to validate Http Authorization headers. It's supposed to be integrated as +/// a validator trait into an [`AuthLayer`]. pub trait AuthValidator { /// Body type of the error response type ResponseBody; - /// This function is invoked by the [`AuthLayer`][crate::layers::AuthLayer] - /// to perform validation on Http headers. + /// This function is invoked by the [`AuthLayer`] to perform validation on Http headers. /// The result conveys validation errors in the form of an Http response. fn validate(&self, headers: &HeaderMap) -> Result<(), Response>; } diff --git a/crates/stages/src/pipeline/builder.rs b/crates/stages/src/pipeline/builder.rs index 7679361c839f..eca3d9209a11 100644 --- a/crates/stages/src/pipeline/builder.rs +++ b/crates/stages/src/pipeline/builder.rs @@ -69,7 +69,7 @@ where /// Builds the final [`Pipeline`] using the given database. /// - /// Note: it's expected that this is either an [Arc](std::sync::Arc) or an Arc wrapper type. + /// Note: it's expected that this is either an [Arc] or an Arc wrapper type. pub fn build(self, db: DB, chain_spec: Arc) -> Pipeline { let Self { stages, max_block, tip_tx, metrics_tx } = self; Pipeline { diff --git a/crates/storage/libmdbx-rs/src/transaction.rs b/crates/storage/libmdbx-rs/src/transaction.rs index df391e94e0c3..5f9bf644064f 100644 --- a/crates/storage/libmdbx-rs/src/transaction.rs +++ b/crates/storage/libmdbx-rs/src/transaction.rs @@ -256,8 +256,8 @@ where /// case the environment must be configured to allow named databases through /// [EnvironmentBuilder::set_max_dbs()](crate::EnvironmentBuilder::set_max_dbs). /// - /// This function will fail with [Error::BadRslot](crate::error::Error::BadRslot) if called by a - /// thread with an open transaction. 
+ /// This function will fail with [Error::BadRslot] if called by a thread with an open + /// transaction. pub fn create_db<'txn>( &'txn self, name: Option<&str>, diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 667e611aadde..4b154028635f 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -79,12 +79,11 @@ //! //! The lowest layer is the actual pool implementations that manages (validated) transactions: //! [`TxPool`](crate::pool::txpool::TxPool). This is contained in a higher level pool type that -//! guards the low level pool and handles additional listeners or metrics: -//! [`PoolInner`](crate::pool::PoolInner) +//! guards the low level pool and handles additional listeners or metrics: [`PoolInner`]. //! //! The transaction pool will be used by separate consumers (RPC, P2P), to make sharing easier, the -//! [`Pool`](crate::Pool) type is just an `Arc` wrapper around `PoolInner`. This is the usable type -//! that provides the `TransactionPool` interface. +//! [`Pool`] type is just an `Arc` wrapper around `PoolInner`. This is the usable type that provides +//! the `TransactionPool` interface. //! //! //! ## Blob Transactions diff --git a/crates/transaction-pool/src/pool/blob.rs b/crates/transaction-pool/src/pool/blob.rs index f0c14e95d261..8ee1fb5caceb 100644 --- a/crates/transaction-pool/src/pool/blob.rs +++ b/crates/transaction-pool/src/pool/blob.rs @@ -26,7 +26,7 @@ pub(crate) struct BlobTransactions { all: BTreeSet>, /// Keeps track of the size of this pool. /// - /// See also [`PoolTransaction::size`](crate::traits::PoolTransaction::size). + /// See also [`PoolTransaction::size`]. size_of: SizeTracker, } From b5905c482bfde8618c43552082d1d3dc0169e584 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 18 Sep 2023 18:19:22 +0100 Subject: [PATCH 700/722] feat(bin): log prune config on startup (#4641) --- bin/reth/src/node/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 8905dba4d0c5..70d61ca8b44f 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -447,7 +447,7 @@ impl NodeCommand { }; let pruner = prune_config.map(|prune_config| { - info!(target: "reth::cli", "Pruner initialized"); + info!(target: "reth::cli", ?prune_config, "Pruner initialized"); reth_prune::Pruner::new( db.clone(), self.chain.clone(), From 78edae4d4f6f9b4779922d4a98f8c7d862dc988d Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 18 Sep 2023 19:45:31 +0200 Subject: [PATCH 701/722] feat(prune): add pruner log with `INFO` level (#4573) Co-authored-by: Alexey Shekhirin --- crates/prune/src/pruner.rs | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs index a47a36a46da9..6cf6f5f728da 100644 --- a/crates/prune/src/pruner.rs +++ b/crates/prune/src/pruner.rs @@ -19,8 +19,8 @@ use reth_provider::{ BlockReader, DatabaseProviderRW, ProviderFactory, PruneCheckpointReader, PruneCheckpointWriter, TransactionsProvider, }; -use std::{ops::RangeInclusive, sync::Arc, time::Instant}; -use tracing::{debug, error, instrument, trace}; +use std::{collections::HashMap, ops::RangeInclusive, sync::Arc, time::Instant}; +use tracing::{debug, error, info, instrument, trace}; /// Result of [Pruner::run] execution. 
/// @@ -81,6 +81,8 @@ impl Pruner { let mut done = true; + let mut parts_done = HashMap::new(); + if let Some((to_block, prune_mode)) = self.modes.prune_target_block_receipts(tip_block_number)? { @@ -95,6 +97,7 @@ impl Pruner { let part_start = Instant::now(); let part_done = self.prune_receipts(&provider, to_block, prune_mode)?; done = done && part_done; + parts_done.insert(PrunePart::Receipts, part_done); self.metrics .get_prune_part_metrics(PrunePart::Receipts) .duration_seconds @@ -107,6 +110,7 @@ impl Pruner { let part_start = Instant::now(); let part_done = self.prune_receipts_by_logs(&provider, tip_block_number)?; done = done && part_done; + parts_done.insert(PrunePart::ContractLogs, part_done); self.metrics .get_prune_part_metrics(PrunePart::ContractLogs) .duration_seconds @@ -129,6 +133,7 @@ impl Pruner { let part_start = Instant::now(); let part_done = self.prune_transaction_lookup(&provider, to_block, prune_mode)?; done = done && part_done; + parts_done.insert(PrunePart::TransactionLookup, part_done); self.metrics .get_prune_part_metrics(PrunePart::TransactionLookup) .duration_seconds @@ -155,6 +160,7 @@ impl Pruner { let part_start = Instant::now(); let part_done = self.prune_transaction_senders(&provider, to_block, prune_mode)?; done = done && part_done; + parts_done.insert(PrunePart::SenderRecovery, part_done); self.metrics .get_prune_part_metrics(PrunePart::SenderRecovery) .duration_seconds @@ -181,6 +187,7 @@ impl Pruner { let part_start = Instant::now(); let part_done = self.prune_account_history(&provider, to_block, prune_mode)?; done = done && part_done; + parts_done.insert(PrunePart::AccountHistory, part_done); self.metrics .get_prune_part_metrics(PrunePart::AccountHistory) .duration_seconds @@ -207,6 +214,7 @@ impl Pruner { let part_start = Instant::now(); let part_done = self.prune_storage_history(&provider, to_block, prune_mode)?; done = done && part_done; + parts_done.insert(PrunePart::StorageHistory, part_done); self.metrics .get_prune_part_metrics(PrunePart::StorageHistory) .duration_seconds @@ -225,7 +233,14 @@ impl Pruner { let elapsed = start.elapsed(); self.metrics.duration_seconds.record(elapsed); - trace!(target: "pruner", %tip_block_number, ?elapsed, "Pruner finished"); + info!( + target: "pruner", + %tip_block_number, + ?elapsed, + %done, + ?parts_done, + "Pruner finished" + ); Ok(done) } From 11f5f3f8d79eca8f03a6c714be338fc0c68f3faa Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 18 Sep 2023 18:52:58 +0100 Subject: [PATCH 702/722] feat(engine): hooks (#4582) --- bin/reth/src/node/mod.rs | 18 +- crates/consensus/beacon/src/engine/error.rs | 6 +- .../beacon/src/engine/hooks/controller.rs | 132 ++++++++++++++ .../consensus/beacon/src/engine/hooks/mod.rs | 128 +++++++++++++ .../beacon/src/engine/{ => hooks}/prune.rs | 131 ++++++++------ crates/consensus/beacon/src/engine/metrics.rs | 2 - crates/consensus/beacon/src/engine/mod.rs | 168 ++++++------------ .../consensus/beacon/src/engine/test_utils.rs | 11 +- crates/interfaces/src/sync.rs | 4 +- 9 files changed, 423 insertions(+), 177 deletions(-) create mode 100644 crates/consensus/beacon/src/engine/hooks/controller.rs create mode 100644 crates/consensus/beacon/src/engine/hooks/mod.rs rename crates/consensus/beacon/src/engine/{ => hooks}/prune.rs (50%) diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 70d61ca8b44f..2e626a1c63e3 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -25,7 +25,10 @@ use eyre::Context; use fdlimit::raise_fd_limit; use 
futures::{future::Either, pin_mut, stream, stream_select, StreamExt};
 use reth_auto_seal_consensus::{AutoSealBuilder, AutoSealConsensus, MiningMode};
-use reth_beacon_consensus::{BeaconConsensus, BeaconConsensusEngine, MIN_BLOCKS_FOR_PIPELINE_RUN};
+use reth_beacon_consensus::{
+    hooks::{EngineHooks, PruneHook},
+    BeaconConsensus, BeaconConsensusEngine, MIN_BLOCKS_FOR_PIPELINE_RUN,
+};
 use reth_blockchain_tree::{
     config::BlockchainTreeConfig, externals::TreeExternals, BlockchainTree,
     ShareableBlockchainTree,
 };
@@ -446,16 +449,19 @@ impl NodeCommand {
             None
         };
 
-        let pruner = prune_config.map(|prune_config| {
+        let mut hooks = EngineHooks::new();
+
+        if let Some(prune_config) = prune_config {
             info!(target: "reth::cli", ?prune_config, "Pruner initialized");
-            reth_prune::Pruner::new(
+            let pruner = reth_prune::Pruner::new(
                 db.clone(),
                 self.chain.clone(),
                 prune_config.block_interval,
                 prune_config.parts,
                 self.chain.prune_batch_sizes,
-            )
-        });
+            );
+            hooks.add(PruneHook::new(pruner, Box::new(ctx.task_executor.clone())));
+        }
 
         // Configure the consensus engine
         let (beacon_consensus_engine, beacon_engine_handle) = BeaconConsensusEngine::with_channel(
@@ -471,7 +477,7 @@ impl NodeCommand {
             MIN_BLOCKS_FOR_PIPELINE_RUN,
             consensus_engine_tx,
             consensus_engine_rx,
-            pruner,
+            hooks,
         )?;
         info!(target: "reth::cli", "Consensus engine initialized");
 
diff --git a/crates/consensus/beacon/src/engine/error.rs b/crates/consensus/beacon/src/engine/error.rs
index b78b3828bdbd..eb4ffd62acac 100644
--- a/crates/consensus/beacon/src/engine/error.rs
+++ b/crates/consensus/beacon/src/engine/error.rs
@@ -1,4 +1,4 @@
-use reth_prune::PrunerError;
+use crate::engine::hooks::EngineHookError;
 use reth_rpc_types::engine::ForkchoiceUpdateError;
 use reth_stages::PipelineError;
 
@@ -20,9 +20,9 @@ pub enum BeaconConsensusEngineError {
     /// Pruner channel closed.
     #[error("Pruner channel closed")]
     PrunerChannelClosed,
-    /// Pruner error.
+    /// Hook error.
     #[error(transparent)]
-    Pruner(#[from] PrunerError),
+    Hook(#[from] EngineHookError),
     /// Common error. Wrapper around [reth_interfaces::Error].
     #[error(transparent)]
     Common(#[from] reth_interfaces::Error),
diff --git a/crates/consensus/beacon/src/engine/hooks/controller.rs b/crates/consensus/beacon/src/engine/hooks/controller.rs
new file mode 100644
index 000000000000..1449629c70f8
--- /dev/null
+++ b/crates/consensus/beacon/src/engine/hooks/controller.rs
@@ -0,0 +1,132 @@
+use crate::hooks::{EngineContext, EngineHook, EngineHookAction, EngineHookError, EngineHooks};
+use std::{
+    collections::VecDeque,
+    task::{Context, Poll},
+};
+use tracing::debug;
+
+/// Manages hooks under the control of the engine.
+///
+/// This type polls the initialized hooks one by one, respecting the DB access level
+/// (i.e. [crate::hooks::EngineHookDBAccessLevel::ReadWrite] that enforces running at most one such
+/// hook).
+pub(crate) struct EngineHooksController {
+    /// Collection of hooks.
+    ///
+    /// Hooks might be removed from the collection, and returned upon completion.
+    /// In the current implementation, it only happens when moved to `running_hook_with_db_write`.
+    hooks: VecDeque<Box<dyn EngineHook>>,
+    /// Currently running hook with DB write access, if any.
+    running_hook_with_db_write: Option<Box<dyn EngineHook>>,
+}
+
+impl EngineHooksController {
+    /// Creates a new [`EngineHooksController`].
+    pub(crate) fn new(hooks: EngineHooks) -> Self {
+        Self { hooks: hooks.inner.into(), running_hook_with_db_write: None }
+    }
+
+    /// Polls currently running hook with DB write access, if any.
+    ///
+    /// Returns [`Poll::Ready`] if currently running hook with DB write access returned
+    /// an [event][`crate::hooks::EngineHookEvent`] that resulted in [action][`EngineHookAction`] or
+    /// error.
+    ///
+    /// Returns [`Poll::Pending`] in all other cases:
+    /// 1. No hook with DB write access is running.
+    /// 2. Currently running hook with DB write access returned [`Poll::Pending`] on polling.
+    /// 3. Currently running hook with DB write access returned [`Poll::Ready`] on polling, but no
+    ///    action to act upon.
+    pub(crate) fn poll_running_hook_with_db_write(
+        &mut self,
+        cx: &mut Context<'_>,
+        args: EngineContext,
+    ) -> Poll<Result<EngineHookAction, EngineHookError>> {
+        let Some(mut hook) = self.running_hook_with_db_write.take() else { return Poll::Pending };
+
+        match hook.poll(cx, args) {
+            Poll::Ready((event, action)) => {
+                debug!(
+                    target: "consensus::engine::hooks",
+                    hook = hook.name(),
+                    ?action,
+                    ?event,
+                    "Polled running hook with db write access"
+                );
+
+                if !event.is_finished() {
+                    self.running_hook_with_db_write = Some(hook);
+                } else {
+                    self.hooks.push_back(hook);
+                }
+
+                if let Some(action) = action {
+                    return Poll::Ready(Ok(action))
+                }
+            }
+            Poll::Pending => {
+                self.running_hook_with_db_write = Some(hook);
+            }
+        }
+
+        Poll::Pending
+    }
+
+    /// Polls next hook from the collection.
+    ///
+    /// Returns [`Poll::Ready`] if next hook returned an [event][`crate::hooks::EngineHookEvent`]
+    /// that resulted in [action][`EngineHookAction`].
+    ///
+    /// Returns [`Poll::Pending`] in all other cases:
+    /// 1. Next hook is [`Option::None`], i.e. taken, meaning it's currently running and has a DB
+    ///    write access.
+    /// 2. Next hook needs a DB write access, but either there's another hook with DB write access
+    ///    running, or `db_write_active` passed into arguments is `true`.
+    /// 3. Next hook returned [`Poll::Pending`] on polling.
+    /// 4. Next hook returned [`Poll::Ready`] on polling, but no action to act upon.
+    pub(crate) fn poll_next_hook(
+        &mut self,
+        cx: &mut Context<'_>,
+        args: EngineContext,
+        db_write_active: bool,
+    ) -> Poll<Result<EngineHookAction, EngineHookError>> {
+        let Some(mut hook) = self.hooks.pop_front() else { return Poll::Pending };
+
+        // Hook with DB write access level is not allowed to run due to already running hook with DB
+        // write access level or active DB write according to passed argument
+        if hook.db_access_level().is_read_write() &&
+            (self.running_hook_with_db_write.is_some() || db_write_active)
+        {
+            return Poll::Pending
+        }
+
+        if let Poll::Ready((event, action)) = hook.poll(cx, args) {
+            debug!(
+                target: "consensus::engine::hooks",
+                hook = hook.name(),
+                ?action,
+                ?event,
+                "Polled next hook"
+            );
+
+            if event.is_started() && hook.db_access_level().is_read_write() {
+                self.running_hook_with_db_write = Some(hook);
+            } else {
+                self.hooks.push_back(hook);
+            }
+
+            if let Some(action) = action {
+                return Poll::Ready(Ok(action))
+            }
+        } else {
+            self.hooks.push_back(hook);
+        }
+
+        Poll::Pending
+    }
+
+    /// Returns `true` if there's a hook with DB write access running.
+    pub(crate) fn is_hook_with_db_write_running(&self) -> bool {
+        self.running_hook_with_db_write.is_some()
+    }
+}
diff --git a/crates/consensus/beacon/src/engine/hooks/mod.rs b/crates/consensus/beacon/src/engine/hooks/mod.rs
new file mode 100644
index 000000000000..8f0877aa135f
--- /dev/null
+++ b/crates/consensus/beacon/src/engine/hooks/mod.rs
@@ -0,0 +1,128 @@
+use reth_interfaces::sync::SyncState;
+use reth_primitives::BlockNumber;
+use std::{
+    fmt::Debug,
+    task::{Context, Poll},
+};
+
+mod controller;
+pub(crate) use controller::EngineHooksController;
+
+mod prune;
+pub use prune::PruneHook;
+
+/// Collection of [engine hooks][`EngineHook`].
+#[derive(Default)]
+pub struct EngineHooks {
+    inner: Vec<Box<dyn EngineHook>>,
+}
+
+impl EngineHooks {
+    /// Creates a new empty collection of [engine hooks][`EngineHook`].
+    pub fn new() -> Self {
+        Self { inner: Vec::new() }
+    }
+
+    /// Adds a new [engine hook][`EngineHook`] to the collection.
+    pub fn add<H: EngineHook>(&mut self, hook: H) {
+        self.inner.push(Box::new(hook))
+    }
+}
+
+/// Hook that will be run during the main loop of
+/// [consensus engine][`crate::engine::BeaconConsensusEngine`].
+pub trait EngineHook: Send + Sync + 'static {
+    /// Returns a human-readable name for the hook.
+    fn name(&self) -> &'static str;
+
+    /// Advances the hook execution, emitting an [event][`EngineHookEvent`] and an optional
+    /// [action][`EngineHookAction`].
+    fn poll(
+        &mut self,
+        cx: &mut Context<'_>,
+        ctx: EngineContext,
+    ) -> Poll<(EngineHookEvent, Option<EngineHookAction>)>;
+
+    /// Returns [db access level][`EngineHookDBAccessLevel`] the hook needs.
+    fn db_access_level(&self) -> EngineHookDBAccessLevel;
+}
+
+/// Engine context passed to the [hook polling function][`EngineHook::poll`].
+#[derive(Copy, Clone, Debug)]
+pub struct EngineContext {
+    /// Tip block number.
+    pub tip_block_number: BlockNumber,
+}
+
+/// An event emitted when [hook][`EngineHook`] is polled.
+#[derive(Debug)]
+pub enum EngineHookEvent {
+    /// Hook is not ready.
+    ///
+    /// If this is returned, the hook is idle.
+    NotReady,
+    /// Hook started.
+    ///
+    /// If this is returned, the hook is running.
+    Started,
+    /// Hook finished.
+    ///
+    /// If this is returned, the hook is idle.
+    Finished(Result<(), EngineHookError>),
+}
+
+impl EngineHookEvent {
+    /// Returns `true` if the event is [`EngineHookEvent::Started`].
+    pub fn is_started(&self) -> bool {
+        matches!(self, Self::Started)
+    }
+
+    /// Returns `true` if the event is [`EngineHookEvent::Finished`].
+    pub fn is_finished(&self) -> bool {
+        matches!(self, Self::Finished(_))
+    }
+}
+
+/// An action that the caller of [hook][`EngineHook`] should act upon.
+#[derive(Debug, Copy, Clone)]
+pub enum EngineHookAction {
+    /// Notify about a [SyncState] update.
+    UpdateSyncState(SyncState),
+    /// Read the last relevant canonical hashes from the database and update the block indices of
+    /// the blockchain tree.
+    RestoreCanonicalHashes,
+}
+
+/// An error returned by [hook][`EngineHook`].
+#[derive(Debug, thiserror::Error)]
+pub enum EngineHookError {
+    /// Hook channel closed.
+    #[error("Hook channel closed")]
+    ChannelClosed,
+    /// Common error. Wrapper around [reth_interfaces::Error].
+    #[error(transparent)]
+    Common(#[from] reth_interfaces::Error),
+    /// An internal error occurred.
+    #[error("Internal hook error occurred.")]
+    Internal(#[from] Box<dyn std::error::Error + Send + Sync>),
+}
+
+/// Level of database access the hook needs for execution.
+pub enum EngineHookDBAccessLevel {
+    /// Read-only database access.
+    ReadOnly,
+    /// Read-write database access.
+ ReadWrite, +} + +impl EngineHookDBAccessLevel { + /// Returns `true` if the hook needs read-only access to the database. + pub fn is_read_only(&self) -> bool { + matches!(self, Self::ReadOnly) + } + + /// Returns `true` if the hook needs read-write access to the database. + pub fn is_read_write(&self) -> bool { + matches!(self, Self::ReadWrite) + } +} diff --git a/crates/consensus/beacon/src/engine/prune.rs b/crates/consensus/beacon/src/engine/hooks/prune.rs similarity index 50% rename from crates/consensus/beacon/src/engine/prune.rs rename to crates/consensus/beacon/src/engine/hooks/prune.rs index 4b2b4852dccf..1650f9f52158 100644 --- a/crates/consensus/beacon/src/engine/prune.rs +++ b/crates/consensus/beacon/src/engine/hooks/prune.rs @@ -1,9 +1,17 @@ -//! Prune management for the engine implementation. +//! Prune hook for the engine implementation. +use crate::{ + engine::hooks::{ + EngineContext, EngineHook, EngineHookAction, EngineHookError, EngineHookEvent, + }, + hooks::EngineHookDBAccessLevel, +}; use futures::FutureExt; +use metrics::Counter; use reth_db::database::Database; +use reth_interfaces::sync::SyncState; use reth_primitives::BlockNumber; -use reth_prune::{Pruner, PrunerResult, PrunerWithResult}; +use reth_prune::{Pruner, PrunerError, PrunerWithResult}; use reth_tasks::TaskSpawner; use std::task::{ready, Context, Poll}; use tokio::sync::oneshot; @@ -11,45 +19,67 @@ use tokio::sync::oneshot; /// Manages pruning under the control of the engine. /// /// This type controls the [Pruner]. -pub(crate) struct EnginePruneController { +pub struct PruneHook { /// The current state of the pruner. pruner_state: PrunerState, /// The type that can spawn the pruner task. pruner_task_spawner: Box, + metrics: Metrics, } -impl EnginePruneController { +impl PruneHook { /// Create a new instance - pub(crate) fn new(pruner: Pruner, pruner_task_spawner: Box) -> Self { - Self { pruner_state: PrunerState::Idle(Some(pruner)), pruner_task_spawner } - } - - /// Returns `true` if the pruner is idle. - pub(crate) fn is_pruner_idle(&self) -> bool { - self.pruner_state.is_idle() + pub fn new(pruner: Pruner, pruner_task_spawner: Box) -> Self { + Self { + pruner_state: PrunerState::Idle(Some(pruner)), + pruner_task_spawner, + metrics: Metrics::default(), + } } /// Advances the pruner state. /// /// This checks for the result in the channel, or returns pending if the pruner is idle. 
-    fn poll_pruner(&mut self, cx: &mut Context<'_>) -> Poll<EnginePruneEvent> {
-        let res = match self.pruner_state {
+    fn poll_pruner(
+        &mut self,
+        cx: &mut Context<'_>,
+    ) -> Poll<(EngineHookEvent, Option<EngineHookAction>)> {
+        let result = match self.pruner_state {
             PrunerState::Idle(_) => return Poll::Pending,
             PrunerState::Running(ref mut fut) => {
                 ready!(fut.poll_unpin(cx))
             }
         };
-        let ev = match res {
+
+        let event = match result {
             Ok((pruner, result)) => {
                 self.pruner_state = PrunerState::Idle(Some(pruner));
-                EnginePruneEvent::Finished { result }
+
+                match result {
+                    Ok(_) => EngineHookEvent::Finished(Ok(())),
+                    Err(err) => EngineHookEvent::Finished(Err(match err {
+                        PrunerError::PrunePart(_) | PrunerError::InconsistentData(_) => {
+                            EngineHookError::Internal(Box::new(err))
+                        }
+                        PrunerError::Interface(err) => err.into(),
+                        PrunerError::Database(err) => reth_interfaces::Error::Database(err).into(),
+                        PrunerError::Provider(err) => reth_interfaces::Error::Provider(err).into(),
+                    })),
+                }
             }
             Err(_) => {
                 // failed to receive the pruner
-                EnginePruneEvent::TaskDropped
+                EngineHookEvent::Finished(Err(EngineHookError::ChannelClosed))
             }
         };
-        Poll::Ready(ev)
+
+        let action = if matches!(event, EngineHookEvent::Finished(Ok(_))) {
+            Some(EngineHookAction::RestoreCanonicalHashes)
+        } else {
+            None
+        };
+
+        Poll::Ready((event, action))
     }
 
     /// This will try to spawn the pruner if it is idle:
@@ -59,7 +89,10 @@ impl<DB: Database + 'static> EnginePruneController<DB> {
     /// 2b. If pruning is not needed, set pruner state back to [PrunerState::Idle].
     ///
     /// If pruner is already running, do nothing.
-    fn try_spawn_pruner(&mut self, tip_block_number: BlockNumber) -> Option<EnginePruneEvent> {
+    fn try_spawn_pruner(
+        &mut self,
+        tip_block_number: BlockNumber,
+    ) -> Option<(EngineHookEvent, Option<EngineHookAction>)> {
         match &mut self.pruner_state {
             PrunerState::Idle(pruner) => {
                 let mut pruner = pruner.take()?;
@@ -74,53 +107,51 @@ impl<DB: Database + 'static> EnginePruneController<DB> {
                         let _ = tx.send((pruner, result));
                     }),
                 );
+                self.metrics.runs.increment(1);
                 self.pruner_state = PrunerState::Running(rx);
-                Some(EnginePruneEvent::Started(tip_block_number))
+                Some((
+                    EngineHookEvent::Started,
+                    // Engine can't process any FCU/payload messages from CL while we're
+                    // pruning, as pruner needs an exclusive write access to the database. To
+                    // prevent CL from sending us unneeded updates, we need to respond `true`
+                    // on `eth_syncing` request.
+                    Some(EngineHookAction::UpdateSyncState(SyncState::Syncing)),
+                ))
             } else {
                 self.pruner_state = PrunerState::Idle(Some(pruner));
-                Some(EnginePruneEvent::NotReady)
+                Some((EngineHookEvent::NotReady, None))
             }
         }
         PrunerState::Running(_) => None,
     }
 }
+}
+
+impl<DB: Database + 'static> EngineHook for PruneHook<DB> {
+    fn name(&self) -> &'static str {
+        "Prune"
+    }
 
-    /// Advances the prune process with the tip block number.
-    pub(crate) fn poll(
+    fn poll(
         &mut self,
         cx: &mut Context<'_>,
-        tip_block_number: BlockNumber,
-    ) -> Poll<EnginePruneEvent> {
+        ctx: EngineContext,
+    ) -> Poll<(EngineHookEvent, Option<EngineHookAction>)> {
         // Try to spawn a pruner
-        match self.try_spawn_pruner(tip_block_number) {
-            Some(EnginePruneEvent::NotReady) => return Poll::Pending,
-            Some(event) => return Poll::Ready(event),
+        match self.try_spawn_pruner(ctx.tip_block_number) {
+            Some((EngineHookEvent::NotReady, _)) => return Poll::Pending,
+            Some((event, action)) => return Poll::Ready((event, action)),
            None => (),
        }
 
        // Poll pruner and check its status
        self.poll_pruner(cx)
    }
-/// The event type emitted by the [EnginePruneController].
-#[derive(Debug)] -pub(crate) enum EnginePruneEvent { - /// Pruner is not ready - NotReady, - /// Pruner started with tip block number - Started(BlockNumber), - /// Pruner finished - /// - /// If this is returned, the pruner is idle. - Finished { - /// Final result of the pruner run. - result: PrunerResult, - }, - /// Pruner task was dropped after it was started, unable to receive it because channel - /// closed. This would indicate a panicked pruner task - TaskDropped, + fn db_access_level(&self) -> EngineHookDBAccessLevel { + EngineHookDBAccessLevel::ReadWrite + } } /// The possible pruner states within the sync controller. @@ -139,9 +170,9 @@ enum PrunerState { Running(oneshot::Receiver>), } -impl PrunerState { - /// Returns `true` if the state matches idle. - fn is_idle(&self) -> bool { - matches!(self, PrunerState::Idle(_)) - } +#[derive(reth_metrics::Metrics)] +#[metrics(scope = "consensus.engine.prune")] +struct Metrics { + /// The number of times the pruner was run. + runs: Counter, } diff --git a/crates/consensus/beacon/src/engine/metrics.rs b/crates/consensus/beacon/src/engine/metrics.rs index 6daae69eaa73..67bae71be8b7 100644 --- a/crates/consensus/beacon/src/engine/metrics.rs +++ b/crates/consensus/beacon/src/engine/metrics.rs @@ -13,8 +13,6 @@ pub(crate) struct EngineMetrics { pub(crate) forkchoice_updated_messages: Counter, /// The total count of new payload messages received. pub(crate) new_payload_messages: Counter, - /// The number of times the pruner was run. - pub(crate) pruner_runs: Counter, /// Latency for making canonical already canonical block pub(crate) make_canonical_already_canonical_latency: Histogram, /// Latency for making canonical committed block diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 71a3577f26f7..fac3e350763d 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -3,8 +3,8 @@ use crate::{ forkchoice::{ForkchoiceStateHash, ForkchoiceStateTracker}, message::OnForkChoiceUpdated, metrics::EngineMetrics, - prune::{EnginePruneController, EnginePruneEvent}, }, + hooks::{EngineContext, EngineHookAction, EngineHooksController}, sync::{EngineSyncController, EngineSyncEvent}, }; use futures::{Future, StreamExt}; @@ -29,7 +29,6 @@ use reth_provider::{ BlockIdReader, BlockReader, BlockSource, CanonChainTracker, ChainSpecProvider, ProviderError, StageCheckpointReader, }; -use reth_prune::Pruner; use reth_rpc_types::engine::{ CancunPayloadFields, ExecutionPayload, PayloadAttributes, PayloadError, PayloadStatus, PayloadStatusEnum, PayloadValidationError, @@ -69,11 +68,17 @@ mod handle; pub use handle::BeaconConsensusEngineHandle; mod forkchoice; +use crate::hooks::EngineHooks; pub use forkchoice::ForkchoiceStatus; + mod metrics; -pub(crate) mod prune; + pub(crate) mod sync; +/// Hooks for running during the main loop of +/// [consensus engine][`crate::engine::BeaconConsensusEngine`]. +pub mod hooks; + #[cfg(any(test, feature = "test-utils"))] pub mod test_utils; @@ -197,8 +202,7 @@ where /// blocks using the pipeline. Otherwise, the engine, sync controller, and blockchain tree will /// be used to download and execute the missing blocks. pipeline_run_threshold: u64, - /// Controls pruning triggered by engine updates. 
- prune: Option>, + hooks: EngineHooksController, } impl BeaconConsensusEngine @@ -226,7 +230,7 @@ where payload_builder: PayloadBuilderHandle, target: Option, pipeline_run_threshold: u64, - pruner: Option>, + hooks: EngineHooks, ) -> Result<(Self, BeaconConsensusEngineHandle), Error> { let (to_engine, rx) = mpsc::unbounded_channel(); Self::with_channel( @@ -242,7 +246,7 @@ where pipeline_run_threshold, to_engine, rx, - pruner, + hooks, ) } @@ -272,7 +276,7 @@ where pipeline_run_threshold: u64, to_engine: UnboundedSender, rx: UnboundedReceiver, - pruner: Option>, + hooks: EngineHooks, ) -> Result<(Self, BeaconConsensusEngineHandle), Error> { let handle = BeaconConsensusEngineHandle { to_engine }; let sync = EngineSyncController::new( @@ -283,7 +287,6 @@ where max_block, blockchain.chain_spec(), ); - let prune = pruner.map(|pruner| EnginePruneController::new(pruner, task_spawner)); let mut this = Self { sync, blockchain, @@ -296,7 +299,7 @@ where invalid_headers: InvalidHeaderCache::new(MAX_INVALID_HEADERS), metrics: EngineMetrics::default(), pipeline_run_threshold, - prune, + hooks: EngineHooksController::new(hooks), }; let maybe_pipeline_target = match target { @@ -638,12 +641,12 @@ where return Ok(OnForkChoiceUpdated::syncing()) } - if self.is_prune_active() { - // We can only process new forkchoice updates if the pruner is idle, since it requires - // exclusive access to the database + if self.hooks.is_hook_with_db_write_running() { + // We can only process new forkchoice updates if no hook with db write is running, + // since it requires exclusive access to the database warn!( target: "consensus::engine", - "Pruning is in progress, skipping forkchoice update. \ + "Hook is in progress, skipping forkchoice update. \ This may affect the performance of your node as a validator." ); return Ok(OnForkChoiceUpdated::syncing()) @@ -1083,13 +1086,13 @@ where return Ok(status) } - let res = if self.sync.is_pipeline_idle() && self.is_prune_idle() { - // we can only insert new payloads if the pipeline and the pruner are _not_ running, - // because they hold exclusive access to the database + let res = if self.sync.is_pipeline_idle() && !self.hooks.is_hook_with_db_write_running() { + // we can only insert new payloads if the pipeline and any hook with db write + // are _not_ running, because they hold exclusive access to the database self.try_insert_new_payload(block) } else { - if self.is_prune_active() { - debug!(target: "consensus::engine", "Pruning is in progress, buffering new payload."); + if self.hooks.is_hook_with_db_write_running() { + debug!(target: "consensus::engine", "Hook is in progress, buffering new payload."); } self.try_buffer_payload(block) }; @@ -1226,12 +1229,12 @@ where Ok(()) } - /// When the pipeline or the pruner is active, the tree is unable to commit any additional - /// blocks since the pipeline holds exclusive access to the database. + /// When the pipeline or a hook with DB write access is active, the tree is unable to commit + /// any additional blocks since the pipeline holds exclusive access to the database. /// /// In this scenario we buffer the payload in the tree if the payload is valid, once the - /// pipeline or pruner is finished, the tree is then able to also use the buffered payloads to - /// commit to a (newer) canonical chain. + /// pipeline or a hook with DB write access is finished, the tree is then able to also use the + /// buffered payloads to commit to a (newer) canonical chain. 
/// /// This will return `SYNCING` if the block was buffered successfully, and an error if an error /// occurred while buffering the block. @@ -1246,7 +1249,7 @@ where /// Attempts to insert a new payload into the tree. /// - /// Caution: This expects that the pipeline and the pruner are idle. + /// Caution: This expects that the pipeline and a hook with DB write access are idle. #[instrument(level = "trace", skip_all, target = "consensus::engine", ret)] fn try_insert_new_payload( &mut self, @@ -1339,14 +1342,6 @@ where Ok(synced_to_finalized) } - /// Attempt to restore the tree. - /// - /// This is invoked after a pruner run to update the tree with the most recent canonical - /// hashes. - fn update_tree_on_finished_pruner(&mut self) -> Result<(), Error> { - self.blockchain.restore_canonical_hashes() - } - /// Invoked if we successfully downloaded a new block from the network. /// /// This will attempt to insert the block into the tree. @@ -1686,72 +1681,20 @@ where None } - /// Event handler for events emitted by the [EnginePruneController]. - /// - /// This returns a result to indicate whether the engine future should resolve (fatal error). - fn on_prune_event( - &mut self, - event: EnginePruneEvent, - ) -> Option> { - match event { - EnginePruneEvent::NotReady => {} - EnginePruneEvent::Started(tip_block_number) => { - trace!(target: "consensus::engine", %tip_block_number, "Pruner started"); - self.metrics.pruner_runs.increment(1); - // Engine can't process any FCU/payload messages from CL while we're pruning, as - // pruner needs an exclusive write access to the database. To prevent CL from - // sending us unneeded updates, we need to respond `true` on `eth_syncing` request. - self.sync_state_updater.update_sync_state(SyncState::Syncing); - } - EnginePruneEvent::TaskDropped => { - error!(target: "consensus::engine", "Failed to receive spawned pruner"); - return Some(Err(BeaconConsensusEngineError::PrunerChannelClosed)) + fn on_hook_action(&self, action: EngineHookAction) -> Result<(), BeaconConsensusEngineError> { + match action { + EngineHookAction::UpdateSyncState(state) => { + self.sync_state_updater.update_sync_state(state) } - EnginePruneEvent::Finished { result } => { - trace!(target: "consensus::engine", ?result, "Pruner finished"); - match result { - Ok(_) => { - // Update the state and hashes of the blockchain tree if possible. - match self.update_tree_on_finished_pruner() { - Ok(()) => {} - Err(error) => { - error!(target: "consensus::engine", ?error, "Error restoring blockchain tree state"); - return Some(Err(error.into())) - } - }; - } - // Any pruner error at this point is fatal. - Err(error) => return Some(Err(error.into())), - }; + EngineHookAction::RestoreCanonicalHashes => { + if let Err(error) = self.blockchain.restore_canonical_hashes() { + error!(target: "consensus::engine", ?error, "Error restoring blockchain tree state"); + return Err(error.into()) + } } - }; - - None - } - - /// Returns `true` if the prune controller's pruner is idle. - fn is_prune_idle(&self) -> bool { - self.prune.as_ref().map(|prune| prune.is_pruner_idle()).unwrap_or(true) - } - - /// Returns `true` if the prune controller's pruner is active. - fn is_prune_active(&self) -> bool { - !self.is_prune_idle() - } - - /// Polls the prune controller, if it exists, and processes the event [`EnginePruneEvent`] - /// emitted by it. 
- /// - /// Returns [`Option::Some`] if prune controller emitted an event which resulted in the error - /// (see [`Self::on_prune_event`] for error handling) - fn poll_prune( - &mut self, - cx: &mut Context<'_>, - ) -> Option> { - match self.prune.as_mut()?.poll(cx, self.blockchain.canonical_tip().number) { - Poll::Ready(prune_event) => self.on_prune_event(prune_event), - Poll::Pending => None, } + + Ok(()) } } @@ -1783,11 +1726,14 @@ where // Process all incoming messages from the CL, these can affect the state of the // SyncController, hence they are polled first, and they're also time sensitive. loop { - // Poll prune controller first if it's active, as we will not be able to process any - // engine messages until it's finished. - if this.is_prune_active() { - if let Some(res) = this.poll_prune(cx) { - return Poll::Ready(res) + // Poll a running hook with db write access first, as we will not be able to process + // any engine messages until it's finished. + if let Poll::Ready(result) = this.hooks.poll_running_hook_with_db_write( + cx, + EngineContext { tip_block_number: this.blockchain.canonical_tip().number }, + ) { + if let Err(err) = this.on_hook_action(result?) { + return Poll::Ready(Err(err)) } } @@ -1847,16 +1793,18 @@ where // we're pending if both engine messages and sync events are pending (fully drained) let is_pending = engine_messages_pending && sync_pending; - // Poll prune controller if all conditions are met: - // 1. Pipeline is idle - // 2. No engine and sync messages are pending - // 3. Latest FCU status is not INVALID - if this.sync.is_pipeline_idle() && - is_pending && - !this.forkchoice_state_tracker.is_latest_invalid() - { - if let Some(res) = this.poll_prune(cx) { - return Poll::Ready(res) + // Poll next hook if all conditions are met: + // 1. No engine and sync messages are pending + // 2. Latest FCU status is not INVALID + if is_pending && !this.forkchoice_state_tracker.is_latest_invalid() { + if let Poll::Ready(result) = this.hooks.poll_next_hook( + cx, + EngineContext { tip_block_number: this.blockchain.canonical_tip().number }, + this.sync.is_pipeline_active(), + ) { + if let Err(err) = this.on_hook_action(result?) 
{ + return Poll::Ready(Err(err)) + } } } diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index cf2a47c3e9df..d29d4e42af94 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -1,7 +1,7 @@ use crate::{ - BeaconConsensus, BeaconConsensusEngine, BeaconConsensusEngineError, - BeaconConsensusEngineHandle, BeaconForkChoiceUpdateError, BeaconOnNewPayloadError, - MIN_BLOCKS_FOR_PIPELINE_RUN, + engine::hooks::PruneHook, hooks::EngineHooks, BeaconConsensus, BeaconConsensusEngine, + BeaconConsensusEngineError, BeaconConsensusEngineHandle, BeaconForkChoiceUpdateError, + BeaconOnNewPayloadError, MIN_BLOCKS_FOR_PIPELINE_RUN, }; use reth_blockchain_tree::{ config::BlockchainTreeConfig, externals::TreeExternals, BlockchainTree, ShareableBlockchainTree, @@ -518,6 +518,9 @@ where PruneBatchSizes::default(), ); + let mut hooks = EngineHooks::new(); + hooks.add(PruneHook::new(pruner, Box::::default())); + let (mut engine, handle) = BeaconConsensusEngine::new( client, pipeline, @@ -529,7 +532,7 @@ where payload_builder, None, self.base_config.pipeline_run_threshold.unwrap_or(MIN_BLOCKS_FOR_PIPELINE_RUN), - Some(pruner), + hooks, ) .expect("failed to create consensus engine"); diff --git a/crates/interfaces/src/sync.rs b/crates/interfaces/src/sync.rs index 622df29a3ca4..78cad443d40d 100644 --- a/crates/interfaces/src/sync.rs +++ b/crates/interfaces/src/sync.rs @@ -21,7 +21,7 @@ pub trait SyncStateProvider: Send + Sync { /// which point the node is considered fully synced. #[auto_impl::auto_impl(&, Arc, Box)] pub trait NetworkSyncUpdater: std::fmt::Debug + Send + Sync + 'static { - /// Notifies about an [SyncState] update. + /// Notifies about a [SyncState] update. fn update_sync_state(&self, state: SyncState); /// Updates the status of the p2p node @@ -29,7 +29,7 @@ pub trait NetworkSyncUpdater: std::fmt::Debug + Send + Sync + 'static { } /// The state the network is currently in when it comes to synchronization. -#[derive(Clone, Eq, PartialEq, Debug)] +#[derive(Copy, Clone, Eq, PartialEq, Debug)] pub enum SyncState { /// Node sync is complete. /// From 733ee193952445edb5b36156ad8a50480726b898 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 18 Sep 2023 21:20:09 +0100 Subject: [PATCH 703/722] refactor(bin): CL events conditions (#4643) --- bin/reth/src/node/cl_events.rs | 50 ++++++++++++++++------------------ 1 file changed, 24 insertions(+), 26 deletions(-) diff --git a/bin/reth/src/node/cl_events.rs b/bin/reth/src/node/cl_events.rs index 50ccbbf464d0..1cf43682aed0 100644 --- a/bin/reth/src/node/cl_events.rs +++ b/bin/reth/src/node/cl_events.rs @@ -42,38 +42,36 @@ impl Stream for ConsensusLayerHealthEvents { loop { ready!(this.interval.poll_tick(cx)); - return match ( - this.canon_chain.last_exchanged_transition_configuration_timestamp(), - this.canon_chain.last_received_update_timestamp(), - ) { - // Short circuit if we recently had an FCU. - (_, Some(fork_choice)) - if fork_choice.elapsed() <= NO_FORKCHOICE_UPDATE_RECEIVED_PERIOD => - { + if let Some(fork_choice) = this.canon_chain.last_received_update_timestamp() { + if fork_choice.elapsed() <= NO_FORKCHOICE_UPDATE_RECEIVED_PERIOD { + // We had an FCU, and it's recent. CL is healthy. continue + } else { + // We had an FCU, but it's too old. 
+ return Poll::Ready(Some( + ConsensusLayerHealthEvent::HaveNotReceivedUpdatesForAWhile( + fork_choice.elapsed(), + ), + )) } - // Otherwise, continue with health checks based on Transition Configuration exchange - // and Fork Choice update. - (None, _) => Poll::Ready(Some(ConsensusLayerHealthEvent::NeverSeen)), - (Some(transition_config), _) - if transition_config.elapsed() > NO_TRANSITION_CONFIG_EXCHANGED_PERIOD => - { - Poll::Ready(Some(ConsensusLayerHealthEvent::HasNotBeenSeenForAWhile( + } + + if let Some(transition_config) = + this.canon_chain.last_exchanged_transition_configuration_timestamp() + { + if transition_config.elapsed() <= NO_TRANSITION_CONFIG_EXCHANGED_PERIOD { + // We never had an FCU, but had a transition config exchange, and it's recent. + return Poll::Ready(Some(ConsensusLayerHealthEvent::NeverReceivedUpdates)) + } else { + // We never had an FCU, but had a transition config exchange, but it's too old. + return Poll::Ready(Some(ConsensusLayerHealthEvent::HasNotBeenSeenForAWhile( transition_config.elapsed(), ))) } - (Some(_), None) => { - Poll::Ready(Some(ConsensusLayerHealthEvent::NeverReceivedUpdates)) - } - (Some(_), Some(fork_choice)) - if fork_choice.elapsed() > NO_FORKCHOICE_UPDATE_RECEIVED_PERIOD => - { - Poll::Ready(Some(ConsensusLayerHealthEvent::HaveNotReceivedUpdatesForAWhile( - fork_choice.elapsed(), - ))) - } - _ => continue, } + + // We never had both FCU and transition config exchange. + return Poll::Ready(Some(ConsensusLayerHealthEvent::NeverSeen)) } } } From 55339d7025bf949d7831b46df8599e43d9d186a2 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 18 Sep 2023 22:49:39 +0200 Subject: [PATCH 704/722] feat: add max logs per response cli arg (#4644) --- bin/reth/src/args/rpc_server_args.rs | 9 ++++++--- crates/rpc/rpc-builder/src/auth.rs | 2 +- crates/rpc/rpc-builder/src/constants.rs | 6 ++++++ crates/rpc/rpc-builder/src/eth.rs | 7 +------ 4 files changed, 14 insertions(+), 10 deletions(-) diff --git a/bin/reth/src/args/rpc_server_args.rs b/bin/reth/src/args/rpc_server_args.rs index 78260e10f97d..433708ef442a 100644 --- a/bin/reth/src/args/rpc_server_args.rs +++ b/bin/reth/src/args/rpc_server_args.rs @@ -51,8 +51,6 @@ pub(crate) const RPC_DEFAULT_MAX_REQUEST_SIZE_MB: u32 = 15; pub(crate) const RPC_DEFAULT_MAX_RESPONSE_SIZE_MB: u32 = 115; /// Default number of incoming connections. pub(crate) const RPC_DEFAULT_MAX_CONNECTIONS: u32 = 100; -/// Default number of incoming connections. -pub(crate) const RPC_DEFAULT_MAX_TRACING_REQUESTS: u32 = 25; /// Parameters for configuring the rpc more granularity via CLI #[derive(Debug, Args)] @@ -135,9 +133,13 @@ pub struct RpcServerArgs { pub rpc_max_connections: u32, /// Maximum number of concurrent tracing requests. - #[arg(long, value_name = "COUNT", default_value_t = RPC_DEFAULT_MAX_TRACING_REQUESTS)] + #[arg(long, value_name = "COUNT", default_value_t = constants::DEFAULT_MAX_TRACING_REQUESTS)] pub rpc_max_tracing_requests: u32, + /// Maximum number of logs that can be returned in a single response. + #[arg(long, value_name = "COUNT", default_value_t = constants::DEFAULT_MAX_LOGS_PER_RESPONSE)] + pub rpc_max_logs_per_response: usize, + /// Maximum gas limit for `eth_call` and call tracing RPC methods. 
#[arg( long, @@ -323,6 +325,7 @@ impl RethRpcConfig for RpcServerArgs { fn eth_config(&self) -> EthConfig { EthConfig::default() .max_tracing_requests(self.rpc_max_tracing_requests) + .max_logs_per_response(self.rpc_max_logs_per_response) .rpc_gas_cap(self.rpc_gas_cap) .gpo_config(self.gas_price_oracle_config()) } diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index d0450ab4b42f..cfe545918896 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -1,7 +1,7 @@ use crate::{ constants, + constants::DEFAULT_MAX_LOGS_PER_RESPONSE, error::{RpcError, ServerKind}, - eth::DEFAULT_MAX_LOGS_PER_RESPONSE, EthConfig, }; use hyper::header::AUTHORIZATION; diff --git a/crates/rpc/rpc-builder/src/constants.rs b/crates/rpc/rpc-builder/src/constants.rs index a1b2bc36a82d..cbc051730c8b 100644 --- a/crates/rpc/rpc-builder/src/constants.rs +++ b/crates/rpc/rpc-builder/src/constants.rs @@ -7,6 +7,12 @@ pub const DEFAULT_WS_RPC_PORT: u16 = 8546; /// The default port for the auth server. pub const DEFAULT_AUTH_PORT: u16 = 8551; +/// The default maximum of logs in a single response. +pub const DEFAULT_MAX_LOGS_PER_RESPONSE: usize = 20_000; + +/// The default maximum number of concurrently executed tracing calls +pub const DEFAULT_MAX_TRACING_REQUESTS: u32 = 25; + /// The default IPC endpoint #[cfg(windows)] pub const DEFAULT_IPC_ENDPOINT: &str = r"\\.\pipe\reth.ipc"; diff --git a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs index b372d3be77ea..fc519fa682c0 100644 --- a/crates/rpc/rpc-builder/src/eth.rs +++ b/crates/rpc/rpc-builder/src/eth.rs @@ -1,3 +1,4 @@ +use crate::constants::{DEFAULT_MAX_LOGS_PER_RESPONSE, DEFAULT_MAX_TRACING_REQUESTS}; use reth_rpc::{ eth::{ cache::{EthStateCache, EthStateCacheConfig}, @@ -8,12 +9,6 @@ use reth_rpc::{ }; use serde::{Deserialize, Serialize}; -/// The default maximum of logs in a single response. 
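For reference, the net effect of this patch: both defaults now live in the public `constants` module of `rpc-builder`, and the new CLI argument is threaded into `EthConfig`. A hedged usage sketch, assuming clap derives the flag name `--rpc-max-logs-per-response` from the struct field above in its usual kebab-case fashion, and that both items are importable as shown:

    use reth_rpc_builder::{
        constants::{DEFAULT_MAX_LOGS_PER_RESPONSE, DEFAULT_MAX_TRACING_REQUESTS},
        EthConfig,
    };

    // Mirrors RethRpcConfig::eth_config above; the builder methods and the
    // default values (25 tracing requests, 20_000 logs) come from this patch.
    let eth_config = EthConfig::default()
        .max_tracing_requests(DEFAULT_MAX_TRACING_REQUESTS)
        .max_logs_per_response(DEFAULT_MAX_LOGS_PER_RESPONSE);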
-pub(crate) const DEFAULT_MAX_LOGS_PER_RESPONSE: usize = 20_000; - -/// The default maximum number of concurrently executed tracing calls -pub(crate) const DEFAULT_MAX_TRACING_REQUESTS: u32 = 25; - /// All handlers for the `eth` namespace #[derive(Debug, Clone)] pub struct EthHandlers { From 394a3a9a5ae799c52a2579da988a8f080559a5da Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Tue, 19 Sep 2023 00:32:47 +0200 Subject: [PATCH 705/722] feat(rpc): split `test_fee_history` in multiple tests (#4646) --- crates/rpc/rpc/src/eth/api/server.rs | 111 ++++++++++++++++++++------- 1 file changed, 82 insertions(+), 29 deletions(-) diff --git a/crates/rpc/rpc/src/eth/api/server.rs b/crates/rpc/rpc/src/eth/api/server.rs index 575a1b3a3160..a294cc2d1537 100644 --- a/crates/rpc/rpc/src/eth/api/server.rs +++ b/crates/rpc/rpc/src/eth/api/server.rs @@ -407,6 +407,7 @@ mod tests { use reth_rpc_api::EthApiServer; use reth_rpc_types::FeeHistory; use reth_transaction_pool::test_utils::{testing_pool, TestPool}; + use revm_primitives::B256; fn build_test_eth_api< P: BlockReaderIdExt @@ -432,36 +433,19 @@ mod tests { ) } - /// Invalid block range - #[tokio::test] - async fn test_fee_history_empty() { - let response = as EthApiServer>::fee_history( - &build_test_eth_api(NoopProvider::default()), - 1.into(), - BlockNumberOrTag::Latest, - None, - ) - .await; - assert!(response.is_err()); - let error_object = response.unwrap_err(); - assert_eq!(error_object.code(), INVALID_PARAMS_CODE); - } - - /// Handler for: `eth_test_fee_history` - // TODO: Split this into multiple tests, and add tests for percentiles. - #[tokio::test] - async fn test_fee_history() { + // Function to prepare the EthApi with mock data + fn prepare_eth_api( + newest_block: u64, + mut oldest_block: Option, + block_count: u64, + mock_provider: MockEthProvider, + ) -> (EthApi, Vec, Vec) { let mut rng = generators::rng(); - let block_count = 10; - let newest_block = 1337; - // Build mock data - let mut oldest_block = None; let mut gas_used_ratios = Vec::new(); let mut base_fees_per_gas = Vec::new(); let mut last_header = None; - let mock_provider = MockEthProvider::default(); for i in (0..block_count).rev() { let hash = H256::random(); @@ -531,7 +515,34 @@ mod tests { let eth_api = build_test_eth_api(mock_provider); - // Invalid block range (request is before genesis) + (eth_api, base_fees_per_gas, gas_used_ratios) + } + + /// Invalid block range + #[tokio::test] + async fn test_fee_history_empty() { + let response = as EthApiServer>::fee_history( + &build_test_eth_api(NoopProvider::default()), + 1.into(), + BlockNumberOrTag::Latest, + None, + ) + .await; + assert!(response.is_err()); + let error_object = response.unwrap_err(); + assert_eq!(error_object.code(), INVALID_PARAMS_CODE); + } + + #[tokio::test] + /// Invalid block range (request is before genesis) + async fn test_fee_history_invalid_block_range_before_genesis() { + let block_count = 10; + let newest_block = 1337; + let oldest_block = None; + + let (eth_api, _, _) = + prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); + let response = as EthApiServer>::fee_history( ð_api, (newest_block + 1).into(), @@ -539,11 +550,22 @@ mod tests { Some(vec![10.0]), ) .await; + assert!(response.is_err()); let error_object = response.unwrap_err(); assert_eq!(error_object.code(), INVALID_PARAMS_CODE); + } + + #[tokio::test] + /// Invalid block range (request is in in the future) + async fn 
test_fee_history_invalid_block_range_in_future() { + let block_count = 10; + let newest_block = 1337; + let oldest_block = None; + + let (eth_api, _, _) = + prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - // Invalid block range (request is in in the future) let response = as EthApiServer>::fee_history( ð_api, (1).into(), @@ -551,11 +573,22 @@ mod tests { Some(vec![10.0]), ) .await; + assert!(response.is_err()); let error_object = response.unwrap_err(); assert_eq!(error_object.code(), INVALID_PARAMS_CODE); + } + + #[tokio::test] + /// Requesting no block should result in a default response + async fn test_fee_history_no_block_requested() { + let block_count = 10; + let newest_block = 1337; + let oldest_block = None; + + let (eth_api, _, _) = + prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - // Requesting no block should result in a default response let response = as EthApiServer>::fee_history( ð_api, (0).into(), @@ -569,8 +602,18 @@ mod tests { FeeHistory::default(), "none: requesting no block should yield a default response" ); + } + + #[tokio::test] + /// Requesting a single block should return 1 block (+ base fee for the next block over) + async fn test_fee_history_single_block() { + let block_count = 10; + let newest_block = 1337; + let oldest_block = None; + + let (eth_api, base_fees_per_gas, gas_used_ratios) = + prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - // Requesting a single block should return 1 block (+ base fee for the next block over) let fee_history = eth_api.fee_history(1, (newest_block).into(), None).await.unwrap(); assert_eq!( &fee_history.base_fee_per_gas, @@ -596,8 +639,18 @@ mod tests { fee_history.reward.is_none(), "one: no percentiles were requested, so there should be no rewards result" ); + } + + #[tokio::test] + /// Requesting all blocks should be ok + async fn test_fee_history_all_blocks() { + let block_count = 10; + let newest_block = 1337; + let oldest_block = None; + + let (eth_api, base_fees_per_gas, gas_used_ratios) = + prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - // Requesting all blocks should be ok let fee_history = eth_api.fee_history(block_count, (newest_block).into(), None).await.unwrap(); From 11ee5d75308d4a7519fdecba4a0fa5f51c61cf0e Mon Sep 17 00:00:00 2001 From: omahs <73983677+omahs@users.noreply.github.com> Date: Tue, 19 Sep 2023 12:34:37 +0200 Subject: [PATCH 706/722] fix: typos (#4648) Co-authored-by: Alexey Shekhirin --- book/cli/node.md | 2 +- crates/blockchain-tree/src/block_indices.rs | 10 +++++----- crates/blockchain-tree/src/chain.rs | 2 +- crates/blockchain-tree/src/config.rs | 2 +- crates/blockchain-tree/src/shareable.rs | 2 +- crates/primitives/src/transaction/eip4844.rs | 2 +- crates/revm/src/eth_dao_fork.rs | 2 +- crates/revm/src/lib.rs | 2 +- crates/stages/src/stage.rs | 2 +- crates/transaction-pool/src/pool/mod.rs | 2 +- 10 files changed, 14 insertions(+), 14 deletions(-) diff --git a/book/cli/node.md b/book/cli/node.md index 07da18faed13..c97a9e34a8f4 100644 --- a/book/cli/node.md +++ b/book/cli/node.md @@ -153,7 +153,7 @@ RPC: [default: 100] --rpc-max-subscriptions-per-connection - Set the the maximum concurrent subscriptions per connection + Set the maximum concurrent subscriptions per connection [default: 1024] diff --git a/crates/blockchain-tree/src/block_indices.rs b/crates/blockchain-tree/src/block_indices.rs index b4c8e7548c4e..7097f974746f 100644 --- 
a/crates/blockchain-tree/src/block_indices.rs +++ b/crates/blockchain-tree/src/block_indices.rs @@ -169,7 +169,7 @@ impl BlockIndices { loop { let Some(old_block_value) = old_hash else { - // end of old_hashes canonical chain. New chain has more block then old chain. + // end of old_hashes canonical chain. New chain has more blocks than old chain. while let Some(new) = new_hash { // add new blocks to added list. added.push(new.into()); @@ -204,7 +204,7 @@ impl BlockIndices { old_hash = old_hashes.next(); } std::cmp::Ordering::Greater => { - // old chain has more past blocks that new chain + // old chain has more past blocks than new chain removed.push(old_block_value); old_hash = old_hashes.next() } @@ -221,7 +221,7 @@ impl BlockIndices { ) } - /// Remove chain from indices and return dependent chains that needs to be removed. + /// Remove chain from indices and return dependent chains that need to be removed. /// Does the cleaning of the tree and removing blocks from the chain. pub fn remove_chain(&mut self, chain: &Chain) -> BTreeSet { let mut lose_chains = BTreeSet::new(); @@ -316,7 +316,7 @@ impl BlockIndices { /// this is function that is going to remove N number of last canonical hashes. /// /// NOTE: This is not safe standalone, as it will not disconnect - /// blocks that depends on unwinded canonical chain. And should be + /// blocks that depend on unwinded canonical chain. And should be /// used when canonical chain is reinserted inside Tree. pub(crate) fn unwind_canonical_chain(&mut self, unwind_to: BlockNumber) { // this will remove all blocks numbers that are going to be replaced. @@ -383,7 +383,7 @@ impl BlockIndices { self.canonical_chain.tip() } - /// Canonical chain needed for execution of EVM. It should contains last 256 block hashes. + /// Canonical chain needed for execution of EVM. It should contain last 256 block hashes. #[inline] pub(crate) fn canonical_chain(&self) -> &CanonicalChain { &self.canonical_chain diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index 258976fde3e1..c426e5b6cbab 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -47,7 +47,7 @@ impl DerefMut for AppendableChain { } impl AppendableChain { - /// Crate a new appendable chain from a given chain. + /// Create a new appendable chain from a given chain. pub fn new(chain: Chain) -> Self { Self { chain } } diff --git a/crates/blockchain-tree/src/config.rs b/crates/blockchain-tree/src/config.rs index 3c56acc56574..019d123408f8 100644 --- a/crates/blockchain-tree/src/config.rs +++ b/crates/blockchain-tree/src/config.rs @@ -42,7 +42,7 @@ impl BlockchainTreeConfig { max_unconnected_blocks: usize, ) -> Self { if max_reorg_depth > max_blocks_in_chain { - panic!("Side chain size should be more then finalization window"); + panic!("Side chain size should be more than finalization window"); } Self { max_blocks_in_chain, diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index 6b9c6380b818..ba766b236ea0 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -32,7 +32,7 @@ pub struct ShareableBlockchainTree ShareableBlockchainTree { - /// Create a new sharable database. + /// Create a new shareable database. 
pub fn new(tree: BlockchainTree) -> Self { Self { tree: Arc::new(RwLock::new(tree)) } } diff --git a/crates/primitives/src/transaction/eip4844.rs b/crates/primitives/src/transaction/eip4844.rs index 1278e75e3b8c..27c5c1d36cfc 100644 --- a/crates/primitives/src/transaction/eip4844.rs +++ b/crates/primitives/src/transaction/eip4844.rs @@ -108,7 +108,7 @@ impl TxEip4844 { /// Verifies that the given blob data, commitments, and proofs are all valid for this /// transaction. /// - /// Takes as input the [KzgSettings], which should contain the the parameters derived from the + /// Takes as input the [KzgSettings], which should contain the parameters derived from the /// KZG trusted setup. /// /// This ensures that the blob transaction payload has the same number of blob data elements, diff --git a/crates/revm/src/eth_dao_fork.rs b/crates/revm/src/eth_dao_fork.rs index 785dae4b88ef..67e26cbd0c61 100644 --- a/crates/revm/src/eth_dao_fork.rs +++ b/crates/revm/src/eth_dao_fork.rs @@ -5,7 +5,7 @@ use reth_primitives::{hex_literal::hex, H160}; /// Dao hardfork beneficiary that received ether from accounts from DAO and DAO creator children. pub static DAO_HARDFORK_BENEFICIARY: H160 = H160(hex!("bf4ed7b27f1d666546e30d74d50d173d20bca754")); -/// DAO hardfork account that ether was taken and added to beneficiry +/// DAO hardfork account that ether was taken and added to beneficiary pub static DAO_HARDKFORK_ACCOUNTS: [H160; 116] = [ H160(hex!("d4fe7bc31cedb7bfb8a345f31e668033056b2728")), H160(hex!("b3fb0e5aba0e20e5c49d252dfd30e102b171a425")), diff --git a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs index 2db43b57ad2b..7dae2a87ef84 100644 --- a/crates/revm/src/lib.rs +++ b/crates/revm/src/lib.rs @@ -36,5 +36,5 @@ pub use reth_revm_primitives::*; /// Re-export everything pub use revm; -/// Etereum DAO hardfork state change data. +/// Ethereum DAO hardfork state change data. pub mod eth_dao_fork; diff --git a/crates/stages/src/stage.rs b/crates/stages/src/stage.rs index e369af06023c..95e397cbe8a1 100644 --- a/crates/stages/src/stage.rs +++ b/crates/stages/src/stage.rs @@ -71,7 +71,7 @@ impl ExecInput { } /// Return the next block range determined the number of transactions within it. - /// This function walks the the block indices until either the end of the range is reached or + /// This function walks the block indices until either the end of the range is reached or /// the number of transactions exceeds the threshold. 
pub fn next_block_range_with_transaction_threshold( &self, diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index aadca293a060..95b2c44df472 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -873,7 +873,7 @@ impl AddedTransaction { } } - /// Returns the the replaced transaction if there was one + /// Returns the replaced transaction if there was one pub(crate) fn replaced(&self) -> Option<&Arc>> { match self { AddedTransaction::Pending(tx) => tx.replaced.as_ref(), From d846199525f459d1d1d74e9ff50fcb853903d46e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alp=20G=C3=BCneysel?= Date: Tue, 19 Sep 2023 06:51:14 -0400 Subject: [PATCH 707/722] refactor: remove duplicate RPC namespaces from arguments (#4418) Co-authored-by: Matthias Seitz --- bin/reth/src/args/rpc_server_args.rs | 19 +++++++++++++ crates/rpc/rpc-builder/src/lib.rs | 41 +++++++++++++++++++++++++--- 2 files changed, 56 insertions(+), 4 deletions(-) diff --git a/bin/reth/src/args/rpc_server_args.rs b/bin/reth/src/args/rpc_server_args.rs index 433708ef442a..0efebcfd3ea6 100644 --- a/bin/reth/src/args/rpc_server_args.rs +++ b/bin/reth/src/args/rpc_server_args.rs @@ -550,6 +550,25 @@ mod tests { ); } + #[test] + fn test_unique_rpc_modules() { + let args = CommandParser::::parse_from([ + "reth", + "--http.api", + " eth, admin, debug, eth,admin", + "--http", + "--ws", + ]) + .args; + let config = args.transport_rpc_module_config(); + let expected = vec![RethRpcModule::Eth, RethRpcModule::Admin, RethRpcModule::Debug]; + assert_eq!(config.http().cloned().unwrap().into_selection(), expected); + assert_eq!( + config.ws().cloned().unwrap().into_selection(), + RpcModuleSelection::standard_modules() + ); + } + #[test] fn test_rpc_server_config() { let args = CommandParser::::parse_from([ diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 581c4cbcc563..8d4ad0cafb0f 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -504,7 +504,11 @@ impl RpcModuleSelection { Self::all_modules() } - /// Creates a new [RpcModuleSelection::Selection] from the given items. + /// Creates a new _unique_ [RpcModuleSelection::Selection] from the given items. + /// + /// # Note + /// + /// This will dedupe the selection and remove duplicates while preserving the order. 
/// /// # Example /// @@ -516,14 +520,30 @@ impl RpcModuleSelection { /// let config = RpcModuleSelection::try_from_selection(selection).unwrap(); /// assert_eq!(config, RpcModuleSelection::Selection(vec![RethRpcModule::Eth, RethRpcModule::Admin])); /// ``` + /// + /// Create a unique selection from the [RethRpcModule] string identifiers + /// + /// ``` + /// use reth_rpc_builder::{RethRpcModule, RpcModuleSelection}; + /// let selection = vec!["eth", "admin", "eth", "admin"]; + /// let config = RpcModuleSelection::try_from_selection(selection).unwrap(); + /// assert_eq!(config, RpcModuleSelection::Selection(vec![RethRpcModule::Eth, RethRpcModule::Admin])); + /// ``` pub fn try_from_selection(selection: I) -> Result where I: IntoIterator, T: TryInto, { - let selection = - selection.into_iter().map(TryInto::try_into).collect::, _>>()?; - Ok(RpcModuleSelection::Selection(selection)) + let mut unique = HashSet::new(); + + let mut s = Vec::new(); + for item in selection.into_iter() { + let item = item.try_into()?; + if unique.insert(item) { + s.push(item); + } + } + Ok(RpcModuleSelection::Selection(s)) } /// Returns true if no selection is configured @@ -1834,6 +1854,19 @@ mod tests { assert_eq!(selection, RpcModuleSelection::All); } + #[test] + fn parse_rpc_unique_module_selection() { + let selection = "eth,admin,eth,net".parse::().unwrap(); + assert_eq!( + selection, + RpcModuleSelection::Selection(vec![ + RethRpcModule::Eth, + RethRpcModule::Admin, + RethRpcModule::Net, + ]) + ); + } + #[test] fn identical_selection() { assert!(RpcModuleSelection::are_identical( From a96dbb476c8587b40b26c1198f48785e93a535aa Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 19 Sep 2023 17:16:04 +0200 Subject: [PATCH 708/722] perf: optimize engine poll loop (#4655) --- crates/consensus/beacon/src/engine/mod.rs | 57 ++++++++++------------- 1 file changed, 25 insertions(+), 32 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index fac3e350763d..66df6a083e5b 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -38,7 +38,7 @@ use reth_tasks::TaskSpawner; use std::{ pin::Pin, sync::Arc, - task::{Context, Poll}, + task::{ready, Context, Poll}, time::Instant, }; use tokio::sync::{ @@ -1723,9 +1723,8 @@ where fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); - // Process all incoming messages from the CL, these can affect the state of the - // SyncController, hence they are polled first, and they're also time sensitive. - loop { + // Control loop that advances the state + 'main: loop { // Poll a running hook with db write access first, as we will not be able to process // any engine messages until it's finished. if let Poll::Ready(result) = this.hooks.poll_running_hook_with_db_write( @@ -1737,12 +1736,11 @@ where } } - let mut engine_messages_pending = false; - let mut sync_pending = false; - - // handle next engine message - match this.engine_message_rx.poll_next_unpin(cx) { - Poll::Ready(Some(msg)) => match msg { + // Process all incoming messages from the CL, these can affect the state of the + // SyncController, hence they are polled first, and they're also time sensitive, hence + // they're always drained first. 
+ while let Poll::Ready(Some(msg)) = this.engine_message_rx.poll_next_unpin(cx) { + match msg { BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => { match this.on_forkchoice_updated(state, payload_attrs, tx) { OnForkchoiceUpdateOutcome::Processed => {} @@ -1767,13 +1765,6 @@ where BeaconEngineMessage::EventListener(tx) => { this.listeners.push_listener(tx); } - }, - Poll::Ready(None) => { - unreachable!("Engine holds the a sender to the message channel") - } - Poll::Pending => { - // no more CL messages to process - engine_messages_pending = true; } } @@ -1783,36 +1774,38 @@ where if let Some(res) = this.on_sync_event(sync_event) { return Poll::Ready(res) } + // this could have taken a while, so we start the next cycle to handle any new + // engine messages + continue 'main } Poll::Pending => { // no more sync events to process - sync_pending = true; } } - // we're pending if both engine messages and sync events are pending (fully drained) - let is_pending = engine_messages_pending && sync_pending; + // at this point, all engine messages and sync events are fully drained // Poll next hook if all conditions are met: - // 1. No engine and sync messages are pending + // 1. Engine and sync messages are fully drained (both pending) // 2. Latest FCU status is not INVALID - if is_pending && !this.forkchoice_state_tracker.is_latest_invalid() { - if let Poll::Ready(result) = this.hooks.poll_next_hook( + if !this.forkchoice_state_tracker.is_latest_invalid() { + let action = ready!(this.hooks.poll_next_hook( cx, EngineContext { tip_block_number: this.blockchain.canonical_tip().number }, this.sync.is_pipeline_active(), - ) { - if let Err(err) = this.on_hook_action(result?) { - return Poll::Ready(Err(err)) - } + ))?; + if let Err(err) = this.on_hook_action(action) { + return Poll::Ready(Err(err)) } - } - if is_pending { - // incoming engine messages and sync events are drained, so we can yield back - // control - return Poll::Pending + // ensure we're polling until pending while also checking for new engine messages + // before polling the next hook + continue 'main } + + // incoming engine messages and sync events are drained, so we can yield back + // control + return Poll::Pending } } } From a80b72041b63d52b6f201316dc90f2feab94c872 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 19 Sep 2023 11:16:48 -0400 Subject: [PATCH 709/722] feat: add pre-block EIP-4788 beacon root contract call (#4457) --- crates/consensus/auto-seal/src/lib.rs | 2 + crates/interfaces/src/executor.rs | 4 + crates/payload/basic/src/lib.rs | 79 +++- crates/primitives/src/constants/mod.rs | 9 +- crates/revm/Cargo.toml | 4 +- crates/revm/revm-primitives/src/config.rs | 4 +- crates/revm/revm-primitives/src/env.rs | 50 ++- crates/revm/src/processor.rs | 458 +++++++++++++++++++++- crates/revm/src/state_change.rs | 67 +++- 9 files changed, 660 insertions(+), 17 deletions(-) diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index ea6b87f5e49f..282cc34d4d4d 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -305,6 +305,8 @@ impl StorageInner { senders: Vec
<Address>
, ) -> Result<(BundleStateWithReceipts, u64), BlockExecutionError> { trace!(target: "consensus::auto", transactions=?&block.body, "executing transactions"); + // TODO: there isn't really a parent beacon block root here, so not sure whether or not to + // call the 4788 beacon contract let (receipts, gas_used) = executor.execute_transactions(block, U256::ZERO, Some(senders))?; diff --git a/crates/interfaces/src/executor.rs b/crates/interfaces/src/executor.rs index cbddf4017e20..fbde59bf0bd3 100644 --- a/crates/interfaces/src/executor.rs +++ b/crates/interfaces/src/executor.rs @@ -26,6 +26,10 @@ pub enum BlockValidationError { BlockPreMerge { hash: H256 }, #[error("Missing total difficulty")] MissingTotalDifficulty { hash: H256 }, + #[error("EIP-4788 Parent beacon block root missing for active Cancun block")] + MissingParentBeaconBlockRoot, + #[error("The parent beacon block root is not zero for Cancun genesis block")] + CancunGenesisParentBeaconBlockRootNotZero, } /// BlockExecutor Errors diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index c209e985877e..4dd68d849d47 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -33,8 +33,10 @@ use reth_primitives::{ }; use reth_provider::{BlockReaderIdExt, BlockSource, BundleStateWithReceipts, StateProviderFactory}; use reth_revm::{ - database::StateProviderDatabase, env::tx_env_with_recovered, into_reth_log, - state_change::post_block_withdrawals_balance_increments, + database::StateProviderDatabase, + env::tx_env_with_recovered, + into_reth_log, + state_change::{apply_beacon_root_contract_call, post_block_withdrawals_balance_increments}, }; use reth_rlp::Encodable; use reth_tasks::TaskSpawner; @@ -45,6 +47,7 @@ use revm::{ Database, DatabaseCommit, State, }; use std::{ + fmt::Debug, future::Future, pin::Pin, sync::{atomic::AtomicBool, Arc}, @@ -664,6 +667,16 @@ where let block_number = initialized_block_env.number.to::(); + // apply eip-4788 pre block contract call + pre_block_beacon_root_contract_call( + &mut db, + &chain_spec, + block_number, + &initialized_cfg, + &initialized_block_env, + &attributes, + )?; + let mut receipts = Vec::new(); while let Some(pool_tx) = best_txs.next() { // ensure we still have capacity for this transaction @@ -771,7 +784,8 @@ where let WithdrawalsOutcome { withdrawals_root, withdrawals } = commit_withdrawals(&mut db, &chain_spec, attributes.timestamp, attributes.withdrawals)?; - // merge all transitions into bundle state. + // merge all transitions into bundle state, this would apply the withdrawal balance changes and + // 4788 contract call db.merge_transitions(BundleRetention::PlainState); let bundle = BundleStateWithReceipts::new(db.take_bundle(), vec![receipts], block_number); @@ -861,7 +875,7 @@ where extra_data, attributes, chain_spec, - .. + initialized_cfg, } = config; debug!(parent_hash=?parent_block.hash, parent_number=parent_block.number, "building empty payload"); @@ -876,10 +890,21 @@ where let block_number = initialized_block_env.number.to::(); let block_gas_limit: u64 = initialized_block_env.gas_limit.try_into().unwrap_or(u64::MAX); + // apply eip-4788 pre block contract call + pre_block_beacon_root_contract_call( + &mut db, + &chain_spec, + block_number, + &initialized_cfg, + &initialized_block_env, + &attributes, + )?; + let WithdrawalsOutcome { withdrawals_root, withdrawals } = commit_withdrawals(&mut db, &chain_spec, attributes.timestamp, attributes.withdrawals)?; - // merge transition, this will apply the withdrawal balance changes. 
+ // merge all transitions into bundle state, this would apply the withdrawal balance changes and + // 4788 contract call db.merge_transitions(BundleRetention::PlainState); // calculate the state root @@ -967,6 +992,50 @@ fn commit_withdrawals>( }) } +/// Apply the [EIP-4788](https://eips.ethereum.org/EIPS/eip-4788) pre block contract call. +/// +/// This constructs a new [EVM](revm::EVM) with the given DB, and environment ([CfgEnv] and +/// [BlockEnv]) to execute the pre block contract call. +/// +/// The parent beacon block root used for the call is gathered from the given +/// [PayloadBuilderAttributes]. +/// +/// This uses [apply_beacon_root_contract_call] to ultimately apply the beacon root contract state +/// change. +fn pre_block_beacon_root_contract_call( + db: &mut DB, + chain_spec: &ChainSpec, + block_number: u64, + initialized_cfg: &CfgEnv, + initialized_block_env: &BlockEnv, + attributes: &PayloadBuilderAttributes, +) -> Result<(), PayloadBuilderError> +where + DB: Database + DatabaseCommit, + ::Error: Debug, +{ + // Configure the environment for the block. + let env = Env { + cfg: initialized_cfg.clone(), + block: initialized_block_env.clone(), + ..Default::default() + }; + + // apply pre-block EIP-4788 contract call + let mut evm_pre_block = revm::EVM::with_env(env); + evm_pre_block.database(db); + + // initialize a block from the env, because the pre block call needs the block itself + apply_beacon_root_contract_call( + chain_spec, + attributes.timestamp, + block_number, + attributes.parent_beacon_block_root, + &mut evm_pre_block, + ) + .map_err(|err| PayloadBuilderError::Internal(err.into())) +} + /// Checks if the new payload is better than the current best. /// /// This compares the total fees of the blocks, higher is better. diff --git a/crates/primitives/src/constants/mod.rs b/crates/primitives/src/constants/mod.rs index 7955a6e47d37..2fcdf09b33a1 100644 --- a/crates/primitives/src/constants/mod.rs +++ b/crates/primitives/src/constants/mod.rs @@ -1,6 +1,6 @@ //! Ethereum protocol-related constants -use crate::{H256, U256}; +use crate::{H160, H256, U256}; use hex_literal::hex; use std::time::Duration; @@ -132,6 +132,13 @@ pub const BEACON_CONSENSUS_REORG_UNWIND_DEPTH: u64 = 3; /// pub const ALLOWED_FUTURE_BLOCK_TIME_SECONDS: u64 = 15; +/// The address for the beacon roots contract defined in EIP-4788. +pub const BEACON_ROOTS_ADDRESS: H160 = H160(hex!("bEac00dDB15f3B6d645C48263dC93862413A222D")); + +/// The caller to be used when calling the EIP-4788 beacon roots contract at the beginning of the +/// block. 
+pub const SYSTEM_ADDRESS: H160 = H160(hex!("fffffffffffffffffffffffffffffffffffffffe")); + #[cfg(test)] mod tests { use super::*; diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index c6032f436bd1..faf692c7cbfe 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -9,7 +9,7 @@ repository.workspace = true description = "reth specific revm utilities" [dependencies] -# reth +# reth reth-primitives.workspace = true reth-interfaces.workspace = true reth-provider.workspace = true @@ -21,4 +21,4 @@ reth-consensus-common = { path = "../consensus/common" } revm.workspace = true # common -tracing.workspace = true \ No newline at end of file +tracing.workspace = true diff --git a/crates/revm/revm-primitives/src/config.rs b/crates/revm/revm-primitives/src/config.rs index ec71880072a9..22d5a15cf0e4 100644 --- a/crates/revm/revm-primitives/src/config.rs +++ b/crates/revm/revm-primitives/src/config.rs @@ -19,7 +19,9 @@ pub fn revm_spec_by_timestamp_after_merge( /// return revm_spec from spec configuration. pub fn revm_spec(chain_spec: &ChainSpec, block: Head) -> revm::primitives::SpecId { - if chain_spec.fork(Hardfork::Shanghai).active_at_head(&block) { + if chain_spec.fork(Hardfork::Cancun).active_at_head(&block) { + revm::primitives::CANCUN + } else if chain_spec.fork(Hardfork::Shanghai).active_at_head(&block) { revm::primitives::SHANGHAI } else if chain_spec.fork(Hardfork::Paris).active_at_head(&block) { revm::primitives::MERGE diff --git a/crates/revm/revm-primitives/src/env.rs b/crates/revm/revm-primitives/src/env.rs index 3efa2b19c6c6..721a46e2d2c6 100644 --- a/crates/revm/revm-primitives/src/env.rs +++ b/crates/revm/revm-primitives/src/env.rs @@ -1,9 +1,10 @@ use crate::config::revm_spec; use reth_primitives::{ + constants::{BEACON_ROOTS_ADDRESS, SYSTEM_ADDRESS}, recover_signer, Address, Bytes, Chain, ChainSpec, Head, Header, Transaction, TransactionKind, - TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxEip4844, TxLegacy, U256, + TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxEip4844, TxLegacy, H256, U256, }; -use revm::primitives::{AnalysisKind, BlockEnv, CfgEnv, SpecId, TransactTo, TxEnv}; +use revm::primitives::{AnalysisKind, BlockEnv, CfgEnv, Env, SpecId, TransactTo, TxEnv}; /// Convenience function to call both [fill_cfg_env] and [fill_block_env] pub fn fill_cfg_and_block_env( @@ -106,6 +107,51 @@ pub fn tx_env_with_recovered(transaction: &TransactionSignedEcRecovered) -> TxEn tx_env } +/// Fill transaction environment with the EIP-4788 system contract message data. +/// +/// This requirements for the beacon root contract call defined by +/// [EIP-4788](https://eips.ethereum.org/EIPS/eip-4788) are: +/// +/// At the start of processing any execution block where `block.timestamp >= FORK_TIMESTAMP` (i.e. +/// before processing any transactions), call `BEACON_ROOTS_ADDRESS` as `SYSTEM_ADDRESS` with the +/// 32-byte input of `header.parent_beacon_block_root`, a gas limit of `30_000_000`, and `0` value. +/// This will trigger the `set()` routine of the beacon roots contract. 
This is a system operation +/// and therefore: +/// * the call must execute to completion +/// * the call does not count against the block’s gas limit +/// * the call does not follow the EIP-1559 burn semantics - no value should be transferred as +/// part of the call +/// * if no code exists at `BEACON_ROOTS_ADDRESS`, the call must fail silently +pub fn fill_tx_env_with_beacon_root_contract_call(env: &mut Env, parent_beacon_block_root: H256) { + env.tx = TxEnv { + caller: SYSTEM_ADDRESS, + transact_to: TransactTo::Call(BEACON_ROOTS_ADDRESS), + // Explicitly set nonce to None so revm does not do any nonce checks + nonce: None, + gas_limit: 30_000_000, + value: U256::ZERO, + data: parent_beacon_block_root.to_fixed_bytes().to_vec().into(), + // Setting the gas price to zero enforces that no value is transferred as part of the call, + // and that the call will not count against the block's gas limit + gas_price: U256::ZERO, + // The chain ID check is not relevant here and is disabled if set to None + chain_id: None, + // Setting the gas priority fee to None ensures the effective gas price is derived from the + // `gas_price` field, which we need to be zero + gas_priority_fee: None, + access_list: Vec::new(), + // blob fields can be None for this tx + blob_hashes: Vec::new(), + max_fee_per_blob_gas: None, + }; + + // ensure the block gas limit is >= the tx + env.block.gas_limit = U256::from(env.tx.gas_limit); + + // disable the base fee check for this call by setting the base fee to zero + env.block.basefee = U256::ZERO; +} + /// Fill transaction environment from [TransactionSignedEcRecovered]. pub fn fill_tx_env_with_recovered(tx_env: &mut TxEnv, transaction: &TransactionSignedEcRecovered) { fill_tx_env(tx_env, transaction.as_ref(), transaction.signer()) diff --git a/crates/revm/src/processor.rs b/crates/revm/src/processor.rs index 9ded17dd1a6d..183e603d679e 100644 --- a/crates/revm/src/processor.rs +++ b/crates/revm/src/processor.rs @@ -4,7 +4,7 @@ use crate::{ eth_dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, into_reth_log, stack::{InspectorStack, InspectorStackConfig}, - state_change::post_block_balance_increments, + state_change::{apply_beacon_root_contract_call, post_block_balance_increments}, }; use reth_interfaces::{ executor::{BlockExecutionError, BlockValidationError}, @@ -53,7 +53,7 @@ pub struct EVMProcessor<'a> { /// Outer vector stores receipts for each block sequentially. /// The inner vector stores receipts ordered by transaction number. /// - /// If receipt is None it means it is pruned. + /// If receipt is None it means it is pruned. receipts: Vec>>, /// First block will be initialized to `None` /// and be set to the block number of first block executed. @@ -172,6 +172,24 @@ impl<'a> EVMProcessor<'a> { ); } + /// Applies the pre-block call to the EIP-4788 beacon block root contract. + /// + /// If cancun is not activated or the block is the genesis block, then this is a no-op, and no + /// state changes are made. + pub fn apply_beacon_root_contract_call( + &mut self, + block: &Block, + ) -> Result<(), BlockExecutionError> { + apply_beacon_root_contract_call( + &self.chain_spec, + block.timestamp, + block.number, + block.parent_beacon_block_root, + &mut self.evm, + )?; + Ok(()) + } + /// Apply post execution state changes, including block rewards, withdrawals, and irregular DAO /// hardfork state change. 
pub fn apply_post_execution_state_change( @@ -256,6 +274,8 @@ impl<'a> EVMProcessor<'a> { total_difficulty: U256, senders: Option>, ) -> Result<(Vec, u64), BlockExecutionError> { + self.init_env(&block.header, total_difficulty); + // perf: do not execute empty blocks if block.body.is_empty() { return Ok((Vec::new(), 0)) @@ -263,8 +283,6 @@ impl<'a> EVMProcessor<'a> { let senders = self.recover_senders(&block.body, senders)?; - self.init_env(&block.header, total_difficulty); - let mut cumulative_gas_used = 0; let mut receipts = Vec::with_capacity(block.body.len()); for (transaction, sender) in block.body.iter().zip(senders) { @@ -318,6 +336,8 @@ impl<'a> EVMProcessor<'a> { total_difficulty: U256, senders: Option>, ) -> Result, BlockExecutionError> { + self.init_env(&block.header, total_difficulty); + self.apply_beacon_root_contract_call(block)?; let (receipts, cumulative_gas_used) = self.execute_transactions(block, total_difficulty, senders)?; @@ -529,3 +549,433 @@ pub fn verify_receipt<'a>( Ok(()) } + +#[cfg(test)] +mod tests { + use reth_primitives::{ + constants::{BEACON_ROOTS_ADDRESS, SYSTEM_ADDRESS}, + keccak256, Account, Bytecode, Bytes, ChainSpecBuilder, ForkCondition, StorageKey, MAINNET, + }; + use reth_provider::{AccountReader, BlockHashReader, StateRootProvider}; + use reth_revm_primitives::TransitionState; + use revm::Database; + use std::{collections::HashMap, str::FromStr}; + + use super::*; + + /// Returns the beacon root contract code + fn beacon_root_contract_code() -> Bytes { + Bytes::from_str("0x3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500").unwrap() + } + + #[derive(Debug, Default, Clone, Eq, PartialEq)] + struct StateProviderTest { + accounts: HashMap, Account)>, + contracts: HashMap, + block_hash: HashMap, + } + + impl StateProviderTest { + /// Insert account. 
+ fn insert_account( + &mut self, + address: Address, + mut account: Account, + bytecode: Option, + storage: HashMap, + ) { + if let Some(bytecode) = bytecode { + let hash = keccak256(&bytecode); + account.bytecode_hash = Some(hash); + self.contracts.insert(hash, Bytecode::new_raw(bytecode.into())); + } + self.accounts.insert(address, (storage, account)); + } + } + + impl AccountReader for StateProviderTest { + fn basic_account(&self, address: Address) -> reth_interfaces::Result> { + let ret = Ok(self.accounts.get(&address).map(|(_, acc)| *acc)); + ret + } + } + + impl BlockHashReader for StateProviderTest { + fn block_hash(&self, number: u64) -> reth_interfaces::Result> { + Ok(self.block_hash.get(&number).cloned()) + } + + fn canonical_hashes_range( + &self, + start: BlockNumber, + end: BlockNumber, + ) -> reth_interfaces::Result> { + let range = start..end; + Ok(self + .block_hash + .iter() + .filter_map(|(block, hash)| range.contains(block).then_some(*hash)) + .collect()) + } + } + + impl StateRootProvider for StateProviderTest { + fn state_root( + &self, + _bundle_state: BundleStateWithReceipts, + ) -> reth_interfaces::Result { + todo!() + } + } + + impl StateProvider for StateProviderTest { + fn storage( + &self, + account: Address, + storage_key: reth_primitives::StorageKey, + ) -> reth_interfaces::Result> { + Ok(self + .accounts + .get(&account) + .and_then(|(storage, _)| storage.get(&storage_key).cloned())) + } + + fn bytecode_by_hash(&self, code_hash: H256) -> reth_interfaces::Result> { + Ok(self.contracts.get(&code_hash).cloned()) + } + + fn proof( + &self, + _address: Address, + _keys: &[H256], + ) -> reth_interfaces::Result<(Vec, H256, Vec>)> { + todo!() + } + } + + #[test] + fn eip_4788_non_genesis_call() { + let mut header = Header { timestamp: 1, number: 1, ..Header::default() }; + + let mut db = StateProviderTest::default(); + + let beacon_root_contract_code = beacon_root_contract_code(); + + let beacon_root_contract_account = Account { + balance: U256::ZERO, + bytecode_hash: Some(keccak256(beacon_root_contract_code.clone())), + nonce: 1, + }; + + db.insert_account( + BEACON_ROOTS_ADDRESS, + beacon_root_contract_account, + Some(beacon_root_contract_code), + HashMap::new(), + ); + + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) + .build(), + ); + + // execute invalid header (no parent beacon block root) + let mut executor = EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db)); + + // attempt to execute a block without parent beacon block root, expect err + let err = executor + .execute_and_verify_receipt( + &Block { header: header.clone(), body: vec![], ommers: vec![], withdrawals: None }, + U256::ZERO, + None, + ) + .expect_err( + "Executing cancun block without parent beacon block root field should fail", + ); + assert_eq!( + err, + BlockExecutionError::Validation(BlockValidationError::MissingParentBeaconBlockRoot) + ); + + // fix header, set a gas limit + header.parent_beacon_block_root = Some(H256::from_low_u64_be(0x1337)); + + // Now execute a block with the fixed header, ensure that it does not fail + executor + .execute( + &Block { header: header.clone(), body: vec![], ommers: vec![], withdrawals: None }, + U256::ZERO, + None, + ) + .unwrap(); + + // check the actual storage of the contract - it should be: + // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH should be + // header.timestamp + // * The storage value at header.timestamp % 
HISTORY_BUFFER_LENGTH + HISTORY_BUFFER_LENGTH + // should be parent_beacon_block_root + let history_buffer_length = 98304u64; + let timestamp_index = header.timestamp % history_buffer_length; + let parent_beacon_block_root_index = + timestamp_index % history_buffer_length + history_buffer_length; + + // get timestamp storage and compare + let timestamp_storage = + executor.db_mut().storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)).unwrap(); + assert_eq!(timestamp_storage, U256::from(header.timestamp)); + + // get parent beacon block root storage and compare + let parent_beacon_block_root_storage = executor + .db_mut() + .storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)) + .expect("storage value should exist"); + assert_eq!(parent_beacon_block_root_storage, U256::from(0x1337)); + } + + #[test] + fn eip_4788_no_code_cancun() { + // This test ensures that we "silently fail" when cancun is active and there is no code at + // BEACON_ROOTS_ADDRESS + let header = Header { + timestamp: 1, + number: 1, + parent_beacon_block_root: Some(H256::from_low_u64_be(0x1337)), + ..Header::default() + }; + + let db = StateProviderTest::default(); + + // DON'T deploy the contract at genesis + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) + .build(), + ); + + let mut executor = EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db)); + executor.init_env(&header, U256::ZERO); + + // get the env + let previous_env = executor.evm.env.clone(); + + // attempt to execute an empty block with parent beacon block root, this should not fail + executor + .execute_and_verify_receipt( + &Block { header: header.clone(), body: vec![], ommers: vec![], withdrawals: None }, + U256::ZERO, + None, + ) + .expect( + "Executing a block with no transactions while cancun is active should not fail", + ); + + // ensure that the env has not changed + assert_eq!(executor.evm.env, previous_env); + } + + #[test] + fn eip_4788_empty_account_call() { + // This test ensures that we do not increment the nonce of an empty SYSTEM_ADDRESS account + // during the pre-block call + let mut db = StateProviderTest::default(); + + let beacon_root_contract_code = beacon_root_contract_code(); + + let beacon_root_contract_account = Account { + balance: U256::ZERO, + bytecode_hash: Some(keccak256(beacon_root_contract_code.clone())), + nonce: 1, + }; + + db.insert_account( + BEACON_ROOTS_ADDRESS, + beacon_root_contract_account, + Some(beacon_root_contract_code), + HashMap::new(), + ); + + // insert an empty SYSTEM_ADDRESS + db.insert_account(SYSTEM_ADDRESS, Account::default(), None, HashMap::new()); + + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) + .build(), + ); + + let mut executor = EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db)); + + // construct the header for block one + let header = Header { + timestamp: 1, + number: 1, + parent_beacon_block_root: Some(H256::from_low_u64_be(0x1337)), + ..Header::default() + }; + + executor.init_env(&header, U256::ZERO); + + // attempt to execute an empty block with parent beacon block root, this should not fail + executor + .execute_and_verify_receipt( + &Block { header: header.clone(), body: vec![], ommers: vec![], withdrawals: None }, + U256::ZERO, + None, + ) + .expect( + "Executing a block with no transactions while cancun is active should not 
fail", + ); + + // ensure that the nonce of the system address account has not changed + let nonce = executor.db_mut().basic(SYSTEM_ADDRESS).unwrap().unwrap().nonce; + assert_eq!(nonce, 0); + } + + #[test] + fn eip_4788_genesis_call() { + let mut db = StateProviderTest::default(); + + let beacon_root_contract_code = beacon_root_contract_code(); + + let beacon_root_contract_account = Account { + balance: U256::ZERO, + bytecode_hash: Some(keccak256(beacon_root_contract_code.clone())), + nonce: 1, + }; + + db.insert_account( + BEACON_ROOTS_ADDRESS, + beacon_root_contract_account, + Some(beacon_root_contract_code), + HashMap::new(), + ); + + // activate cancun at genesis + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(0)) + .build(), + ); + + let mut header = chain_spec.genesis_header(); + + let mut executor = EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db)); + executor.init_env(&header, U256::ZERO); + + // attempt to execute the genesis block with non-zero parent beacon block root, expect err + header.parent_beacon_block_root = Some(H256::from_low_u64_be(0x1337)); + let _err = executor + .execute_and_verify_receipt( + &Block { header: header.clone(), body: vec![], ommers: vec![], withdrawals: None }, + U256::ZERO, + None, + ) + .expect_err( + "Executing genesis cancun block with non-zero parent beacon block root field should fail", + ); + + // fix header + header.parent_beacon_block_root = Some(H256::zero()); + + // now try to process the genesis block again, this time ensuring that a system contract + // call does not occur + executor + .execute( + &Block { header: header.clone(), body: vec![], ommers: vec![], withdrawals: None }, + U256::ZERO, + None, + ) + .unwrap(); + + // there is no system contract call so there should be NO STORAGE CHANGES + // this means we'll check the transition state + let state = executor.evm.db().unwrap(); + let transition_state = state + .transition_state + .clone() + .expect("the evm should be initialized with bundle updates"); + + // assert that it is the default (empty) transition state + assert_eq!(transition_state, TransitionState::default()); + } + + #[test] + fn eip_4788_high_base_fee() { + // This test ensures that if we have a base fee, then we don't return an error when the + // system contract is called, due to the gas price being less than the base fee. 
+ let header = Header { + timestamp: 1, + number: 1, + parent_beacon_block_root: Some(H256::from_low_u64_be(0x1337)), + base_fee_per_gas: Some(u64::MAX), + ..Header::default() + }; + + let mut db = StateProviderTest::default(); + + let beacon_root_contract_code = beacon_root_contract_code(); + + let beacon_root_contract_account = Account { + balance: U256::ZERO, + bytecode_hash: Some(keccak256(beacon_root_contract_code.clone())), + nonce: 1, + }; + + db.insert_account( + BEACON_ROOTS_ADDRESS, + beacon_root_contract_account, + Some(beacon_root_contract_code), + HashMap::new(), + ); + + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) + .build(), + ); + + // execute header + let mut executor = EVMProcessor::new_with_db(chain_spec, StateProviderDatabase::new(db)); + executor.init_env(&header, U256::ZERO); + + // ensure that the env is configured with a base fee + assert_eq!(executor.evm.env.block.basefee, U256::from(u64::MAX)); + + // Now execute a block with the fixed header, ensure that it does not fail + executor + .execute( + &Block { header: header.clone(), body: vec![], ommers: vec![], withdrawals: None }, + U256::ZERO, + None, + ) + .unwrap(); + + // check the actual storage of the contract - it should be: + // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH should be + // header.timestamp + // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH + HISTORY_BUFFER_LENGTH + // should be parent_beacon_block_root + let history_buffer_length = 98304u64; + let timestamp_index = header.timestamp % history_buffer_length; + let parent_beacon_block_root_index = + timestamp_index % history_buffer_length + history_buffer_length; + + // get timestamp storage and compare + let timestamp_storage = + executor.db_mut().storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)).unwrap(); + assert_eq!(timestamp_storage, U256::from(header.timestamp)); + + // get parent beacon block root storage and compare + let parent_beacon_block_root_storage = executor + .db_mut() + .storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)) + .unwrap(); + assert_eq!(parent_beacon_block_root_storage, U256::from(0x1337)); + } +} diff --git a/crates/revm/src/state_change.rs b/crates/revm/src/state_change.rs index 6b4cbeff55b5..d641ad7c8d0c 100644 --- a/crates/revm/src/state_change.rs +++ b/crates/revm/src/state_change.rs @@ -1,6 +1,11 @@ use reth_consensus_common::calc; -use reth_primitives::{Address, ChainSpec, Hardfork, Header, Withdrawal, U256}; -use std::collections::HashMap; +use reth_interfaces::executor::{BlockExecutionError, BlockValidationError}; +use reth_primitives::{ + constants::SYSTEM_ADDRESS, Address, ChainSpec, Hardfork, Header, Withdrawal, H256, U256, +}; +use reth_revm_primitives::{env::fill_tx_env_with_beacon_root_contract_call, Database}; +use revm::{primitives::ResultAndState, DatabaseCommit, EVM}; +use std::{collections::HashMap, fmt::Debug}; /// Collect all balance changes at the end of the block. /// @@ -46,6 +51,64 @@ pub fn post_block_balance_increments( balance_increments } +/// Applies the pre-block call to the EIP-4788 beacon block root contract, using the given block, +/// [ChainSpec], EVM. +/// +/// If cancun is not activated or the block is the genesis block, then this is a no-op, and no +/// state changes are made. 
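+///
+/// Returns an error if cancun is active and the parent beacon block root is missing for a
+/// non-genesis block, is non-zero for the genesis block, or if the EVM call itself fails.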
+#[inline]
+pub fn apply_beacon_root_contract_call<DB: Database + DatabaseCommit>(
+    chain_spec: &ChainSpec,
+    block_timestamp: u64,
+    block_number: u64,
+    block_parent_beacon_block_root: Option<H256>,
+    evm: &mut EVM<DB>,
+) -> Result<(), BlockExecutionError>
+where
+    <DB as Database>::Error: Debug,
+{
+    if chain_spec.is_cancun_activated_at_timestamp(block_timestamp) {
+        // if the block number is zero (genesis block) then the parent beacon block root must
+        // be 0x0 and no system transaction may occur as per EIP-4788
+        if block_number == 0 {
+            if block_parent_beacon_block_root != Some(H256::zero()) {
+                return Err(BlockValidationError::CancunGenesisParentBeaconBlockRootNotZero.into())
+            }
+        } else {
+            let parent_beacon_block_root = block_parent_beacon_block_root.ok_or(
+                BlockExecutionError::from(BlockValidationError::MissingParentBeaconBlockRoot),
+            )?;
+
+            // get previous env
+            let previous_env = evm.env.clone();
+
+            // modify env for pre block call
+            fill_tx_env_with_beacon_root_contract_call(&mut evm.env, parent_beacon_block_root);
+
+            let ResultAndState { mut state, .. } = match evm.transact() {
+                Ok(res) => res,
+                Err(e) => {
+                    evm.env = previous_env;
+                    return Err(BlockExecutionError::from(BlockValidationError::EVM {
+                        hash: Default::default(),
+                        message: format!("{e:?}"),
+                    }))
+                }
+            };
+
+            state.remove(&SYSTEM_ADDRESS);
+            state.remove(&evm.env.block.coinbase);
+
+            let db = evm.db().expect("db to not be moved");
+            db.commit(state);
+
+            // re-set the previous env
+            evm.env = previous_env;
+        }
+    }
+    Ok(())
+}
+
 /// Returns a map of addresses to their balance increments if shanghai is active at the given
 /// timestamp.
 #[inline]

From 1406142af879567c214605b5a8189acf909b62b1 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Tue, 19 Sep 2023 17:48:34 +0200
Subject: [PATCH 710/722] fix: clear buffered blocks on sync (#4658)

---
 crates/consensus/beacon/src/engine/sync.rs | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs
index 2042dd14293d..fa5cfe842e23 100644
--- a/crates/consensus/beacon/src/engine/sync.rs
+++ b/crates/consensus/beacon/src/engine/sync.rs
@@ -100,10 +100,11 @@ where
         self.max_block = Some(block);
     }

-    /// Cancels all download requests that are in progress.
+    /// Cancels all in-progress download requests and clears all buffered blocks.
     pub(crate) fn clear_block_download_requests(&mut self) {
         self.inflight_full_block_requests.clear();
         self.inflight_block_range_requests.clear();
+        self.range_buffered_blocks.clear();
         self.update_block_download_metrics();
     }

From 57c10e5b6594d68c3ce6b88be6554d243043366f Mon Sep 17 00:00:00 2001
From: Alexey Shekhirin
Date: Tue, 19 Sep 2023 17:36:24 +0100
Subject: [PATCH 711/722] refactor(engine, tree): connect buffered blocks on
 pruner finish (#4613)

---
 crates/blockchain-tree/src/blockchain_tree.rs | 54 +++++++++++++------
 crates/blockchain-tree/src/shareable.rs       | 13 ++---
 .../consensus/beacon/src/engine/hooks/mod.rs  |  5 +-
 .../beacon/src/engine/hooks/prune.rs          |  2 +-
 crates/consensus/beacon/src/engine/mod.rs     |  8 +--
 crates/interfaces/src/blockchain_tree/mod.rs  |  9 ++--
 crates/storage/provider/src/providers/mod.rs  |  8 +--
 7 files changed, 63 insertions(+), 36 deletions(-)

diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs
index 7302baab47cd..3c6b04202dca 100644
--- a/crates/blockchain-tree/src/blockchain_tree.rs
+++ b/crates/blockchain-tree/src/blockchain_tree.rs
@@ -744,7 +744,8 @@ impl BlockchainTree
     }

     /// Reads the last `N` canonical hashes from the database and updates the block indices of the
-    /// tree.
+    /// tree by attempting to connect the buffered blocks to canonical hashes.
     ///
     /// `N` is the `max_reorg_depth` plus the number of block hashes needed to satisfy the
     /// `BLOCKHASH` opcode in the EVM.
@@ -753,21 +754,12 @@
     ///
     /// This finalizes `last_finalized_block` prior to reading the canonical hashes (using
     /// [`BlockchainTree::finalize_block`]).
-    pub fn restore_canonical_hashes_and_finalize(
+    pub fn connect_buffered_blocks_to_canonical_hashes_and_finalize(
         &mut self,
         last_finalized_block: BlockNumber,
     ) -> Result<(), Error> {
         self.finalize_block(last_finalized_block);

-        self.restore_canonical_hashes()
-    }
-
-    /// Reads the last `N` canonical hashes from the database and updates the block indices of the
-    /// tree.
-    ///
-    /// `N` is the `max_reorg_depth` plus the number of block hashes needed to satisfy the
-    /// `BLOCKHASH` opcode in the EVM.
-    pub fn restore_canonical_hashes(&mut self) -> Result<(), Error> {
         let num_of_canonical_hashes =
             self.config.max_reorg_depth() + self.config.num_of_additional_canonical_block_hashes();

@@ -790,12 +782,44 @@
         }
     }

-        // check unconnected block buffer for the childs of new added blocks,
-        for added_block in last_canonical_hashes.into_iter() {
+        self.connect_buffered_blocks_to_hashes(last_canonical_hashes)?;
+
+        Ok(())
+    }
+
+    /// Reads the last `N` canonical hashes from the database and updates the block indices of the
+    /// tree by attempting to connect the buffered blocks to canonical hashes.
+    ///
+    /// `N` is the `max_reorg_depth` plus the number of block hashes needed to satisfy the
+    /// `BLOCKHASH` opcode in the EVM.
+    pub fn connect_buffered_blocks_to_canonical_hashes(&mut self) -> Result<(), Error> {
+        let num_of_canonical_hashes =
+            self.config.max_reorg_depth() + self.config.num_of_additional_canonical_block_hashes();
+
+        let last_canonical_hashes = self
+            .externals
+            .db
+            .tx()?
+            .cursor_read::<tables::CanonicalHeaders>()?
+            .walk_back(None)?
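+            // `walk_back(None)` starts the walk at the last (highest) canonical entry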
+            .take(num_of_canonical_hashes as usize)
+            .collect::<Result<Vec<_>, _>>()?;
+
+        self.connect_buffered_blocks_to_hashes(last_canonical_hashes)?;
+
+        Ok(())
+    }
+
+    fn connect_buffered_blocks_to_hashes(
+        &mut self,
+        hashes: impl IntoIterator<Item = impl Into<BlockNumHash>>,
+    ) -> Result<(), Error> {
+        // check unconnected block buffer for children of the canonical hashes
+        for added_block in hashes.into_iter() {
             self.try_connect_buffered_blocks(added_block.into())
         }

-        // check unconnected block buffer for childs of the chains.
+        // check unconnected block buffer for children of the chains
         let mut all_chain_blocks = Vec::new();
         for (_, chain) in self.chains.iter() {
             for (&number, blocks) in chain.blocks.iter() {
@@ -1626,7 +1650,7 @@ mod tests {
             .assert(&tree);

         // update canonical block to b2, this would make b2a be removed
-        assert_eq!(tree.restore_canonical_hashes_and_finalize(12), Ok(()));
+        assert_eq!(tree.connect_buffered_blocks_to_canonical_hashes_and_finalize(12), Ok(()));

         assert_eq!(tree.is_block_known(block2.num_hash()).unwrap(), Some(BlockStatus::Valid));

diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs
index ba766b236ea0..28ede546636a 100644
--- a/crates/blockchain-tree/src/shareable.rs
+++ b/crates/blockchain-tree/src/shareable.rs
@@ -66,21 +66,22 @@ impl BlockchainTreeEngine
         tree.update_chains_metrics();
     }

-    fn restore_canonical_hashes_and_finalize(
+    fn connect_buffered_blocks_to_canonical_hashes_and_finalize(
         &self,
         last_finalized_block: BlockNumber,
     ) -> Result<(), Error> {
-        trace!(target: "blockchain_tree", ?last_finalized_block, "Restoring canonical hashes for last finalized block");
+        trace!(target: "blockchain_tree", ?last_finalized_block, "Connecting buffered blocks to canonical hashes and finalizing the tree");
         let mut tree = self.tree.write();
-        let res = tree.restore_canonical_hashes_and_finalize(last_finalized_block);
+        let res =
+            tree.connect_buffered_blocks_to_canonical_hashes_and_finalize(last_finalized_block);
         tree.update_chains_metrics();
         res
     }

-    fn restore_canonical_hashes(&self) -> Result<(), Error> {
-        trace!(target: "blockchain_tree", "Restoring canonical hashes");
+    fn connect_buffered_blocks_to_canonical_hashes(&self) -> Result<(), Error> {
+        trace!(target: "blockchain_tree", "Connecting buffered blocks to canonical hashes");
         let mut tree = self.tree.write();
-        let res = tree.restore_canonical_hashes();
+        let res = tree.connect_buffered_blocks_to_canonical_hashes();
         tree.update_chains_metrics();
         res
     }
diff --git a/crates/consensus/beacon/src/engine/hooks/mod.rs b/crates/consensus/beacon/src/engine/hooks/mod.rs
index 8f0877aa135f..d2770a77c1c7 100644
--- a/crates/consensus/beacon/src/engine/hooks/mod.rs
+++ b/crates/consensus/beacon/src/engine/hooks/mod.rs
@@ -88,9 +88,8 @@ impl EngineHookEvent {
 pub enum EngineHookAction {
     /// Notify about a [SyncState] update.
     UpdateSyncState(SyncState),
-    /// Read the last relevant canonical hashes from the database and update the block indices of
-    /// the blockchain tree.
-    RestoreCanonicalHashes,
+    /// Connect blocks buffered during the hook execution to canonical hashes.
+    ConnectBufferedBlocks,
 }

 /// An error returned by [hook][`EngineHook`].
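A minimal, self-contained sketch of the event-to-action mapping this patch introduces; the enums below are simplified stand-ins for the real hook types, which appear only partially in the hunks above and below:

// Simplified stand-ins for the engine hook types touched by this patch.
enum EngineHookEvent {
    Started,
    Finished(Result<(), ()>),
}

enum EngineHookAction {
    ConnectBufferedBlocks,
}

// Mirrors the PruneHook change in the next hunk: only a successfully finished
// hook run triggers reconnecting the blocks that were buffered while it ran.
fn action_for_event(event: &EngineHookEvent) -> Option<EngineHookAction> {
    match event {
        EngineHookEvent::Finished(Ok(())) => Some(EngineHookAction::ConnectBufferedBlocks),
        EngineHookEvent::Started | EngineHookEvent::Finished(Err(_)) => None,
    }
}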
diff --git a/crates/consensus/beacon/src/engine/hooks/prune.rs b/crates/consensus/beacon/src/engine/hooks/prune.rs index 1650f9f52158..f720082ee3bc 100644 --- a/crates/consensus/beacon/src/engine/hooks/prune.rs +++ b/crates/consensus/beacon/src/engine/hooks/prune.rs @@ -74,7 +74,7 @@ impl PruneHook { }; let action = if matches!(event, EngineHookEvent::Finished(Ok(_))) { - Some(EngineHookAction::RestoreCanonicalHashes) + Some(EngineHookAction::ConnectBufferedBlocks) } else { None }; diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 66df6a083e5b..2d6d12714ba3 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1334,7 +1334,7 @@ where let synced_to_finalized = match self.blockchain.block_number(block_hash)? { Some(number) => { // Attempt to restore the tree. - self.blockchain.restore_canonical_hashes_and_finalize(number)?; + self.blockchain.connect_buffered_blocks_to_canonical_hashes_and_finalize(number)?; true } None => false, @@ -1686,8 +1686,10 @@ where EngineHookAction::UpdateSyncState(state) => { self.sync_state_updater.update_sync_state(state) } - EngineHookAction::RestoreCanonicalHashes => { - if let Err(error) = self.blockchain.restore_canonical_hashes() { + // TODO(alexey): always connect buffered blocks if hook had the + // `EngineHookDBAccessLevel::ReadWrite` + EngineHookAction::ConnectBufferedBlocks => { + if let Err(error) = self.blockchain.connect_buffered_blocks_to_canonical_hashes() { error!(target: "consensus::engine", ?error, "Error restoring blockchain tree state"); return Err(error.into()) } diff --git a/crates/interfaces/src/blockchain_tree/mod.rs b/crates/interfaces/src/blockchain_tree/mod.rs index d8334df7decb..896434eb6cdc 100644 --- a/crates/interfaces/src/blockchain_tree/mod.rs +++ b/crates/interfaces/src/blockchain_tree/mod.rs @@ -53,7 +53,8 @@ pub trait BlockchainTreeEngine: BlockchainTreeViewer + Send + Sync { fn finalize_block(&self, finalized_block: BlockNumber); /// Reads the last `N` canonical hashes from the database and updates the block indices of the - /// tree. + /// tree by attempting to connect the buffered blocks to canonical hashes. + /// /// /// `N` is the `max_reorg_depth` plus the number of block hashes needed to satisfy the /// `BLOCKHASH` opcode in the EVM. @@ -62,17 +63,17 @@ pub trait BlockchainTreeEngine: BlockchainTreeViewer + Send + Sync { /// /// This finalizes `last_finalized_block` prior to reading the canonical hashes (using /// [`BlockchainTreeEngine::finalize_block`]). - fn restore_canonical_hashes_and_finalize( + fn connect_buffered_blocks_to_canonical_hashes_and_finalize( &self, last_finalized_block: BlockNumber, ) -> Result<(), Error>; /// Reads the last `N` canonical hashes from the database and updates the block indices of the - /// tree. + /// tree by attempting to connect the buffered blocks to canonical hashes. /// /// `N` is the `max_reorg_depth` plus the number of block hashes needed to satisfy the /// `BLOCKHASH` opcode in the EVM. - fn restore_canonical_hashes(&self) -> Result<(), Error>; + fn connect_buffered_blocks_to_canonical_hashes(&self) -> Result<(), Error>; /// Make a block and its parent chain part of the canonical chain by committing it to the /// database. 
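To keep the rename readable across the remaining diffs, a small usage sketch of the two entry points; the trait below is a reduced stand-in for the BlockchainTreeEngine trait shown above, with error handling elided:

// Reduced sketch of the renamed trait surface and its two call sites.
trait BufferedBlockConnector {
    fn connect_buffered_blocks_to_canonical_hashes_and_finalize(&self, last_finalized: u64);
    fn connect_buffered_blocks_to_canonical_hashes(&self);
}

// After a pipeline run that reached the finalized block: finalize, then reconnect.
fn on_pipeline_synced<T: BufferedBlockConnector>(tree: &T, finalized_block: u64) {
    tree.connect_buffered_blocks_to_canonical_hashes_and_finalize(finalized_block);
}

// After a pruner (hook) run nothing new was finalized: only reconnect.
fn on_pruner_finished<T: BufferedBlockConnector>(tree: &T) {
    tree.connect_buffered_blocks_to_canonical_hashes();
}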
diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 9b46aa2d6e96..d12c93d452f9 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -549,15 +549,15 @@ where self.tree.finalize_block(finalized_block) } - fn restore_canonical_hashes_and_finalize( + fn connect_buffered_blocks_to_canonical_hashes_and_finalize( &self, last_finalized_block: BlockNumber, ) -> Result<()> { - self.tree.restore_canonical_hashes_and_finalize(last_finalized_block) + self.tree.connect_buffered_blocks_to_canonical_hashes_and_finalize(last_finalized_block) } - fn restore_canonical_hashes(&self) -> Result<()> { - self.tree.restore_canonical_hashes() + fn connect_buffered_blocks_to_canonical_hashes(&self) -> Result<()> { + self.tree.connect_buffered_blocks_to_canonical_hashes() } fn make_canonical(&self, block_hash: &BlockHash) -> Result { From 801294252ed1ba70d671e723fa771a4635aae3fd Mon Sep 17 00:00:00 2001 From: "Supernovahs.eth" <91280922+supernovahs@users.noreply.github.com> Date: Tue, 19 Sep 2023 22:27:32 +0530 Subject: [PATCH 712/722] feat: Duplicate Withdrawal and move try from impls to rpc-compat (#4186) --- Cargo.lock | 4 + crates/consensus/beacon/Cargo.toml | 2 +- crates/consensus/beacon/src/engine/mod.rs | 26 +- crates/payload/builder/Cargo.toml | 1 + crates/payload/builder/src/payload.rs | 26 +- crates/rpc/rpc-builder/Cargo.toml | 1 + crates/rpc/rpc-builder/tests/it/auth.rs | 8 +- crates/rpc/rpc-engine-api/Cargo.toml | 2 +- crates/rpc/rpc-engine-api/src/engine_api.rs | 20 +- crates/rpc/rpc-engine-api/src/payload.rs | 6 +- crates/rpc/rpc-engine-api/tests/it/payload.rs | 43 ++- crates/rpc/rpc-types-compat/Cargo.toml | 1 + crates/rpc/rpc-types-compat/src/engine/mod.rs | 6 + .../rpc-types-compat/src/engine/payload.rs | 328 ++++++++++++++++++ crates/rpc/rpc-types-compat/src/lib.rs | 2 + crates/rpc/rpc-types/Cargo.toml | 3 + crates/rpc/rpc-types/src/eth/engine/mod.rs | 3 +- .../rpc/rpc-types/src/eth/engine/payload.rs | 272 +-------------- crates/rpc/rpc-types/src/eth/mod.rs | 3 + crates/rpc/rpc-types/src/eth/withdrawal.rs | 40 +++ 20 files changed, 480 insertions(+), 317 deletions(-) create mode 100644 crates/rpc/rpc-types-compat/src/engine/mod.rs create mode 100644 crates/rpc/rpc-types-compat/src/engine/payload.rs create mode 100644 crates/rpc/rpc-types/src/eth/withdrawal.rs diff --git a/Cargo.lock b/Cargo.lock index 18ccf93cfcc9..ee5ae3e72ea5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5339,6 +5339,7 @@ dependencies = [ "reth-prune", "reth-revm", "reth-rpc-types", + "reth-rpc-types-compat", "reth-stages", "reth-tasks", "reth-tracing", @@ -5795,6 +5796,7 @@ dependencies = [ "reth-revm-primitives", "reth-rlp", "reth-rpc-types", + "reth-rpc-types-compat", "reth-transaction-pool", "revm-primitives", "sha2", @@ -6056,6 +6058,7 @@ dependencies = [ "reth-rpc-api", "reth-rpc-engine-api", "reth-rpc-types", + "reth-rpc-types-compat", "reth-tasks", "reth-tracing", "reth-transaction-pool", @@ -6085,6 +6088,7 @@ dependencies = [ "reth-rlp", "reth-rpc-api", "reth-rpc-types", + "reth-rpc-types-compat", "reth-tasks", "thiserror", "tokio", diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index f44de6c555fe..d180187478d6 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -19,7 +19,7 @@ reth-rpc-types.workspace = true reth-tasks.workspace = true reth-payload-builder.workspace = true reth-prune = { path = "../../prune" } - 
+reth-rpc-types-compat.workspace = true # async tokio = { workspace = true, features = ["sync"] } tokio-stream.workspace = true diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 2d6d12714ba3..f47da482789a 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -33,6 +33,7 @@ use reth_rpc_types::engine::{ CancunPayloadFields, ExecutionPayload, PayloadAttributes, PayloadError, PayloadStatus, PayloadStatusEnum, PayloadValidationError, }; +use reth_rpc_types_compat::engine::payload::try_into_sealed_block; use reth_stages::{ControlFlow, Pipeline, PipelineError}; use reth_tasks::TaskSpawner; use std::{ @@ -1137,7 +1138,8 @@ where cancun_fields: Option, ) -> Result { let parent_hash = payload.parent_hash(); - let block = match payload.try_into_sealed_block( + let block = match try_into_sealed_block( + payload, cancun_fields.as_ref().map(|fields| fields.parent_beacon_block_root), ) { Ok(block) => block, @@ -1833,9 +1835,8 @@ mod tests { use assert_matches::assert_matches; use reth_primitives::{stage::StageCheckpoint, ChainSpec, ChainSpecBuilder, H256, MAINNET}; use reth_provider::{BlockWriter, ProviderFactory}; - use reth_rpc_types::engine::{ - ExecutionPayloadV1, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, - }; + use reth_rpc_types::engine::{ForkchoiceState, ForkchoiceUpdated, PayloadStatus}; + use reth_rpc_types_compat::engine::payload::try_block_to_payload_v1; use reth_stages::{ExecOutput, PipelineError, StageError}; use std::{collections::VecDeque, sync::Arc, time::Duration}; use tokio::sync::oneshot::error::TryRecvError; @@ -1895,7 +1896,8 @@ mod tests { assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); // consensus engine is still idle because no FCUs were received - let _ = env.send_new_payload(ExecutionPayloadV1::from(SealedBlock::default()), None).await; + let _ = env.send_new_payload(try_block_to_payload_v1(SealedBlock::default()), None).await; + assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); // consensus engine is still idle because pruning is running @@ -2017,7 +2019,6 @@ mod tests { use reth_db::{tables, transaction::DbTxMut}; use reth_interfaces::test_utils::{generators, generators::random_block}; use reth_rpc_types::engine::ForkchoiceUpdateError; - #[tokio::test] async fn empty_head() { let chain_spec = Arc::new( @@ -2311,20 +2312,22 @@ mod tests { // Send new payload let res = env .send_new_payload( - ExecutionPayloadV1::from(random_block(&mut rng, 0, None, None, Some(0))), + try_block_to_payload_v1(random_block(&mut rng, 0, None, None, Some(0))), None, ) .await; + // Invalid, because this is a genesis block assert_matches!(res, Ok(result) => assert_matches!(result.status, PayloadStatusEnum::Invalid { .. 
})); // Send new payload let res = env .send_new_payload( - ExecutionPayloadV1::from(random_block(&mut rng, 1, None, None, Some(0))), + try_block_to_payload_v1(random_block(&mut rng, 1, None, None, Some(0))), None, ) .await; + let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Syncing); assert_matches!(res, Ok(result) => assert_eq!(result, expected_result)); @@ -2374,9 +2377,10 @@ mod tests { // Send new payload let result = env - .send_new_payload_retry_on_syncing(ExecutionPayloadV1::from(block2.clone()), None) + .send_new_payload_retry_on_syncing(try_block_to_payload_v1(block2.clone()), None) .await .unwrap(); + let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Valid) .with_latest_valid_hash(block2.hash); assert_eq!(result, expected_result); @@ -2474,7 +2478,7 @@ mod tests { // Send new payload let block = random_block(&mut rng, 2, Some(H256::random()), None, Some(0)); - let res = env.send_new_payload(ExecutionPayloadV1::from(block), None).await; + let res = env.send_new_payload(try_block_to_payload_v1(block), None).await; let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Syncing); assert_matches!(res, Ok(result) => assert_eq!(result, expected_result)); @@ -2537,7 +2541,7 @@ mod tests { // Send new payload let result = env - .send_new_payload_retry_on_syncing(ExecutionPayloadV1::from(block2.clone()), None) + .send_new_payload_retry_on_syncing(try_block_to_payload_v1(block2.clone()), None) .await .unwrap(); diff --git a/crates/payload/builder/Cargo.toml b/crates/payload/builder/Cargo.toml index 48a8a3ec6b44..b4dd6b1d737e 100644 --- a/crates/payload/builder/Cargo.toml +++ b/crates/payload/builder/Cargo.toml @@ -16,6 +16,7 @@ reth-rlp.workspace = true reth-transaction-pool.workspace = true reth-interfaces.workspace = true reth-revm-primitives = { path = "../../revm/revm-primitives" } +reth-rpc-types-compat.workspace = true ## ethereum revm-primitives.workspace = true diff --git a/crates/payload/builder/src/payload.rs b/crates/payload/builder/src/payload.rs index cbc30a0d20fd..d63d694e54ff 100644 --- a/crates/payload/builder/src/payload.rs +++ b/crates/payload/builder/src/payload.rs @@ -9,8 +9,11 @@ use reth_rpc_types::engine::{ ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadV1, PayloadAttributes, PayloadId, }; +use reth_rpc_types_compat::engine::payload::{ + convert_block_to_payload_field_v2, convert_standalonewithdraw_to_withdrawal, + try_block_to_payload_v1, try_block_to_payload_v3, +}; use revm_primitives::{BlockEnv, CfgEnv}; - /// Contains the built payload. /// /// According to the [engine API specification](https://github.com/ethereum/execution-apis/blob/main/src/engine/README.md) the execution layer should build the initial version of the payload with an empty transaction set and then keep update it in order to maximize the revenue. @@ -76,7 +79,7 @@ impl BuiltPayload { // V1 engine_getPayloadV1 response impl From for ExecutionPayloadV1 { fn from(value: BuiltPayload) -> Self { - value.block.into() + try_block_to_payload_v1(value.block) } } @@ -85,7 +88,10 @@ impl From for ExecutionPayloadEnvelopeV2 { fn from(value: BuiltPayload) -> Self { let BuiltPayload { block, fees, .. } = value; - ExecutionPayloadEnvelopeV2 { block_value: fees, execution_payload: block.into() } + ExecutionPayloadEnvelopeV2 { + block_value: fees, + execution_payload: convert_block_to_payload_field_v2(block), + } } } @@ -94,7 +100,7 @@ impl From for ExecutionPayloadEnvelopeV3 { let BuiltPayload { block, fees, sidecars, .. 
} = value;

         ExecutionPayloadEnvelopeV3 {
-            execution_payload: block.into(),
+            execution_payload: try_block_to_payload_v3(block),
             block_value: fees,
             // From the engine API spec:
             //
@@ -137,13 +143,23 @@ impl PayloadBuilderAttributes {
     /// Derives the unique [PayloadId] for the given parent and attributes
     pub fn new(parent: H256, attributes: PayloadAttributes) -> Self {
         let id = payload_id(&parent, &attributes);
+
+        let withdraw = attributes.withdrawals.map(
+            |withdrawals: Vec<reth_rpc_types::engine::payload::Withdrawal>| {
+                withdrawals
+                    .into_iter()
+                    .map(convert_standalonewithdraw_to_withdrawal)
+                    .collect::<Vec<Withdrawal>>()
+            },
+        );
+
         Self {
             id,
             parent,
             timestamp: attributes.timestamp.as_u64(),
             suggested_fee_recipient: attributes.suggested_fee_recipient,
             prev_randao: attributes.prev_randao,
-            withdrawals: attributes.withdrawals.unwrap_or_default(),
+            withdrawals: withdraw.unwrap_or_default(),
             parent_beacon_block_root: attributes.parent_beacon_block_root,
         }
     }
diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml
index c9246376a539..477717c50f2c 100644
--- a/crates/rpc/rpc-builder/Cargo.toml
+++ b/crates/rpc/rpc-builder/Cargo.toml
@@ -21,6 +21,7 @@ reth-rpc-engine-api = { path = "../rpc-engine-api" }
 reth-rpc-types.workspace = true
 reth-tasks.workspace = true
 reth-transaction-pool.workspace = true
+reth-rpc-types-compat.workspace = true

 # rpc/net
 jsonrpsee = { workspace = true, features = ["server"] }
diff --git a/crates/rpc/rpc-builder/tests/it/auth.rs b/crates/rpc/rpc-builder/tests/it/auth.rs
index 3e8063d73b73..fefa25024cd0 100644
--- a/crates/rpc/rpc-builder/tests/it/auth.rs
+++ b/crates/rpc/rpc-builder/tests/it/auth.rs
@@ -6,15 +6,17 @@ use reth_primitives::Block;
 use reth_rpc::JwtSecret;
 use reth_rpc_api::clients::EngineApiClient;
 use reth_rpc_types::engine::{ForkchoiceState, PayloadId, TransitionConfiguration};
-
+use reth_rpc_types_compat::engine::payload::{
+    convert_block_to_payload_input_v2, try_block_to_payload_v1,
+};
 #[allow(unused_must_use)]
 async fn test_basic_engine_calls<C>(client: &C)
 where
     C: ClientT + SubscriptionClientT + Sync,
 {
     let block = Block::default().seal_slow();
-    EngineApiClient::new_payload_v1(client, block.clone().into()).await;
-    EngineApiClient::new_payload_v2(client, block.into()).await;
+    EngineApiClient::new_payload_v1(client, try_block_to_payload_v1(block.clone())).await;
+    EngineApiClient::new_payload_v2(client, convert_block_to_payload_input_v2(block)).await;
     EngineApiClient::fork_choice_updated_v1(client, ForkchoiceState::default(), None).await;
     EngineApiClient::get_payload_v1(client, PayloadId::new([0, 0, 0, 0, 0, 0, 0, 0])).await;
     EngineApiClient::get_payload_v2(client, PayloadId::new([0, 0, 0, 0, 0, 0, 0, 0])).await;
diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml
index d08e7c8faa8d..7d8161c5db54 100644
--- a/crates/rpc/rpc-engine-api/Cargo.toml
+++ b/crates/rpc/rpc-engine-api/Cargo.toml
@@ -18,7 +18,7 @@ reth-rpc-api = { path = "../rpc-api" }
 reth-beacon-consensus = { path = "../../consensus/beacon" }
 reth-payload-builder.workspace = true
 reth-tasks.workspace = true
-
+reth-rpc-types-compat.workspace = true
 # async
 tokio = { workspace = true, features = ["sync"] }
diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs
index 8f05f5d4aab4..632676af7ab9 100644
--- a/crates/rpc/rpc-engine-api/src/engine_api.rs
+++ b/crates/rpc/rpc-engine-api/src/engine_api.rs
@@ -15,6 +15,9 @@ use reth_rpc_types::engine::{
     ForkchoiceUpdated, PayloadAttributes, PayloadId,
PayloadStatus, TransitionConfiguration, CAPABILITIES, }; +use reth_rpc_types_compat::engine::payload::{ + convert_payload_input_v2_to_payload, convert_to_payload_body_v1, +}; use reth_tasks::TaskSpawner; use std::sync::Arc; use tokio::sync::oneshot; @@ -84,7 +87,7 @@ where &self, payload: ExecutionPayloadInputV2, ) -> EngineApiResult { - let payload = ExecutionPayload::from(payload); + let payload = convert_payload_input_v2_to_payload(payload); let payload_or_attrs = PayloadOrAttributes::from_execution_payload(&payload, None); self.validate_version_specific_fields(EngineApiMessageVersion::V2, &payload_or_attrs)?; Ok(self.inner.beacon_consensus.new_payload(payload, None).await?) @@ -280,7 +283,7 @@ where let block_result = inner.provider.block(BlockHashOrNumber::Number(num)); match block_result { Ok(block) => { - result.push(block.map(Into::into)); + result.push(block.map(convert_to_payload_body_v1)); } Err(err) => { tx.send(Err(EngineApiError::Internal(Box::new(err)))).ok(); @@ -311,7 +314,7 @@ where .provider .block(BlockHashOrNumber::Hash(hash)) .map_err(|err| EngineApiError::Internal(Box::new(err)))?; - result.push(block.map(Into::into)); + result.push(block.map(convert_to_payload_body_v1)); } Ok(result) @@ -836,8 +839,11 @@ mod tests { random_block_range(&mut rng, start..=start + count - 1, H256::default(), 0..2); handle.provider.extend_blocks(blocks.iter().cloned().map(|b| (b.hash(), b.unseal()))); - let expected = - blocks.iter().cloned().map(|b| Some(b.unseal().into())).collect::>(); + let expected = blocks + .iter() + .cloned() + .map(|b| Some(convert_to_payload_body_v1(b.unseal()))) + .collect::>(); let res = api.get_payload_bodies_by_range(start, count).await.unwrap(); assert_eq!(res, expected); @@ -875,7 +881,7 @@ mod tests { if first_missing_range.contains(&b.number) { None } else { - Some(b.unseal().into()) + Some(convert_to_payload_body_v1(b.unseal())) } }) .collect::>(); @@ -894,7 +900,7 @@ mod tests { { None } else { - Some(b.unseal().into()) + Some(convert_to_payload_body_v1(b.unseal())) } }) .collect::>(); diff --git a/crates/rpc/rpc-engine-api/src/payload.rs b/crates/rpc/rpc-engine-api/src/payload.rs index f738f6ef2695..0c9b1429a2ce 100644 --- a/crates/rpc/rpc-engine-api/src/payload.rs +++ b/crates/rpc/rpc-engine-api/src/payload.rs @@ -1,6 +1,6 @@ -use reth_primitives::{Withdrawal, H256}; -use reth_rpc_types::engine::{ExecutionPayload, PayloadAttributes}; +use reth_primitives::H256; +use reth_rpc_types::engine::{ExecutionPayload, PayloadAttributes}; /// Either an [ExecutionPayload] or a [PayloadAttributes]. pub(crate) enum PayloadOrAttributes<'a> { /// An [ExecutionPayload] and optional parent beacon block root. @@ -25,7 +25,7 @@ impl<'a> PayloadOrAttributes<'a> { } /// Return the withdrawals for the payload or attributes. - pub(crate) fn withdrawals(&self) -> Option<&Vec> { + pub(crate) fn withdrawals(&self) -> Option<&Vec> { match self { Self::ExecutionPayload { payload, .. 
} => payload.withdrawals(),
             Self::PayloadAttributes(attributes) => attributes.withdrawals.as_ref(),
diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs
index 1dcac070253b..fdc19854b5b5 100644
--- a/crates/rpc/rpc-engine-api/tests/it/payload.rs
+++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs
@@ -13,6 +13,10 @@ use reth_rlp::{Decodable, DecodeError};
 use reth_rpc_types::engine::{
     ExecutionPayload, ExecutionPayloadBodyV1, ExecutionPayloadV1, PayloadError,
 };
+use reth_rpc_types_compat::engine::payload::{
+    convert_standalonewithdraw_to_withdrawal, convert_to_payload_body_v1, try_block_to_payload,
+    try_block_to_payload_v1, try_into_sealed_block, try_payload_v1_to_block,
+};

 fn transform_block<F: FnOnce(Block) -> Block>(src: SealedBlock, f: F) -> ExecutionPayload {
     let unsealed = src.unseal();
@@ -20,13 +24,12 @@ fn transform_block<F: FnOnce(Block) -> Block>(src: SealedBlock, f: F) -> Executi
     // Recalculate roots
     transformed.header.transactions_root = proofs::calculate_transaction_root(&transformed.body);
     transformed.header.ommers_hash = proofs::calculate_ommers_root(&transformed.ommers);
-    SealedBlock {
+    try_block_to_payload(SealedBlock {
         header: transformed.header.seal_slow(),
         body: transformed.body,
         ommers: transformed.ommers,
         withdrawals: transformed.withdrawals,
-    }
-    .into()
+    })
 }

 #[test]
 fn payload_body_roundtrip() {
     let mut rng = generators::rng();
     for block in random_block_range(&mut rng, 0..=99, H256::default(), 0..2) {
         let unsealed = block.clone().unseal();
-        let payload_body: ExecutionPayloadBodyV1 = unsealed.into();
+        let payload_body: ExecutionPayloadBodyV1 = convert_to_payload_body_v1(unsealed);

         assert_eq!(
             Ok(block.body),
             payload_body
                 .transactions
                 .iter()
                 .map(|x| TransactionSigned::decode(&mut &x[..]))
                 .collect::<Result<Vec<TransactionSigned>, _>>(),
         );
-
-        assert_eq!(block.withdrawals, payload_body.withdrawals);
+        let withdraw = payload_body.withdrawals.map(|withdrawals| {
+            withdrawals
+                .into_iter()
+                .map(convert_standalonewithdraw_to_withdrawal)
+                .collect::<Vec<Withdrawal>>()
+        });
+        assert_eq!(block.withdrawals, withdraw);
     }
 }

@@ -59,7 +67,8 @@ fn payload_validation() {
         b.header.extra_data = BytesMut::zeroed(32).freeze().into();
         b
     });
-    assert_matches!(block_with_valid_extra_data.try_into_sealed_block(None), Ok(_));
+
+    assert_matches!(try_into_sealed_block(block_with_valid_extra_data, None), Ok(_));

     // Invalid extra data
     let block_with_invalid_extra_data: Bytes = BytesMut::zeroed(33).freeze();
@@ -68,7 +77,8 @@
         b
     });
     assert_matches!(
-        invalid_extra_data_block.try_into_sealed_block(None),
+        try_into_sealed_block(invalid_extra_data_block, None),
         Err(PayloadError::ExtraData(data)) if data == block_with_invalid_extra_data
     );

     // Zero base fee
@@ -78,16 +88,18 @@
         b
     });
     assert_matches!(
-        block_with_zero_base_fee.try_into_sealed_block(None),
+        try_into_sealed_block(block_with_zero_base_fee, None),
         Err(PayloadError::BaseFee(val)) if val == U256::ZERO
     );

     // Invalid encoded transactions
-    let mut payload_with_invalid_txs: ExecutionPayloadV1 = block.clone().into();
+    let mut payload_with_invalid_txs: ExecutionPayloadV1 = try_block_to_payload_v1(block.clone());
+
     payload_with_invalid_txs.transactions.iter_mut().for_each(|tx| {
         *tx = Bytes::new().into();
     });
-    let payload_with_invalid_txs = Block::try_from(payload_with_invalid_txs);
+    let payload_with_invalid_txs = try_payload_v1_to_block(payload_with_invalid_txs);
     assert_matches!(
         payload_with_invalid_txs,
         Err(PayloadError::Decode(DecodeError::InputTooShort))
     );

     // Non-empty ommers
@@ -99,7 +111,8 @@ fn
payload_validation() { b }); assert_matches!( - block_with_ommers.clone().try_into_sealed_block(None), + try_into_sealed_block(block_with_ommers.clone(),None), + Err(PayloadError::BlockHash { consensus, .. }) if consensus == block_with_ommers.block_hash() ); @@ -110,8 +123,9 @@ fn payload_validation() { b }); assert_matches!( - block_with_difficulty.clone().try_into_sealed_block(None), + try_into_sealed_block(block_with_difficulty.clone(),None), Err(PayloadError::BlockHash { consensus, .. }) if consensus == block_with_difficulty.block_hash() + ); // None zero nonce @@ -120,8 +134,9 @@ fn payload_validation() { b }); assert_matches!( - block_with_nonce.clone().try_into_sealed_block(None), + try_into_sealed_block(block_with_nonce.clone(),None), Err(PayloadError::BlockHash { consensus, .. }) if consensus == block_with_nonce.block_hash() + ); // Valid block diff --git a/crates/rpc/rpc-types-compat/Cargo.toml b/crates/rpc/rpc-types-compat/Cargo.toml index a7997e34102e..b6f203ef4d12 100644 --- a/crates/rpc/rpc-types-compat/Cargo.toml +++ b/crates/rpc/rpc-types-compat/Cargo.toml @@ -14,3 +14,4 @@ Compatibility layer for reth-primitives and ethereum RPC types reth-primitives.workspace = true reth-rpc-types.workspace = true reth-rlp.workspace = true + diff --git a/crates/rpc/rpc-types-compat/src/engine/mod.rs b/crates/rpc/rpc-types-compat/src/engine/mod.rs new file mode 100644 index 000000000000..10e60327b871 --- /dev/null +++ b/crates/rpc/rpc-types-compat/src/engine/mod.rs @@ -0,0 +1,6 @@ +//! Standalone functions for engine specific rpc type conversions +pub mod payload; +pub use payload::{ + convert_standalonewithdraw_to_withdrawal, convert_withdrawal_to_standalonewithdraw, + try_block_to_payload_v1, try_into_sealed_block, try_payload_v1_to_block, +}; diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs new file mode 100644 index 000000000000..4158e9990188 --- /dev/null +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -0,0 +1,328 @@ +//! Standalone Conversion Functions for Handling Different Versions of Execution Payloads in +//! 
Ethereum's Engine
+use reth_primitives::{
+    constants::{MAXIMUM_EXTRA_DATA_SIZE, MIN_PROTOCOL_BASE_FEE_U256},
+    proofs::{self, EMPTY_LIST_HASH},
+    Block, Header, SealedBlock, TransactionSigned, UintTryTo, Withdrawal, H256, U256,
+};
+use reth_rlp::Decodable;
+use reth_rpc_types::engine::{
+    payload::{ExecutionPayloadBodyV1, ExecutionPayloadFieldV2, ExecutionPayloadInputV2},
+    ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, PayloadError,
+};
+
+/// Converts [ExecutionPayloadV1] to [Block]
+pub fn try_payload_v1_to_block(payload: ExecutionPayloadV1) -> Result<Block, PayloadError> {
+    if payload.extra_data.len() > MAXIMUM_EXTRA_DATA_SIZE {
+        return Err(PayloadError::ExtraData(payload.extra_data))
+    }
+
+    if payload.base_fee_per_gas < MIN_PROTOCOL_BASE_FEE_U256 {
+        return Err(PayloadError::BaseFee(payload.base_fee_per_gas))
+    }
+
+    let transactions = payload
+        .transactions
+        .iter()
+        .map(|tx| TransactionSigned::decode(&mut tx.as_ref()))
+        .collect::<Result<Vec<_>, _>>()?;
+    let transactions_root = proofs::calculate_transaction_root(&transactions);
+
+    let header = Header {
+        parent_hash: payload.parent_hash,
+        beneficiary: payload.fee_recipient,
+        state_root: payload.state_root,
+        transactions_root,
+        receipts_root: payload.receipts_root,
+        withdrawals_root: None,
+        logs_bloom: payload.logs_bloom,
+        number: payload.block_number.as_u64(),
+        gas_limit: payload.gas_limit.as_u64(),
+        gas_used: payload.gas_used.as_u64(),
+        timestamp: payload.timestamp.as_u64(),
+        mix_hash: payload.prev_randao,
+        base_fee_per_gas: Some(
+            payload
+                .base_fee_per_gas
+                .uint_try_to()
+                .map_err(|_| PayloadError::BaseFee(payload.base_fee_per_gas))?,
+        ),
+        blob_gas_used: None,
+        excess_blob_gas: None,
+        parent_beacon_block_root: None,
+        extra_data: payload.extra_data,
+        // Defaults
+        ommers_hash: EMPTY_LIST_HASH,
+        difficulty: Default::default(),
+        nonce: Default::default(),
+    };
+
+    Ok(Block { header, body: transactions, withdrawals: None, ommers: Default::default() })
+}
+
+/// Converts [ExecutionPayloadV2] to [Block]
+pub fn try_payload_v2_to_block(payload: ExecutionPayloadV2) -> Result<Block, PayloadError> {
+    // this performs the same conversion as the underlying V1 payload, but calculates the
+    // withdrawals root and adds withdrawals
+    let mut base_sealed_block = try_payload_v1_to_block(payload.payload_inner)?;
+    let withdrawals: Vec<_> = payload
+        .withdrawals
+        .iter()
+        .map(|w| convert_standalonewithdraw_to_withdrawal(w.clone()))
+        .collect();
+    let withdrawals_root = proofs::calculate_withdrawals_root(&withdrawals);
+    base_sealed_block.withdrawals = Some(withdrawals);
+    base_sealed_block.header.withdrawals_root = Some(withdrawals_root);
+    Ok(base_sealed_block)
+}
+
+/// Converts [ExecutionPayloadV3] to [Block]
+pub fn try_payload_v3_to_block(payload: ExecutionPayloadV3) -> Result<Block, PayloadError> {
+    // this performs the same conversion as the underlying V2 payload, but inserts the blob gas
+    // used and excess blob gas
+    let mut base_block = try_payload_v2_to_block(payload.payload_inner)?;
+
+    base_block.header.blob_gas_used = Some(payload.blob_gas_used.as_u64());
+    base_block.header.excess_blob_gas = Some(payload.excess_blob_gas.as_u64());
+
+    Ok(base_block)
+}
+
+/// Converts [SealedBlock] to [ExecutionPayload]
+pub fn try_block_to_payload(value: SealedBlock) -> ExecutionPayload {
+    if value.header.parent_beacon_block_root.is_some() {
+        // block with parent beacon block root: V3
+        ExecutionPayload::V3(try_block_to_payload_v3(value))
+    } else if value.withdrawals.is_some() {
+        // block with withdrawals: V2
+        
ExecutionPayload::V2(try_block_to_payload_v2(value)) + } else { + // otherwise V1 + ExecutionPayload::V1(try_block_to_payload_v1(value)) + } +} + +/// Converts [SealedBlock] to [ExecutionPayloadV1] +pub fn try_block_to_payload_v1(value: SealedBlock) -> ExecutionPayloadV1 { + let transactions = value + .body + .iter() + .map(|tx| { + let mut encoded = Vec::new(); + tx.encode_enveloped(&mut encoded); + encoded.into() + }) + .collect(); + ExecutionPayloadV1 { + parent_hash: value.parent_hash, + fee_recipient: value.beneficiary, + state_root: value.state_root, + receipts_root: value.receipts_root, + logs_bloom: value.logs_bloom, + prev_randao: value.mix_hash, + block_number: value.number.into(), + gas_limit: value.gas_limit.into(), + gas_used: value.gas_used.into(), + timestamp: value.timestamp.into(), + extra_data: value.extra_data.clone(), + base_fee_per_gas: U256::from(value.base_fee_per_gas.unwrap_or_default()), + block_hash: value.hash(), + transactions, + } +} + +/// Converts [SealedBlock] to [ExecutionPayloadV2] +pub fn try_block_to_payload_v2(value: SealedBlock) -> ExecutionPayloadV2 { + let transactions = value + .body + .iter() + .map(|tx| { + let mut encoded = Vec::new(); + tx.encode_enveloped(&mut encoded); + encoded.into() + }) + .collect(); + let standalone_withdrawals: Vec = value + .withdrawals + .clone() + .unwrap_or_default() + .into_iter() + .map(convert_withdrawal_to_standalonewithdraw) + .collect(); + + ExecutionPayloadV2 { + payload_inner: ExecutionPayloadV1 { + parent_hash: value.parent_hash, + fee_recipient: value.beneficiary, + state_root: value.state_root, + receipts_root: value.receipts_root, + logs_bloom: value.logs_bloom, + prev_randao: value.mix_hash, + block_number: value.number.into(), + gas_limit: value.gas_limit.into(), + gas_used: value.gas_used.into(), + timestamp: value.timestamp.into(), + extra_data: value.extra_data.clone(), + base_fee_per_gas: U256::from(value.base_fee_per_gas.unwrap_or_default()), + block_hash: value.hash(), + transactions, + }, + withdrawals: standalone_withdrawals, + } +} + +/// Converts [SealedBlock] to [ExecutionPayloadV3] +pub fn try_block_to_payload_v3(value: SealedBlock) -> ExecutionPayloadV3 { + let transactions = value + .body + .iter() + .map(|tx| { + let mut encoded = Vec::new(); + tx.encode_enveloped(&mut encoded); + encoded.into() + }) + .collect(); + + let withdrawals: Vec = value + .withdrawals + .clone() + .unwrap_or_default() + .into_iter() + .map(convert_withdrawal_to_standalonewithdraw) + .collect(); + + ExecutionPayloadV3 { + payload_inner: ExecutionPayloadV2 { + payload_inner: ExecutionPayloadV1 { + parent_hash: value.parent_hash, + fee_recipient: value.beneficiary, + state_root: value.state_root, + receipts_root: value.receipts_root, + logs_bloom: value.logs_bloom, + prev_randao: value.mix_hash, + block_number: value.number.into(), + gas_limit: value.gas_limit.into(), + gas_used: value.gas_used.into(), + timestamp: value.timestamp.into(), + extra_data: value.extra_data.clone(), + base_fee_per_gas: U256::from(value.base_fee_per_gas.unwrap_or_default()), + block_hash: value.hash(), + transactions, + }, + withdrawals, + }, + + blob_gas_used: value.blob_gas_used.unwrap_or_default().into(), + excess_blob_gas: value.excess_blob_gas.unwrap_or_default().into(), + } +} + +/// Converts [SealedBlock] to [ExecutionPayloadFieldV2] +pub fn convert_block_to_payload_field_v2(value: SealedBlock) -> ExecutionPayloadFieldV2 { + // if there are withdrawals, return V2 + if value.withdrawals.is_some() { + 
ExecutionPayloadFieldV2::V2(try_block_to_payload_v2(value))
+    } else {
+        ExecutionPayloadFieldV2::V1(try_block_to_payload_v1(value))
+    }
+}
+
+/// Converts [ExecutionPayloadFieldV2] to [ExecutionPayload]
+pub fn convert_payload_field_v2_to_payload(value: ExecutionPayloadFieldV2) -> ExecutionPayload {
+    match value {
+        ExecutionPayloadFieldV2::V1(payload) => ExecutionPayload::V1(payload),
+        ExecutionPayloadFieldV2::V2(payload) => ExecutionPayload::V2(payload),
+    }
+}
+
+/// Converts [ExecutionPayloadInputV2] to [ExecutionPayload]
+pub fn convert_payload_input_v2_to_payload(value: ExecutionPayloadInputV2) -> ExecutionPayload {
+    match value.withdrawals {
+        Some(withdrawals) => ExecutionPayload::V2(ExecutionPayloadV2 {
+            payload_inner: value.execution_payload,
+            withdrawals,
+        }),
+        None => ExecutionPayload::V1(value.execution_payload),
+    }
+}
+
+/// Converts [SealedBlock] to [ExecutionPayloadInputV2]
+pub fn convert_block_to_payload_input_v2(value: SealedBlock) -> ExecutionPayloadInputV2 {
+    let withdraw = value.withdrawals.clone().map(|withdrawals| {
+        withdrawals.into_iter().map(convert_withdrawal_to_standalonewithdraw).collect::<Vec<_>>()
+    });
+    ExecutionPayloadInputV2 {
+        withdrawals: withdraw,
+        execution_payload: try_block_to_payload_v1(value),
+    }
+}
+
+/// Tries to create a new block from the given payload and optional parent beacon block root.
+/// Performs additional validation of the `extra_data` and `base_fee_per_gas` fields.
+///
+/// NOTE: The log bloom is assumed to be validated during serialization.
+/// NOTE: Empty ommers, nonce and difficulty values are validated upon computing block hash and
+/// comparing the value with `payload.block_hash`.
+///
+/// See
+pub fn try_into_sealed_block(
+    value: ExecutionPayload,
+    parent_beacon_block_root: Option<H256>,
+) -> Result<SealedBlock, PayloadError> {
+    let block_hash = value.block_hash();
+    let mut base_payload = match value {
+        ExecutionPayload::V1(payload) => try_payload_v1_to_block(payload)?,
+        ExecutionPayload::V2(payload) => try_payload_v2_to_block(payload)?,
+        ExecutionPayload::V3(payload) => try_payload_v3_to_block(payload)?,
+    };
+
+    base_payload.header.parent_beacon_block_root = parent_beacon_block_root;
+
+    let payload = base_payload.seal_slow();
+
+    if block_hash != payload.hash() {
+        return Err(PayloadError::BlockHash { execution: payload.hash(), consensus: block_hash })
+    }
+    Ok(payload)
+}
+
+/// Converts [Withdrawal] to [reth_rpc_types::engine::payload::Withdrawal]
+pub fn convert_withdrawal_to_standalonewithdraw(
+    withdrawal: Withdrawal,
+) -> reth_rpc_types::engine::payload::Withdrawal {
+    reth_rpc_types::engine::payload::Withdrawal {
+        index: withdrawal.index,
+        validator_index: withdrawal.validator_index,
+        address: withdrawal.address,
+        amount: withdrawal.amount,
+    }
+}
+
+/// Converts [reth_rpc_types::engine::payload::Withdrawal] to [Withdrawal]
+pub fn convert_standalonewithdraw_to_withdrawal(
+    standalone: reth_rpc_types::engine::payload::Withdrawal,
+) -> Withdrawal {
+    Withdrawal {
+        index: standalone.index,
+        validator_index: standalone.validator_index,
+        address: standalone.address,
+        amount: standalone.amount,
+    }
+}
+
+/// Converts [Block] to [ExecutionPayloadBodyV1]
+pub fn convert_to_payload_body_v1(value: Block) -> ExecutionPayloadBodyV1 {
+    let transactions = value.body.into_iter().map(|tx| {
+        let mut out = Vec::new();
+        tx.encode_enveloped(&mut out);
+        out.into()
+    });
+    let withdraw: Option<Vec<reth_rpc_types::engine::payload::Withdrawal>> =
+        value.withdrawals.map(|withdrawals| {
+            withdrawals
+                .into_iter()
+                .map(convert_withdrawal_to_standalonewithdraw)
+                .collect::<Vec<_>>()
+        });
+    
ExecutionPayloadBodyV1 { transactions: transactions.collect(), withdrawals: withdraw } +} diff --git a/crates/rpc/rpc-types-compat/src/lib.rs b/crates/rpc/rpc-types-compat/src/lib.rs index b4628cfcbab1..3774a08374a9 100644 --- a/crates/rpc/rpc-types-compat/src/lib.rs +++ b/crates/rpc/rpc-types-compat/src/lib.rs @@ -19,3 +19,5 @@ pub mod block; pub use block::*; pub mod transaction; pub use transaction::*; +pub mod engine; +pub use engine::*; diff --git a/crates/rpc/rpc-types/Cargo.toml b/crates/rpc/rpc-types/Cargo.toml index 3cce6c1a8417..3dbe236b1540 100644 --- a/crates/rpc/rpc-types/Cargo.toml +++ b/crates/rpc/rpc-types/Cargo.toml @@ -23,6 +23,7 @@ serde = { workspace = true, features = ["derive"] } serde_json.workspace = true jsonrpsee-types = { workspace = true, optional = true } + [features] default = ["jsonrpsee-types"] @@ -30,3 +31,5 @@ default = ["jsonrpsee-types"] # misc rand.workspace = true similar-asserts = "1.4" + + diff --git a/crates/rpc/rpc-types/src/eth/engine/mod.rs b/crates/rpc/rpc-types/src/eth/engine/mod.rs index ad76be1e309d..86df72fb57b1 100644 --- a/crates/rpc/rpc-types/src/eth/engine/mod.rs +++ b/crates/rpc/rpc-types/src/eth/engine/mod.rs @@ -4,9 +4,8 @@ mod cancun; mod forkchoice; -mod payload; +pub mod payload; mod transition; - pub use self::{cancun::*, forkchoice::*, payload::*, transition::*}; /// The list of all supported Engine capabilities available over the engine endpoint. diff --git a/crates/rpc/rpc-types/src/eth/engine/payload.rs b/crates/rpc/rpc-types/src/eth/engine/payload.rs index 9b6dcea07209..b805b29454ff 100644 --- a/crates/rpc/rpc-types/src/eth/engine/payload.rs +++ b/crates/rpc/rpc-types/src/eth/engine/payload.rs @@ -1,11 +1,8 @@ +pub use crate::Withdrawal; use reth_primitives::{ - constants::{MAXIMUM_EXTRA_DATA_SIZE, MIN_PROTOCOL_BASE_FEE_U256}, kzg::{Blob, Bytes48}, - proofs::{self, EMPTY_LIST_HASH}, - Address, BlobTransactionSidecar, Block, Bloom, Bytes, Header, SealedBlock, TransactionSigned, - UintTryTo, Withdrawal, H256, H64, U256, U64, + Address, BlobTransactionSidecar, Bloom, Bytes, SealedBlock, H256, H64, U256, U64, }; -use reth_rlp::Decodable; use serde::{ser::SerializeMap, Deserialize, Serialize, Serializer}; /// The execution payload body response that allows for `null` values. @@ -60,26 +57,6 @@ impl ExecutionPayloadFieldV2 { } } -impl From for ExecutionPayloadFieldV2 { - fn from(value: SealedBlock) -> Self { - // if there are withdrawals, return V2 - if value.withdrawals.is_some() { - ExecutionPayloadFieldV2::V2(value.into()) - } else { - ExecutionPayloadFieldV2::V1(value.into()) - } - } -} - -impl From for ExecutionPayload { - fn from(value: ExecutionPayloadFieldV2) -> Self { - match value { - ExecutionPayloadFieldV2::V1(payload) => ExecutionPayload::V1(payload), - ExecutionPayloadFieldV2::V2(payload) => ExecutionPayload::V2(payload), - } - } -} - /// This is the input to `engine_newPayloadV2`, which may or may not have a withdrawals field. 
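 ///
 /// Unlike [ExecutionPayloadV2], the withdrawals here are optional because
 /// `engine_newPayloadV2` must also accept pre-shanghai payloads, which carry no withdrawals;
 /// the `convert_payload_input_v2_to_payload` helper introduced above maps the two cases onto
 /// [ExecutionPayload].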
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -92,27 +69,6 @@ pub struct ExecutionPayloadInputV2 { pub withdrawals: Option>, } -impl From for ExecutionPayload { - fn from(value: ExecutionPayloadInputV2) -> Self { - match value.withdrawals { - Some(withdrawals) => ExecutionPayload::V2(ExecutionPayloadV2 { - payload_inner: value.execution_payload, - withdrawals, - }), - None => ExecutionPayload::V1(value.execution_payload), - } - } -} - -impl From for ExecutionPayloadInputV2 { - fn from(value: SealedBlock) -> Self { - ExecutionPayloadInputV2 { - withdrawals: value.withdrawals.clone(), - execution_payload: value.into(), - } - } -} - /// This structure maps for the return value of `engine_getPayload` of the beacon chain spec, for /// V2. /// @@ -211,66 +167,6 @@ impl From for ExecutionPayloadV1 { } } -/// Try to construct a block from given payload. Perform addition validation of `extra_data` and -/// `base_fee_per_gas` fields. -/// -/// NOTE: The log bloom is assumed to be validated during serialization. -/// NOTE: Empty ommers, nonce and difficulty values are validated upon computing block hash and -/// comparing the value with `payload.block_hash`. -/// -/// See -impl TryFrom for Block { - type Error = PayloadError; - - fn try_from(payload: ExecutionPayloadV1) -> Result { - if payload.extra_data.len() > MAXIMUM_EXTRA_DATA_SIZE { - return Err(PayloadError::ExtraData(payload.extra_data)) - } - - if payload.base_fee_per_gas < MIN_PROTOCOL_BASE_FEE_U256 { - return Err(PayloadError::BaseFee(payload.base_fee_per_gas)) - } - - let transactions = payload - .transactions - .iter() - .map(|tx| TransactionSigned::decode(&mut tx.as_ref())) - .collect::, _>>()?; - let transactions_root = proofs::calculate_transaction_root(&transactions); - - let header = Header { - parent_hash: payload.parent_hash, - beneficiary: payload.fee_recipient, - state_root: payload.state_root, - transactions_root, - receipts_root: payload.receipts_root, - withdrawals_root: None, - logs_bloom: payload.logs_bloom, - number: payload.block_number.as_u64(), - gas_limit: payload.gas_limit.as_u64(), - gas_used: payload.gas_used.as_u64(), - timestamp: payload.timestamp.as_u64(), - mix_hash: payload.prev_randao, - base_fee_per_gas: Some( - payload - .base_fee_per_gas - .uint_try_to() - .map_err(|_| PayloadError::BaseFee(payload.base_fee_per_gas))?, - ), - blob_gas_used: None, - excess_blob_gas: None, - parent_beacon_block_root: None, - extra_data: payload.extra_data, - // Defaults - ommers_hash: EMPTY_LIST_HASH, - difficulty: Default::default(), - nonce: Default::default(), - }; - - Ok(Block { header, body: transactions, withdrawals: None, ommers: Default::default() }) - } -} - /// This structure maps on the ExecutionPayloadV2 structure of the beacon chain spec. 
/// /// See also: @@ -293,55 +189,6 @@ impl ExecutionPayloadV2 { } } -impl From for ExecutionPayloadV2 { - fn from(value: SealedBlock) -> Self { - let transactions = value - .body - .iter() - .map(|tx| { - let mut encoded = Vec::new(); - tx.encode_enveloped(&mut encoded); - encoded.into() - }) - .collect(); - - ExecutionPayloadV2 { - payload_inner: ExecutionPayloadV1 { - parent_hash: value.parent_hash, - fee_recipient: value.beneficiary, - state_root: value.state_root, - receipts_root: value.receipts_root, - logs_bloom: value.logs_bloom, - prev_randao: value.mix_hash, - block_number: value.number.into(), - gas_limit: value.gas_limit.into(), - gas_used: value.gas_used.into(), - timestamp: value.timestamp.into(), - extra_data: value.extra_data.clone(), - base_fee_per_gas: U256::from(value.base_fee_per_gas.unwrap_or_default()), - block_hash: value.hash(), - transactions, - }, - withdrawals: value.withdrawals.unwrap_or_default(), - } - } -} - -impl TryFrom for Block { - type Error = PayloadError; - - fn try_from(payload: ExecutionPayloadV2) -> Result { - // this performs the same conversion as the underlying V1 payload, but calculates the - // withdrawals root and adds withdrawals - let mut base_sealed_block = Block::try_from(payload.payload_inner)?; - - let withdrawals_root = proofs::calculate_withdrawals_root(&payload.withdrawals); - base_sealed_block.withdrawals = Some(payload.withdrawals); - base_sealed_block.header.withdrawals_root = Some(withdrawals_root); - Ok(base_sealed_block) - } -} - /// This structure maps on the ExecutionPayloadV3 structure of the beacon chain spec. /// /// See also: @@ -372,62 +219,6 @@ impl ExecutionPayloadV3 { } } -impl From for ExecutionPayloadV3 { - fn from(mut value: SealedBlock) -> Self { - let transactions = value - .body - .iter() - .map(|tx| { - let mut encoded = Vec::new(); - tx.encode_enveloped(&mut encoded); - encoded.into() - }) - .collect(); - - let withdrawals = value.withdrawals.take().unwrap_or_default(); - - ExecutionPayloadV3 { - payload_inner: ExecutionPayloadV2 { - payload_inner: ExecutionPayloadV1 { - parent_hash: value.parent_hash, - fee_recipient: value.beneficiary, - state_root: value.state_root, - receipts_root: value.receipts_root, - logs_bloom: value.logs_bloom, - prev_randao: value.mix_hash, - block_number: value.number.into(), - gas_limit: value.gas_limit.into(), - gas_used: value.gas_used.into(), - timestamp: value.timestamp.into(), - extra_data: value.extra_data.clone(), - base_fee_per_gas: U256::from(value.base_fee_per_gas.unwrap_or_default()), - block_hash: value.hash(), - transactions, - }, - withdrawals, - }, - - blob_gas_used: value.blob_gas_used.unwrap_or_default().into(), - excess_blob_gas: value.excess_blob_gas.unwrap_or_default().into(), - } - } -} - -impl TryFrom for Block { - type Error = PayloadError; - - fn try_from(payload: ExecutionPayloadV3) -> Result { - // this performs the same conversion as the underlying V2 payload, but inserts the blob gas - // used and excess blob gas - let mut base_block = Block::try_from(payload.payload_inner)?; - - base_block.header.blob_gas_used = Some(payload.blob_gas_used.as_u64()); - base_block.header.excess_blob_gas = Some(payload.excess_blob_gas.as_u64()); - - Ok(base_block) - } -} - /// This includes all bundled blob related data of an executed payload. 
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct BlobsBundleV1 { @@ -510,36 +301,6 @@ impl ExecutionPayload { } } } - - /// Tries to create a new block from the given payload and optional parent beacon block root. - /// Perform additional validation of `extra_data` and `base_fee_per_gas` fields. - /// - /// NOTE: The log bloom is assumed to be validated during serialization. - /// NOTE: Empty ommers, nonce and difficulty values are validated upon computing block hash and - /// comparing the value with `payload.block_hash`. - /// - /// See - pub fn try_into_sealed_block( - self, - parent_beacon_block_root: Option, - ) -> Result { - let block_hash = self.block_hash(); - let mut base_payload = match self { - ExecutionPayload::V1(payload) => Block::try_from(payload)?, - ExecutionPayload::V2(payload) => Block::try_from(payload)?, - ExecutionPayload::V3(payload) => Block::try_from(payload)?, - }; - - base_payload.header.parent_beacon_block_root = parent_beacon_block_root; - - let payload = base_payload.seal_slow(); - - if block_hash != payload.hash() { - return Err(PayloadError::BlockHash { execution: payload.hash(), consensus: block_hash }) - } - - Ok(payload) - } } impl From for ExecutionPayload { @@ -560,21 +321,6 @@ impl From for ExecutionPayload { } } -impl From for ExecutionPayload { - fn from(block: SealedBlock) -> Self { - if block.header.parent_beacon_block_root.is_some() { - // block with parent beacon block root: V3 - Self::V3(block.into()) - } else if block.withdrawals.is_some() { - // block with withdrawals: V2 - Self::V2(block.into()) - } else { - // otherwise V1 - Self::V1(block.into()) - } - } -} - /// Error that can occur when handling payloads. #[derive(thiserror::Error, Debug)] pub enum PayloadError { @@ -626,20 +372,6 @@ pub struct ExecutionPayloadBodyV1 { pub withdrawals: Option>, } -impl From for ExecutionPayloadBodyV1 { - fn from(value: Block) -> Self { - let transactions = value.body.into_iter().map(|tx| { - let mut out = Vec::new(); - tx.encode_enveloped(&mut out); - out.into() - }); - ExecutionPayloadBodyV1 { - transactions: transactions.collect(), - withdrawals: value.withdrawals, - } - } -} - /// This structure contains the attributes required to initiate a payload build process in the /// context of an `engine_forkchoiceUpdated` call. #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] diff --git a/crates/rpc/rpc-types/src/eth/mod.rs b/crates/rpc/rpc-types/src/eth/mod.rs index ae249e197280..81d19d07d478 100644 --- a/crates/rpc/rpc-types/src/eth/mod.rs +++ b/crates/rpc/rpc-types/src/eth/mod.rs @@ -15,15 +15,18 @@ mod syncing; pub mod trace; mod transaction; pub mod txpool; +mod withdrawal; mod work; pub use account::*; pub use block::*; pub use call::{Bundle, CallInput, CallInputError, CallRequest, EthCallResponse, StateContext}; +pub use engine::{ExecutionPayload, PayloadError}; pub use fee::{FeeHistory, TxGasAndReward}; pub use filter::*; pub use index::Index; pub use log::Log; pub use syncing::*; pub use transaction::*; +pub use withdrawal::Withdrawal; pub use work::Work; diff --git a/crates/rpc/rpc-types/src/eth/withdrawal.rs b/crates/rpc/rpc-types/src/eth/withdrawal.rs new file mode 100644 index 000000000000..41314ebb5ade --- /dev/null +++ b/crates/rpc/rpc-types/src/eth/withdrawal.rs @@ -0,0 +1,40 @@ +use reth_primitives::{constants::GWEI_TO_WEI, serde_helper::u64_hex, Address, U256}; +use reth_rlp::RlpEncodable; +use serde::{Deserialize, Serialize}; +/// Withdrawal represents a validator withdrawal from the consensus layer. 
+#[derive(Debug, Clone, PartialEq, Eq, Default, Hash, RlpEncodable, Serialize, Deserialize)]
+pub struct Withdrawal {
+    /// Monotonically increasing identifier issued by consensus layer.
+    #[serde(with = "u64_hex")]
+    pub index: u64,
+    /// Index of validator associated with withdrawal.
+    #[serde(with = "u64_hex", rename = "validatorIndex")]
+    pub validator_index: u64,
+    /// Target address for withdrawn ether.
+    pub address: Address,
+    /// Value of the withdrawal in gwei.
+    #[serde(with = "u64_hex")]
+    pub amount: u64,
+}
+
+impl Withdrawal {
+    /// Return the withdrawal amount in wei.
+    pub fn amount_wei(&self) -> U256 {
+        U256::from(self.amount) * U256::from(GWEI_TO_WEI)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    //
+    #[test]
+    fn test_withdrawal_serde_roundtrip() {
+        let input = r#"[{"index":"0x0","validatorIndex":"0x0","address":"0x0000000000000000000000000000000000001000","amount":"0x1"},{"index":"0x1","validatorIndex":"0x1","address":"0x0000000000000000000000000000000000001001","amount":"0x1"},{"index":"0x2","validatorIndex":"0x2","address":"0x0000000000000000000000000000000000001002","amount":"0x1"},{"index":"0x3","validatorIndex":"0x3","address":"0x0000000000000000000000000000000000001003","amount":"0x1"},{"index":"0x4","validatorIndex":"0x4","address":"0x0000000000000000000000000000000000001004","amount":"0x1"},{"index":"0x5","validatorIndex":"0x5","address":"0x0000000000000000000000000000000000001005","amount":"0x1"},{"index":"0x6","validatorIndex":"0x6","address":"0x0000000000000000000000000000000000001006","amount":"0x1"},{"index":"0x7","validatorIndex":"0x7","address":"0x0000000000000000000000000000000000001007","amount":"0x1"},{"index":"0x8","validatorIndex":"0x8","address":"0x0000000000000000000000000000000000001008","amount":"0x1"},{"index":"0x9","validatorIndex":"0x9","address":"0x0000000000000000000000000000000000001009","amount":"0x1"},{"index":"0xa","validatorIndex":"0xa","address":"0x000000000000000000000000000000000000100a","amount":"0x1"},{"index":"0xb","validatorIndex":"0xb","address":"0x000000000000000000000000000000000000100b","amount":"0x1"},{"index":"0xc","validatorIndex":"0xc","address":"0x000000000000000000000000000000000000100c","amount":"0x1"},{"index":"0xd","validatorIndex":"0xd","address":"0x000000000000000000000000000000000000100d","amount":"0x1"},{"index":"0xe","validatorIndex":"0xe","address":"0x000000000000000000000000000000000000100e","amount":"0x1"},{"index":"0xf","validatorIndex":"0xf","address":"0x000000000000000000000000000000000000100f","amount":"0x1"}]"#;
+
+        let withdrawals: Vec<Withdrawal> = serde_json::from_str(input).unwrap();
+        let s = serde_json::to_string(&withdrawals).unwrap();
+        assert_eq!(input, s);
+    }
+}

From 449a9c002369db892a552e8d0ba3a2424866e5ad Mon Sep 17 00:00:00 2001
From: Dan Cline <6798349+Rjected@users.noreply.github.com>
Date: Tue, 19 Sep 2023 16:07:54 -0400
Subject: [PATCH 713/722] fix: deny unknown payload fields (#4667)

---
 .../rpc/rpc-types/src/eth/engine/payload.rs   | 119 +++++++++++++++++-
 1 file changed, 116 insertions(+), 3 deletions(-)

diff --git a/crates/rpc/rpc-types/src/eth/engine/payload.rs b/crates/rpc/rpc-types/src/eth/engine/payload.rs
index b805b29454ff..de75f3967eb9 100644
--- a/crates/rpc/rpc-types/src/eth/engine/payload.rs
+++ b/crates/rpc/rpc-types/src/eth/engine/payload.rs
@@ -59,7 +59,7 @@ impl ExecutionPayloadFieldV2 {
 
 /// This is the input to `engine_newPayloadV2`, which may or may not have a withdrawals field.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] +#[serde(rename_all = "camelCase", deny_unknown_fields)] pub struct ExecutionPayloadInputV2 { /// The V1 execution payload #[serde(flatten)] @@ -119,7 +119,7 @@ pub struct ExecutionPayloadEnvelopeV3 { /// /// See also: #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] +#[serde(rename_all = "camelCase", deny_unknown_fields)] pub struct ExecutionPayloadV1 { pub parent_hash: H256, pub fee_recipient: Address, @@ -171,7 +171,7 @@ impl From for ExecutionPayloadV1 { /// /// See also: #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] +#[serde(rename_all = "camelCase", deny_unknown_fields)] pub struct ExecutionPayloadV2 { /// Inner V1 payload #[serde(flatten)] @@ -732,4 +732,117 @@ mod tests { let payload: ExecutionPayloadInputV2 = serde_json::from_str(response).unwrap(); assert_eq!(payload.withdrawals, None); } + + #[test] + fn serde_deserialize_v3_with_unknown_fields() { + let input = r#" +{ + "parentHash": "0xaaa4c5b574f37e1537c78931d1bca24a4d17d4f29f1ee97e1cd48b704909de1f", + "feeRecipient": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "stateRoot": "0x308ee9c5c6fab5e3d08763a3b5fe0be8ada891fa5010a49a3390e018dd436810", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "prevRandao": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0xf", + "gasLimit": "0x16345785d8a0000", + "gasUsed": "0x0", + "timestamp": "0x3a97", + "extraData": "0x", + "baseFeePerGas": "0x7", + "blockHash": "0x38bb6ba645c7e6bd970f9c7d492fafe1e04d85349054cb48d16c9d2c3e3cd0bf", + "transactions": [], + "withdrawals": [], + "excessBlobGas": "0x0", + "blobGasUsed": "0x0" +} + "#; + + // ensure that deserializing this succeeds + let _payload_res: ExecutionPayloadV3 = serde_json::from_str(input).unwrap(); + + // construct a payload with a random field in the middle + let input = r#" +{ + "parentHash": "0xaaa4c5b574f37e1537c78931d1bca24a4d17d4f29f1ee97e1cd48b704909de1f", + "feeRecipient": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "stateRoot": "0x308ee9c5c6fab5e3d08763a3b5fe0be8ada891fa5010a49a3390e018dd436810", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "prevRandao": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0xf", + 
"gasLimit": "0x16345785d8a0000", + "gasUsed": "0x0", + "timestamp": "0x3a97", + "extraData": "0x", + "baseFeePerGas": "0x7", + "blockHash": "0x38bb6ba645c7e6bd970f9c7d492fafe1e04d85349054cb48d16c9d2c3e3cd0bf", + "transactions": [], + "withdrawals": [], + "randomStuff": [], + "excessBlobGas": "0x0", + "blobGasUsed": "0x0" +} + "#; + + // ensure that deserializing this fails + let _payload_res = serde_json::from_str::(input).unwrap_err(); + + // construct a payload with a random field at the end + let input = r#" +{ + "parentHash": "0xaaa4c5b574f37e1537c78931d1bca24a4d17d4f29f1ee97e1cd48b704909de1f", + "feeRecipient": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "stateRoot": "0x308ee9c5c6fab5e3d08763a3b5fe0be8ada891fa5010a49a3390e018dd436810", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "prevRandao": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0xf", + "gasLimit": "0x16345785d8a0000", + "gasUsed": "0x0", + "timestamp": "0x3a97", + "extraData": "0x", + "baseFeePerGas": "0x7", + "blockHash": "0x38bb6ba645c7e6bd970f9c7d492fafe1e04d85349054cb48d16c9d2c3e3cd0bf", + "transactions": [], + "withdrawals": [], + "randomStuff": [], + "excessBlobGas": "0x0", + "blobGasUsed": "0x0" + "moreRandomStuff": "0x0", +} + "#; + + // ensure that deserializing this fails + let _payload_res = serde_json::from_str::(input).unwrap_err(); + } + + #[test] + fn serde_deserialize_v2_input_with_blob_fields() { + let input = r#" +{ + "parentHash": "0xaaa4c5b574f37e1537c78931d1bca24a4d17d4f29f1ee97e1cd48b704909de1f", + "feeRecipient": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "stateRoot": "0x308ee9c5c6fab5e3d08763a3b5fe0be8ada891fa5010a49a3390e018dd436810", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "prevRandao": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0xf", + "gasLimit": "0x16345785d8a0000", + "gasUsed": "0x0", + "timestamp": "0x3a97", + "extraData": "0x", + "baseFeePerGas": "0x7", + "blockHash": "0x38bb6ba645c7e6bd970f9c7d492fafe1e04d85349054cb48d16c9d2c3e3cd0bf", + "transactions": [], + "withdrawals": [], + "excessBlobGas": "0x0", + "blobGasUsed": "0x0" +} + "#; + + // ensure that deserializing this (it includes blob fields) fails + let payload_res: Result = + serde_json::from_str(input); + assert!(payload_res.is_err()); + } } From ad10a17c29a35229b0796fba38d39307e9a39d12 Mon Sep 17 
00:00:00 2001 From: Matthias Seitz Date: Wed, 20 Sep 2023 01:07:00 +0200 Subject: [PATCH 714/722] fix: set block env excess blob gas (#4672) --- crates/revm/revm-primitives/src/env.rs | 3 +++ crates/revm/src/processor.rs | 6 +++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/crates/revm/revm-primitives/src/env.rs b/crates/revm/revm-primitives/src/env.rs index 721a46e2d2c6..9257d0450223 100644 --- a/crates/revm/revm-primitives/src/env.rs +++ b/crates/revm/revm-primitives/src/env.rs @@ -73,6 +73,9 @@ pub fn fill_block_env_with_coinbase( } block_env.basefee = U256::from(header.base_fee_per_gas.unwrap_or_default()); block_env.gas_limit = U256::from(header.gas_limit); + + // EIP-4844 excess blob gas of this block, introduced in Cancun + block_env.excess_blob_gas = header.excess_blob_gas; } /// Return the coinbase address for the given header and chain spec. diff --git a/crates/revm/src/processor.rs b/crates/revm/src/processor.rs index 183e603d679e..4fcfef527536 100644 --- a/crates/revm/src/processor.rs +++ b/crates/revm/src/processor.rs @@ -655,7 +655,8 @@ mod tests { #[test] fn eip_4788_non_genesis_call() { - let mut header = Header { timestamp: 1, number: 1, ..Header::default() }; + let mut header = + Header { timestamp: 1, number: 1, excess_blob_gas: Some(0), ..Header::default() }; let mut db = StateProviderTest::default(); @@ -742,6 +743,7 @@ mod tests { timestamp: 1, number: 1, parent_beacon_block_root: Some(H256::from_low_u64_be(0x1337)), + excess_blob_gas: Some(0), ..Header::default() }; @@ -814,6 +816,7 @@ mod tests { timestamp: 1, number: 1, parent_beacon_block_root: Some(H256::from_low_u64_be(0x1337)), + excess_blob_gas: Some(0), ..Header::default() }; @@ -913,6 +916,7 @@ mod tests { number: 1, parent_beacon_block_root: Some(H256::from_low_u64_be(0x1337)), base_fee_per_gas: Some(u64::MAX), + excess_blob_gas: Some(0), ..Header::default() }; From 24d7bdeafd5be1f4c1c6b0678d2b70f256c75441 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 20 Sep 2023 01:38:12 +0200 Subject: [PATCH 715/722] fix: convert SpecId::Cancun to cancun precompile id (#4675) --- crates/rpc/rpc/src/eth/revm_utils.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/rpc/rpc/src/eth/revm_utils.rs b/crates/rpc/rpc/src/eth/revm_utils.rs index 751627d52be5..34dba8c40ba9 100644 --- a/crates/rpc/rpc/src/eth/revm_utils.rs +++ b/crates/rpc/rpc/src/eth/revm_utils.rs @@ -107,8 +107,8 @@ pub(crate) fn get_precompiles(spec_id: &SpecId) -> Vec { SpecId::ARROW_GLACIER | SpecId::GRAY_GLACIER | SpecId::MERGE | - SpecId::SHANGHAI | - SpecId::CANCUN => PrecompilesSpecId::BERLIN, + SpecId::SHANGHAI => PrecompilesSpecId::BERLIN, + SpecId::CANCUN => PrecompilesSpecId::CANCUN, SpecId::LATEST => PrecompilesSpecId::LATEST, }; Precompiles::new(spec).addresses().into_iter().map(Address::from).collect() From a964a7e89815f49e4e34b75a657f5e7d43e315e0 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Wed, 20 Sep 2023 08:51:13 +0200 Subject: [PATCH 716/722] feat(net): add helper functions to `TransactionsHandle` implementation (#4668) --- crates/net/network/src/transactions.rs | 44 ++++++++++++++++++++++++-- 1 file changed, 42 insertions(+), 2 deletions(-) diff --git a/crates/net/network/src/transactions.rs b/crates/net/network/src/transactions.rs index d765436c5ce5..16ab7252dab9 100644 --- a/crates/net/network/src/transactions.rs +++ b/crates/net/network/src/transactions.rs @@ -64,8 +64,7 @@ pub struct TransactionsHandle { manager_tx: 
mpsc::UnboundedSender<TransactionsCommand>,
 }
 
-// === impl TransactionsHandle ===
-
+/// Implementation of the `TransactionsHandle` API.
 impl TransactionsHandle {
     fn send(&self, cmd: TransactionsCommand) {
         let _ = self.manager_tx.send(cmd);
@@ -75,6 +74,31 @@ impl TransactionsHandle {
     pub fn propagate(&self, hash: TxHash) {
         self.send(TransactionsCommand::PropagateHash(hash))
     }
+
+    /// Manually propagate the transaction that belongs to the hash to a specific peer.
+    pub fn propagate_hash_to(&self, hash: TxHash, peer: PeerId) {
+        self.send(TransactionsCommand::PropagateHashTo(hash, peer))
+    }
+
+    /// Request the active peer IDs from the [`TransactionsManager`].
+    pub fn get_active_peers(&self) {
+        self.send(TransactionsCommand::GetActivePeers)
+    }
+
+    /// Manually propagate full transactions to a specific peer.
+    pub fn propagate_transactions_to(&self, transactions: Vec<TxHash>, peer: PeerId) {
+        self.send(TransactionsCommand::PropagateTransactionsTo(transactions, peer))
+    }
+
+    /// Request the transaction hashes known by specific peers.
+    pub fn get_transaction_hashes(&self, peers: Vec<PeerId>) {
+        self.send(TransactionsCommand::GetTransactionHashes(peers))
+    }
+
+    /// Request the transaction hashes known by a specific peer.
+    pub fn get_peer_transaction_hashes(&self, peer: PeerId) {
+        self.send(TransactionsCommand::GetPeerTransactionHashes(peer))
+    }
 }
 
 /// Manages transactions on top of the p2p network.
@@ -404,6 +428,11 @@ where
     fn on_command(&mut self, cmd: TransactionsCommand) {
         match cmd {
             TransactionsCommand::PropagateHash(hash) => self.on_new_transactions(vec![hash]),
+            TransactionsCommand::PropagateHashTo(_hash, _peer) => todo!(),
+            TransactionsCommand::GetActivePeers => todo!(),
+            TransactionsCommand::PropagateTransactionsTo(_txs, _peer) => todo!(),
+            TransactionsCommand::GetTransactionHashes(_peers) => todo!(),
+            TransactionsCommand::GetPeerTransactionHashes(_peer) => todo!(),
         }
     }
 
@@ -829,7 +858,18 @@ struct Peer {
 
 /// Commands to send to the [`TransactionsManager`]
 enum TransactionsCommand {
+    /// Propagate a transaction hash to the network.
     PropagateHash(H256),
+    /// Propagate a transaction hash to a specific peer.
+    PropagateHashTo(H256, PeerId),
+    /// Request the list of active peer IDs from the [`TransactionsManager`].
+    GetActivePeers,
+    /// Propagate a collection of full transactions to a specific peer.
+    PropagateTransactionsTo(Vec<TxHash>, PeerId),
+    /// Request transaction hashes known by specific peers from the [`TransactionsManager`].
+    GetTransactionHashes(Vec<PeerId>),
+    /// Request transaction hashes known by a specific peer from the [`TransactionsManager`].
+    GetPeerTransactionHashes(PeerId),
 }
 
 /// All events related to transactions emitted by the network.
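The helpers added in the patch above are fire-and-forget: each one serializes a `TransactionsCommand` into an unbounded channel that the `TransactionsManager` drains in its event loop, and the new command arms are still `todo!()` at this point. A minimal, self-contained sketch of that command-channel pattern, assuming only tokio's mpsc primitives; every name here other than the pattern itself is an illustrative stand-in, not a reth API:

// Sketch: a cloneable handle that forwards commands over an unbounded mpsc
// channel to a single manager task, mirroring the TransactionsHandle design.
use tokio::sync::mpsc;

#[derive(Debug)]
enum Command {
    PropagateHash([u8; 32]), // stand-in for a TxHash/H256
    GetActivePeers,
}

#[derive(Clone)]
struct Handle {
    manager_tx: mpsc::UnboundedSender<Command>,
}

impl Handle {
    // fire-and-forget: a send error only means the manager task has shut down
    fn send(&self, cmd: Command) {
        let _ = self.manager_tx.send(cmd);
    }
}

#[tokio::main]
async fn main() {
    let (manager_tx, mut rx) = mpsc::unbounded_channel();
    let handle = Handle { manager_tx };

    handle.send(Command::PropagateHash([0u8; 32]));
    handle.send(Command::GetActivePeers);
    drop(handle); // close the channel so the loop below terminates

    // manager side: drain commands in an event loop
    while let Some(cmd) = rx.recv().await {
        println!("manager received: {cmd:?}");
    }
}

Note that, as written, the query-style helpers (`get_active_peers`, `get_transaction_hashes`) have no way to return data to the caller; carrying a oneshot response sender inside the command is the usual way to close that loop once the `todo!()` handlers are filled in.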
From a979f5b661fed8a9a19647535b566154498e29af Mon Sep 17 00:00:00 2001
From: Alessandro Mazza <121622391+alessandromazza98@users.noreply.github.com>
Date: Wed, 20 Sep 2023 08:54:21 +0200
Subject: [PATCH 717/722] add `TransactionsProvider` implementation for `MockEthProvider` (#4656)

---
 .../storage/provider/src/test_utils/mock.rs   | 99 ++++++++++++++++---
 1 file changed, 84 insertions(+), 15 deletions(-)

diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs
index 11e690715601..4ea1597edda3 100644
--- a/crates/storage/provider/src/test_utils/mock.rs
+++ b/crates/storage/provider/src/test_utils/mock.rs
@@ -181,16 +181,33 @@ impl ChainSpecProvider for MockEthProvider {
 }
 
 impl TransactionsProvider for MockEthProvider {
-    fn transaction_id(&self, _tx_hash: TxHash) -> Result<Option<TxNumber>> {
-        todo!()
+    fn transaction_id(&self, tx_hash: TxHash) -> Result<Option<TxNumber>> {
+        let lock = self.blocks.lock();
+        let tx_number = lock
+            .values()
+            .flat_map(|block| &block.body)
+            .position(|tx| tx.hash() == tx_hash)
+            .map(|pos| pos as TxNumber);
+
+        Ok(tx_number)
     }
 
-    fn transaction_by_id(&self, _id: TxNumber) -> Result<Option<TransactionSigned>> {
-        Ok(None)
+    fn transaction_by_id(&self, id: TxNumber) -> Result<Option<TransactionSigned>> {
+        let lock = self.blocks.lock();
+        let transaction = lock.values().flat_map(|block| &block.body).nth(id as usize).cloned();
+
+        Ok(transaction)
     }
 
-    fn transaction_by_id_no_hash(&self, _id: TxNumber) -> Result<Option<TransactionSignedNoHash>> {
-        Ok(None)
+    fn transaction_by_id_no_hash(&self, id: TxNumber) -> Result<Option<TransactionSignedNoHash>> {
+        let lock = self.blocks.lock();
+        let transaction = lock
+            .values()
+            .flat_map(|block| &block.body)
+            .nth(id as usize)
+            .map(|tx| Into::<TransactionSignedNoHash>::into(tx.clone()));
+
+        Ok(transaction)
     }
 
     fn transaction_by_hash(&self, hash: TxHash) -> Result<Option<TransactionSigned>> {
@@ -203,13 +220,37 @@ impl TransactionsProvider for MockEthProvider {
 
     fn transaction_by_hash_with_meta(
         &self,
-        _hash: TxHash,
+        hash: TxHash,
     ) -> Result<Option<(TransactionSigned, TransactionMeta)>> {
+        let lock = self.blocks.lock();
+        for (block_hash, block) in lock.iter() {
+            for (index, tx) in block.body.iter().enumerate() {
+                if tx.hash() == hash {
+                    let meta = TransactionMeta {
+                        tx_hash: hash,
+                        index: index as u64,
+                        block_hash: *block_hash,
+                        block_number: block.header.number,
+                        base_fee: block.header.base_fee_per_gas,
+                        excess_blob_gas: block.header.excess_blob_gas,
+                    };
+                    return Ok(Some((tx.clone(), meta)))
+                }
+            }
+        }
         Ok(None)
     }
 
-    fn transaction_block(&self, _id: TxNumber) -> Result<Option<BlockNumber>> {
-        unimplemented!()
+    fn transaction_block(&self, id: TxNumber) -> Result<Option<BlockNumber>> {
+        let lock = self.blocks.lock();
+        let mut current_tx_number: TxNumber = 0;
+        for block in lock.values() {
+            if current_tx_number + (block.body.len() as TxNumber) > id {
+                return Ok(Some(block.header.number))
+            }
+            current_tx_number += block.body.len() as TxNumber;
+        }
+        Ok(None)
    }
 
     fn transactions_by_block(
@@ -236,17 +277,45 @@ impl TransactionsProvider for MockEthProvider {
 
     fn transactions_by_tx_range(
         &self,
-        _range: impl RangeBounds<TxNumber>,
+        range: impl RangeBounds<TxNumber>,
     ) -> Result<Vec<TransactionSignedNoHash>> {
-        unimplemented!()
+        let lock = self.blocks.lock();
+        let transactions = lock
+            .values()
+            .flat_map(|block| &block.body)
+            .enumerate()
+            .filter_map(|(tx_number, tx)| {
+                if range.contains(&(tx_number as TxNumber)) {
+                    Some(tx.clone().into())
+                } else {
+                    None
+                }
+            })
+            .collect();
+
+        Ok(transactions)
    }
 
-    fn senders_by_tx_range(&self, _range: impl RangeBounds<TxNumber>) -> Result<Vec<Address>> {
-        unimplemented!()
+    fn senders_by_tx_range(&self, range: impl RangeBounds<TxNumber>) -> Result<Vec<Address>> {
+        let lock = self.blocks.lock();
+        let transactions = lock
+            .values()
+            .flat_map(|block| &block.body)
+            .enumerate()
+            .filter_map(|(tx_number, tx)| {
+                if range.contains(&(tx_number as TxNumber)) {
+                    Some(tx.recover_signer()?)
+                } else {
+                    None
+                }
+            })
+            .collect();
+
+        Ok(transactions)
    }
 
-    fn transaction_sender(&self, _id: TxNumber) -> Result<Option<Address>> {
-        unimplemented!()
+    fn transaction_sender(&self, id: TxNumber) -> Result<Option<Address>> {
+        self.transaction_by_id(id).map(|tx_option| tx_option.map(|tx| tx.recover_signer().unwrap()))
    }
 }
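The mock's `transaction_id` and `transaction_block` implementations above rely on the same convention: a global `TxNumber` is simply the position of a transaction when all block bodies are walked in order. A small sketch of that counting logic in isolation, with simplified stand-in types (the real provider walks a locked block map, which this ignores):

// Sketch: mapping a global transaction number to the block that contains it
// by accumulating body lengths, as in the mock's `transaction_block`.
type TxNumber = u64;

struct Block {
    body: Vec<&'static str>, // stand-in for signed transactions
}

fn transaction_block(blocks: &[Block], id: TxNumber) -> Option<usize> {
    let mut current_tx_number: TxNumber = 0;
    for (block_idx, block) in blocks.iter().enumerate() {
        // the target id falls inside this block's half-open range of tx numbers
        if current_tx_number + block.body.len() as TxNumber > id {
            return Some(block_idx)
        }
        current_tx_number += block.body.len() as TxNumber;
    }
    None
}

fn main() {
    let blocks = vec![Block { body: vec!["tx0", "tx1"] }, Block { body: vec!["tx2"] }];
    assert_eq!(transaction_block(&blocks, 2), Some(1)); // third tx lives in block 1
    assert_eq!(transaction_block(&blocks, 3), None); // out of range
}

One caveat worth keeping in mind: the mock appears to derive this ordering by iterating a map's values, so the numbering is only as stable as that map's iteration order, which is acceptable for a test utility but not for a real provider.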
From 0fbc3f6c94efc2f23321c1c0f85ecf26187422fb Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Wed, 20 Sep 2023 08:56:48 +0200
Subject: [PATCH 718/722] chore: shorten info interval (#4666)

---
 bin/reth/src/node/events.rs | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/bin/reth/src/node/events.rs b/bin/reth/src/node/events.rs
index d6f835db6b5f..9a52ac7ae653 100644
--- a/bin/reth/src/node/events.rs
+++ b/bin/reth/src/node/events.rs
@@ -21,7 +21,7 @@ use tokio::time::Interval;
 use tracing::{info, warn};
 
 /// Interval of reporting node state.
-const INFO_MESSAGE_INTERVAL: Duration = Duration::from_secs(30);
+const INFO_MESSAGE_INTERVAL: Duration = Duration::from_secs(25);
 
 /// The current high-level state of the node.
 struct NodeState {
@@ -199,7 +199,8 @@ pub async fn handle_events(
 {
     let state = NodeState::new(network, latest_block_number);
 
-    let mut info_interval = tokio::time::interval(INFO_MESSAGE_INTERVAL);
+    let start = tokio::time::Instant::now() + Duration::from_secs(3);
+    let mut info_interval = tokio::time::interval_at(start, INFO_MESSAGE_INTERVAL);
     info_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
 
     let handler = EventHandler { state, events, info_interval };

From ec4b302079b6a6042002b5209a91c5c42939f451 Mon Sep 17 00:00:00 2001
From: Dan Cline <6798349+Rjected@users.noreply.github.com>
Date: Wed, 20 Sep 2023 02:57:06 -0400
Subject: [PATCH 719/722] feat: add pyspec cancun tests to hive workflow (#4677)

---
 .github/workflows/hive.yml | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml
index a921f4b94767..2e527e31b0a2 100644
--- a/.github/workflows/hive.yml
+++ b/.github/workflows/hive.yml
@@ -103,6 +103,26 @@ jobs:
         - sim: ethereum/rpc-compat
           include: [debug_]
           experimental: true
+        # Pyspec cancun jobs
+        - sim: pyspec
+          include: [cancun/eip4844]
+          experimental: true
+        - sim: pyspec
+          include: [cancun/eip4788]
+          experimental: true
+        - sim: pyspec
+          include: [cancun/eip6780]
+          experimental: true
+        - sim: pyspec
+          include: [cancun/eip5656]
+          experimental: true
+        - sim: pyspec
+          include: [cancun/eip1153]
+          experimental: true
+        # TODO: uncomment once there are hive tests for EIP-7516
+        # - sim: pyspec
+        #   include: [cancun/eip7516]
+        #   experimental: true
       fail-fast: false
     needs: prepare
     name: run

From 24a8590e69e728517e4242e5c98115630426511f Mon Sep 17 00:00:00 2001
From: Dan Cline <6798349+Rjected@users.noreply.github.com>
Date: Wed, 20 Sep 2023 02:58:48 -0400
Subject: [PATCH 720/722] feat: validate blob_gas_used in header (#4676)

---
 crates/consensus/common/src/validation.rs     | 78 ++++++++++++++++++-
 crates/interfaces/src/consensus.rs            |  2 +
 .../rpc/rpc-types/src/eth/engine/payload.rs   |  4 +-
 3 files changed, 79 insertions(+), 5 deletions(-)

diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs
index d7feec42c972..075cd2872abb 100644
--- a/crates/consensus/common/src/validation.rs
+++ b/crates/consensus/common/src/validation.rs
@@ -247,6 +247,21 @@ pub fn validate_block_standalone(
         }
     }
 
+    // EIP-4844: Shard 
Blob Transactions + if chain_spec.is_cancun_activated_at_timestamp(block.timestamp) { + // Check that the blob gas used in the header matches the sum of the blob gas used by each + // blob tx + let header_blob_gas_used = block.blob_gas_used.ok_or(ConsensusError::BlobGasUsedMissing)?; + let total_blob_gas = + block.blob_transactions().iter().filter_map(|tx| tx.blob_gas_used()).sum(); + if total_blob_gas != header_blob_gas_used { + return Err(ConsensusError::BlobGasUsedDiff { + header_blob_gas_used, + expected_blob_gas_used: total_blob_gas, + }) + } + } + Ok(()) } @@ -491,9 +506,9 @@ mod tests { use mockall::mock; use reth_interfaces::{Error::Consensus, Result}; use reth_primitives::{ - hex_literal::hex, proofs, Account, Address, BlockHash, BlockHashOrNumber, Bytes, - ChainSpecBuilder, Header, Signature, TransactionKind, TransactionSigned, Withdrawal, - MAINNET, U256, + constants::eip4844::DATA_GAS_PER_BLOB, hex_literal::hex, proofs, Account, Address, + BlockBody, BlockHash, BlockHashOrNumber, Bytes, ChainSpecBuilder, Header, Signature, + TransactionKind, TransactionSigned, Withdrawal, H256, MAINNET, U256, }; use std::ops::RangeBounds; @@ -615,6 +630,26 @@ mod tests { TransactionSignedEcRecovered::from_signed_transaction(tx, signer) } + fn mock_blob_tx(nonce: u64, num_blobs: usize) -> TransactionSigned { + let request = Transaction::Eip4844(TxEip4844 { + chain_id: 1u64, + nonce, + max_fee_per_gas: 0x28f000fff, + max_priority_fee_per_gas: 0x28f000fff, + max_fee_per_blob_gas: 0x7, + gas_limit: 10, + to: TransactionKind::Call(Address::default()), + value: 3, + input: Bytes::from(vec![1, 2]), + access_list: Default::default(), + blob_versioned_hashes: vec![H256::random(); num_blobs], + }); + + let signature = Signature { odd_y_parity: true, r: U256::default(), s: U256::default() }; + + TransactionSigned::from_transaction_and_signature(request, signature) + } + /// got test block fn mock_block() -> (SealedBlock, Header) { // https://etherscan.io/block/15867168 where transaction root and receipts root are cleared @@ -822,4 +857,41 @@ mod tests { assert_eq!(validate_header_standalone(&header, &chain_spec), Ok(())); } + + #[test] + fn cancun_block_incorrect_blob_gas_used() { + let chain_spec = ChainSpecBuilder::mainnet().cancun_activated().build(); + + // create a tx with 10 blobs + let transaction = mock_blob_tx(1, 10); + + let header = Header { + base_fee_per_gas: Some(1337u64), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(1), + transactions_root: proofs::calculate_transaction_root(&[transaction.clone()]), + ..Default::default() + } + .seal_slow(); + + let body = BlockBody { + transactions: vec![transaction], + ommers: vec![], + withdrawals: Some(vec![]), + }; + + let block = SealedBlock::new(header, body); + + // 10 blobs times the blob gas per blob + let expected_blob_gas_used = 10 * DATA_GAS_PER_BLOB; + + // validate blob, it should fail blob gas used validation + assert_eq!( + validate_block_standalone(&block, &chain_spec), + Err(ConsensusError::BlobGasUsedDiff { + header_blob_gas_used: 1, + expected_blob_gas_used + }) + ); + } } diff --git a/crates/interfaces/src/consensus.rs b/crates/interfaces/src/consensus.rs index 9274cdcbe52f..5bc212bd77f4 100644 --- a/crates/interfaces/src/consensus.rs +++ b/crates/interfaces/src/consensus.rs @@ -153,6 +153,8 @@ pub enum ConsensusError { "Blob gas used {blob_gas_used} is not a multiple of blob gas per blob {blob_gas_per_blob}" )] BlobGasUsedNotMultipleOfBlobGasPerBlob { blob_gas_used: u64, blob_gas_per_blob: 
u64 }, + #[error("Blob gas used in the header {header_blob_gas_used} does not match the expected blob gas used {expected_blob_gas_used}")] + BlobGasUsedDiff { header_blob_gas_used: u64, expected_blob_gas_used: u64 }, #[error("Invalid excess blob gas. Expected: {expected}, got: {got}. Parent excess blob gas: {parent_excess_blob_gas}, parent blob gas used: {parent_blob_gas_used}.")] ExcessBlobGasDiff { expected: u64, diff --git a/crates/rpc/rpc-types/src/eth/engine/payload.rs b/crates/rpc/rpc-types/src/eth/engine/payload.rs index de75f3967eb9..b9f281766954 100644 --- a/crates/rpc/rpc-types/src/eth/engine/payload.rs +++ b/crates/rpc/rpc-types/src/eth/engine/payload.rs @@ -330,10 +330,10 @@ pub enum PayloadError { /// Invalid payload base fee. #[error("Invalid payload base fee: {0}")] BaseFee(U256), - /// Invalid payload base fee. + /// Invalid payload blob gas used. #[error("Invalid payload blob gas used: {0}")] BlobGasUsed(U256), - /// Invalid payload base fee. + /// Invalid payload excess blob gas. #[error("Invalid payload excess blob gas: {0}")] ExcessBlobGas(U256), /// Invalid payload block hash. From afbe88f5837acfc8f5bc11deb8a181502691d3b7 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Wed, 20 Sep 2023 09:25:54 +0200 Subject: [PATCH 721/722] feat(error): revamp `make_canonical` error (#3899) Co-authored-by: Matthias Seitz --- crates/blockchain-tree/src/blockchain_tree.rs | 14 +++---- crates/consensus/beacon/src/engine/mod.rs | 14 +++---- .../interfaces/src/blockchain_tree/error.rs | 38 +++++++++++++++++++ crates/interfaces/src/error.rs | 3 ++ 4 files changed, 55 insertions(+), 14 deletions(-) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 3c6b04202dca..f2be9262c7cf 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -9,7 +9,7 @@ use crate::{ use reth_db::{cursor::DbCursorRO, database::Database, tables, transaction::DbTx}; use reth_interfaces::{ blockchain_tree::{ - error::{BlockchainTreeError, InsertBlockError, InsertBlockErrorKind}, + error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, BlockStatus, CanonicalOutcome, InsertPayloadOk, }, consensus::{Consensus, ConsensusError}, @@ -937,12 +937,12 @@ impl BlockchainTree if let Some(header) = self.find_canonical_header(block_hash)? 
{ info!(target: "blockchain_tree", ?block_hash, "Block is already canonical, ignoring."); let td = self.externals.database().provider()?.header_td(block_hash)?.ok_or( - BlockExecutionError::from(BlockValidationError::MissingTotalDifficulty { + CanonicalError::from(BlockValidationError::MissingTotalDifficulty { hash: *block_hash, }), )?; if !self.externals.chain_spec.fork(Hardfork::Paris).active_at_ttd(td, U256::ZERO) { - return Err(BlockExecutionError::from(BlockValidationError::BlockPreMerge { + return Err(CanonicalError::from(BlockValidationError::BlockPreMerge { hash: *block_hash, }) .into()) @@ -952,10 +952,10 @@ impl BlockchainTree let Some(chain_id) = self.block_indices.get_blocks_chain_id(block_hash) else { warn!(target: "blockchain_tree", ?block_hash, "Block hash not found in block indices"); - // TODO: better error - return Err( - BlockExecutionError::BlockHashNotFoundInChain { block_hash: *block_hash }.into() - ) + return Err(CanonicalError::from(BlockchainTreeError::BlockHashNotFoundInChain { + block_hash: *block_hash, + }) + .into()) }; let chain = self.chains.remove(&chain_id).expect("To be present"); diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index f47da482789a..cd772bf13253 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -11,7 +11,7 @@ use futures::{Future, StreamExt}; use reth_db::database::Database; use reth_interfaces::{ blockchain_tree::{ - error::{InsertBlockError, InsertBlockErrorKind}, + error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, BlockStatus, BlockchainTreeEngine, CanonicalOutcome, InsertPayloadOk, }, consensus::ForkchoiceState, @@ -690,7 +690,7 @@ where PayloadStatus::new(PayloadStatusEnum::Valid, Some(state.head_block_hash)) } Err(error) => { - if let Error::Execution(ref err) = error { + if let Error::Canonical(ref err) = error { if err.is_fatal() { tracing::error!(target: "consensus::engine", ?err, "Encountered fatal error"); return Err(error) @@ -929,10 +929,8 @@ where #[allow(clippy::single_match)] match &error { - Error::Execution( - error @ BlockExecutionError::Validation(BlockValidationError::BlockPreMerge { - .. - }), + Error::Canonical( + error @ CanonicalError::Validation(BlockValidationError::BlockPreMerge { .. }), ) => { warn!(target: "consensus::engine", ?error, ?state, "Failed to canonicalize the head hash"); return PayloadStatus::from_status(PayloadStatusEnum::Invalid { @@ -1497,7 +1495,9 @@ where // it's part of the canonical chain: if it's the safe or the finalized block if matches!( err, - Error::Execution(BlockExecutionError::BlockHashNotFoundInChain { .. }) + Error::Canonical(CanonicalError::BlockchainTree( + BlockchainTreeError::BlockHashNotFoundInChain { .. 
}
+                ))
             ) {
                 // if the inserted block is the currently targeted `finalized` or `safe`
                 // block, we will attempt to make them canonical,
diff --git a/crates/interfaces/src/blockchain_tree/error.rs b/crates/interfaces/src/blockchain_tree/error.rs
index 83bdf0023802..f6a467222cbc 100644
--- a/crates/interfaces/src/blockchain_tree/error.rs
+++ b/crates/interfaces/src/blockchain_tree/error.rs
@@ -33,6 +33,34 @@ pub enum BlockchainTreeError {
     BlockBufferingFailed { block_hash: BlockHash },
 }
 
+/// Result alias for `CanonicalError`
+pub type CanonicalResult<T> = std::result::Result<T, CanonicalError>;
+
+/// Canonical Errors
+#[allow(missing_docs)]
+#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)]
+pub enum CanonicalError {
+    /// Error originating from validation operations.
+    #[error(transparent)]
+    Validation(#[from] BlockValidationError),
+    /// Error originating from blockchain tree operations.
+    #[error(transparent)]
+    BlockchainTree(#[from] BlockchainTreeError),
+    /// Error indicating a transaction reverted during execution.
+    #[error("Transaction error on revert: {inner:?}")]
+    CanonicalRevert { inner: String },
+    /// Error indicating a transaction failed to commit during execution.
+    #[error("Transaction error on commit: {inner:?}")]
+    CanonicalCommit { inner: String },
+}
+
+impl CanonicalError {
+    /// Returns `true` if the error is fatal.
+    pub fn is_fatal(&self) -> bool {
+        matches!(self, Self::CanonicalCommit { .. } | Self::CanonicalRevert { .. })
+    }
+}
+
 /// Error thrown when inserting a block failed because the block is considered invalid.
 #[derive(thiserror::Error)]
 #[error(transparent)]
@@ -161,6 +189,9 @@ pub enum InsertBlockErrorKind {
     /// An internal error occurred, like interacting with the database.
     #[error("Internal error")]
     Internal(Box<dyn std::error::Error + Send + Sync>),
+    /// Canonical error.
+    #[error(transparent)]
+    Canonical(CanonicalError),
 }
 
 impl InsertBlockErrorKind {
@@ -214,6 +245,12 @@ impl InsertBlockErrorKind {
                 // any other error, such as database errors, are considered internal errors
                 false
             }
+            InsertBlockErrorKind::Canonical(err) => match err {
+                CanonicalError::BlockchainTree(_) |
+                CanonicalError::CanonicalCommit { .. } |
+                CanonicalError::CanonicalRevert { .. } => false,
+                CanonicalError::Validation(_) => true,
+            },
         }
    }
 
@@ -274,6 +311,7 @@ impl From<Error> for InsertBlockErrorKind {
             Error::Provider(err) => InsertBlockErrorKind::Internal(Box::new(err)),
             Error::Network(err) => InsertBlockErrorKind::Internal(Box::new(err)),
             Error::Custom(err) => InsertBlockErrorKind::Internal(err.into()),
+            Error::Canonical(err) => InsertBlockErrorKind::Canonical(err),
         }
     }
 }
diff --git a/crates/interfaces/src/error.rs b/crates/interfaces/src/error.rs
index b8b72fc92faa..284d6c481e88 100644
--- a/crates/interfaces/src/error.rs
+++ b/crates/interfaces/src/error.rs
@@ -20,6 +20,9 @@ pub enum Error {
     #[error(transparent)]
     Network(#[from] reth_network_api::NetworkError),
 
+    #[error(transparent)]
+    Canonical(#[from] crate::blockchain_tree::error::CanonicalError),
+
     #[error("{0}")]
     Custom(std::string::String),
 }
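The core move in the patch above is layering: `CanonicalError` wraps the validation and tree errors transparently, and the top-level `Error` gains a `Canonical` variant with a `#[from]` conversion so `?` composes across the layers. A compact sketch of that thiserror pattern, assuming the thiserror crate and using illustrative stand-in types:

// Sketch: layered error enums with thiserror, mirroring how CanonicalError
// nests into the top-level Error. All types here are illustrative stand-ins.
use thiserror::Error;

#[derive(Error, Debug)]
#[error("block hash {0:#x} not found in chain")]
struct BlockHashNotFound(u64);

#[derive(Error, Debug)]
enum CanonicalError {
    // `transparent` defers Display and source() entirely to the inner error
    #[error(transparent)]
    BlockchainTree(#[from] BlockHashNotFound),
    #[error("transaction error on commit: {inner:?}")]
    CanonicalCommit { inner: String },
}

impl CanonicalError {
    /// Only commit/revert-style failures are unrecoverable for the engine.
    fn is_fatal(&self) -> bool {
        matches!(self, Self::CanonicalCommit { .. })
    }
}

#[derive(Error, Debug)]
enum Error {
    #[error(transparent)]
    Canonical(#[from] CanonicalError),
}

fn make_canonical(hash: u64) -> Result<(), CanonicalError> {
    // the generated From impl lifts the leaf error into CanonicalError
    Err(BlockHashNotFound(hash).into())
}

fn main() {
    let canonical_err = make_canonical(0x1337).unwrap_err();
    assert!(!canonical_err.is_fatal());
    // the #[from] conversion is what a `?` in an engine method would use
    let top_level: Error = canonical_err.into();
    println!("{top_level}");
}

The payoff of this design is that callers can match on the layer they care about (here, `is_fatal`) without losing the precise leaf error underneath.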
From 6016da7a1234de70afb3b6d796909504faeb1627 Mon Sep 17 00:00:00 2001
From: Alexey Shekhirin
Date: Wed, 20 Sep 2023 13:13:45 +0100
Subject: [PATCH 722/722] refactor(engine): always connect buffered blocks on r/w hook finish (#4657)

---
 .../beacon/src/engine/hooks/controller.rs     | 38 ++++++++------
 .../consensus/beacon/src/engine/hooks/mod.rs  |  5 +-
 .../beacon/src/engine/hooks/prune.rs          |  8 +--
 crates/consensus/beacon/src/engine/mod.rs     | 50 +++++++++----------
 4 files changed, 51 insertions(+), 50 deletions(-)

diff --git a/crates/consensus/beacon/src/engine/hooks/controller.rs b/crates/consensus/beacon/src/engine/hooks/controller.rs
index 1449629c70f8..30c0a969772e 100644
--- a/crates/consensus/beacon/src/engine/hooks/controller.rs
+++ b/crates/consensus/beacon/src/engine/hooks/controller.rs
@@ -1,10 +1,20 @@
-use crate::hooks::{EngineContext, EngineHook, EngineHookAction, EngineHookError, EngineHooks};
+use crate::hooks::{
+    EngineContext, EngineHook, EngineHookAction, EngineHookDBAccessLevel, EngineHookError,
+    EngineHookEvent, EngineHooks,
+};
 use std::{
     collections::VecDeque,
     task::{Context, Poll},
 };
 use tracing::debug;
 
+#[derive(Debug)]
+pub(crate) struct PolledHook {
+    pub(crate) event: EngineHookEvent,
+    pub(crate) action: Option<EngineHookAction>,
+    pub(crate) db_access_level: EngineHookDBAccessLevel,
+}
+
 /// Manages hooks under the control of the engine.
/// /// This type polls the initialized hooks one by one, respecting the DB access level @@ -41,28 +51,27 @@ impl EngineHooksController { &mut self, cx: &mut Context<'_>, args: EngineContext, - ) -> Poll> { + ) -> Poll> { let Some(mut hook) = self.running_hook_with_db_write.take() else { return Poll::Pending }; match hook.poll(cx, args) { Poll::Ready((event, action)) => { + let result = PolledHook { event, action, db_access_level: hook.db_access_level() }; + debug!( target: "consensus::engine::hooks", hook = hook.name(), - ?action, - ?event, + ?result, "Polled running hook with db write access" ); - if !event.is_finished() { + if !result.event.is_finished() { self.running_hook_with_db_write = Some(hook); } else { self.hooks.push_back(hook); } - if let Some(action) = action { - return Poll::Ready(Ok(action)) - } + return Poll::Ready(Ok(result)) } Poll::Pending => { self.running_hook_with_db_write = Some(hook); @@ -89,7 +98,7 @@ impl EngineHooksController { cx: &mut Context<'_>, args: EngineContext, db_write_active: bool, - ) -> Poll> { + ) -> Poll> { let Some(mut hook) = self.hooks.pop_front() else { return Poll::Pending }; // Hook with DB write access level is not allowed to run due to already running hook with DB @@ -101,23 +110,22 @@ impl EngineHooksController { } if let Poll::Ready((event, action)) = hook.poll(cx, args) { + let result = PolledHook { event, action, db_access_level: hook.db_access_level() }; + debug!( target: "consensus::engine::hooks", hook = hook.name(), - ?action, - ?event, + ?result, "Polled next hook" ); - if event.is_started() && hook.db_access_level().is_read_write() { + if result.event.is_started() && result.db_access_level.is_read_write() { self.running_hook_with_db_write = Some(hook); } else { self.hooks.push_back(hook); } - if let Some(action) = action { - return Poll::Ready(Ok(action)) - } + return Poll::Ready(Ok(result)) } else { self.hooks.push_back(hook); } diff --git a/crates/consensus/beacon/src/engine/hooks/mod.rs b/crates/consensus/beacon/src/engine/hooks/mod.rs index d2770a77c1c7..f031231b23a0 100644 --- a/crates/consensus/beacon/src/engine/hooks/mod.rs +++ b/crates/consensus/beacon/src/engine/hooks/mod.rs @@ -6,7 +6,7 @@ use std::{ }; mod controller; -pub(crate) use controller::EngineHooksController; +pub(crate) use controller::{EngineHooksController, PolledHook}; mod prune; pub use prune::PruneHook; @@ -88,8 +88,6 @@ impl EngineHookEvent { pub enum EngineHookAction { /// Notify about a [SyncState] update. UpdateSyncState(SyncState), - /// Connect blocks buffered during the hook execution to canonical hashes. - ConnectBufferedBlocks, } /// An error returned by [hook][`EngineHook`]. @@ -107,6 +105,7 @@ pub enum EngineHookError { } /// Level of database access the hook needs for execution. +#[derive(Debug, Copy, Clone)] pub enum EngineHookDBAccessLevel { /// Read-only database access. 
ReadOnly, diff --git a/crates/consensus/beacon/src/engine/hooks/prune.rs b/crates/consensus/beacon/src/engine/hooks/prune.rs index f720082ee3bc..f18c9eafdf6f 100644 --- a/crates/consensus/beacon/src/engine/hooks/prune.rs +++ b/crates/consensus/beacon/src/engine/hooks/prune.rs @@ -73,13 +73,7 @@ impl PruneHook { } }; - let action = if matches!(event, EngineHookEvent::Finished(Ok(_))) { - Some(EngineHookAction::ConnectBufferedBlocks) - } else { - None - }; - - Poll::Ready((event, action)) + Poll::Ready((event, None)) } /// This will try to spawn the pruner if it is idle: diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index cd772bf13253..f8d893d6b981 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -39,7 +39,7 @@ use reth_tasks::TaskSpawner; use std::{ pin::Pin, sync::Arc, - task::{ready, Context, Poll}, + task::{Context, Poll}, time::Instant, }; use tokio::sync::{ @@ -69,7 +69,7 @@ mod handle; pub use handle::BeaconConsensusEngineHandle; mod forkchoice; -use crate::hooks::EngineHooks; +use crate::hooks::{EngineHooks, PolledHook}; pub use forkchoice::ForkchoiceStatus; mod metrics; @@ -1683,21 +1683,24 @@ where None } - fn on_hook_action(&self, action: EngineHookAction) -> Result<(), BeaconConsensusEngineError> { - match action { - EngineHookAction::UpdateSyncState(state) => { - self.sync_state_updater.update_sync_state(state) - } - // TODO(alexey): always connect buffered blocks if hook had the - // `EngineHookDBAccessLevel::ReadWrite` - EngineHookAction::ConnectBufferedBlocks => { - if let Err(error) = self.blockchain.connect_buffered_blocks_to_canonical_hashes() { - error!(target: "consensus::engine", ?error, "Error restoring blockchain tree state"); - return Err(error.into()) + fn on_hook_result(&self, result: PolledHook) -> Result<(), BeaconConsensusEngineError> { + if let Some(action) = result.action { + match action { + EngineHookAction::UpdateSyncState(state) => { + self.sync_state_updater.update_sync_state(state) } } } + if result.event.is_finished() && result.db_access_level.is_read_write() { + // If the hook had read-write access to the database, + // it means that the engine may have accumulated some buffered blocks. + if let Err(error) = self.blockchain.connect_buffered_blocks_to_canonical_hashes() { + error!(target: "consensus::engine", ?error, "Error connecting buffered blocks to canonical hashes on hook result"); + return Err(error.into()) + } + } + Ok(()) } } @@ -1734,10 +1737,8 @@ where if let Poll::Ready(result) = this.hooks.poll_running_hook_with_db_write( cx, EngineContext { tip_block_number: this.blockchain.canonical_tip().number }, - ) { - if let Err(err) = this.on_hook_action(result?) { - return Poll::Ready(Err(err)) - } + )? { + this.on_hook_result(result)?; } // Process all incoming messages from the CL, these can affect the state of the @@ -1793,18 +1794,17 @@ where // 1. Engine and sync messages are fully drained (both pending) // 2. Latest FCU status is not INVALID if !this.forkchoice_state_tracker.is_latest_invalid() { - let action = ready!(this.hooks.poll_next_hook( + if let Poll::Ready(result) = this.hooks.poll_next_hook( cx, EngineContext { tip_block_number: this.blockchain.canonical_tip().number }, this.sync.is_pipeline_active(), - ))?; - if let Err(err) = this.on_hook_action(action) { - return Poll::Ready(Err(err)) - } + )? 
{ + this.on_hook_result(result)?; - // ensure we're polling until pending while also checking for new engine messages - // before polling the next hook - continue 'main + // ensure we're polling until pending while also checking for new engine + // messages before polling the next hook + continue 'main + } } // incoming engine messages and sync events are drained, so we can yield back
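Taken together, the final patch makes the controller return a `PolledHook` carrying the event, the optional action, and the hook's DB access level, so the engine can react in one place: whenever a hook that held read-write database access finishes, buffered blocks are reconnected to canonical hashes. A minimal sketch of that dispatch rule with simplified stand-in types (only the rule itself mirrors the patch):

// Sketch: dispatch on a polled hook's result, reconnecting buffered blocks
// whenever a read-write hook finishes. Types are simplified stand-ins.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum AccessLevel {
    ReadOnly,
    ReadWrite,
}

#[derive(Debug)]
enum HookEvent {
    Started,
    Finished(Result<(), String>),
}

#[derive(Debug)]
struct PolledHook {
    event: HookEvent,
    db_access_level: AccessLevel,
}

fn connect_buffered_blocks_to_canonical_hashes() -> Result<(), String> {
    // placeholder for the blockchain-tree call of the same name
    println!("connecting buffered blocks");
    Ok(())
}

fn on_hook_result(result: &PolledHook) -> Result<(), String> {
    // a read-write hook may have left blocks buffered while it held the DB,
    // so always reconnect them once such a hook reports Finished
    if matches!(result.event, HookEvent::Finished(_))
        && result.db_access_level == AccessLevel::ReadWrite
    {
        connect_buffered_blocks_to_canonical_hashes()?;
    }
    Ok(())
}

fn main() {
    let polled = PolledHook {
        event: HookEvent::Finished(Ok(())),
        db_access_level: AccessLevel::ReadWrite,
    };
    on_hook_result(&polled).unwrap();
}

Compared to the earlier `EngineHookAction::ConnectBufferedBlocks` variant that the pruner had to emit explicitly, hanging the behavior off the hook's declared access level means no individual hook can forget to request it.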