diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index 2daa9a058a4..d437905ee16 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { - "core": "24.17.0", + "core": "24.18.0", "prover": "16.3.0", "zk_toolbox": "0.1.1" } diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index a04e64ae3ea..85eefc86227 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -63,7 +63,11 @@ jobs: run: ci_run yarn l1-contracts test - name: Rust unit tests - run: ci_run zk test rust + run: | + ci_run zk test rust + # Benchmarks are not tested by `cargo nextest` unless specified explicitly, and even then `criterion` harness is incompatible + # with how `cargo nextest` runs tests. Thus, we run criterion-based benchmark tests manually. + ci_run zk f cargo test --release -p vm-benchmark --bench criterion --bench fill_bootloader loadtest: runs-on: [matterlabs-ci-runner] diff --git a/Cargo.lock b/Cargo.lock index 5dbaac90eca..b65826900d4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7281,8 +7281,10 @@ version = "0.1.0" dependencies = [ "criterion", "iai", + "rand 0.8.5", "tokio", "vise", + "zksync_types", "zksync_vlog", "zksync_vm_benchmark_harness", ] @@ -8541,6 +8543,7 @@ dependencies = [ "zksync_test_account", "zksync_types", "zksync_utils", + "zksync_vm_interface", ] [[package]] @@ -8674,7 +8677,7 @@ dependencies = [ [[package]] name = "zksync_external_node" -version = "24.17.0" +version = "24.18.0" dependencies = [ "anyhow", "assert_matches", @@ -8828,6 +8831,17 @@ dependencies = [ "zksync_types", ] +[[package]] +name = "zksync_logs_bloom_backfill" +version = "0.1.0" +dependencies = [ + "anyhow", + "tokio", + "tracing", + "zksync_dal", + "zksync_types", +] + [[package]] name = "zksync_mempool" version = "0.1.0" @@ -9115,6 +9129,7 @@ dependencies = [ "zksync_external_proof_integration_api", 
"zksync_health_check", "zksync_house_keeper", + "zksync_logs_bloom_backfill", "zksync_metadata_calculator", "zksync_node_api_server", "zksync_node_consensus", @@ -9740,6 +9755,7 @@ dependencies = [ name = "zksync_vm_benchmark_harness" version = "0.1.0" dependencies = [ + "assert_matches", "once_cell", "zk_evm 0.133.0", "zksync_contracts", @@ -9761,7 +9777,6 @@ dependencies = [ "zksync_contracts", "zksync_system_constants", "zksync_types", - "zksync_utils", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 6619fd26175..d32b6c6a673 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,6 +37,7 @@ members = [ "core/node/tee_verifier_input_producer", "core/node/base_token_adjuster", "core/node/external_proof_integration_api", + "core/node/logs_bloom_backfill", # Libraries "core/lib/db_connection", "core/lib/zksync_core_leftovers", @@ -300,3 +301,4 @@ zksync_contract_verification_server = { version = "0.1.0", path = "core/node/con zksync_node_api_server = { version = "0.1.0", path = "core/node/api_server" } zksync_tee_verifier_input_producer = { version = "0.1.0", path = "core/node/tee_verifier_input_producer" } zksync_base_token_adjuster = { version = "0.1.0", path = "core/node/base_token_adjuster" } +zksync_logs_bloom_backfill = { version = "0.1.0", path = "core/node/logs_bloom_backfill" } diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 962113833f0..67fdc8cddc9 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## [24.18.0](https://github.com/matter-labs/zksync-era/compare/core-v24.17.0...core-v24.18.0) (2024-08-14) + + +### Features + +* add logs bloom ([#2633](https://github.com/matter-labs/zksync-era/issues/2633)) ([1067462](https://github.com/matter-labs/zksync-era/commit/10674620d1a04333507ca17b9a34ab3cb58846cf)) +* **zk_toolbox:** Minting base token ([#2571](https://github.com/matter-labs/zksync-era/issues/2571)) ([ae2dd3b](https://github.com/matter-labs/zksync-era/commit/ae2dd3bbccdffc25b040313b2c7983a936f36aac)) + ## 
[24.17.0](https://github.com/matter-labs/zksync-era/compare/core-v24.16.0...core-v24.17.0) (2024-08-13) diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index 68f7e8c29a4..5b7309a55a2 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "zksync_external_node" description = "Non-validator ZKsync node" -version = "24.17.0" # x-release-please-version +version = "24.18.0" # x-release-please-version edition.workspace = true authors.workspace = true homepage.workspace = true diff --git a/core/bin/external_node/src/node_builder.rs b/core/bin/external_node/src/node_builder.rs index 0b150c9872a..c30cc1a432b 100644 --- a/core/bin/external_node/src/node_builder.rs +++ b/core/bin/external_node/src/node_builder.rs @@ -22,6 +22,7 @@ use zksync_node_framework::{ consistency_checker::ConsistencyCheckerLayer, healtcheck_server::HealthCheckLayer, l1_batch_commitment_mode_validation::L1BatchCommitmentModeValidationLayer, + logs_bloom_backfill::LogsBloomBackfillLayer, main_node_client::MainNodeClientLayer, main_node_fee_params_fetcher::MainNodeFeeParamsFetcherLayer, metadata_calculator::MetadataCalculatorLayer, @@ -412,6 +413,11 @@ impl ExternalNodeBuilder { Ok(self) } + fn add_logs_bloom_backfill_layer(mut self) -> anyhow::Result { + self.node.add_layer(LogsBloomBackfillLayer); + Ok(self) + } + fn web3_api_optional_config(&self) -> Web3ServerOptionalConfig { // The refresh interval should be several times lower than the pruning removal delay, so that // soft-pruning will timely propagate to the API server. @@ -602,7 +608,8 @@ impl ExternalNodeBuilder { .add_pruning_layer()? .add_consistency_checker_layer()? .add_commitment_generator_layer()? - .add_batch_status_updater_layer()?; + .add_batch_status_updater_layer()? 
+ .add_logs_bloom_backfill_layer()?; } } } diff --git a/core/bin/snapshots_creator/src/tests.rs b/core/bin/snapshots_creator/src/tests.rs index 89a3807422b..990dd672975 100644 --- a/core/bin/snapshots_creator/src/tests.rs +++ b/core/bin/snapshots_creator/src/tests.rs @@ -154,6 +154,7 @@ async fn create_l2_block( protocol_version: Some(Default::default()), virtual_blocks: 0, gas_limit: 0, + logs_bloom: Default::default(), }; conn.blocks_dal() diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index f4b3dbe9b40..d9bc4690300 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -37,6 +37,7 @@ use zksync_node_framework::{ house_keeper::HouseKeeperLayer, l1_batch_commitment_mode_validation::L1BatchCommitmentModeValidationLayer, l1_gas::L1GasLayer, + logs_bloom_backfill::LogsBloomBackfillLayer, metadata_calculator::MetadataCalculatorLayer, node_storage_init::{ main_node_strategy::MainNodeInitStrategyLayer, NodeStorageInitializerLayer, @@ -609,6 +610,12 @@ impl MainNodeBuilder { Ok(self) } + fn add_logs_bloom_backfill_layer(mut self) -> anyhow::Result { + self.node.add_layer(LogsBloomBackfillLayer); + + Ok(self) + } + /// This layer will make sure that the database is initialized correctly, /// e.g. genesis will be performed if it's required. /// @@ -679,7 +686,8 @@ impl MainNodeBuilder { self = self .add_l1_gas_layer()? .add_storage_initialization_layer(LayerKind::Task)? - .add_state_keeper_layer()?; + .add_state_keeper_layer()? 
+ .add_logs_bloom_backfill_layer()?; } Component::HttpApi => { self = self diff --git a/core/lib/basic_types/src/lib.rs b/core/lib/basic_types/src/lib.rs index 5633fa3e10d..6e73d9f5fac 100644 --- a/core/lib/basic_types/src/lib.rs +++ b/core/lib/basic_types/src/lib.rs @@ -15,7 +15,9 @@ use std::{ pub use ethabi::{ self, - ethereum_types::{Address, Bloom as H2048, H128, H160, H256, H512, H520, H64, U128, U256, U64}, + ethereum_types::{ + Address, Bloom, BloomInput, H128, H160, H256, H512, H520, H64, U128, U256, U64, + }, }; use serde::{de, Deserialize, Deserializer, Serialize}; diff --git a/core/lib/basic_types/src/web3/mod.rs b/core/lib/basic_types/src/web3/mod.rs index 9bc10c8ab36..ecbe73f785b 100644 --- a/core/lib/basic_types/src/web3/mod.rs +++ b/core/lib/basic_types/src/web3/mod.rs @@ -13,7 +13,7 @@ use serde::{ }; use serde_json::Value; -use crate::{H160, H2048, H256, U256, U64}; +use crate::{Bloom, H160, H256, U256, U64}; pub mod contract; #[cfg(test)] @@ -389,7 +389,7 @@ pub struct BlockHeader { pub extra_data: Bytes, /// Logs bloom #[serde(rename = "logsBloom")] - pub logs_bloom: H2048, + pub logs_bloom: Bloom, /// Timestamp pub timestamp: U256, /// Difficulty @@ -441,7 +441,7 @@ pub struct Block { pub extra_data: Bytes, /// Logs bloom #[serde(rename = "logsBloom")] - pub logs_bloom: Option, + pub logs_bloom: Option, /// Timestamp pub timestamp: U256, /// Difficulty @@ -727,7 +727,7 @@ pub struct TransactionReceipt { pub root: Option, /// Logs bloom #[serde(rename = "logsBloom")] - pub logs_bloom: H2048, + pub logs_bloom: Bloom, /// Transaction type, Some(1) for AccessList transaction, None for Legacy #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub transaction_type: Option, diff --git a/core/lib/dal/.sqlx/query-04e03884a6bb62b28b18f97007e921d3c9a2f3e156ed8415ffc67c274e773fae.json b/core/lib/dal/.sqlx/query-04e03884a6bb62b28b18f97007e921d3c9a2f3e156ed8415ffc67c274e773fae.json new file mode 100644 index 00000000000..160c20d3988 
--- /dev/null +++ b/core/lib/dal/.sqlx/query-04e03884a6bb62b28b18f97007e921d3c9a2f3e156ed8415ffc67c274e773fae.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n (logs_bloom IS NOT NULL) AS \"logs_bloom_not_null!\"\n FROM\n miniblocks\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "logs_bloom_not_null!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "04e03884a6bb62b28b18f97007e921d3c9a2f3e156ed8415ffc67c274e773fae" +} diff --git a/core/lib/dal/.sqlx/query-13b09ea7749530884233add59dee9906a81580f252b2260bfdaa46a25f45d1cd.json b/core/lib/dal/.sqlx/query-13b09ea7749530884233add59dee9906a81580f252b2260bfdaa46a25f45d1cd.json new file mode 100644 index 00000000000..45b58a1c833 --- /dev/null +++ b/core/lib/dal/.sqlx/query-13b09ea7749530884233add59dee9906a81580f252b2260bfdaa46a25f45d1cd.json @@ -0,0 +1,53 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n address,\n topic1,\n topic2,\n topic3,\n topic4,\n miniblock_number\n FROM\n events\n WHERE\n miniblock_number BETWEEN $1 AND $2\n ORDER BY\n miniblock_number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "address", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "topic1", + "type_info": "Bytea" + }, + { + "ordinal": 2, + "name": "topic2", + "type_info": "Bytea" + }, + { + "ordinal": 3, + "name": "topic3", + "type_info": "Bytea" + }, + { + "ordinal": 4, + "name": "topic4", + "type_info": "Bytea" + }, + { + "ordinal": 5, + "name": "miniblock_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false + ] + }, + "hash": "13b09ea7749530884233add59dee9906a81580f252b2260bfdaa46a25f45d1cd" +} diff --git a/core/lib/dal/.sqlx/query-29a9350164fc0b2983f753e105a70e583b455383eec526eee3acfe6670e30f2f.json 
b/core/lib/dal/.sqlx/query-29a9350164fc0b2983f753e105a70e583b455383eec526eee3acfe6670e30f2f.json new file mode 100644 index 00000000000..7582e0f64e7 --- /dev/null +++ b/core/lib/dal/.sqlx/query-29a9350164fc0b2983f753e105a70e583b455383eec526eee3acfe6670e30f2f.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE miniblocks\n SET\n logs_bloom = data.logs_bloom\n FROM\n (\n SELECT\n UNNEST($1::BIGINT[]) AS number,\n UNNEST($2::BYTEA[]) AS logs_bloom\n ) AS data\n WHERE\n miniblocks.number = data.number\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8Array", + "ByteaArray" + ] + }, + "nullable": [] + }, + "hash": "29a9350164fc0b2983f753e105a70e583b455383eec526eee3acfe6670e30f2f" +} diff --git a/core/lib/dal/.sqlx/query-f35d539db19ed626a2e0b537468bcef1260bd43f7eee2710dfbdeed1a0228318.json b/core/lib/dal/.sqlx/query-39a105cba1be0ec8f2b2b88d2f10c6286fcc824e84bb40a6e9f289c34b85fded.json similarity index 87% rename from core/lib/dal/.sqlx/query-f35d539db19ed626a2e0b537468bcef1260bd43f7eee2710dfbdeed1a0228318.json rename to core/lib/dal/.sqlx/query-39a105cba1be0ec8f2b2b88d2f10c6286fcc824e84bb40a6e9f289c34b85fded.json index 8981f7e8a08..26a3458bff9 100644 --- a/core/lib/dal/.sqlx/query-f35d539db19ed626a2e0b537468bcef1260bd43f7eee2710dfbdeed1a0228318.json +++ b/core/lib/dal/.sqlx/query-39a105cba1be0ec8f2b2b88d2f10c6286fcc824e84bb40a6e9f289c34b85fded.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit\n FROM\n miniblocks\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n 
base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom\n FROM\n miniblocks\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -82,6 +82,11 @@ "ordinal": 15, "name": "gas_limit", "type_info": "Int8" + }, + { + "ordinal": 16, + "name": "logs_bloom", + "type_info": "Bytea" } ], "parameters": { @@ -103,8 +108,9 @@ true, false, true, + true, true ] }, - "hash": "f35d539db19ed626a2e0b537468bcef1260bd43f7eee2710dfbdeed1a0228318" + "hash": "39a105cba1be0ec8f2b2b88d2f10c6286fcc824e84bb40a6e9f289c34b85fded" } diff --git a/core/lib/dal/.sqlx/query-8998ddd82cf15feb671b0d4efc6f7496667ce703e30d1bf2e0fb5a66fb0a1d18.json b/core/lib/dal/.sqlx/query-45e52d05a4483def84c141e3529bab30553732953e589cd237595227044f438d.json similarity index 88% rename from core/lib/dal/.sqlx/query-8998ddd82cf15feb671b0d4efc6f7496667ce703e30d1bf2e0fb5a66fb0a1d18.json rename to core/lib/dal/.sqlx/query-45e52d05a4483def84c141e3529bab30553732953e589cd237595227044f438d.json index a8a811f2580..74a6187e644 100644 --- a/core/lib/dal/.sqlx/query-8998ddd82cf15feb671b0d4efc6f7496667ce703e30d1bf2e0fb5a66fb0a1d18.json +++ b/core/lib/dal/.sqlx/query-45e52d05a4483def84c141e3529bab30553732953e589cd237595227044f438d.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit\n FROM\n miniblocks\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n 
gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom\n FROM\n miniblocks\n WHERE\n number = $1\n ", "describe": { "columns": [ { @@ -82,6 +82,11 @@ "ordinal": 15, "name": "gas_limit", "type_info": "Int8" + }, + { + "ordinal": 16, + "name": "logs_bloom", + "type_info": "Bytea" } ], "parameters": { @@ -105,8 +110,9 @@ true, false, true, + true, true ] }, - "hash": "8998ddd82cf15feb671b0d4efc6f7496667ce703e30d1bf2e0fb5a66fb0a1d18" + "hash": "45e52d05a4483def84c141e3529bab30553732953e589cd237595227044f438d" } diff --git a/core/lib/dal/.sqlx/query-482bee9b383b17bddf819b977a012e49a65da26212e6d676abffb9d137aa3c2e.json b/core/lib/dal/.sqlx/query-482bee9b383b17bddf819b977a012e49a65da26212e6d676abffb9d137aa3c2e.json new file mode 100644 index 00000000000..f0fca373443 --- /dev/null +++ b/core/lib/dal/.sqlx/query-482bee9b383b17bddf819b977a012e49a65da26212e6d676abffb9d137aa3c2e.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE miniblocks\n SET\n logs_bloom = NULL\n WHERE\n number = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "482bee9b383b17bddf819b977a012e49a65da26212e6d676abffb9d137aa3c2e" +} diff --git a/core/lib/dal/.sqlx/query-e28e052dbd306ba408dd26e01c38176f2f9dbba45761a2117c185912d1e07bdf.json b/core/lib/dal/.sqlx/query-4cfdfb32d808e33779ea4566e9cf9bb44a0952d475c3e6f207443b72ebddb0cd.json similarity index 63% rename from core/lib/dal/.sqlx/query-e28e052dbd306ba408dd26e01c38176f2f9dbba45761a2117c185912d1e07bdf.json rename to core/lib/dal/.sqlx/query-4cfdfb32d808e33779ea4566e9cf9bb44a0952d475c3e6f207443b72ebddb0cd.json index 580a5370c89..4ea4aea2ea6 100644 --- a/core/lib/dal/.sqlx/query-e28e052dbd306ba408dd26e01c38176f2f9dbba45761a2117c185912d1e07bdf.json +++ b/core/lib/dal/.sqlx/query-4cfdfb32d808e33779ea4566e9cf9bb44a0952d475c3e6f207443b72ebddb0cd.json @@ -1,6 +1,6 @@ { 
"db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.hash AS \"block_hash\",\n miniblocks.number AS \"block_number\",\n prev_miniblock.hash AS \"parent_hash?\",\n miniblocks.timestamp AS \"block_timestamp\",\n miniblocks.base_fee_per_gas AS \"base_fee_per_gas\",\n miniblocks.gas_limit AS \"block_gas_limit?\",\n transactions.gas_limit AS \"transaction_gas_limit?\",\n transactions.refunded_gas AS \"transaction_refunded_gas?\"\n FROM\n miniblocks\n LEFT JOIN miniblocks prev_miniblock ON prev_miniblock.number = miniblocks.number - 1\n LEFT JOIN transactions ON transactions.miniblock_number = miniblocks.number\n WHERE\n miniblocks.number > $1\n ORDER BY\n miniblocks.number ASC,\n transactions.index_in_block ASC\n ", + "query": "\n SELECT\n miniblocks.hash AS \"block_hash\",\n miniblocks.number AS \"block_number\",\n prev_miniblock.hash AS \"parent_hash?\",\n miniblocks.timestamp AS \"block_timestamp\",\n miniblocks.base_fee_per_gas AS \"base_fee_per_gas\",\n miniblocks.gas_limit AS \"block_gas_limit?\",\n miniblocks.logs_bloom AS \"block_logs_bloom?\",\n transactions.gas_limit AS \"transaction_gas_limit?\",\n transactions.refunded_gas AS \"transaction_refunded_gas?\"\n FROM\n miniblocks\n LEFT JOIN miniblocks prev_miniblock ON prev_miniblock.number = miniblocks.number - 1\n LEFT JOIN transactions ON transactions.miniblock_number = miniblocks.number\n WHERE\n miniblocks.number > $1\n ORDER BY\n miniblocks.number ASC,\n transactions.index_in_block ASC\n ", "describe": { "columns": [ { @@ -35,11 +35,16 @@ }, { "ordinal": 6, + "name": "block_logs_bloom?", + "type_info": "Bytea" + }, + { + "ordinal": 7, "name": "transaction_gas_limit?", "type_info": "Numeric" }, { - "ordinal": 7, + "ordinal": 8, "name": "transaction_refunded_gas?", "type_info": "Int8" } @@ -57,8 +62,9 @@ false, true, true, + true, false ] }, - "hash": "e28e052dbd306ba408dd26e01c38176f2f9dbba45761a2117c185912d1e07bdf" + "hash": "4cfdfb32d808e33779ea4566e9cf9bb44a0952d475c3e6f207443b72ebddb0cd" } diff 
--git a/core/lib/dal/.sqlx/query-4d15a3d05fb4819a6ba7532504ea342c80f54d844064121feaef9d7143e9ba7a.json b/core/lib/dal/.sqlx/query-4d15a3d05fb4819a6ba7532504ea342c80f54d844064121feaef9d7143e9ba7a.json new file mode 100644 index 00000000000..e980f08b0da --- /dev/null +++ b/core/lib/dal/.sqlx/query-4d15a3d05fb4819a6ba7532504ea342c80f54d844064121feaef9d7143e9ba7a.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n MIN(number) AS \"number\"\n FROM\n miniblocks\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "4d15a3d05fb4819a6ba7532504ea342c80f54d844064121feaef9d7143e9ba7a" +} diff --git a/core/lib/dal/.sqlx/query-b8d5c838533b8f8ce75c39b45995048f6d9a7817042dcbf64d040b6c916fe8f2.json b/core/lib/dal/.sqlx/query-b8d5c838533b8f8ce75c39b45995048f6d9a7817042dcbf64d040b6c916fe8f2.json new file mode 100644 index 00000000000..30a22873196 --- /dev/null +++ b/core/lib/dal/.sqlx/query-b8d5c838533b8f8ce75c39b45995048f6d9a7817042dcbf64d040b6c916fe8f2.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n MAX(number) AS \"max?\"\n FROM\n miniblocks\n WHERE\n logs_bloom IS NULL\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "max?", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "b8d5c838533b8f8ce75c39b45995048f6d9a7817042dcbf64d040b6c916fe8f2" +} diff --git a/core/lib/dal/.sqlx/query-e296f3e9849910734196c91c2c35b0996a4274e793ed6a9b1bc7d687954f9a0f.json b/core/lib/dal/.sqlx/query-c4835d40921af47bfb4f60102bbba3af74e8e7b5944cb2943b5badb906167046.json similarity index 56% rename from core/lib/dal/.sqlx/query-e296f3e9849910734196c91c2c35b0996a4274e793ed6a9b1bc7d687954f9a0f.json rename to core/lib/dal/.sqlx/query-c4835d40921af47bfb4f60102bbba3af74e8e7b5944cb2943b5badb906167046.json index 4de23050455..9ae9d2e50cd 100644 --- 
a/core/lib/dal/.sqlx/query-e296f3e9849910734196c91c2c35b0996a4274e793ed6a9b1bc7d687954f9a0f.json +++ b/core/lib/dal/.sqlx/query-c4835d40921af47bfb4f60102bbba3af74e8e7b5944cb2943b5badb906167046.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n miniblocks (\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n NOW(),\n NOW()\n )\n ", + "query": "\n INSERT INTO\n miniblocks (\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n NOW(),\n NOW()\n )\n ", "describe": { "columns": [], "parameters": { @@ -20,10 +20,11 @@ "Int4", "Int8", "Int8", - "Int8" + "Int8", + "Bytea" ] }, "nullable": [] }, - "hash": "e296f3e9849910734196c91c2c35b0996a4274e793ed6a9b1bc7d687954f9a0f" + "hash": "c4835d40921af47bfb4f60102bbba3af74e8e7b5944cb2943b5badb906167046" } diff --git a/core/lib/dal/.sqlx/query-d3fa49388c38985d27a5f00bd6ff439cc6378a87a3ba999575dbdafca990cb9a.json b/core/lib/dal/.sqlx/query-dcfc3c0df11b923116af194a26c122dbdbf650edfec6d9c18f96c3bd0064d18d.json similarity index 63% rename from core/lib/dal/.sqlx/query-d3fa49388c38985d27a5f00bd6ff439cc6378a87a3ba999575dbdafca990cb9a.json rename to 
core/lib/dal/.sqlx/query-dcfc3c0df11b923116af194a26c122dbdbf650edfec6d9c18f96c3bd0064d18d.json index c61299c0d21..36e56da404e 100644 --- a/core/lib/dal/.sqlx/query-d3fa49388c38985d27a5f00bd6ff439cc6378a87a3ba999575dbdafca990cb9a.json +++ b/core/lib/dal/.sqlx/query-dcfc3c0df11b923116af194a26c122dbdbf650edfec6d9c18f96c3bd0064d18d.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.hash AS block_hash,\n miniblocks.number,\n miniblocks.l1_batch_number,\n miniblocks.timestamp,\n miniblocks.base_fee_per_gas,\n miniblocks.gas_limit AS \"block_gas_limit?\",\n prev_miniblock.hash AS \"parent_hash?\",\n l1_batches.timestamp AS \"l1_batch_timestamp?\",\n transactions.gas_limit AS \"transaction_gas_limit?\",\n transactions.refunded_gas AS \"refunded_gas?\",\n transactions.hash AS \"tx_hash?\"\n FROM\n miniblocks\n LEFT JOIN miniblocks prev_miniblock ON prev_miniblock.number = miniblocks.number - 1\n LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number\n LEFT JOIN transactions ON transactions.miniblock_number = miniblocks.number\n WHERE\n miniblocks.number = $1\n ORDER BY\n transactions.index_in_block ASC\n ", + "query": "\n SELECT\n miniblocks.hash AS block_hash,\n miniblocks.number,\n miniblocks.l1_batch_number,\n miniblocks.timestamp,\n miniblocks.base_fee_per_gas,\n miniblocks.gas_limit AS \"block_gas_limit?\",\n miniblocks.logs_bloom,\n prev_miniblock.hash AS \"parent_hash?\",\n l1_batches.timestamp AS \"l1_batch_timestamp?\",\n transactions.gas_limit AS \"transaction_gas_limit?\",\n transactions.refunded_gas AS \"refunded_gas?\",\n transactions.hash AS \"tx_hash?\"\n FROM\n miniblocks\n LEFT JOIN miniblocks prev_miniblock ON prev_miniblock.number = miniblocks.number - 1\n LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number\n LEFT JOIN transactions ON transactions.miniblock_number = miniblocks.number\n WHERE\n miniblocks.number = $1\n ORDER BY\n transactions.index_in_block ASC\n ", "describe": { "columns": 
[ { @@ -35,26 +35,31 @@ }, { "ordinal": 6, - "name": "parent_hash?", + "name": "logs_bloom", "type_info": "Bytea" }, { "ordinal": 7, + "name": "parent_hash?", + "type_info": "Bytea" + }, + { + "ordinal": 8, "name": "l1_batch_timestamp?", "type_info": "Int8" }, { - "ordinal": 8, + "ordinal": 9, "name": "transaction_gas_limit?", "type_info": "Numeric" }, { - "ordinal": 9, + "ordinal": 10, "name": "refunded_gas?", "type_info": "Int8" }, { - "ordinal": 10, + "ordinal": 11, "name": "tx_hash?", "type_info": "Bytea" } @@ -71,6 +76,7 @@ false, false, true, + true, false, false, true, @@ -78,5 +84,5 @@ false ] }, - "hash": "d3fa49388c38985d27a5f00bd6ff439cc6378a87a3ba999575dbdafca990cb9a" + "hash": "dcfc3c0df11b923116af194a26c122dbdbf650edfec6d9c18f96c3bd0064d18d" } diff --git a/core/lib/dal/Cargo.toml b/core/lib/dal/Cargo.toml index c046b3d3b42..9c13eeb3014 100644 --- a/core/lib/dal/Cargo.toml +++ b/core/lib/dal/Cargo.toml @@ -14,6 +14,7 @@ links = "zksync_dal_proto" [dependencies] vise.workspace = true +zksync_vm_interface.workspace = true zksync_utils.workspace = true zksync_system_constants.workspace = true zksync_contracts.workspace = true diff --git a/core/lib/dal/migrations/20240809130434_add-block-logs-bloom.down.sql b/core/lib/dal/migrations/20240809130434_add-block-logs-bloom.down.sql new file mode 100644 index 00000000000..d6d67c3aa52 --- /dev/null +++ b/core/lib/dal/migrations/20240809130434_add-block-logs-bloom.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE miniblocks + DROP COLUMN IF EXISTS logs_bloom; diff --git a/core/lib/dal/migrations/20240809130434_add-block-logs-bloom.up.sql b/core/lib/dal/migrations/20240809130434_add-block-logs-bloom.up.sql new file mode 100644 index 00000000000..83eca63239f --- /dev/null +++ b/core/lib/dal/migrations/20240809130434_add-block-logs-bloom.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE miniblocks + ADD COLUMN IF NOT EXISTS logs_bloom BYTEA; diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 
4f4b3e99ff7..dbb56b42a46 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -19,12 +19,12 @@ use zksync_types::{ BlockGasCount, L1BatchHeader, L1BatchStatistics, L1BatchTreeData, L2BlockHeader, StorageOracleInfo, }, - circuit::CircuitStatistic, commitment::{L1BatchCommitmentArtifacts, L1BatchWithMetadata}, l2_to_l1_log::UserL2ToL1Log, writes::TreeWrite, - Address, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, U256, + Address, Bloom, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, U256, }; +use zksync_vm_interface::CircuitStatistic; pub use crate::models::storage_block::{L1BatchMetadataError, L1BatchWithOptionalMetadata}; use crate::{ @@ -150,6 +150,22 @@ impl BlocksDal<'_, '_> { Ok(row.number.map(|num| L1BatchNumber(num as u32))) } + pub async fn get_earliest_l2_block_number(&mut self) -> DalResult> { + let row = sqlx::query!( + r#" + SELECT + MIN(number) AS "number" + FROM + miniblocks + "# + ) + .instrument("get_earliest_l2_block_number") + .fetch_one(self.storage) + .await?; + + Ok(row.number.map(|num| L2BlockNumber(num as u32))) + } + pub async fn get_last_l1_batch_number_with_tree_data( &mut self, ) -> DalResult> { @@ -691,6 +707,7 @@ impl BlocksDal<'_, '_> { virtual_blocks, fair_pubdata_price, gas_limit, + logs_bloom, created_at, updated_at ) @@ -712,6 +729,7 @@ impl BlocksDal<'_, '_> { $14, $15, $16, + $17, NOW(), NOW() ) @@ -738,6 +756,7 @@ impl BlocksDal<'_, '_> { i64::from(l2_block_header.virtual_blocks), l2_block_header.batch_fee_input.fair_pubdata_price() as i64, l2_block_header.gas_limit as i64, + l2_block_header.logs_bloom.as_bytes(), ); instrumentation.with(query).execute(self.storage).await?; @@ -764,7 +783,8 @@ impl BlocksDal<'_, '_> { protocol_version, virtual_blocks, fair_pubdata_price, - gas_limit + gas_limit, + logs_bloom FROM miniblocks ORDER BY @@ -803,7 +823,8 @@ impl BlocksDal<'_, '_> { protocol_version, virtual_blocks, fair_pubdata_price, - gas_limit + gas_limit, + logs_bloom FROM miniblocks 
WHERE @@ -2334,6 +2355,84 @@ impl BlocksDal<'_, '_> { Ok(results.into_iter().map(L::from).collect()) } + + pub async fn has_last_l2_block_bloom(&mut self) -> DalResult { + let row = sqlx::query!( + r#" + SELECT + (logs_bloom IS NOT NULL) AS "logs_bloom_not_null!" + FROM + miniblocks + ORDER BY + number DESC + LIMIT + 1 + "#, + ) + .instrument("has_last_l2_block_bloom") + .fetch_optional(self.storage) + .await?; + + Ok(row.map(|row| row.logs_bloom_not_null).unwrap_or(false)) + } + + pub async fn get_max_l2_block_without_bloom(&mut self) -> DalResult> { + let row = sqlx::query!( + r#" + SELECT + MAX(number) AS "max?" + FROM + miniblocks + WHERE + logs_bloom IS NULL + "#, + ) + .instrument("get_max_l2_block_without_bloom") + .fetch_one(self.storage) + .await?; + + Ok(row.max.map(|n| L2BlockNumber(n as u32))) + } + + pub async fn range_update_logs_bloom( + &mut self, + from_l2_block: L2BlockNumber, + blooms: &[Bloom], + ) -> DalResult<()> { + if blooms.is_empty() { + return Ok(()); + } + + let to_l2_block = from_l2_block + (blooms.len() - 1) as u32; + let numbers: Vec<_> = (i64::from(from_l2_block.0)..=i64::from(to_l2_block.0)).collect(); + + let blooms = blooms + .iter() + .map(|blooms| blooms.as_bytes()) + .collect::>(); + sqlx::query!( + r#" + UPDATE miniblocks + SET + logs_bloom = data.logs_bloom + FROM + ( + SELECT + UNNEST($1::BIGINT[]) AS number, + UNNEST($2::BYTEA[]) AS logs_bloom + ) AS data + WHERE + miniblocks.number = data.number + "#, + &numbers, + &blooms as &[&[u8]], + ) + .instrument("range_update_logs_bloom") + .execute(self.storage) + .await?; + + Ok(()) + } } /// These methods should only be used for tests. 
@@ -2416,6 +2515,24 @@ impl BlocksDal<'_, '_> { .context("storage contains neither L2 blocks, nor snapshot recovery info")?; Ok(snapshot_recovery.protocol_version) } + + pub async fn drop_l2_block_bloom(&mut self, l2_block_number: L2BlockNumber) -> DalResult<()> { + sqlx::query!( + r#" + UPDATE miniblocks + SET + logs_bloom = NULL + WHERE + number = $1 + "#, + i64::from(l2_block_number.0) + ) + .instrument("drop_l2_block_bloom") + .with_arg("l2_block_number", &l2_block_number) + .execute(self.storage) + .await?; + Ok(()) + } } #[cfg(test)] diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index 13fa9070f82..281a44436a7 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -9,7 +9,7 @@ use zksync_types::{ l2_to_l1_log::L2ToL1Log, vm_trace::Call, web3::{BlockHeader, Bytes}, - L1BatchNumber, L2BlockNumber, ProtocolVersionId, H160, H2048, H256, U256, U64, + Bloom, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H160, H256, U256, U64, }; use zksync_utils::bigdecimal_to_u256; @@ -44,6 +44,7 @@ impl BlocksWeb3Dal<'_, '_> { miniblocks.timestamp, miniblocks.base_fee_per_gas, miniblocks.gas_limit AS "block_gas_limit?", + miniblocks.logs_bloom, prev_miniblock.hash AS "parent_hash?", l1_batches.timestamp AS "l1_batch_timestamp?", transactions.gas_limit AS "transaction_gas_limit?", @@ -87,7 +88,10 @@ impl BlocksWeb3Dal<'_, '_> { .unwrap_or(i64::from(LEGACY_BLOCK_GAS_LIMIT)) as u64) .into(), - // TODO: include logs + logs_bloom: row + .logs_bloom + .map(|b| Bloom::from_slice(&b)) + .unwrap_or_default(), ..api::Block::default() } }); @@ -175,6 +179,7 @@ impl BlocksWeb3Dal<'_, '_> { miniblocks.timestamp AS "block_timestamp", miniblocks.base_fee_per_gas AS "base_fee_per_gas", miniblocks.gas_limit AS "block_gas_limit?", + miniblocks.logs_bloom AS "block_logs_bloom?", transactions.gas_limit AS "transaction_gas_limit?", transactions.refunded_gas AS "transaction_refunded_gas?" 
FROM @@ -219,7 +224,11 @@ impl BlocksWeb3Dal<'_, '_> { .into(), base_fee_per_gas: Some(bigdecimal_to_u256(row.base_fee_per_gas.clone())), extra_data: Bytes::default(), - logs_bloom: H2048::default(), + logs_bloom: row + .block_logs_bloom + .as_ref() + .map(|b| Bloom::from_slice(b)) + .unwrap_or_default(), timestamp: U256::from(row.block_timestamp), difficulty: U256::zero(), mix_hash: None, @@ -757,9 +766,9 @@ mod tests { use zksync_types::{ aggregated_operations::AggregatedActionType, block::{L2BlockHasher, L2BlockHeader}, - fee::TransactionExecutionMetrics, Address, L2BlockNumber, ProtocolVersion, ProtocolVersionId, }; + use zksync_vm_interface::TransactionExecutionMetrics; use super::*; use crate::{ diff --git a/core/lib/dal/src/events_dal.rs b/core/lib/dal/src/events_dal.rs index c2b296fc085..d4286a5bced 100644 --- a/core/lib/dal/src/events_dal.rs +++ b/core/lib/dal/src/events_dal.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, fmt}; +use std::{collections::HashMap, fmt, ops::RangeInclusive}; use sqlx::types::chrono::Utc; use zksync_db_connection::{ @@ -409,6 +409,47 @@ impl EventsDal<'_, '_> { .collect(); Ok(Some(events)) } + + pub async fn get_bloom_items_for_l2_blocks( + &mut self, + l2_block_range: RangeInclusive, + ) -> DalResult>>> { + let rows = sqlx::query!( + r#" + SELECT + address, + topic1, + topic2, + topic3, + topic4, + miniblock_number + FROM + events + WHERE + miniblock_number BETWEEN $1 AND $2 + ORDER BY + miniblock_number + "#, + i64::from(l2_block_range.start().0), + i64::from(l2_block_range.end().0), + ) + .instrument("get_bloom_items_for_l2_blocks") + .fetch_all(self.storage) + .await?; + + let mut items = HashMap::new(); + for row in rows { + let block = L2BlockNumber(row.miniblock_number as u32); + let vec: &mut Vec<_> = items.entry(block).or_default(); + + let iter = [row.address, row.topic1, row.topic2, row.topic3, row.topic4] + .into_iter() + .filter(|x| !x.is_empty()); + vec.extend(iter); + } + + Ok(items) + } } #[cfg(test)] diff 
--git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs index be8b4e4152b..34e14387ca6 100644 --- a/core/lib/dal/src/models/storage_block.rs +++ b/core/lib/dal/src/models/storage_block.rs @@ -10,7 +10,7 @@ use zksync_types::{ commitment::{L1BatchMetaParameters, L1BatchMetadata}, fee_model::{BatchFeeInput, L1PeggedBatchFeeModelInput, PubdataIndependentBatchFeeModelInput}, l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log, UserL2ToL1Log}, - Address, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H2048, H256, + Address, Bloom, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, }; /// This is the gas limit that was used inside blocks before we started saving block gas limit into the database. @@ -76,7 +76,7 @@ impl StorageL1BatchHeader { l2_to_l1_logs, l2_to_l1_messages: self.l2_to_l1_messages, - bloom: H2048::from_slice(&self.bloom), + bloom: Bloom::from_slice(&self.bloom), used_contract_hashes: serde_json::from_value(self.used_contract_hashes) .expect("invalid value for used_contract_hashes in the DB"), base_system_contracts_hashes: convert_base_system_contracts_hashes( @@ -171,7 +171,7 @@ impl StorageL1Batch { l2_to_l1_logs, l2_to_l1_messages: self.l2_to_l1_messages, - bloom: H2048::from_slice(&self.bloom), + bloom: Bloom::from_slice(&self.bloom), used_contract_hashes: serde_json::from_value(self.used_contract_hashes) .expect("invalid value for used_contract_hashes in the DB"), base_system_contracts_hashes: convert_base_system_contracts_hashes( @@ -433,6 +433,7 @@ pub(crate) struct StorageL2BlockHeader { /// The formal value of the gas limit for the miniblock. /// This value should bound the maximal amount of gas that can be spent by transactions in the miniblock. 
pub gas_limit: Option, + pub logs_bloom: Option>, } impl From for L2BlockHeader { @@ -475,6 +476,10 @@ impl From for L2BlockHeader { protocol_version, virtual_blocks: row.virtual_blocks as u32, gas_limit: row.gas_limit.unwrap_or(i64::from(LEGACY_BLOCK_GAS_LIMIT)) as u64, + logs_bloom: row + .logs_bloom + .map(|b| Bloom::from_slice(&b)) + .unwrap_or_default(), } } } diff --git a/core/lib/dal/src/pruning_dal/tests.rs b/core/lib/dal/src/pruning_dal/tests.rs index 0999e2be164..4f94ff7f63d 100644 --- a/core/lib/dal/src/pruning_dal/tests.rs +++ b/core/lib/dal/src/pruning_dal/tests.rs @@ -2,10 +2,10 @@ use std::ops; use zksync_db_connection::connection::Connection; use zksync_types::{ - fee::TransactionExecutionMetrics, tx::IncludedTxLocation, AccountTreeId, Address, - L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersion, ProtocolVersionId, StorageKey, - StorageLog, H256, + tx::IncludedTxLocation, AccountTreeId, Address, L1BatchNumber, L2BlockNumber, L2ChainId, + ProtocolVersion, ProtocolVersionId, StorageKey, StorageLog, H256, }; +use zksync_vm_interface::TransactionExecutionMetrics; use super::*; use crate::{ diff --git a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs index 898770c38f5..ec6ee0f9281 100644 --- a/core/lib/dal/src/sync_dal.rs +++ b/core/lib/dal/src/sync_dal.rs @@ -106,9 +106,9 @@ impl SyncDal<'_, '_> { mod tests { use zksync_types::{ block::{L1BatchHeader, L2BlockHeader}, - fee::TransactionExecutionMetrics, Address, L1BatchNumber, ProtocolVersion, ProtocolVersionId, Transaction, }; + use zksync_vm_interface::TransactionExecutionMetrics; use super::*; use crate::{ diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index 11f88ba8a70..275881febdd 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -4,7 +4,7 @@ use zksync_contracts::BaseSystemContractsHashes; use zksync_db_connection::connection_pool::ConnectionPool; use zksync_types::{ block::{L1BatchHeader, L2BlockHasher, L2BlockHeader}, 
- fee::{Fee, TransactionExecutionMetrics}, + fee::Fee, fee_model::BatchFeeInput, helpers::unix_timestamp_ms, l1::{L1Tx, OpProcessingType, PriorityQueueType}, @@ -12,10 +12,12 @@ use zksync_types::{ l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, protocol_upgrade::{ProtocolUpgradeTx, ProtocolUpgradeTxCommonData}, snapshots::SnapshotRecoveryStatus, - tx::{tx_execution_info::TxExecutionStatus, ExecutionMetrics, TransactionExecutionResult}, Address, Execute, K256PrivateKey, L1BatchNumber, L1BlockNumber, L1TxCommonData, L2BlockNumber, L2ChainId, PriorityOpId, ProtocolVersion, ProtocolVersionId, VmEvent, H160, H256, U256, }; +use zksync_vm_interface::{ + TransactionExecutionMetrics, TransactionExecutionResult, TxExecutionStatus, VmExecutionMetrics, +}; use crate::{ blocks_dal::BlocksDal, @@ -48,6 +50,7 @@ pub(crate) fn create_l2_block_header(number: u32) -> L2BlockHeader { protocol_version: Some(protocol_version), virtual_blocks: 1, gas_limit: 0, + logs_bloom: Default::default(), } } pub(crate) fn create_l1_batch_header(number: u32) -> L1BatchHeader { @@ -151,7 +154,7 @@ pub(crate) fn mock_execution_result(transaction: L2Tx) -> TransactionExecutionRe TransactionExecutionResult { hash: transaction.hash(), transaction: transaction.into(), - execution_info: ExecutionMetrics::default(), + execution_info: VmExecutionMetrics::default(), execution_status: TxExecutionStatus::Success, refunded_gas: 0, operator_suggested_refund: 0, diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index f76b61ec164..89d7499e49d 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -10,17 +10,14 @@ use zksync_db_connection::{ utils::pg_interval_from_duration, }; use zksync_types::{ - block::L2BlockExecutionData, - fee::TransactionExecutionMetrics, - l1::L1Tx, - l2::L2Tx, - protocol_upgrade::ProtocolUpgradeTx, - tx::{tx_execution_info::TxExecutionStatus, TransactionExecutionResult}, - vm_trace::Call, - Address, 
ExecuteTransactionCommon, L1BatchNumber, L1BlockNumber, L2BlockNumber, PriorityOpId, - ProtocolVersionId, Transaction, H256, PROTOCOL_UPGRADE_TX_TYPE, U256, + block::L2BlockExecutionData, l1::L1Tx, l2::L2Tx, protocol_upgrade::ProtocolUpgradeTx, + vm_trace::Call, Address, ExecuteTransactionCommon, L1BatchNumber, L1BlockNumber, L2BlockNumber, + PriorityOpId, ProtocolVersionId, Transaction, H256, PROTOCOL_UPGRADE_TX_TYPE, U256, }; use zksync_utils::u256_to_big_decimal; +use zksync_vm_interface::{ + TransactionExecutionMetrics, TransactionExecutionResult, TxExecutionStatus, +}; use crate::{ models::storage_transaction::{CallTrace, StorageTransaction}, diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs index ff82664109d..0df3cedbc82 100644 --- a/core/lib/dal/src/transactions_web3_dal.rs +++ b/core/lib/dal/src/transactions_web3_dal.rs @@ -1,4 +1,4 @@ -use std::collections::HashMap; +use std::{collections::HashMap, iter::once}; use anyhow::Context as _; use sqlx::types::chrono::NaiveDateTime; @@ -9,8 +9,8 @@ use zksync_db_connection::{ interpolate_query, match_query_as, }; use zksync_types::{ - api, api::TransactionReceipt, event::DEPLOY_EVENT_SIGNATURE, Address, L2BlockNumber, L2ChainId, - Transaction, CONTRACT_DEPLOYER_ADDRESS, H256, U256, + api, api::TransactionReceipt, block::build_bloom, event::DEPLOY_EVENT_SIGNATURE, Address, + BloomInput, L2BlockNumber, L2ChainId, Transaction, CONTRACT_DEPLOYER_ADDRESS, H256, U256, }; use crate::{ @@ -118,6 +118,13 @@ impl TransactionsWeb3Dal<'_, '_> { let logs_for_tx = logs.remove(&receipt.transaction_hash); if let Some(logs) = logs_for_tx { + let iter = logs.iter().flat_map(|log| { + log.topics + .iter() + .map(|topic| BloomInput::Raw(topic.as_bytes())) + .chain(once(BloomInput::Raw(log.address.as_bytes()))) + }); + receipt.logs_bloom = build_bloom(iter); receipt.logs = logs .into_iter() .map(|mut log| { @@ -479,9 +486,8 @@ impl TransactionsWeb3Dal<'_, '_> { mod tests { use 
std::collections::HashMap; - use zksync_types::{ - fee::TransactionExecutionMetrics, l2::L2Tx, Nonce, ProtocolVersion, ProtocolVersionId, - }; + use zksync_types::{l2::L2Tx, Nonce, ProtocolVersion, ProtocolVersionId}; + use zksync_vm_interface::TransactionExecutionMetrics; use super::*; use crate::{ diff --git a/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs b/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs index 80d1ef8a294..2dc680ba77d 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs @@ -1,8 +1,8 @@ -use zksync_types::tx::tx_execution_info::TxExecutionStatus; - use crate::{ glue::{GlueFrom, GlueInto}, - interface::{ExecutionResult, Refunds, TxRevertReason, VmExecutionResultAndLogs}, + interface::{ + ExecutionResult, Refunds, TxExecutionStatus, TxRevertReason, VmExecutionResultAndLogs, + }, }; impl GlueFrom for VmExecutionResultAndLogs { diff --git a/core/lib/multivm/src/utils/bytecode.rs b/core/lib/multivm/src/utils/bytecode.rs new file mode 100644 index 00000000000..260749b44f3 --- /dev/null +++ b/core/lib/multivm/src/utils/bytecode.rs @@ -0,0 +1,154 @@ +use std::collections::HashMap; + +use zksync_types::ethabi::{self, Token}; +use zksync_utils::bytecode::{hash_bytecode, validate_bytecode, InvalidBytecodeError}; + +use crate::interface::CompressedBytecodeInfo; + +#[derive(Debug, thiserror::Error)] +pub(crate) enum FailedToCompressBytecodeError { + #[error("Number of unique 8-bytes bytecode chunks exceed the limit of 2^16 - 1")] + DictionaryOverflow, + #[error("Bytecode is invalid: {0}")] + InvalidBytecode(#[from] InvalidBytecodeError), +} + +/// Implements, a simple compression algorithm for the bytecode. 
+fn compress_to_bytes(code: &[u8]) -> Result, FailedToCompressBytecodeError> { + validate_bytecode(code)?; + + // Statistic is a hash map of values (number of occurrences, first occurrence position), + // this is needed to ensure that the determinism during sorting of the statistic, i.e. + // each element will have unique first occurrence position + let mut statistic: HashMap = HashMap::new(); + let mut dictionary: HashMap = HashMap::new(); + let mut encoded_data: Vec = Vec::new(); + + // Split original bytecode into 8-byte chunks. + for (position, chunk_bytes) in code.chunks(8).enumerate() { + // It is safe to unwrap here, because each chunk is exactly 8 bytes, since + // valid bytecodes are divisible by 8. + let chunk = u64::from_be_bytes(chunk_bytes.try_into().unwrap()); + + // Count the number of occurrences of each chunk. + statistic.entry(chunk).or_insert((0, position)).0 += 1; + } + + let mut statistic_sorted_by_value: Vec<_> = statistic.into_iter().collect::>(); + statistic_sorted_by_value.sort_by_key(|x| x.1); + + // The dictionary size is limited by 2^16 - 1, + if statistic_sorted_by_value.len() > u16::MAX.into() { + return Err(FailedToCompressBytecodeError::DictionaryOverflow); + } + + // Fill the dictionary with the most popular chunks. + // The most popular chunks will be encoded with the smallest indexes, so that + // the 255 most popular chunks will be encoded with one zero byte. + // And the encoded data will be filled with more zeros, so + // the calldata that will be sent to L1 will be cheaper. + for (chunk, _) in statistic_sorted_by_value.iter().rev() { + dictionary.insert(*chunk, dictionary.len() as u16); + } + + for chunk_bytes in code.chunks(8) { + // It is safe to unwrap here, because each chunk is exactly 8 bytes, since + // valid bytecodes are divisible by 8. + let chunk = u64::from_be_bytes(chunk_bytes.try_into().unwrap()); + + // Add the index of the chunk to the encoded data. 
+ encoded_data.extend(dictionary.get(&chunk).unwrap().to_be_bytes()); + } + + // Prepare the raw compressed bytecode in the following format: + // - 2 bytes: the length of the dictionary (N) + // - N bytes: packed dictionary bytes + // - remaining bytes: packed encoded data bytes + + let mut compressed: Vec = Vec::new(); + compressed.extend((dictionary.len() as u16).to_be_bytes()); + + let mut entries: Vec<_> = dictionary.into_iter().map(|(k, v)| (v, k)).collect(); + entries.sort_unstable(); + for (_, chunk) in entries { + compressed.extend(chunk.to_be_bytes()); + } + compressed.extend(encoded_data); + Ok(compressed) +} + +pub(crate) fn compress( + bytecode: Vec, +) -> Result { + Ok(CompressedBytecodeInfo { + compressed: compress_to_bytes(&bytecode)?, + original: bytecode, + }) +} + +pub(crate) fn encode_call(bytecode: &CompressedBytecodeInfo) -> Vec { + let mut bytecode_hash = hash_bytecode(&bytecode.original).as_bytes().to_vec(); + let empty_cell = [0_u8; 32]; + bytecode_hash.extend_from_slice(&empty_cell); + + let bytes_encoded = ethabi::encode(&[ + Token::Bytes(bytecode.original.clone()), + Token::Bytes(bytecode.compressed.clone()), + ]); + bytecode_hash.extend_from_slice(&bytes_encoded); + bytecode_hash +} + +#[cfg(test)] +mod tests { + use super::*; + + fn decompress_bytecode(raw_compressed_bytecode: &[u8]) -> Vec { + let mut decompressed: Vec = Vec::new(); + let mut dictionary: Vec = Vec::new(); + + let dictionary_len = u16::from_be_bytes(raw_compressed_bytecode[0..2].try_into().unwrap()); + for index in 0..dictionary_len { + let chunk = u64::from_be_bytes( + raw_compressed_bytecode[2 + index as usize * 8..10 + index as usize * 8] + .try_into() + .unwrap(), + ); + dictionary.push(chunk); + } + + let encoded_data = &raw_compressed_bytecode[2 + dictionary_len as usize * 8..]; + for index_bytes in encoded_data.chunks(2) { + let index = u16::from_be_bytes(index_bytes.try_into().unwrap()); + + let chunk = dictionary[index as usize]; + 
decompressed.extend(chunk.to_be_bytes()); + } + + decompressed + } + + #[test] + fn bytecode_compression() { + let example_code = hex::decode("000200000000000200010000000103550000006001100270000000150010019d0000000101200190000000080000c13d0000000001000019004e00160000040f0000000101000039004e00160000040f0000001504000041000000150510009c000000000104801900000040011002100000000001310019000000150320009c0000000002048019000000600220021000000000012100190000004f0001042e000000000100001900000050000104300000008002000039000000400020043f0000000002000416000000000110004c000000240000613d000000000120004c0000004d0000c13d000000200100003900000100001004430000012000000443000001000100003900000040020000390000001d03000041004e000a0000040f000000000120004c0000004d0000c13d0000000001000031000000030110008c0000004d0000a13d0000000101000367000000000101043b0000001601100197000000170110009c0000004d0000c13d0000000101000039000000000101041a0000000202000039000000000202041a000000400300043d00000040043000390000001805200197000000000600041a0000000000540435000000180110019700000020043000390000000000140435000000a0012002700000001901100197000000600430003900000000001404350000001a012001980000001b010000410000000001006019000000b8022002700000001c02200197000000000121019f0000008002300039000000000012043500000018016001970000000000130435000000400100043d0000000002130049000000a0022000390000000003000019004e000a0000040f004e00140000040f0000004e000004320000004f0001042e000000500001043000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffff000000000000000000000000000000000000000000000000000000008903573000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000ffffff0000000000008000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff80000000000000000000000000000000000000000000000000000000000000007fffff0
0000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap(); + let compressed = compress_to_bytes(&example_code).unwrap(); + let decompressed = decompress_bytecode(&compressed); + + assert_eq!(example_code, decompressed); + } + + #[test] + fn bytecode_compression_statisticst() { + let example_code = + hex::decode("0000000000000000111111111111111111111111111111112222222222222222") + .unwrap(); + // The size of the dictionary should be `0x0003` + // The dictionary itself should put the most common chunk first, i.e. `0x1111111111111111` + // Then, the ordering does not matter, but the algorithm will return the one with the highest position, i.e. `0x2222222222222222` + let expected_encoding = + hex::decode("00031111111111111111222222222222222200000000000000000002000000000001") + .unwrap(); + + assert_eq!(expected_encoding, compress_to_bytes(&example_code).unwrap()); + } +} diff --git a/core/lib/types/src/storage_writes_deduplicator.rs b/core/lib/multivm/src/utils/deduplicator.rs similarity index 98% rename from core/lib/types/src/storage_writes_deduplicator.rs rename to core/lib/multivm/src/utils/deduplicator.rs index f9f3cc323b9..e9a870e6901 100644 --- a/core/lib/types/src/storage_writes_deduplicator.rs +++ b/core/lib/multivm/src/utils/deduplicator.rs @@ -1,13 +1,12 @@ use std::collections::HashMap; -use zksync_basic_types::H256; -use zksync_utils::h256_to_u256; - -use crate::{ - tx::tx_execution_info::DeduplicatedWritesMetrics, +use zksync_types::{ writes::compression::compress_with_best_strategy, StorageKey, StorageLogKind, - StorageLogWithPreviousValue, + StorageLogWithPreviousValue, H256, }; +use zksync_utils::h256_to_u256; + +use crate::interface::DeduplicatedWritesMetrics; #[derive(Debug, Clone, Copy, PartialEq, Default)] pub struct ModifiedSlot { @@ -212,11 +211,10 @@ impl StorageWritesDeduplicator { #[cfg(test)] mod tests { - use zksync_basic_types::{AccountTreeId, U256}; + use 
zksync_types::{AccountTreeId, StorageLog, H160, U256}; use zksync_utils::u256_to_h256; use super::*; - use crate::{StorageLog, H160}; fn storage_log( key: U256, diff --git a/core/lib/multivm/src/utils.rs b/core/lib/multivm/src/utils/mod.rs similarity index 96% rename from core/lib/multivm/src/utils.rs rename to core/lib/multivm/src/utils/mod.rs index 4ea613252d0..602c2c4e0f7 100644 --- a/core/lib/multivm/src/utils.rs +++ b/core/lib/multivm/src/utils/mod.rs @@ -4,8 +4,12 @@ use zksync_types::{ U256, }; +pub use self::deduplicator::{ModifiedSlot, StorageWritesDeduplicator}; use crate::interface::L1BatchEnv; +pub(crate) mod bytecode; +mod deduplicator; + /// Calculates the base fee and gas per pubdata for the given L1 gas price. pub fn derive_base_fee_and_gas_per_pubdata( batch_fee_input: BatchFeeInput, @@ -496,3 +500,21 @@ pub fn get_max_batch_base_layer_circuits(version: VmVersion) -> usize { } } } + +/// Holds information about number of cycles used per circuit type. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] +pub(crate) struct CircuitCycleStatistic { + pub main_vm_cycles: u32, + pub ram_permutation_cycles: u32, + pub storage_application_cycles: u32, + pub storage_sorter_cycles: u32, + pub code_decommitter_cycles: u32, + pub code_decommitter_sorter_cycles: u32, + pub log_demuxer_cycles: u32, + pub events_sorter_cycles: u32, + pub keccak256_cycles: u32, + pub ecrecover_cycles: u32, + pub sha256_cycles: u32, + pub secp256k1_verify_cycles: u32, + pub transient_storage_checker_cycles: u32, +} diff --git a/core/lib/multivm/src/versions/shadow.rs b/core/lib/multivm/src/versions/shadow.rs index 8fe10f83367..6af546318af 100644 --- a/core/lib/multivm/src/versions/shadow.rs +++ b/core/lib/multivm/src/versions/shadow.rs @@ -5,14 +5,14 @@ use std::{ use anyhow::Context as _; use zksync_types::{StorageKey, StorageLog, StorageLogWithPreviousValue, Transaction}; -use zksync_utils::bytecode::CompressedBytecodeInfo; use crate::{ interface::{ 
storage::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView}, - BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, - L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, - VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, + BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, CurrentExecutionState, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, }, vm_fast, }; diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs index 3bf5ae25e39..f86beb2d400 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs @@ -5,21 +5,19 @@ use zksync_types::{ l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, Transaction, }; -use zksync_utils::{ - bytecode::{hash_bytecode, CompressedBytecodeInfo}, - h256_to_u256, u256_to_h256, -}; +use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; use crate::{ glue::{history_mode::HistoryMode, GlueInto}, interface::{ storage::{StoragePtr, WriteStorage}, - BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, - L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, + BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, CurrentExecutionState, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, tracers::old::TracerDispatcher, + utils::bytecode, vm_1_3_2::{events::merge_events, VmInstance}, }; @@ -173,7 +171,7 @@ impl VmInterface for Vm { None } else { bytecode_hashes.push(bytecode_hash); - CompressedBytecodeInfo::from_original(bytecode.clone()).ok() + bytecode::compress(bytecode.clone()).ok() } }); let compressed_bytecodes: Vec<_> = 
filtered_deps.collect(); diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs index a2bc552e9ec..d9d0931e09b 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs @@ -12,14 +12,13 @@ use zk_evm_1_3_3::{ }; use zksync_types::{ l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - tx::tx_execution_info::TxExecutionStatus, vm_trace::Call, L1BatchNumber, VmEvent, H256, U256, }; use crate::{ glue::GlueInto, - interface::{storage::WriteStorage, VmExecutionLogs}, + interface::{storage::WriteStorage, TxExecutionStatus, VmExecutionLogs}, versions::shared::{VmExecutionTrace, VmTrace}, vm_1_3_2::{ bootloader_state::BootloaderState, diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs index aef5b1dc78a..d1acdf7708e 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs @@ -19,14 +19,12 @@ use zksync_types::{ BOOTLOADER_ADDRESS, L1_GAS_PER_PUBDATA_BYTE, MAX_NEW_FACTORY_DEPS, U256, }; use zksync_utils::{ - address_to_u256, - bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}, - bytes_to_be_words, h256_to_u256, - misc::ceil_div, + address_to_u256, bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, misc::ceil_div, }; use crate::{ - interface::{storage::WriteStorage, L1BatchEnv}, + interface::{storage::WriteStorage, CompressedBytecodeInfo, L1BatchEnv}, + utils::bytecode, vm_1_3_2::{ bootloader_state::BootloaderState, history_recorder::HistoryMode, @@ -448,7 +446,7 @@ pub fn get_bootloader_memory( let mut total_compressed_len_words = 0; for i in compressed_bytecodes.iter() { - total_compressed_len_words += i.encode_call().len() / 32; + total_compressed_len_words += bytecode::encode_call(i).len() / 32; } let memory_for_current_tx = get_bootloader_memory_for_tx( 
@@ -521,20 +519,13 @@ pub fn push_raw_transaction_to_bootloader_memory = compressed_bytecodes .into_iter() - .flat_map(|x| x.encode_call()) + .flat_map(|x| bytecode::encode_call(&x)) .collect(); let memory_addition = bytes_to_be_words(memory_addition); diff --git a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/state.rs index 9a1a727aab3..22d7b2814cf 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/state.rs @@ -2,11 +2,10 @@ use std::cmp::Ordering; use once_cell::sync::OnceCell; use zksync_types::{L2ChainId, U256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; use super::{tx::BootloaderTx, utils::apply_pubdata_to_memory}; use crate::{ - interface::{BootloaderMemory, L2BlockEnv, TxExecutionMode}, + interface::{BootloaderMemory, CompressedBytecodeInfo, L2BlockEnv, TxExecutionMode}, vm_1_4_1::{ bootloader_state::{ l2_block::BootloaderL2Block, diff --git a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/tx.rs b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/tx.rs index f82f34a7b0e..4c6b6d3d061 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/tx.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/tx.rs @@ -1,7 +1,6 @@ use zksync_types::{L2ChainId, H256, U256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; -use crate::vm_1_4_1::types::internals::TransactionData; +use crate::{interface::CompressedBytecodeInfo, vm_1_4_1::types::internals::TransactionData}; /// Information about tx necessary for execution in bootloader. 
#[derive(Debug, Clone)] diff --git a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs index d203542b16b..393eb043cb7 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs @@ -1,9 +1,10 @@ use zksync_types::{ethabi, U256}; -use zksync_utils::{bytecode::CompressedBytecodeInfo, bytes_to_be_words, h256_to_u256}; +use zksync_utils::{bytes_to_be_words, h256_to_u256}; use super::tx::BootloaderTx; use crate::{ - interface::{BootloaderMemory, TxExecutionMode}, + interface::{BootloaderMemory, CompressedBytecodeInfo, TxExecutionMode}, + utils::bytecode, vm_1_4_1::{ bootloader_state::l2_block::BootloaderL2Block, constants::{ @@ -22,7 +23,7 @@ pub(super) fn get_memory_for_compressed_bytecodes( ) -> Vec { let memory_addition: Vec<_> = compressed_bytecodes .iter() - .flat_map(|x| x.encode_call()) + .flat_map(bytecode::encode_call) .collect(); bytes_to_be_words(memory_addition) diff --git a/core/lib/multivm/src/versions/vm_1_4_1/implementation/bytecode.rs b/core/lib/multivm/src/versions/vm_1_4_1/implementation/bytecode.rs index cc03b53aa53..6e0e31d461d 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/implementation/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/implementation/bytecode.rs @@ -1,15 +1,13 @@ use itertools::Itertools; use zksync_types::U256; -use zksync_utils::{ - bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}, - bytes_to_be_words, -}; +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - VmInterface, + CompressedBytecodeInfo, VmInterface, }, + utils::bytecode, vm_1_4_1::Vm, HistoryMode, }; @@ -50,15 +48,6 @@ pub(crate) fn compress_bytecodes( .dedup_by(|x, y| x.1 == y.1) .filter(|(_idx, dep)| !storage.borrow_mut().is_bytecode_known(&hash_bytecode(dep))) .sorted_by_key(|(idx, _dep)| 
*idx) - .filter_map(|(_idx, dep)| { - let compressed_bytecode = compress_bytecode(dep); - - compressed_bytecode - .ok() - .map(|compressed| CompressedBytecodeInfo { - original: dep.clone(), - compressed, - }) - }) + .filter_map(|(_idx, dep)| bytecode::compress(dep.clone()).ok()) .collect() } diff --git a/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs index dfdd42be718..71ae20d4406 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs @@ -1,8 +1,8 @@ use zk_evm_1_4_1::aux_structures::Timestamp; -use zksync_types::{circuit::CircuitStatistic, U256}; +use zksync_types::U256; use crate::{ - interface::{storage::WriteStorage, VmExecutionStatistics, VmMemoryMetrics}, + interface::{storage::WriteStorage, CircuitStatistic, VmExecutionStatistics, VmMemoryMetrics}, vm_1_4_1::{tracers::DefaultExecutionTracer, vm::Vm}, HistoryMode, }; diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tracers/circuits_capacity.rs b/core/lib/multivm/src/versions/vm_1_4_1/tracers/circuits_capacity.rs index b93eb88a21b..a32328bbc18 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/tracers/circuits_capacity.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/tracers/circuits_capacity.rs @@ -1,5 +1,6 @@ use circuit_sequencer_api_1_4_1::{geometry_config::get_geometry_config, toolset::GeometryConfig}; -use zksync_types::circuit::{CircuitCycleStatistic, CircuitStatistic}; + +use crate::{interface::CircuitStatistic, utils::CircuitCycleStatistic}; // "Rich addressing" opcodes are opcodes that can write their return value/read the input onto the stack // and so take 1-2 RAM permutations more than an average opcode. 
diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tracers/circuits_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_1/tracers/circuits_tracer.rs index 43a41897fdd..04842ab7bb6 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/tracers/circuits_tracer.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/tracers/circuits_tracer.rs @@ -5,7 +5,6 @@ use zk_evm_1_4_1::{ zk_evm_abstractions::precompiles::PrecompileAddress, zkevm_opcode_defs::{LogOpcode, Opcode, UMAOpcode}, }; -use zksync_types::circuit::CircuitCycleStatistic; use super::circuits_capacity::*; use crate::{ @@ -14,6 +13,7 @@ use crate::{ tracer::TracerExecutionStatus, }, tracers::dynamic::vm_1_4_1::DynTracer, + utils::CircuitCycleStatistic, vm_1_4_1::{ bootloader_state::BootloaderState, old_vm::{history_recorder::HistoryMode, memory::SimpleMemory}, @@ -137,7 +137,7 @@ impl VmTracer for CircuitsTracer { impl CircuitsTracer { pub(crate) fn new() -> Self { Self { - statistics: CircuitCycleStatistic::new(), + statistics: CircuitCycleStatistic::default(), last_decommitment_history_entry_checked: None, last_written_keys_history_entry_checked: None, last_read_keys_history_entry_checked: None, diff --git a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs index e37a8757ee1..96f07e69d00 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs @@ -4,15 +4,15 @@ use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, Transaction, }; -use zksync_utils::bytecode::CompressedBytecodeInfo; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, - BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, - L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, - VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, + BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, CurrentExecutionState, + FinishedL1Batch, L1BatchEnv, 
L2BlockEnv, SystemEnv, VmExecutionMode, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, }, vm_1_4_1::{ bootloader_state::BootloaderState, diff --git a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/state.rs index 059d2a93e27..e692c8a2640 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/state.rs @@ -2,11 +2,10 @@ use std::cmp::Ordering; use once_cell::sync::OnceCell; use zksync_types::{L2ChainId, U256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; use super::{tx::BootloaderTx, utils::apply_pubdata_to_memory}; use crate::{ - interface::{BootloaderMemory, L2BlockEnv, TxExecutionMode}, + interface::{BootloaderMemory, CompressedBytecodeInfo, L2BlockEnv, TxExecutionMode}, vm_1_4_2::{ bootloader_state::{ l2_block::BootloaderL2Block, diff --git a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/tx.rs b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/tx.rs index 2ec99c34ec3..f2c177ee684 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/tx.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/tx.rs @@ -1,7 +1,6 @@ use zksync_types::{L2ChainId, H256, U256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; -use crate::vm_1_4_2::types::internals::TransactionData; +use crate::{interface::CompressedBytecodeInfo, vm_1_4_2::types::internals::TransactionData}; /// Information about tx necessary for execution in bootloader. 
#[derive(Debug, Clone)] diff --git a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs index 0da7502186b..600ab83bf48 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs @@ -1,9 +1,10 @@ use zksync_types::{ethabi, U256}; -use zksync_utils::{bytecode::CompressedBytecodeInfo, bytes_to_be_words, h256_to_u256}; +use zksync_utils::{bytes_to_be_words, h256_to_u256}; use super::tx::BootloaderTx; use crate::{ - interface::{BootloaderMemory, TxExecutionMode}, + interface::{BootloaderMemory, CompressedBytecodeInfo, TxExecutionMode}, + utils::bytecode, vm_1_4_2::{ bootloader_state::l2_block::BootloaderL2Block, constants::{ @@ -22,7 +23,7 @@ pub(super) fn get_memory_for_compressed_bytecodes( ) -> Vec { let memory_addition: Vec<_> = compressed_bytecodes .iter() - .flat_map(|x| x.encode_call()) + .flat_map(bytecode::encode_call) .collect(); bytes_to_be_words(memory_addition) diff --git a/core/lib/multivm/src/versions/vm_1_4_2/implementation/bytecode.rs b/core/lib/multivm/src/versions/vm_1_4_2/implementation/bytecode.rs index a4bd40110f2..54e69289521 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/implementation/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/implementation/bytecode.rs @@ -1,15 +1,13 @@ use itertools::Itertools; use zksync_types::U256; -use zksync_utils::{ - bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}, - bytes_to_be_words, -}; +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - VmInterface, + CompressedBytecodeInfo, VmInterface, }, + utils::bytecode, vm_1_4_2::Vm, HistoryMode, }; @@ -50,15 +48,6 @@ pub(crate) fn compress_bytecodes( .dedup_by(|x, y| x.1 == y.1) .filter(|(_idx, dep)| !storage.borrow_mut().is_bytecode_known(&hash_bytecode(dep))) .sorted_by_key(|(idx, _dep)| 
*idx) - .filter_map(|(_idx, dep)| { - let compressed_bytecode = compress_bytecode(dep); - - compressed_bytecode - .ok() - .map(|compressed| CompressedBytecodeInfo { - original: dep.clone(), - compressed, - }) - }) + .filter_map(|(_idx, dep)| bytecode::compress(dep.clone()).ok()) .collect() } diff --git a/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs index 4d1675227fb..92a2eaa650c 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs @@ -1,8 +1,8 @@ use zk_evm_1_4_1::aux_structures::Timestamp; -use zksync_types::{circuit::CircuitStatistic, U256}; +use zksync_types::U256; use crate::{ - interface::{storage::WriteStorage, VmExecutionStatistics, VmMemoryMetrics}, + interface::{storage::WriteStorage, CircuitStatistic, VmExecutionStatistics, VmMemoryMetrics}, vm_1_4_2::{tracers::DefaultExecutionTracer, vm::Vm}, HistoryMode, }; diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tracers/circuits_capacity.rs b/core/lib/multivm/src/versions/vm_1_4_2/tracers/circuits_capacity.rs index 8cabd911cc6..974e0757721 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tracers/circuits_capacity.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/tracers/circuits_capacity.rs @@ -1,5 +1,6 @@ use circuit_sequencer_api_1_4_2::{geometry_config::get_geometry_config, toolset::GeometryConfig}; -use zksync_types::circuit::{CircuitCycleStatistic, CircuitStatistic}; + +use crate::{interface::CircuitStatistic, utils::CircuitCycleStatistic}; // "Rich addressing" opcodes are opcodes that can write their return value/read the input onto the stack // and so take 1-2 RAM permutations more than an average opcode. 
diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tracers/circuits_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_2/tracers/circuits_tracer.rs index b781ee186fd..04b6e532b2b 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tracers/circuits_tracer.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/tracers/circuits_tracer.rs @@ -5,7 +5,6 @@ use zk_evm_1_4_1::{ zk_evm_abstractions::precompiles::PrecompileAddress, zkevm_opcode_defs::{LogOpcode, Opcode, UMAOpcode}, }; -use zksync_types::circuit::CircuitCycleStatistic; use super::circuits_capacity::*; use crate::{ @@ -14,6 +13,7 @@ use crate::{ tracer::TracerExecutionStatus, }, tracers::dynamic::vm_1_4_1::DynTracer, + utils::CircuitCycleStatistic, vm_1_4_2::{ bootloader_state::BootloaderState, old_vm::{history_recorder::HistoryMode, memory::SimpleMemory}, @@ -138,7 +138,7 @@ impl VmTracer for CircuitsTracer { impl CircuitsTracer { pub(crate) fn new() -> Self { Self { - statistics: CircuitCycleStatistic::new(), + statistics: CircuitCycleStatistic::default(), last_decommitment_history_entry_checked: None, last_written_keys_history_entry_checked: None, last_read_keys_history_entry_checked: None, diff --git a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs index 434e8ea1c42..84eca786e02 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs @@ -4,15 +4,15 @@ use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, Transaction, }; -use zksync_utils::bytecode::CompressedBytecodeInfo; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, - BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, - L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, - VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, + BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, CurrentExecutionState, + FinishedL1Batch, L1BatchEnv, 
L2BlockEnv, SystemEnv, VmExecutionMode, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, }, vm_1_4_2::{ bootloader_state::BootloaderState, diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/state.rs index db13d2aace5..8a605978a1e 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/state.rs @@ -2,11 +2,10 @@ use std::cmp::Ordering; use once_cell::sync::OnceCell; use zksync_types::{L2ChainId, U256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; use super::{tx::BootloaderTx, utils::apply_pubdata_to_memory}; use crate::{ - interface::{BootloaderMemory, L2BlockEnv, TxExecutionMode}, + interface::{BootloaderMemory, CompressedBytecodeInfo, L2BlockEnv, TxExecutionMode}, vm_boojum_integration::{ bootloader_state::{ l2_block::BootloaderL2Block, diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/tx.rs b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/tx.rs index 3030427281b..7ae8f9612cd 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/tx.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/tx.rs @@ -1,7 +1,8 @@ use zksync_types::{L2ChainId, H256, U256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; -use crate::vm_boojum_integration::types::internals::TransactionData; +use crate::{ + interface::CompressedBytecodeInfo, vm_boojum_integration::types::internals::TransactionData, +}; /// Information about tx necessary for execution in bootloader. 
#[derive(Debug, Clone)] diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs index 77a8ed2ce9b..1a1c620c2b2 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs @@ -1,9 +1,10 @@ use zksync_types::{ethabi, U256}; -use zksync_utils::{bytecode::CompressedBytecodeInfo, bytes_to_be_words, h256_to_u256}; +use zksync_utils::{bytes_to_be_words, h256_to_u256}; use super::tx::BootloaderTx; use crate::{ - interface::{BootloaderMemory, TxExecutionMode}, + interface::{BootloaderMemory, CompressedBytecodeInfo, TxExecutionMode}, + utils::bytecode, vm_boojum_integration::{ bootloader_state::l2_block::BootloaderL2Block, constants::{ @@ -22,7 +23,7 @@ pub(super) fn get_memory_for_compressed_bytecodes( ) -> Vec { let memory_addition: Vec<_> = compressed_bytecodes .iter() - .flat_map(|x| x.encode_call()) + .flat_map(bytecode::encode_call) .collect(); bytes_to_be_words(memory_addition) diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/bytecode.rs b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/bytecode.rs index 00ff620727b..b7e702b7a95 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/bytecode.rs @@ -1,15 +1,13 @@ use itertools::Itertools; use zksync_types::U256; -use zksync_utils::{ - bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}, - bytes_to_be_words, -}; +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - VmInterface, + CompressedBytecodeInfo, VmInterface, }, + utils::bytecode, vm_boojum_integration::Vm, HistoryMode, }; @@ -50,15 +48,6 @@ pub(crate) fn compress_bytecodes( .dedup_by(|x, 
y| x.1 == y.1) .filter(|(_idx, dep)| !storage.borrow_mut().is_bytecode_known(&hash_bytecode(dep))) .sorted_by_key(|(idx, _dep)| *idx) - .filter_map(|(_idx, dep)| { - let compressed_bytecode = compress_bytecode(dep); - - compressed_bytecode - .ok() - .map(|compressed| CompressedBytecodeInfo { - original: dep.clone(), - compressed, - }) - }) + .filter_map(|(_idx, dep)| bytecode::compress(dep.clone()).ok()) .collect() } diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs index fe5b8abd683..46f8bc2f400 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs @@ -1,8 +1,8 @@ use zk_evm_1_4_0::aux_structures::Timestamp; -use zksync_types::{circuit::CircuitStatistic, U256}; +use zksync_types::U256; use crate::{ - interface::{storage::WriteStorage, VmExecutionStatistics, VmMemoryMetrics}, + interface::{storage::WriteStorage, CircuitStatistic, VmExecutionStatistics, VmMemoryMetrics}, vm_boojum_integration::{tracers::DefaultExecutionTracer, vm::Vm}, HistoryMode, }; diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/circuits_capacity.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/circuits_capacity.rs index fedbfd47c8e..a9e5e17e797 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/circuits_capacity.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/circuits_capacity.rs @@ -1,5 +1,6 @@ use circuit_sequencer_api_1_4_0::{geometry_config::get_geometry_config, toolset::GeometryConfig}; -use zksync_types::circuit::{CircuitCycleStatistic, CircuitStatistic}; + +use crate::{interface::CircuitStatistic, utils::CircuitCycleStatistic}; // "Rich addressing" opcodes are opcodes that can write their return value/read the input onto the stack // and so take 1-2 RAM 
permutations more than an average opcode. diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/circuits_tracer.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/circuits_tracer.rs index 9bcf2a3783f..c92f261d9cb 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/circuits_tracer.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/circuits_tracer.rs @@ -5,7 +5,6 @@ use zk_evm_1_4_0::{ zk_evm_abstractions::precompiles::PrecompileAddress, zkevm_opcode_defs::{LogOpcode, Opcode, UMAOpcode}, }; -use zksync_types::circuit::CircuitCycleStatistic; use super::circuits_capacity::*; use crate::{ @@ -14,6 +13,7 @@ use crate::{ tracer::TracerExecutionStatus, }, tracers::dynamic::vm_1_4_0::DynTracer, + utils::CircuitCycleStatistic, vm_boojum_integration::{ bootloader_state::BootloaderState, old_vm::{history_recorder::HistoryMode, memory::SimpleMemory}, @@ -137,7 +137,7 @@ impl VmTracer for CircuitsTracer { impl CircuitsTracer { pub(crate) fn new() -> Self { Self { - statistics: CircuitCycleStatistic::new(), + statistics: CircuitCycleStatistic::default(), last_decommitment_history_entry_checked: None, last_written_keys_history_entry_checked: None, last_read_keys_history_entry_checked: None, diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs index 1e9f73be598..c0bf918bd70 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs @@ -4,15 +4,15 @@ use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, Transaction, }; -use zksync_utils::bytecode::CompressedBytecodeInfo; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, - BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, - L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, - VmInterface, 
VmInterfaceHistoryEnabled, VmMemoryMetrics, + BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, CurrentExecutionState, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, }, vm_boojum_integration::{ bootloader_state::BootloaderState, diff --git a/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs index ae1c70db586..ce37636d2cd 100644 --- a/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs @@ -2,7 +2,6 @@ use std::cmp::Ordering; use once_cell::sync::OnceCell; use zksync_types::{L2ChainId, U256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; use super::{ l2_block::BootloaderL2Block, @@ -11,7 +10,7 @@ use super::{ BootloaderStateSnapshot, }; use crate::{ - interface::{BootloaderMemory, L2BlockEnv, TxExecutionMode}, + interface::{BootloaderMemory, CompressedBytecodeInfo, L2BlockEnv, TxExecutionMode}, versions::vm_fast::{pubdata::PubdataInput, transaction_data::TransactionData}, vm_latest::{constants::TX_DESCRIPTION_OFFSET, utils::l2_blocks::assert_next_block}, }; diff --git a/core/lib/multivm/src/versions/vm_fast/bootloader_state/tx.rs b/core/lib/multivm/src/versions/vm_fast/bootloader_state/tx.rs index 36c1d65ddd3..dc0706561d5 100644 --- a/core/lib/multivm/src/versions/vm_fast/bootloader_state/tx.rs +++ b/core/lib/multivm/src/versions/vm_fast/bootloader_state/tx.rs @@ -1,7 +1,8 @@ use zksync_types::{L2ChainId, H256, U256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; -use crate::versions::vm_fast::transaction_data::TransactionData; +use crate::{ + interface::CompressedBytecodeInfo, versions::vm_fast::transaction_data::TransactionData, +}; /// Information about tx necessary for execution in bootloader. 
#[derive(Debug, Clone)] diff --git a/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs index 21259e366d1..f280f56a828 100644 --- a/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs @@ -1,9 +1,10 @@ use zksync_types::{ethabi, U256}; -use zksync_utils::{bytecode::CompressedBytecodeInfo, bytes_to_be_words, h256_to_u256}; +use zksync_utils::{bytes_to_be_words, h256_to_u256}; use super::{l2_block::BootloaderL2Block, tx::BootloaderTx}; use crate::{ - interface::{BootloaderMemory, TxExecutionMode}, + interface::{BootloaderMemory, CompressedBytecodeInfo, TxExecutionMode}, + utils::bytecode, versions::vm_fast::pubdata::PubdataInput, vm_latest::constants::{ BOOTLOADER_TX_DESCRIPTION_OFFSET, BOOTLOADER_TX_DESCRIPTION_SIZE, @@ -19,7 +20,7 @@ pub(super) fn get_memory_for_compressed_bytecodes( ) -> Vec { let memory_addition: Vec<_> = compressed_bytecodes .iter() - .flat_map(|x| x.encode_call()) + .flat_map(bytecode::encode_call) .collect(); bytes_to_be_words(memory_addition) diff --git a/core/lib/multivm/src/versions/vm_fast/bytecode.rs b/core/lib/multivm/src/versions/vm_fast/bytecode.rs index 3507b84840e..02122e5f29c 100644 --- a/core/lib/multivm/src/versions/vm_fast/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_fast/bytecode.rs @@ -1,12 +1,12 @@ use itertools::Itertools; use zksync_types::H256; -use zksync_utils::{ - bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}, - h256_to_u256, -}; +use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; use super::Vm; -use crate::interface::storage::ReadStorage; +use crate::{ + interface::{storage::ReadStorage, CompressedBytecodeInfo}, + utils::bytecode, +}; impl Vm { /// Checks the last transaction has successfully published compressed bytecodes and returns `true` if there is at least one is still unknown. 
@@ -38,15 +38,6 @@ pub(crate) fn compress_bytecodes( .dedup_by(|x, y| x.1 == y.1) .filter(|(_idx, dep)| !is_bytecode_known(hash_bytecode(dep))) .sorted_by_key(|(idx, _dep)| *idx) - .filter_map(|(_idx, dep)| { - let compressed_bytecode = compress_bytecode(dep); - - compressed_bytecode - .ok() - .map(|compressed| CompressedBytecodeInfo { - original: dep.clone(), - compressed, - }) - }) + .filter_map(|(_idx, dep)| bytecode::compress(dep.clone()).ok()) .collect() } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs index 01fc8dc07d0..9c39952a03a 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs @@ -1,8 +1,8 @@ use zksync_types::event::extract_long_l2_to_l1_messages; -use zksync_utils::bytecode::compress_bytecode; use crate::{ interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + utils::bytecode, vm_fast::tests::{ tester::{DeployContractsTx, TxType, VmTesterBuilder}, utils::read_test_contract, @@ -22,7 +22,7 @@ fn test_bytecode_publishing() { let counter = read_test_contract(); let account = &mut vm.rich_accounts[0]; - let compressed_bytecode = compress_bytecode(&counter).unwrap(); + let compressed_bytecode = bytecode::compress(counter.clone()).unwrap().compressed; let DeployContractsTx { tx, .. 
} = account.get_deploy_tx(&counter, None, TxType::L2); vm.vm.push_transaction(tx); diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs index 033a7b2658f..f1411497c24 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs @@ -4,13 +4,13 @@ use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS}; use zksync_types::{ get_code_key, get_known_code_key, l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - storage_writes_deduplicator::StorageWritesDeduplicator, Execute, ExecuteTransactionCommon, U256, }; use zksync_utils::{h256_to_u256, u256_to_h256}; use crate::{ interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + utils::StorageWritesDeduplicator, vm_fast::{ tests::{ tester::{TxType, VmTesterBuilder}, diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs index 56d98a537bf..bcd28e22253 100644 --- a/core/lib/multivm/src/versions/vm_fast/vm.rs +++ b/core/lib/multivm/src/versions/vm_fast/vm.rs @@ -34,11 +34,11 @@ use super::{ use crate::{ glue::GlueInto, interface::{ - storage::ReadStorage, BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, - ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, Refunds, SystemEnv, - TxRevertReason, VmExecutionLogs, VmExecutionMode, VmExecutionResultAndLogs, - VmExecutionStatistics, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, - VmRevertReason, + storage::ReadStorage, BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, + CurrentExecutionState, ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, + Refunds, SystemEnv, TxRevertReason, VmExecutionLogs, VmExecutionMode, + VmExecutionResultAndLogs, VmExecutionStatistics, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, VmRevertReason, }, vm_fast::{ 
bootloader_state::utils::{apply_l2_block, apply_pubdata_to_memory}, @@ -533,9 +533,7 @@ impl VmInterface for Vm { self.bootloader_state.bootloader_memory() } - fn get_last_tx_compressed_bytecodes( - &self, - ) -> Vec { + fn get_last_tx_compressed_bytecodes(&self) -> Vec { self.bootloader_state.get_last_tx_compressed_bytecodes() } diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs index a3f59937d57..f15199a74f8 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs @@ -2,11 +2,10 @@ use std::cmp::Ordering; use once_cell::sync::OnceCell; use zksync_types::{L2ChainId, U256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; use super::{tx::BootloaderTx, utils::apply_pubdata_to_memory}; use crate::{ - interface::{BootloaderMemory, L2BlockEnv, TxExecutionMode}, + interface::{BootloaderMemory, CompressedBytecodeInfo, L2BlockEnv, TxExecutionMode}, vm_latest::{ bootloader_state::{ l2_block::BootloaderL2Block, diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/tx.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/tx.rs index 8f14976be34..2c63db7e435 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/tx.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/tx.rs @@ -1,7 +1,6 @@ use zksync_types::{L2ChainId, H256, U256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; -use crate::vm_latest::types::internals::TransactionData; +use crate::{interface::CompressedBytecodeInfo, vm_latest::types::internals::TransactionData}; /// Information about tx necessary for execution in bootloader. 
#[derive(Debug, Clone)] diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs index db4c834fbc7..4931082d6da 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs @@ -1,9 +1,10 @@ use zksync_types::{ethabi, U256}; -use zksync_utils::{bytecode::CompressedBytecodeInfo, bytes_to_be_words, h256_to_u256}; +use zksync_utils::{bytes_to_be_words, h256_to_u256}; use super::tx::BootloaderTx; use crate::{ - interface::{BootloaderMemory, TxExecutionMode}, + interface::{BootloaderMemory, CompressedBytecodeInfo, TxExecutionMode}, + utils::bytecode, vm_latest::{ bootloader_state::l2_block::BootloaderL2Block, constants::{ @@ -22,7 +23,7 @@ pub(super) fn get_memory_for_compressed_bytecodes( ) -> Vec { let memory_addition: Vec<_> = compressed_bytecodes .iter() - .flat_map(|x| x.encode_call()) + .flat_map(bytecode::encode_call) .collect(); bytes_to_be_words(memory_addition) diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/bytecode.rs b/core/lib/multivm/src/versions/vm_latest/implementation/bytecode.rs index 30a428bb834..d0a41ce69f4 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/bytecode.rs @@ -1,15 +1,13 @@ use itertools::Itertools; use zksync_types::U256; -use zksync_utils::{ - bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}, - bytes_to_be_words, -}; +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - VmInterface, + CompressedBytecodeInfo, VmInterface, }, + utils::bytecode, vm_latest::Vm, HistoryMode, }; @@ -50,15 +48,6 @@ pub(crate) fn compress_bytecodes( .dedup_by(|x, y| x.1 == y.1) .filter(|(_idx, dep)| !storage.borrow_mut().is_bytecode_known(&hash_bytecode(dep))) .sorted_by_key(|(idx, 
_dep)| *idx) - .filter_map(|(_idx, dep)| { - let compressed_bytecode = compress_bytecode(dep); - - compressed_bytecode - .ok() - .map(|compressed| CompressedBytecodeInfo { - original: dep.clone(), - compressed, - }) - }) + .filter_map(|(_idx, dep)| bytecode::compress(dep.clone()).ok()) .collect() } diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs index ed61962648a..34c1e1f81da 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs @@ -1,8 +1,8 @@ use zk_evm_1_5_0::aux_structures::Timestamp; -use zksync_types::{circuit::CircuitStatistic, U256}; +use zksync_types::U256; use crate::{ - interface::{storage::WriteStorage, VmExecutionStatistics, VmMemoryMetrics}, + interface::{storage::WriteStorage, CircuitStatistic, VmExecutionStatistics, VmMemoryMetrics}, vm_latest::vm::Vm, HistoryMode, }; diff --git a/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs index a0c10addff9..93d99a6a0d4 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs @@ -1,8 +1,8 @@ use zksync_types::event::extract_long_l2_to_l1_messages; -use zksync_utils::bytecode::compress_bytecode; use crate::{ interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + utils::bytecode, vm_latest::{ tests::{ tester::{DeployContractsTx, TxType, VmTesterBuilder}, @@ -25,7 +25,7 @@ fn test_bytecode_publishing() { let counter = read_test_contract(); let account = &mut vm.rich_accounts[0]; - let compressed_bytecode = compress_bytecode(&counter).unwrap(); + let compressed_bytecode = bytecode::compress(counter.clone()).unwrap().compressed; let DeployContractsTx { tx, .. 
} = account.get_deploy_tx(&counter, None, TxType::L2); vm.vm.push_transaction(tx); diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs index 6b3be989fb3..4d42bb96cc9 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs @@ -5,13 +5,13 @@ use zksync_test_account::Account; use zksync_types::{ get_code_key, get_known_code_key, l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - storage_writes_deduplicator::StorageWritesDeduplicator, Execute, ExecuteTransactionCommon, K256PrivateKey, U256, }; use zksync_utils::u256_to_h256; use crate::{ interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + utils::StorageWritesDeduplicator, vm_latest::{ tests::{ tester::{TxType, VmTesterBuilder}, diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/circuits_capacity.rs b/core/lib/multivm/src/versions/vm_latest/tracers/circuits_capacity.rs index a570d3bd99b..0977a323d19 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/circuits_capacity.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/circuits_capacity.rs @@ -1,5 +1,6 @@ use circuit_sequencer_api_1_5_0::{geometry_config::get_geometry_config, toolset::GeometryConfig}; -use zksync_types::circuit::{CircuitCycleStatistic, CircuitStatistic}; + +use crate::{interface::CircuitStatistic, utils::CircuitCycleStatistic}; // "Rich addressing" opcodes are opcodes that can write their return value/read the input onto the stack // and so take 1-2 RAM permutations more than an average opcode. 
diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/circuits_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tracers/circuits_tracer.rs index b3a0e2480dc..6a47f3ae2fb 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/circuits_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/circuits_tracer.rs @@ -5,7 +5,6 @@ use zk_evm_1_5_0::{ zk_evm_abstractions::precompiles::PrecompileAddress, zkevm_opcode_defs::{LogOpcode, Opcode, UMAOpcode}, }; -use zksync_types::circuit::CircuitCycleStatistic; use super::circuits_capacity::*; use crate::{ @@ -14,6 +13,7 @@ use crate::{ tracer::TracerExecutionStatus, }, tracers::dynamic::vm_1_5_0::DynTracer, + utils::CircuitCycleStatistic, vm_latest::{ bootloader_state::BootloaderState, old_vm::{history_recorder::HistoryMode, memory::SimpleMemory}, @@ -162,7 +162,7 @@ impl VmTracer for CircuitsTracer { impl CircuitsTracer { pub(crate) fn new() -> Self { Self { - statistics: CircuitCycleStatistic::new(), + statistics: CircuitCycleStatistic::default(), last_decommitment_history_entry_checked: None, last_written_keys_history_entry_checked: None, last_read_keys_history_entry_checked: None, diff --git a/core/lib/multivm/src/versions/vm_latest/vm.rs b/core/lib/multivm/src/versions/vm_latest/vm.rs index a5e7d8ef8be..26f8a91f2d3 100644 --- a/core/lib/multivm/src/versions/vm_latest/vm.rs +++ b/core/lib/multivm/src/versions/vm_latest/vm.rs @@ -5,15 +5,15 @@ use zksync_types::{ vm::VmVersion, Transaction, }; -use zksync_utils::bytecode::CompressedBytecodeInfo; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, - BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, - L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, - VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, + BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, CurrentExecutionState, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, 
VmExecutionMode, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, }, vm_latest::{ bootloader_state::BootloaderState, diff --git a/core/lib/multivm/src/versions/vm_m5/vm.rs b/core/lib/multivm/src/versions/vm_m5/vm.rs index a0d6ea39cea..8f232c95b38 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm.rs @@ -6,15 +6,15 @@ use zksync_types::{ vm::VmVersion, Transaction, }; -use zksync_utils::{bytecode::CompressedBytecodeInfo, h256_to_u256, u256_to_h256}; +use zksync_utils::{h256_to_u256, u256_to_h256}; use crate::{ glue::{history_mode::HistoryMode, GlueInto}, interface::{ - storage::StoragePtr, BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, - VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, + storage::StoragePtr, BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, + CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, + VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, + VmInterfaceHistoryEnabled, VmMemoryMetrics, }, vm_m5::{ events::merge_events, diff --git a/core/lib/multivm/src/versions/vm_m5/vm_instance.rs b/core/lib/multivm/src/versions/vm_m5/vm_instance.rs index b97b5e047c6..2a63a91ccaf 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm_instance.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm_instance.rs @@ -11,13 +11,12 @@ use zk_evm_1_3_1::{ }; use zksync_types::{ l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - tx::tx_execution_info::TxExecutionStatus, L1BatchNumber, VmEvent, U256, }; use crate::{ glue::GlueInto, - interface::VmExecutionLogs, + interface::{TxExecutionStatus, VmExecutionLogs}, versions::shared::VmExecutionTrace, vm_m5::{ bootloader_state::BootloaderState, diff --git a/core/lib/multivm/src/versions/vm_m6/vm.rs 
b/core/lib/multivm/src/versions/vm_m6/vm.rs index 3626378ce59..b59561319f5 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm.rs @@ -8,20 +8,18 @@ use zksync_types::{ vm::VmVersion, Transaction, }; -use zksync_utils::{ - bytecode::{hash_bytecode, CompressedBytecodeInfo}, - h256_to_u256, u256_to_h256, -}; +use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; use crate::{ glue::{history_mode::HistoryMode, GlueInto}, interface::{ - storage::StoragePtr, BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, - VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, + storage::StoragePtr, BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, + CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, + VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, + VmInterfaceHistoryEnabled, VmMemoryMetrics, }, tracers::old::TracerDispatcher, + utils::bytecode, vm_m6::{events::merge_events, storage::Storage, vm_instance::MultiVMSubversion, VmInstance}, }; @@ -216,7 +214,7 @@ impl VmInterface for Vm { None } else { bytecode_hashes.push(bytecode_hash); - CompressedBytecodeInfo::from_original(bytecode.clone()).ok() + bytecode::compress(bytecode.clone()).ok() } }); let compressed_bytecodes: Vec<_> = filtered_deps.collect(); diff --git a/core/lib/multivm/src/versions/vm_m6/vm_instance.rs b/core/lib/multivm/src/versions/vm_m6/vm_instance.rs index 5d6a9bf9149..a5f0dd25811 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm_instance.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm_instance.rs @@ -11,14 +11,13 @@ use zk_evm_1_3_1::{ }; use zksync_types::{ l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - tx::tx_execution_info::TxExecutionStatus, vm_trace::Call, L1BatchNumber, VmEvent, H256, U256, }; use crate::{ glue::GlueInto, - 
interface::VmExecutionLogs, + interface::{TxExecutionStatus, VmExecutionLogs}, versions::shared::{VmExecutionTrace, VmTrace}, vm_m6::{ bootloader_state::BootloaderState, diff --git a/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs b/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs index 4409a7a8958..7a9fbb73fe4 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs @@ -18,14 +18,12 @@ use zksync_types::{ L1_GAS_PER_PUBDATA_BYTE, MAX_NEW_FACTORY_DEPS, U256, }; use zksync_utils::{ - address_to_u256, - bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}, - bytes_to_be_words, h256_to_u256, - misc::ceil_div, + address_to_u256, bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, misc::ceil_div, }; use crate::{ - interface::L1BatchEnv, + interface::{CompressedBytecodeInfo, L1BatchEnv}, + utils::bytecode, vm_m6::{ bootloader_state::BootloaderState, history_recorder::HistoryMode, @@ -482,7 +480,7 @@ fn get_bootloader_memory_v1( let mut total_compressed_len = 0; for i in compressed_bytecodes.iter() { - total_compressed_len += i.encode_call().len() + total_compressed_len += bytecode::encode_call(i).len() } let memory_for_current_tx = get_bootloader_memory_for_tx( @@ -527,7 +525,7 @@ fn get_bootloader_memory_v2( let mut total_compressed_len_words = 0; for i in compressed_bytecodes.iter() { - total_compressed_len_words += i.encode_call().len() / 32; + total_compressed_len_words += bytecode::encode_call(i).len() / 32; } let memory_for_current_tx = get_bootloader_memory_for_tx( @@ -624,13 +622,7 @@ fn push_raw_transaction_to_bootloader_memory_v1( if vm.is_bytecode_exists(&hash_bytecode(bytecode)) { return None; } - - compress_bytecode(bytecode) - .ok() - .map(|compressed| CompressedBytecodeInfo { - original: bytecode.clone(), - compressed, - }) + bytecode::compress(bytecode.clone()).ok() }) .collect() }); @@ -701,20 +693,14 @@ fn 
push_raw_transaction_to_bootloader_memory_v2( if vm.is_bytecode_exists(&hash_bytecode(bytecode)) { return None; } - - compress_bytecode(bytecode) - .ok() - .map(|compressed| CompressedBytecodeInfo { - original: bytecode.clone(), - compressed, - }) + bytecode::compress(bytecode.clone()).ok() }) .collect() }); let compressed_bytecodes_encoding_len_words = compressed_bytecodes .iter() .map(|bytecode| { - let encoding_length_bytes = bytecode.encode_call().len(); + let encoding_length_bytes = bytecode::encode_call(bytecode).len(); assert!( encoding_length_bytes % 32 == 0, "ABI encoding of bytecode is not 32-byte aligned" @@ -830,7 +816,7 @@ pub(crate) fn get_bootloader_memory_for_encoded_tx( let memory_addition: Vec<_> = compressed_bytecodes .into_iter() - .flat_map(|x| x.encode_call()) + .flat_map(|x| bytecode::encode_call(&x)) .collect(); let memory_addition = bytes_to_be_words(memory_addition); diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/state.rs index d436a2adb0a..12aab3c7364 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/state.rs @@ -1,11 +1,10 @@ use std::cmp::Ordering; use zksync_types::{L2ChainId, U256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; use super::tx::BootloaderTx; use crate::{ - interface::{BootloaderMemory, L2BlockEnv, TxExecutionMode}, + interface::{BootloaderMemory, CompressedBytecodeInfo, L2BlockEnv, TxExecutionMode}, vm_refunds_enhancement::{ bootloader_state::{ l2_block::BootloaderL2Block, diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/tx.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/tx.rs index e7f833e5bad..b4581d066d1 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/tx.rs +++ 
b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/tx.rs @@ -1,7 +1,8 @@ use zksync_types::{L2ChainId, H256, U256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; -use crate::vm_refunds_enhancement::types::internals::TransactionData; +use crate::{ + interface::CompressedBytecodeInfo, vm_refunds_enhancement::types::internals::TransactionData, +}; /// Information about tx necessary for execution in bootloader. #[derive(Debug, Clone)] diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs index f47b95d6cbf..7bd488f90a9 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs @@ -1,9 +1,10 @@ use zksync_types::U256; -use zksync_utils::{bytecode::CompressedBytecodeInfo, bytes_to_be_words, h256_to_u256}; +use zksync_utils::{bytes_to_be_words, h256_to_u256}; use super::tx::BootloaderTx; use crate::{ - interface::{BootloaderMemory, TxExecutionMode}, + interface::{BootloaderMemory, CompressedBytecodeInfo, TxExecutionMode}, + utils::bytecode, vm_refunds_enhancement::{ bootloader_state::l2_block::BootloaderL2Block, constants::{ @@ -20,7 +21,7 @@ pub(super) fn get_memory_for_compressed_bytecodes( ) -> Vec { let memory_addition: Vec<_> = compressed_bytecodes .iter() - .flat_map(|x| x.encode_call()) + .flat_map(bytecode::encode_call) .collect(); bytes_to_be_words(memory_addition) diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/bytecode.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/bytecode.rs index b3f578302c0..2289cca7a47 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/bytecode.rs @@ -1,15 +1,13 @@ use itertools::Itertools; use 
zksync_types::U256; -use zksync_utils::{ - bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}, - bytes_to_be_words, -}; +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - VmInterface, + CompressedBytecodeInfo, VmInterface, }, + utils::bytecode, vm_refunds_enhancement::Vm, HistoryMode, }; @@ -50,15 +48,6 @@ pub(crate) fn compress_bytecodes( .dedup_by(|x, y| x.1 == y.1) .filter(|(_idx, dep)| !storage.borrow_mut().is_bytecode_known(&hash_bytecode(dep))) .sorted_by_key(|(idx, _dep)| *idx) - .filter_map(|(_idx, dep)| { - let compressed_bytecode = compress_bytecode(dep); - - compressed_bytecode - .ok() - .map(|compressed| CompressedBytecodeInfo { - original: dep.clone(), - compressed, - }) - }) + .filter_map(|(_idx, dep)| bytecode::compress(dep.clone()).ok()) .collect() } diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs index 59938788466..821a8144249 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs @@ -1,14 +1,13 @@ use circuit_sequencer_api_1_3_3::sort_storage_access::sort_storage_access_queries; use zksync_types::{l2_to_l1_log::UserL2ToL1Log, Transaction}; -use zksync_utils::bytecode::CompressedBytecodeInfo; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, - BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, L1BatchEnv, L2BlockEnv, - SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, + BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, CurrentExecutionState, + L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, + VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, vm_latest::HistoryEnabled, vm_refunds_enhancement::{ diff --git 
a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/state.rs index 685b1821fd5..562d7451371 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/state.rs @@ -1,11 +1,10 @@ use std::cmp::Ordering; use zksync_types::{L2ChainId, U256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; use super::tx::BootloaderTx; use crate::{ - interface::{BootloaderMemory, L2BlockEnv, TxExecutionMode}, + interface::{BootloaderMemory, CompressedBytecodeInfo, L2BlockEnv, TxExecutionMode}, vm_virtual_blocks::{ bootloader_state::{ l2_block::BootloaderL2Block, diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/tx.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/tx.rs index 067d62a9fdd..e37320cf5ac 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/tx.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/tx.rs @@ -1,7 +1,8 @@ use zksync_types::{L2ChainId, H256, U256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; -use crate::vm_virtual_blocks::types::internals::TransactionData; +use crate::{ + interface::CompressedBytecodeInfo, vm_virtual_blocks::types::internals::TransactionData, +}; /// Information about tx necessary for execution in bootloader. 
#[derive(Debug, Clone)] diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs index 9a682da3a5a..2ccedcc6aa9 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs @@ -1,9 +1,10 @@ use zksync_types::U256; -use zksync_utils::{bytecode::CompressedBytecodeInfo, bytes_to_be_words, h256_to_u256}; +use zksync_utils::{bytes_to_be_words, h256_to_u256}; use super::tx::BootloaderTx; use crate::{ - interface::{BootloaderMemory, TxExecutionMode}, + interface::{BootloaderMemory, CompressedBytecodeInfo, TxExecutionMode}, + utils::bytecode, vm_virtual_blocks::{ bootloader_state::l2_block::BootloaderL2Block, constants::{ @@ -20,7 +21,7 @@ pub(super) fn get_memory_for_compressed_bytecodes( ) -> Vec { let memory_addition: Vec<_> = compressed_bytecodes .iter() - .flat_map(|x| x.encode_call()) + .flat_map(bytecode::encode_call) .collect(); bytes_to_be_words(memory_addition) diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/bytecode.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/bytecode.rs index 7c1b15027b4..96a30d50805 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/bytecode.rs @@ -1,15 +1,13 @@ use itertools::Itertools; use zksync_types::U256; -use zksync_utils::{ - bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}, - bytes_to_be_words, -}; +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - VmInterface, + CompressedBytecodeInfo, VmInterface, }, + utils::bytecode, vm_virtual_blocks::Vm, HistoryMode, }; @@ -50,15 +48,6 @@ pub(crate) fn compress_bytecodes( .dedup_by(|x, y| x.1 == y.1) .filter(|(_idx, dep)| 
!storage.borrow_mut().is_bytecode_known(&hash_bytecode(dep))) .sorted_by_key(|(idx, _dep)| *idx) - .filter_map(|(_idx, dep)| { - let compressed_bytecode = compress_bytecode(dep); - - compressed_bytecode - .ok() - .map(|compressed| CompressedBytecodeInfo { - original: dep.clone(), - compressed, - }) - }) + .filter_map(|(_idx, dep)| bytecode::compress(dep.clone()).ok()) .collect() } diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs index 9d234ec117a..8991ee1b4b9 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs @@ -1,14 +1,13 @@ use circuit_sequencer_api_1_3_3::sort_storage_access::sort_storage_access_queries; use zksync_types::{l2_to_l1_log::UserL2ToL1Log, Transaction}; -use zksync_utils::bytecode::CompressedBytecodeInfo; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, - BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, L1BatchEnv, L2BlockEnv, - SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, + BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, CurrentExecutionState, + L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, + VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, vm_latest::HistoryEnabled, vm_virtual_blocks::{ diff --git a/core/lib/multivm/src/vm_instance.rs b/core/lib/multivm/src/vm_instance.rs index 0cc8916a104..0e4cefd3c80 100644 --- a/core/lib/multivm/src/vm_instance.rs +++ b/core/lib/multivm/src/vm_instance.rs @@ -1,13 +1,13 @@ use zksync_types::vm::{FastVmMode, VmVersion}; -use zksync_utils::bytecode::CompressedBytecodeInfo; use crate::{ glue::history_mode::HistoryMode, interface::{ storage::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView}, - BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, 
FinishedL1Batch, - L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, - VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, + BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, CurrentExecutionState, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, }, tracers::TracerDispatcher, versions::shadow::ShadowVm, diff --git a/core/lib/snapshots_applier/src/tests/utils.rs b/core/lib/snapshots_applier/src/tests/utils.rs index c546fb60c09..2c9b1440af2 100644 --- a/core/lib/snapshots_applier/src/tests/utils.rs +++ b/core/lib/snapshots_applier/src/tests/utils.rs @@ -181,6 +181,7 @@ pub(super) fn mock_l2_block_header(l2_block_number: L2BlockNumber) -> L2BlockHea protocol_version: Some(Default::default()), virtual_blocks: 0, gas_limit: 0, + logs_bloom: Default::default(), } } diff --git a/core/lib/state/src/storage_factory.rs b/core/lib/state/src/storage_factory.rs index d3b978356a5..4792200a463 100644 --- a/core/lib/state/src/storage_factory.rs +++ b/core/lib/state/src/storage_factory.rs @@ -10,11 +10,12 @@ use zksync_vm_interface::storage::ReadStorage; use crate::{PostgresStorage, RocksdbStorage, RocksdbStorageBuilder, StateKeeperColumnFamily}; -/// Factory that can produce [`OwnedStorage`] instances on demand. +/// Factory that can produce storage instances on demand. The storage type is encapsulated as a type param +/// (mostly for testing purposes); the default is [`OwnedStorage`]. #[async_trait] -pub trait ReadStorageFactory: Debug + Send + Sync + 'static { - /// Creates an [`OwnedStorage`] entity over either a Postgres connection or RocksDB - /// instance. The specific criteria on which one are left up to the implementation. +pub trait ReadStorageFactory: Debug + Send + Sync + 'static { + /// Creates a storage instance, e.g. over a Postgres connection or a RocksDB instance. 
+ /// The specific criteria on which one are left up to the implementation. /// /// Implementations may be cancel-aware and return `Ok(None)` iff `stop_receiver` receives /// a stop signal; this is the only case in which `Ok(None)` should be returned. @@ -22,7 +23,7 @@ pub trait ReadStorageFactory: Debug + Send + Sync + 'static { &self, stop_receiver: &watch::Receiver, l1_batch_number: L1BatchNumber, - ) -> anyhow::Result>; + ) -> anyhow::Result>; } /// [`ReadStorageFactory`] producing Postgres-backed storage instances. Hence, it is slower than more advanced diff --git a/core/lib/state/src/test_utils.rs b/core/lib/state/src/test_utils.rs index 1d1731bf001..decb2a0f403 100644 --- a/core/lib/state/src/test_utils.rs +++ b/core/lib/state/src/test_utils.rs @@ -87,6 +87,7 @@ pub(crate) async fn create_l2_block( protocol_version: Some(Default::default()), virtual_blocks: 0, gas_limit: 0, + logs_bloom: Default::default(), }; conn.blocks_dal() diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index 0210a28f2a2..102a31438bb 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -5,7 +5,7 @@ use strum::Display; use zksync_basic_types::{ tee_types::TeeType, web3::{AccessList, Bytes, Index}, - L1BatchNumber, H160, H2048, H256, H64, U256, U64, + Bloom, L1BatchNumber, H160, H256, H64, U256, U64, }; use zksync_contracts::BaseSystemContractsHashes; @@ -259,7 +259,7 @@ pub struct TransactionReceipt { pub root: H256, /// Logs bloom #[serde(rename = "logsBloom")] - pub logs_bloom: H2048, + pub logs_bloom: Bloom, /// Transaction type, Some(1) for AccessList transaction, None for Legacy #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub transaction_type: Option, @@ -311,7 +311,7 @@ pub struct Block { pub extra_data: Bytes, /// Logs bloom #[serde(rename = "logsBloom")] - pub logs_bloom: H2048, + pub logs_bloom: Bloom, /// Timestamp pub timestamp: U256, /// Timestamp of the l1 batch this L2 block was included 
within @@ -355,7 +355,7 @@ impl Default for Block { gas_limit: U256::default(), base_fee_per_gas: U256::default(), extra_data: Bytes::default(), - logs_bloom: H2048::default(), + logs_bloom: Bloom::default(), timestamp: U256::default(), l1_batch_timestamp: None, difficulty: U256::default(), diff --git a/core/lib/types/src/block.rs b/core/lib/types/src/block.rs index bc13bed457b..9c1609bf175 100644 --- a/core/lib/types/src/block.rs +++ b/core/lib/types/src/block.rs @@ -1,7 +1,7 @@ use std::{fmt, ops}; use serde::{Deserialize, Serialize}; -use zksync_basic_types::{Address, H2048, H256, U256}; +use zksync_basic_types::{Address, Bloom, BloomInput, H256, U256}; use zksync_contracts::BaseSystemContractsHashes; use zksync_system_constants::SYSTEM_BLOCK_INFO_BLOCK_NUMBER_MULTIPLIER; use zksync_utils::concat_and_hash; @@ -56,7 +56,7 @@ pub struct L1BatchHeader { /// Preimages of the hashes that were sent as value of L2 logs by special system L2 contract. pub l2_to_l1_messages: Vec>, /// Bloom filter for the event logs in the block. - pub bloom: H2048, + pub bloom: Bloom, /// Hashes of contracts used this block pub used_contract_hashes: Vec, pub base_system_contracts_hashes: BaseSystemContractsHashes, @@ -90,6 +90,7 @@ pub struct L2BlockHeader { /// Note, that it is an `u64`, i.e. while the computational limit for the bootloader is an `u32` a much larger /// amount of gas can be spent on pubdata. pub gas_limit: u64, + pub logs_bloom: Bloom, } /// Structure that represents the data is returned by the storage oracle during batch execution. 
@@ -125,7 +126,7 @@ impl L1BatchHeader { priority_ops_onchain_data: vec![], l2_to_l1_logs: vec![], l2_to_l1_messages: vec![], - bloom: H2048::default(), + bloom: Bloom::default(), used_contract_hashes: vec![], base_system_contracts_hashes, system_logs: vec![], @@ -294,8 +295,19 @@ pub struct L1BatchTreeData { pub rollup_last_leaf_index: u64, } +pub fn build_bloom<'a, I: IntoIterator>>(items: I) -> Bloom { + let mut bloom = Bloom::zero(); + for item in items { + bloom.accrue(item); + } + + bloom +} + #[cfg(test)] mod tests { + use std::{iter, str::FromStr}; + use super::*; #[test] @@ -345,4 +357,76 @@ mod tests { assert_eq!(block_number, unpacked_block_number); assert_eq!(block_timestamp, unpacked_block_timestamp); } + + #[test] + fn test_build_bloom() { + let logs = [ + ( + Address::from_str("0x86Fa049857E0209aa7D9e616F7eb3b3B78ECfdb0").unwrap(), + vec![ + H256::from_str( + "0x3452f51d00000000000000000000000000000000000000000000000000000000", + ) + .unwrap(), + H256::from_str( + "0x000000000000000000000000d0a6e6c54dbc68db5db3a091b171a77407ff7ccf", + ) + .unwrap(), + H256::from_str( + "0x0000000000000000000000000f5e378a82a55f24e88317a8fb7cd2ed8bd3873f", + ) + .unwrap(), + H256::from_str( + "0x000000000000000000000000000000000000000000000004f0e6ade1e67bb719", + ) + .unwrap(), + ], + ), + ( + Address::from_str("0x86Fa049857E0209aa7D9e616F7eb3b3B78ECfdb0").unwrap(), + vec![ + H256::from_str( + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + ) + .unwrap(), + H256::from_str( + "0x000000000000000000000000d0a6e6c54dbc68db5db3a091b171a77407ff7ccf", + ) + .unwrap(), + H256::from_str( + "0x0000000000000000000000000f5e378a82a55f24e88317a8fb7cd2ed8bd3873f", + ) + .unwrap(), + ], + ), + ( + Address::from_str("0xd0a6E6C54DbC68Db5db3A091B171A77407Ff7ccf").unwrap(), + vec![H256::from_str( + "0x51223fdc0a25891366fb358b4af9fe3c381b1566e287c61a29d01c8a173fe4f4", + ) + .unwrap()], + ), + ]; + let iter = logs.iter().flat_map(|log| { + log.1 + .iter() + 
.map(|topic| BloomInput::Raw(topic.as_bytes())) + .chain(iter::once(BloomInput::Raw(log.0.as_bytes()))) + }); + + let bloom = build_bloom(iter); + let expected = Bloom::from_str( + "0000000004000000000000000100000000000000000000000000000000000000\ + 0000000000000000000040000000000000000000000000000000000000000200\ + 0000000000020000400000180000000000000000000000000000000000000000\ + 0000000000000000000000000000000000000000080000000000201000000000\ + 2000000000000000400000000000080000008000000000000000000000000000\ + 0000000000000000000000000004000000000001000000000000804000000000\ + 0000000200000000000000000000000400000000000000000000000800200000\ + 0000000000000010000000000000000000000000000000000000000000000000", + ) + .unwrap(); + + assert_eq!(bloom, expected); + } } diff --git a/core/lib/types/src/circuit.rs b/core/lib/types/src/circuit.rs deleted file mode 100644 index 2aeb226e165..00000000000 --- a/core/lib/types/src/circuit.rs +++ /dev/null @@ -1,106 +0,0 @@ -use std::ops::Add; - -use serde::{Deserialize, Serialize}; - -/// Holds information about number of cycles used per circuit type. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] -pub struct CircuitCycleStatistic { - pub main_vm_cycles: u32, - pub ram_permutation_cycles: u32, - pub storage_application_cycles: u32, - pub storage_sorter_cycles: u32, - pub code_decommitter_cycles: u32, - pub code_decommitter_sorter_cycles: u32, - pub log_demuxer_cycles: u32, - pub events_sorter_cycles: u32, - pub keccak256_cycles: u32, - pub ecrecover_cycles: u32, - pub sha256_cycles: u32, - pub secp256k1_verify_cycles: u32, - pub transient_storage_checker_cycles: u32, -} - -impl CircuitCycleStatistic { - pub fn new() -> Self { - Self::default() - } -} - -/// Holds information about number of circuits used per circuit type. 
-#[derive(Debug, Clone, Copy, Default, PartialEq, Serialize, Deserialize)] -pub struct CircuitStatistic { - pub main_vm: f32, - pub ram_permutation: f32, - pub storage_application: f32, - pub storage_sorter: f32, - pub code_decommitter: f32, - pub code_decommitter_sorter: f32, - pub log_demuxer: f32, - pub events_sorter: f32, - pub keccak256: f32, - pub ecrecover: f32, - pub sha256: f32, - #[serde(default)] - pub secp256k1_verify: f32, - #[serde(default)] - pub transient_storage_checker: f32, -} - -impl CircuitStatistic { - /// Rounds up numbers and adds them. - pub fn total(&self) -> usize { - self.main_vm.ceil() as usize - + self.ram_permutation.ceil() as usize - + self.storage_application.ceil() as usize - + self.storage_sorter.ceil() as usize - + self.code_decommitter.ceil() as usize - + self.code_decommitter_sorter.ceil() as usize - + self.log_demuxer.ceil() as usize - + self.events_sorter.ceil() as usize - + self.keccak256.ceil() as usize - + self.ecrecover.ceil() as usize - + self.sha256.ceil() as usize - + self.secp256k1_verify.ceil() as usize - + self.transient_storage_checker.ceil() as usize - } - - /// Adds numbers. 
- pub fn total_f32(&self) -> f32 { - self.main_vm - + self.ram_permutation - + self.storage_application - + self.storage_sorter - + self.code_decommitter - + self.code_decommitter_sorter - + self.log_demuxer - + self.events_sorter - + self.keccak256 - + self.ecrecover - + self.sha256 - + self.secp256k1_verify - + self.transient_storage_checker - } -} - -impl Add for CircuitStatistic { - type Output = CircuitStatistic; - - fn add(self, other: CircuitStatistic) -> CircuitStatistic { - CircuitStatistic { - main_vm: self.main_vm + other.main_vm, - ram_permutation: self.ram_permutation + other.ram_permutation, - storage_application: self.storage_application + other.storage_application, - storage_sorter: self.storage_sorter + other.storage_sorter, - code_decommitter: self.code_decommitter + other.code_decommitter, - code_decommitter_sorter: self.code_decommitter_sorter + other.code_decommitter_sorter, - log_demuxer: self.log_demuxer + other.log_demuxer, - events_sorter: self.events_sorter + other.events_sorter, - keccak256: self.keccak256 + other.keccak256, - ecrecover: self.ecrecover + other.ecrecover, - sha256: self.sha256 + other.sha256, - secp256k1_verify: self.secp256k1_verify + other.secp256k1_verify, - transient_storage_checker: self.transient_storage_checker - + other.transient_storage_checker, - } - } -} diff --git a/core/lib/types/src/fee.rs b/core/lib/types/src/fee.rs index 524015cdd09..9dc2cda9e62 100644 --- a/core/lib/types/src/fee.rs +++ b/core/lib/types/src/fee.rs @@ -1,57 +1,7 @@ use serde::{Deserialize, Serialize}; use zksync_utils::ceil_div; -use crate::{circuit::CircuitStatistic, U256}; - -#[derive(Debug, Clone, Copy, Serialize, Deserialize)] -#[serde(rename_all = "camelCase", tag = "result")] -pub struct TransactionExecutionMetrics { - pub initial_storage_writes: usize, - pub repeated_storage_writes: usize, - pub gas_used: usize, - pub gas_remaining: u32, - pub event_topics: u16, - pub published_bytecode_bytes: usize, - pub l2_l1_long_messages: usize, 
- pub l2_l1_logs: usize, - pub contracts_used: usize, - pub contracts_deployed: u16, - pub vm_events: usize, - pub storage_logs: usize, - // it's the sum of storage logs, vm events, l2->l1 logs, - // and the number of precompile calls - pub total_log_queries: usize, - pub cycles_used: u32, - pub computational_gas_used: u32, - pub total_updated_values_size: usize, - pub pubdata_published: u32, - pub circuit_statistic: CircuitStatistic, -} - -impl Default for TransactionExecutionMetrics { - fn default() -> Self { - Self { - initial_storage_writes: 0, - repeated_storage_writes: 0, - gas_used: 0, - gas_remaining: u32::MAX, - event_topics: 0, - published_bytecode_bytes: 0, - l2_l1_long_messages: 0, - l2_l1_logs: 0, - contracts_used: 0, - contracts_deployed: 0, - vm_events: 0, - storage_logs: 0, - total_log_queries: 0, - cycles_used: 0, - computational_gas_used: 0, - total_updated_values_size: 0, - pubdata_published: 0, - circuit_statistic: Default::default(), - } - } -} +use crate::U256; #[derive(Debug, Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct Fee { diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs index a55f6b5753d..9e24d7156f9 100644 --- a/core/lib/types/src/lib.rs +++ b/core/lib/types/src/lib.rs @@ -34,7 +34,6 @@ pub mod abi; pub mod aggregated_operations; pub mod blob; pub mod block; -pub mod circuit; pub mod commitment; pub mod contract_verification_api; pub mod debug_flat_call; @@ -49,7 +48,6 @@ pub mod protocol_upgrade; pub mod pubdata_da; pub mod snapshots; pub mod storage; -pub mod storage_writes_deduplicator; pub mod system_contracts; pub mod tokens; pub mod tx; diff --git a/core/lib/types/src/tx/mod.rs b/core/lib/types/src/tx/mod.rs index 7078f4ee3fe..ed6e61184c4 100644 --- a/core/lib/types/src/tx/mod.rs +++ b/core/lib/types/src/tx/mod.rs @@ -4,50 +4,13 @@ //! it makes more sense to define the contents of each transaction chain-agnostic, and extent this data //! 
with metadata (such as fees and/or signatures) for L1 and L2 separately. -use std::fmt::Debug; - use zksync_basic_types::{Address, H256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; -use self::tx_execution_info::TxExecutionStatus; -pub use self::{execute::Execute, tx_execution_info::ExecutionMetrics}; -use crate::{vm_trace::Call, Transaction}; +pub use self::execute::Execute; pub mod execute; -pub mod tx_execution_info; pub use zksync_crypto_primitives as primitives; -#[derive(Debug, Clone, PartialEq)] -pub struct TransactionExecutionResult { - pub transaction: Transaction, - pub hash: H256, - pub execution_info: ExecutionMetrics, - pub execution_status: TxExecutionStatus, - pub refunded_gas: u64, - pub operator_suggested_refund: u64, - pub compressed_bytecodes: Vec, - pub call_traces: Vec, - pub revert_reason: Option, -} - -impl TransactionExecutionResult { - pub fn call_trace(&self) -> Option { - if self.call_traces.is_empty() { - None - } else { - Some(Call::new_high_level( - self.transaction.gas_limit().as_u64(), - self.transaction.gas_limit().as_u64() - self.refunded_gas, - self.transaction.execute.value, - self.transaction.execute.calldata.clone(), - vec![], - self.revert_reason.clone(), - self.call_traces.clone(), - )) - } - } -} - #[derive(Debug, Clone, Copy)] pub struct IncludedTxLocation { pub tx_hash: H256, diff --git a/core/lib/types/src/tx/tx_execution_info.rs b/core/lib/types/src/tx/tx_execution_info.rs deleted file mode 100644 index 7b2b0dbd27e..00000000000 --- a/core/lib/types/src/tx/tx_execution_info.rs +++ /dev/null @@ -1,134 +0,0 @@ -use std::ops::{Add, AddAssign}; - -use crate::{ - circuit::CircuitStatistic, - commitment::SerializeCommitment, - fee::TransactionExecutionMetrics, - l2_to_l1_log::L2ToL1Log, - writes::{ - InitialStorageWrite, RepeatedStorageWrite, BYTES_PER_DERIVED_KEY, - BYTES_PER_ENUMERATION_INDEX, - }, - ProtocolVersionId, -}; - -#[derive(Debug, Clone, Copy, Eq, PartialEq)] -pub enum TxExecutionStatus { - Success, - 
Failure, -} - -impl TxExecutionStatus { - pub fn from_has_failed(has_failed: bool) -> Self { - if has_failed { - Self::Failure - } else { - Self::Success - } - } -} - -#[derive(Debug, Default, Clone, Copy, PartialEq)] -pub struct DeduplicatedWritesMetrics { - pub initial_storage_writes: usize, - pub repeated_storage_writes: usize, - pub total_updated_values_size: usize, -} - -impl DeduplicatedWritesMetrics { - pub fn from_tx_metrics(tx_metrics: &TransactionExecutionMetrics) -> Self { - Self { - initial_storage_writes: tx_metrics.initial_storage_writes, - repeated_storage_writes: tx_metrics.repeated_storage_writes, - total_updated_values_size: tx_metrics.total_updated_values_size, - } - } - - pub fn size(&self, protocol_version: ProtocolVersionId) -> usize { - if protocol_version.is_pre_boojum() { - self.initial_storage_writes * InitialStorageWrite::SERIALIZED_SIZE - + self.repeated_storage_writes * RepeatedStorageWrite::SERIALIZED_SIZE - } else { - self.total_updated_values_size - + (BYTES_PER_DERIVED_KEY as usize) * self.initial_storage_writes - + (BYTES_PER_ENUMERATION_INDEX as usize) * self.repeated_storage_writes - } - } -} - -#[derive(Debug, Clone, Copy, Default, PartialEq, serde::Serialize)] -pub struct ExecutionMetrics { - pub gas_used: usize, - pub published_bytecode_bytes: usize, - pub l2_l1_long_messages: usize, - pub l2_to_l1_logs: usize, - pub contracts_used: usize, - pub contracts_deployed: u16, - pub vm_events: usize, - pub storage_logs: usize, - pub total_log_queries: usize, - pub cycles_used: u32, - pub computational_gas_used: u32, - pub pubdata_published: u32, - pub circuit_statistic: CircuitStatistic, -} - -impl ExecutionMetrics { - pub fn from_tx_metrics(tx_metrics: &TransactionExecutionMetrics) -> Self { - Self { - published_bytecode_bytes: tx_metrics.published_bytecode_bytes, - l2_l1_long_messages: tx_metrics.l2_l1_long_messages, - l2_to_l1_logs: tx_metrics.l2_l1_logs, - contracts_deployed: tx_metrics.contracts_deployed, - contracts_used: 
tx_metrics.contracts_used, - gas_used: tx_metrics.gas_used, - storage_logs: tx_metrics.storage_logs, - vm_events: tx_metrics.vm_events, - total_log_queries: tx_metrics.total_log_queries, - cycles_used: tx_metrics.cycles_used, - computational_gas_used: tx_metrics.computational_gas_used, - pubdata_published: tx_metrics.pubdata_published, - circuit_statistic: tx_metrics.circuit_statistic, - } - } - - pub fn size(&self) -> usize { - self.l2_to_l1_logs * L2ToL1Log::SERIALIZED_SIZE - + self.l2_l1_long_messages - + self.published_bytecode_bytes - // TODO(PLA-648): refactor this constant - // It represents the need to store the length's of messages as well as bytecodes. - // It works due to the fact that each bytecode/L2->L1 long message is accompanied by a corresponding - // user L2->L1 log. - + self.l2_to_l1_logs * 4 - } -} - -impl Add for ExecutionMetrics { - type Output = ExecutionMetrics; - - fn add(self, other: ExecutionMetrics) -> ExecutionMetrics { - ExecutionMetrics { - published_bytecode_bytes: self.published_bytecode_bytes - + other.published_bytecode_bytes, - contracts_deployed: self.contracts_deployed + other.contracts_deployed, - contracts_used: self.contracts_used + other.contracts_used, - l2_l1_long_messages: self.l2_l1_long_messages + other.l2_l1_long_messages, - l2_to_l1_logs: self.l2_to_l1_logs + other.l2_to_l1_logs, - gas_used: self.gas_used + other.gas_used, - vm_events: self.vm_events + other.vm_events, - storage_logs: self.storage_logs + other.storage_logs, - total_log_queries: self.total_log_queries + other.total_log_queries, - cycles_used: self.cycles_used + other.cycles_used, - computational_gas_used: self.computational_gas_used + other.computational_gas_used, - pubdata_published: self.pubdata_published + other.pubdata_published, - circuit_statistic: self.circuit_statistic + other.circuit_statistic, - } - } -} - -impl AddAssign for ExecutionMetrics { - fn add_assign(&mut self, other: Self) { - *self = *self + other; - } -} diff --git 
a/core/lib/utils/src/bytecode.rs b/core/lib/utils/src/bytecode.rs index f9554c6f72b..48bdb433020 100644 --- a/core/lib/utils/src/bytecode.rs +++ b/core/lib/utils/src/bytecode.rs @@ -1,10 +1,6 @@ -use std::{collections::HashMap, convert::TryInto}; +// FIXME: move to basic_types? -use itertools::Itertools; -use zksync_basic_types::{ - ethabi::{encode, Token}, - H256, -}; +use zksync_basic_types::H256; use crate::bytes_to_chunks; @@ -21,117 +17,6 @@ pub enum InvalidBytecodeError { BytecodeLengthIsNotDivisibleBy32, } -#[derive(Debug, thiserror::Error)] -pub enum FailedToCompressBytecodeError { - #[error("Number of unique 8-bytes bytecode chunks exceed the limit of 2^16 - 1")] - DictionaryOverflow, - #[error("Bytecode is invalid: {0}")] - InvalidBytecode(#[from] InvalidBytecodeError), -} - -/// Implements, a simple compression algorithm for the bytecode. -pub fn compress_bytecode(code: &[u8]) -> Result, FailedToCompressBytecodeError> { - validate_bytecode(code)?; - - // Statistic is a hash map of values (number of occurrences, first occurrence position), - // this is needed to ensure that the determinism during sorting of the statistic, i.e. - // each element will have unique first occurrence position - let mut statistic: HashMap = HashMap::new(); - let mut dictionary: HashMap = HashMap::new(); - let mut encoded_data: Vec = Vec::new(); - - // Split original bytecode into 8-byte chunks. - for (position, chunk_bytes) in code.chunks(8).enumerate() { - // It is safe to unwrap here, because each chunk is exactly 8 bytes, since - // valid bytecodes are divisible by 8. - let chunk = u64::from_be_bytes(chunk_bytes.try_into().unwrap()); - - // Count the number of occurrences of each chunk. 
- statistic.entry(chunk).or_insert((0, position)).0 += 1; - } - - let mut statistic_sorted_by_value: Vec<_> = statistic.into_iter().collect::>(); - statistic_sorted_by_value.sort_by_key(|x| x.1); - - // The dictionary size is limited by 2^16 - 1, - if statistic_sorted_by_value.len() > u16::MAX.into() { - return Err(FailedToCompressBytecodeError::DictionaryOverflow); - } - - // Fill the dictionary with the most popular chunks. - // The most popular chunks will be encoded with the smallest indexes, so that - // the 255 most popular chunks will be encoded with one zero byte. - // And the encoded data will be filled with more zeros, so - // the calldata that will be sent to L1 will be cheaper. - for (chunk, _) in statistic_sorted_by_value.iter().rev() { - dictionary.insert(*chunk, dictionary.len() as u16); - } - - for chunk_bytes in code.chunks(8) { - // It is safe to unwrap here, because each chunk is exactly 8 bytes, since - // valid bytecodes are divisible by 8. - let chunk = u64::from_be_bytes(chunk_bytes.try_into().unwrap()); - - // Add the index of the chunk to the encoded data. 
- encoded_data.extend(dictionary.get(&chunk).unwrap().to_be_bytes()); - } - - // Prepare the raw compressed bytecode in the following format: - // - 2 bytes: the length of the dictionary (N) - // - N bytes: packed dictionary bytes - // - remaining bytes: packed encoded data bytes - - let mut compressed: Vec = Vec::new(); - compressed.extend((dictionary.len() as u16).to_be_bytes()); - - dictionary - .into_iter() - .map(|(k, v)| (v, k)) - .sorted() - .for_each(|(_, chunk)| { - compressed.extend(chunk.to_be_bytes()); - }); - - compressed.extend(encoded_data); - - Ok(compressed) -} - -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct CompressedBytecodeInfo { - pub original: Vec, - pub compressed: Vec, -} - -impl CompressedBytecodeInfo { - pub fn from_original(bytecode: Vec) -> Result { - let compressed = compress_bytecode(&bytecode)?; - - let result = Self { - original: bytecode, - compressed, - }; - - Ok(result) - } - - pub fn encode_call(&self) -> Vec { - let bytecode_hash = hash_bytecode(&self.original).as_bytes().to_vec(); - let empty_cell = vec![0u8; 32]; - - let bytes_encoded = encode(&[ - Token::Bytes(self.original.clone()), - Token::Bytes(self.compressed.clone()), - ]); - - bytecode_hash - .into_iter() - .chain(empty_cell) - .chain(bytes_encoded) - .collect() - } -} - pub fn validate_bytecode(code: &[u8]) -> Result<(), InvalidBytecodeError> { let bytecode_len = code.len(); @@ -170,57 +55,3 @@ pub fn bytecode_len_in_words(bytecodehash: &H256) -> u16 { pub fn bytecode_len_in_bytes(bytecodehash: H256) -> usize { bytecode_len_in_words(&bytecodehash) as usize * 32 } - -#[cfg(test)] -mod test { - use super::*; - - fn decompress_bytecode(raw_compressed_bytecode: &[u8]) -> Vec { - let mut decompressed: Vec = Vec::new(); - let mut dictionary: Vec = Vec::new(); - - let dictionary_len = u16::from_be_bytes(raw_compressed_bytecode[0..2].try_into().unwrap()); - for index in 0..dictionary_len { - let chunk = u64::from_be_bytes( - raw_compressed_bytecode[2 + index as usize * 
8..10 + index as usize * 8] - .try_into() - .unwrap(), - ); - dictionary.push(chunk); - } - - let encoded_data = &raw_compressed_bytecode[2 + dictionary_len as usize * 8..]; - for index_bytes in encoded_data.chunks(2) { - let index = u16::from_be_bytes(index_bytes.try_into().unwrap()); - - let chunk = dictionary[index as usize]; - decompressed.extend(chunk.to_be_bytes()); - } - - decompressed - } - - #[test] - fn bytecode_compression_test() { - let example_code = hex::decode("000200000000000200010000000103550000006001100270000000150010019d0000000101200190000000080000c13d0000000001000019004e00160000040f0000000101000039004e00160000040f0000001504000041000000150510009c000000000104801900000040011002100000000001310019000000150320009c0000000002048019000000600220021000000000012100190000004f0001042e000000000100001900000050000104300000008002000039000000400020043f0000000002000416000000000110004c000000240000613d000000000120004c0000004d0000c13d000000200100003900000100001004430000012000000443000001000100003900000040020000390000001d03000041004e000a0000040f000000000120004c0000004d0000c13d0000000001000031000000030110008c0000004d0000a13d0000000101000367000000000101043b0000001601100197000000170110009c0000004d0000c13d0000000101000039000000000101041a0000000202000039000000000202041a000000400300043d00000040043000390000001805200197000000000600041a0000000000540435000000180110019700000020043000390000000000140435000000a0012002700000001901100197000000600430003900000000001404350000001a012001980000001b010000410000000001006019000000b8022002700000001c02200197000000000121019f0000008002300039000000000012043500000018016001970000000000130435000000400100043d0000000002130049000000a0022000390000000003000019004e000a0000040f004e00140000040f0000004e000004320000004f0001042e000000500001043000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffff0000000000000000000000000000000000000000000000000000000089035730000000000000000000000000000000000000000
00000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000ffffff0000000000008000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff80000000000000000000000000000000000000000000000000000000000000007fffff00000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap(); - let compressed = compress_bytecode(&example_code).unwrap(); - let decompressed = decompress_bytecode(&compressed); - - assert_eq!(example_code, decompressed); - } - - #[test] - fn bytecode_compression_statistics_test() { - let example_code = - hex::decode("0000000000000000111111111111111111111111111111112222222222222222") - .unwrap(); - // The size of the dictionary should be `0x0003` - // The dictionary itself should put the most common chunk first, i.e. `0x1111111111111111` - // Then, the ordering does not matter, but the algorithm will return the one with the highest position, i.e. `0x2222222222222222` - let expected_encoding = - hex::decode("00031111111111111111222222222222222200000000000000000002000000000001") - .unwrap(); - - assert_eq!(expected_encoding, compress_bytecode(&example_code).unwrap()); - } -} diff --git a/core/lib/vm_interface/Cargo.toml b/core/lib/vm_interface/Cargo.toml index 75362d7da3f..1d4efe06634 100644 --- a/core/lib/vm_interface/Cargo.toml +++ b/core/lib/vm_interface/Cargo.toml @@ -14,7 +14,6 @@ categories.workspace = true zksync_contracts.workspace = true zksync_system_constants.workspace = true zksync_types.workspace = true -zksync_utils.workspace = true hex.workspace = true serde.workspace = true diff --git a/core/lib/vm_interface/src/lib.rs b/core/lib/vm_interface/src/lib.rs index 1837bec4aff..3934709822d 100644 --- a/core/lib/vm_interface/src/lib.rs +++ b/core/lib/vm_interface/src/lib.rs @@ -1,4 +1,21 @@ //! ZKsync Era VM interfaces. +//! +//! 
# Developer guidelines +//! +//! Which types should be put in this crate and which ones in `zksync_multivm` or other downstream crates? +//! +//! - This crate should contain logic not tied to a particular VM version; in contrast, most logic in `zksync_multivm` +//! is version-specific. +//! - This crate should not have heavyweight dependencies (like VM implementations). Anything heavier than `serde` is discouraged. +//! In contrast, `zksync_multivm` depends on old VM versions. +//! - If a type belongs in this crate, still be thorough about its methods. VM implementation details belong to `zksync_multivm` +//! and should be implemented as functions / extension traits there, rather than as methods here. +//! +//! Which types should be put in this crate vs `zksync_types`? +//! +//! - In this case, we want to separate types by domain. If a certain type clearly belongs to the VM domain +//! (e.g., can only be produced by VM execution), it probably belongs here. In contrast, if a type is more general / fundamental, +//! it may belong to `zksync_types`. 
pub use crate::{ types::{ @@ -8,8 +25,10 @@ pub use crate::{ }, inputs::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode}, outputs::{ - BootloaderMemory, CurrentExecutionState, ExecutionResult, FinishedL1Batch, L2Block, - Refunds, VmExecutionLogs, VmExecutionResultAndLogs, VmExecutionStatistics, + BootloaderMemory, CircuitStatistic, CompressedBytecodeInfo, CurrentExecutionState, + DeduplicatedWritesMetrics, ExecutionResult, FinishedL1Batch, L2Block, Refunds, + TransactionExecutionMetrics, TransactionExecutionResult, TxExecutionStatus, + VmExecutionLogs, VmExecutionMetrics, VmExecutionResultAndLogs, VmExecutionStatistics, VmMemoryMetrics, }, tracer, diff --git a/core/lib/vm_interface/src/types/outputs/bytecode.rs b/core/lib/vm_interface/src/types/outputs/bytecode.rs new file mode 100644 index 00000000000..100acb3d3d2 --- /dev/null +++ b/core/lib/vm_interface/src/types/outputs/bytecode.rs @@ -0,0 +1,5 @@ +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct CompressedBytecodeInfo { + pub original: Vec, + pub compressed: Vec, +} diff --git a/core/lib/vm_interface/src/types/outputs/execution_result.rs b/core/lib/vm_interface/src/types/outputs/execution_result.rs index 1037cc1d6e8..da96a3e15f8 100644 --- a/core/lib/vm_interface/src/types/outputs/execution_result.rs +++ b/core/lib/vm_interface/src/types/outputs/execution_result.rs @@ -2,11 +2,13 @@ use zksync_system_constants::PUBLISH_BYTECODE_OVERHEAD; use zksync_types::{ event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}, l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, - tx::ExecutionMetrics, + vm_trace::Call, StorageLogWithPreviousValue, Transaction, VmEvent, H256, }; -use crate::{Halt, VmExecutionStatistics, VmRevertReason}; +use crate::{ + CompressedBytecodeInfo, Halt, VmExecutionMetrics, VmExecutionStatistics, VmRevertReason, +}; pub fn bytecode_len_in_bytes(bytecodehash: H256) -> usize { usize::from(u16::from_be_bytes([bytecodehash[2], bytecodehash[3]])) * 32 @@ -65,7 +67,7 @@ 
impl ExecutionResult { } impl VmExecutionResultAndLogs { - pub fn get_execution_metrics(&self, tx: Option<&Transaction>) -> ExecutionMetrics { + pub fn get_execution_metrics(&self, tx: Option<&Transaction>) -> VmExecutionMetrics { let contracts_deployed = tx .map(|tx| tx.execute.factory_deps.len() as u16) .unwrap_or(0); @@ -86,7 +88,7 @@ impl VmExecutionResultAndLogs { }) .sum(); - ExecutionMetrics { + VmExecutionMetrics { gas_used: self.statistics.gas_used as usize, published_bytecode_bytes, l2_l1_long_messages, @@ -103,3 +105,50 @@ impl VmExecutionResultAndLogs { } } } + +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub enum TxExecutionStatus { + Success, + Failure, +} + +impl TxExecutionStatus { + pub fn from_has_failed(has_failed: bool) -> Self { + if has_failed { + Self::Failure + } else { + Self::Success + } + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct TransactionExecutionResult { + pub transaction: Transaction, + pub hash: H256, + pub execution_info: VmExecutionMetrics, + pub execution_status: TxExecutionStatus, + pub refunded_gas: u64, + pub operator_suggested_refund: u64, + pub compressed_bytecodes: Vec, + pub call_traces: Vec, + pub revert_reason: Option, +} + +impl TransactionExecutionResult { + pub fn call_trace(&self) -> Option { + if self.call_traces.is_empty() { + None + } else { + Some(Call::new_high_level( + self.transaction.gas_limit().as_u64(), + self.transaction.gas_limit().as_u64() - self.refunded_gas, + self.transaction.execute.value, + self.transaction.execute.calldata.clone(), + vec![], + self.revert_reason.clone(), + self.call_traces.clone(), + )) + } + } +} diff --git a/core/lib/vm_interface/src/types/outputs/mod.rs b/core/lib/vm_interface/src/types/outputs/mod.rs index eec19826e0b..88b96aaafff 100644 --- a/core/lib/vm_interface/src/types/outputs/mod.rs +++ b/core/lib/vm_interface/src/types/outputs/mod.rs @@ -1,11 +1,19 @@ pub use self::{ - execution_result::{ExecutionResult, Refunds, VmExecutionLogs, 
VmExecutionResultAndLogs}, + bytecode::CompressedBytecodeInfo, + execution_result::{ + ExecutionResult, Refunds, TransactionExecutionResult, TxExecutionStatus, VmExecutionLogs, + VmExecutionResultAndLogs, + }, execution_state::{BootloaderMemory, CurrentExecutionState}, finished_l1batch::FinishedL1Batch, l2_block::L2Block, - statistic::{VmExecutionStatistics, VmMemoryMetrics}, + statistic::{ + CircuitStatistic, DeduplicatedWritesMetrics, TransactionExecutionMetrics, + VmExecutionMetrics, VmExecutionStatistics, VmMemoryMetrics, + }, }; +mod bytecode; mod execution_result; mod execution_state; mod finished_l1batch; diff --git a/core/lib/vm_interface/src/types/outputs/statistic.rs b/core/lib/vm_interface/src/types/outputs/statistic.rs index fb99ba7e36b..095547076d4 100644 --- a/core/lib/vm_interface/src/types/outputs/statistic.rs +++ b/core/lib/vm_interface/src/types/outputs/statistic.rs @@ -1,4 +1,94 @@ -use zksync_types::circuit::CircuitStatistic; +use std::ops; + +use serde::{Deserialize, Serialize}; +use zksync_types::{ + commitment::SerializeCommitment, + l2_to_l1_log::L2ToL1Log, + writes::{ + InitialStorageWrite, RepeatedStorageWrite, BYTES_PER_DERIVED_KEY, + BYTES_PER_ENUMERATION_INDEX, + }, + ProtocolVersionId, +}; + +/// Holds information about number of circuits used per circuit type. +#[derive(Debug, Clone, Copy, Default, PartialEq, Serialize, Deserialize)] +pub struct CircuitStatistic { + pub main_vm: f32, + pub ram_permutation: f32, + pub storage_application: f32, + pub storage_sorter: f32, + pub code_decommitter: f32, + pub code_decommitter_sorter: f32, + pub log_demuxer: f32, + pub events_sorter: f32, + pub keccak256: f32, + pub ecrecover: f32, + pub sha256: f32, + #[serde(default)] + pub secp256k1_verify: f32, + #[serde(default)] + pub transient_storage_checker: f32, +} + +impl CircuitStatistic { + /// Rounds up numbers and adds them. 
+ pub fn total(&self) -> usize { + self.main_vm.ceil() as usize + + self.ram_permutation.ceil() as usize + + self.storage_application.ceil() as usize + + self.storage_sorter.ceil() as usize + + self.code_decommitter.ceil() as usize + + self.code_decommitter_sorter.ceil() as usize + + self.log_demuxer.ceil() as usize + + self.events_sorter.ceil() as usize + + self.keccak256.ceil() as usize + + self.ecrecover.ceil() as usize + + self.sha256.ceil() as usize + + self.secp256k1_verify.ceil() as usize + + self.transient_storage_checker.ceil() as usize + } + + /// Adds numbers. + pub fn total_f32(&self) -> f32 { + self.main_vm + + self.ram_permutation + + self.storage_application + + self.storage_sorter + + self.code_decommitter + + self.code_decommitter_sorter + + self.log_demuxer + + self.events_sorter + + self.keccak256 + + self.ecrecover + + self.sha256 + + self.secp256k1_verify + + self.transient_storage_checker + } +} + +impl ops::Add for CircuitStatistic { + type Output = Self; + + fn add(self, other: Self) -> Self { + Self { + main_vm: self.main_vm + other.main_vm, + ram_permutation: self.ram_permutation + other.ram_permutation, + storage_application: self.storage_application + other.storage_application, + storage_sorter: self.storage_sorter + other.storage_sorter, + code_decommitter: self.code_decommitter + other.code_decommitter, + code_decommitter_sorter: self.code_decommitter_sorter + other.code_decommitter_sorter, + log_demuxer: self.log_demuxer + other.log_demuxer, + events_sorter: self.events_sorter + other.events_sorter, + keccak256: self.keccak256 + other.keccak256, + ecrecover: self.ecrecover + other.ecrecover, + sha256: self.sha256 + other.sha256, + secp256k1_verify: self.secp256k1_verify + other.secp256k1_verify, + transient_storage_checker: self.transient_storage_checker + + other.transient_storage_checker, + } + } +} /// Statistics of the tx execution. 
#[derive(Debug, Default, Clone)] @@ -47,3 +137,156 @@ impl VmMemoryMetrics { .sum::() } } + +#[derive(Debug, Default, Clone, Copy, PartialEq)] +pub struct DeduplicatedWritesMetrics { + pub initial_storage_writes: usize, + pub repeated_storage_writes: usize, + pub total_updated_values_size: usize, +} + +impl DeduplicatedWritesMetrics { + pub fn from_tx_metrics(tx_metrics: &TransactionExecutionMetrics) -> Self { + Self { + initial_storage_writes: tx_metrics.initial_storage_writes, + repeated_storage_writes: tx_metrics.repeated_storage_writes, + total_updated_values_size: tx_metrics.total_updated_values_size, + } + } + + pub fn size(&self, protocol_version: ProtocolVersionId) -> usize { + if protocol_version.is_pre_boojum() { + self.initial_storage_writes * InitialStorageWrite::SERIALIZED_SIZE + + self.repeated_storage_writes * RepeatedStorageWrite::SERIALIZED_SIZE + } else { + self.total_updated_values_size + + (BYTES_PER_DERIVED_KEY as usize) * self.initial_storage_writes + + (BYTES_PER_ENUMERATION_INDEX as usize) * self.repeated_storage_writes + } + } +} + +#[derive(Debug, Clone, Copy)] +pub struct TransactionExecutionMetrics { + pub initial_storage_writes: usize, + pub repeated_storage_writes: usize, + pub gas_used: usize, + pub gas_remaining: u32, + pub event_topics: u16, + pub published_bytecode_bytes: usize, + pub l2_l1_long_messages: usize, + pub l2_l1_logs: usize, + pub contracts_used: usize, + pub contracts_deployed: u16, + pub vm_events: usize, + pub storage_logs: usize, + /// Sum of storage logs, vm events, l2->l1 logs, and the number of precompile calls. 
+ pub total_log_queries: usize, + pub cycles_used: u32, + pub computational_gas_used: u32, + pub total_updated_values_size: usize, + pub pubdata_published: u32, + pub circuit_statistic: CircuitStatistic, +} + +impl Default for TransactionExecutionMetrics { + fn default() -> Self { + Self { + initial_storage_writes: 0, + repeated_storage_writes: 0, + gas_used: 0, + gas_remaining: u32::MAX, + event_topics: 0, + published_bytecode_bytes: 0, + l2_l1_long_messages: 0, + l2_l1_logs: 0, + contracts_used: 0, + contracts_deployed: 0, + vm_events: 0, + storage_logs: 0, + total_log_queries: 0, + cycles_used: 0, + computational_gas_used: 0, + total_updated_values_size: 0, + pubdata_published: 0, + circuit_statistic: Default::default(), + } + } +} + +#[derive(Debug, Clone, Copy, Default, PartialEq, Serialize)] +pub struct VmExecutionMetrics { + pub gas_used: usize, + pub published_bytecode_bytes: usize, + pub l2_l1_long_messages: usize, + pub l2_to_l1_logs: usize, + pub contracts_used: usize, + pub contracts_deployed: u16, + pub vm_events: usize, + pub storage_logs: usize, + pub total_log_queries: usize, + pub cycles_used: u32, + pub computational_gas_used: u32, + pub pubdata_published: u32, + pub circuit_statistic: CircuitStatistic, +} + +impl VmExecutionMetrics { + pub fn from_tx_metrics(tx_metrics: &TransactionExecutionMetrics) -> Self { + Self { + published_bytecode_bytes: tx_metrics.published_bytecode_bytes, + l2_l1_long_messages: tx_metrics.l2_l1_long_messages, + l2_to_l1_logs: tx_metrics.l2_l1_logs, + contracts_deployed: tx_metrics.contracts_deployed, + contracts_used: tx_metrics.contracts_used, + gas_used: tx_metrics.gas_used, + storage_logs: tx_metrics.storage_logs, + vm_events: tx_metrics.vm_events, + total_log_queries: tx_metrics.total_log_queries, + cycles_used: tx_metrics.cycles_used, + computational_gas_used: tx_metrics.computational_gas_used, + pubdata_published: tx_metrics.pubdata_published, + circuit_statistic: tx_metrics.circuit_statistic, + } + } + + pub fn 
size(&self) -> usize { + self.l2_to_l1_logs * L2ToL1Log::SERIALIZED_SIZE + + self.l2_l1_long_messages + + self.published_bytecode_bytes + // TODO(PLA-648): refactor this constant + // It represents the need to store the length's of messages as well as bytecodes. + // It works due to the fact that each bytecode/L2->L1 long message is accompanied by a corresponding + // user L2->L1 log. + + self.l2_to_l1_logs * 4 + } +} + +impl ops::Add for VmExecutionMetrics { + type Output = Self; + + fn add(self, other: Self) -> Self { + Self { + published_bytecode_bytes: self.published_bytecode_bytes + + other.published_bytecode_bytes, + contracts_deployed: self.contracts_deployed + other.contracts_deployed, + contracts_used: self.contracts_used + other.contracts_used, + l2_l1_long_messages: self.l2_l1_long_messages + other.l2_l1_long_messages, + l2_to_l1_logs: self.l2_to_l1_logs + other.l2_to_l1_logs, + gas_used: self.gas_used + other.gas_used, + vm_events: self.vm_events + other.vm_events, + storage_logs: self.storage_logs + other.storage_logs, + total_log_queries: self.total_log_queries + other.total_log_queries, + cycles_used: self.cycles_used + other.cycles_used, + computational_gas_used: self.computational_gas_used + other.computational_gas_used, + pubdata_published: self.pubdata_published + other.pubdata_published, + circuit_statistic: self.circuit_statistic + other.circuit_statistic, + } + } +} + +impl ops::AddAssign for VmExecutionMetrics { + fn add_assign(&mut self, other: Self) { + *self = *self + other; + } +} diff --git a/core/lib/vm_interface/src/vm.rs b/core/lib/vm_interface/src/vm.rs index fd488e5100c..b8614a46c14 100644 --- a/core/lib/vm_interface/src/vm.rs +++ b/core/lib/vm_interface/src/vm.rs @@ -12,12 +12,11 @@ //! where `VmTracer` is a trait implemented for a specific VM version. 
use zksync_types::Transaction; -use zksync_utils::bytecode::CompressedBytecodeInfo; use crate::{ - storage::StoragePtr, BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, - VmMemoryMetrics, + storage::StoragePtr, BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, + CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + VmExecutionResultAndLogs, VmMemoryMetrics, }; pub trait VmInterface { diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs index f633b133ab0..741bcaea18f 100644 --- a/core/node/api_server/src/execution_sandbox/execute.rs +++ b/core/node/api_server/src/execution_sandbox/execute.rs @@ -4,13 +4,15 @@ use anyhow::Context as _; use tracing::{span, Level}; use zksync_dal::{ConnectionPool, Core}; use zksync_multivm::{ - interface::{TxExecutionMode, VmExecutionResultAndLogs, VmInterface}, + interface::{ + TransactionExecutionMetrics, TxExecutionMode, VmExecutionResultAndLogs, VmInterface, + }, tracers::StorageInvocations, MultiVMTracer, }; use zksync_types::{ - fee::TransactionExecutionMetrics, l2::L2Tx, transaction_request::CallOverrides, - ExecuteTransactionCommon, Nonce, PackedEthSignature, Transaction, U256, + l2::L2Tx, transaction_request::CallOverrides, ExecuteTransactionCommon, Nonce, + PackedEthSignature, Transaction, U256, }; use super::{ diff --git a/core/node/api_server/src/execution_sandbox/testonly.rs b/core/node/api_server/src/execution_sandbox/testonly.rs index 673c30b9f17..59fa2e38db7 100644 --- a/core/node/api_server/src/execution_sandbox/testonly.rs +++ b/core/node/api_server/src/execution_sandbox/testonly.rs @@ -1,9 +1,9 @@ use std::fmt; -use zksync_multivm::interface::{ExecutionResult, VmExecutionResultAndLogs}; -use zksync_types::{ - fee::TransactionExecutionMetrics, l2::L2Tx, ExecuteTransactionCommon, 
Transaction, +use zksync_multivm::interface::{ + ExecutionResult, TransactionExecutionMetrics, VmExecutionResultAndLogs, }; +use zksync_types::{l2::L2Tx, ExecuteTransactionCommon, Transaction}; use super::{ execute::{TransactionExecutionOutput, TransactionExecutor}, diff --git a/core/node/api_server/src/execution_sandbox/vm_metrics.rs b/core/node/api_server/src/execution_sandbox/vm_metrics.rs index 27e1c2ab305..a9bd2e9c2c6 100644 --- a/core/node/api_server/src/execution_sandbox/vm_metrics.rs +++ b/core/node/api_server/src/execution_sandbox/vm_metrics.rs @@ -3,14 +3,16 @@ use std::time::Duration; use vise::{ Buckets, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, LatencyObserver, Metrics, }; -use zksync_multivm::interface::{ - storage::StorageViewMetrics, VmExecutionResultAndLogs, VmMemoryMetrics, +use zksync_multivm::{ + interface::{ + storage::StorageViewMetrics, TransactionExecutionMetrics, VmExecutionResultAndLogs, + VmMemoryMetrics, + }, + utils::StorageWritesDeduplicator, }; use zksync_shared_metrics::InteractionType; use zksync_types::{ event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}, - fee::TransactionExecutionMetrics, - storage_writes_deduplicator::StorageWritesDeduplicator, H256, }; use zksync_utils::bytecode::bytecode_len_in_bytes; diff --git a/core/node/api_server/src/tx_sender/master_pool_sink.rs b/core/node/api_server/src/tx_sender/master_pool_sink.rs index cb4e73e3bb7..736edf0b247 100644 --- a/core/node/api_server/src/tx_sender/master_pool_sink.rs +++ b/core/node/api_server/src/tx_sender/master_pool_sink.rs @@ -2,8 +2,9 @@ use std::collections::hash_map::{Entry, HashMap}; use tokio::sync::Mutex; use zksync_dal::{transactions_dal::L2TxSubmissionResult, ConnectionPool, Core, CoreDal}; +use zksync_multivm::interface::TransactionExecutionMetrics; use zksync_shared_metrics::{TxStage, APP_METRICS}; -use zksync_types::{fee::TransactionExecutionMetrics, l2::L2Tx, Address, Nonce, H256}; +use zksync_types::{l2::L2Tx, Address, 
Nonce, H256}; use super::{tx_sink::TxSink, SubmitTxError}; use crate::web3::metrics::API_METRICS; diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs index 826200b5537..085f3c395dd 100644 --- a/core/node/api_server/src/tx_sender/mod.rs +++ b/core/node/api_server/src/tx_sender/mod.rs @@ -10,7 +10,7 @@ use zksync_dal::{ transactions_dal::L2TxSubmissionResult, Connection, ConnectionPool, Core, CoreDal, }; use zksync_multivm::{ - interface::VmExecutionResultAndLogs, + interface::{TransactionExecutionMetrics, VmExecutionResultAndLogs}, utils::{ adjust_pubdata_price_for_tx, derive_base_fee_and_gas_per_pubdata, derive_overhead, get_eth_call_gas_limit, get_max_batch_gas_limit, @@ -25,7 +25,7 @@ use zksync_state_keeper::{ }; use zksync_types::{ api::state_override::StateOverride, - fee::{Fee, TransactionExecutionMetrics}, + fee::Fee, fee_model::BatchFeeInput, get_code_key, get_intrinsic_constants, l2::{error::TxCheckError::TxDuplication, L2Tx}, diff --git a/core/node/api_server/src/tx_sender/proxy.rs b/core/node/api_server/src/tx_sender/proxy.rs index e179cdcb774..536a9767c1f 100644 --- a/core/node/api_server/src/tx_sender/proxy.rs +++ b/core/node/api_server/src/tx_sender/proxy.rs @@ -11,8 +11,9 @@ use zksync_dal::{ helpers::wait_for_l1_batch, transactions_dal::L2TxSubmissionResult, Connection, ConnectionPool, Core, CoreDal, DalError, }; +use zksync_multivm::interface::TransactionExecutionMetrics; use zksync_shared_metrics::{TxStage, APP_METRICS}; -use zksync_types::{api, fee::TransactionExecutionMetrics, l2::L2Tx, Address, Nonce, H256, U256}; +use zksync_types::{api, l2::L2Tx, Address, Nonce, H256, U256}; use zksync_web3_decl::{ client::{DynClient, L2}, error::{ClientRpcContext, EnrichedClientResult, Web3Error}, diff --git a/core/node/api_server/src/tx_sender/tx_sink.rs b/core/node/api_server/src/tx_sender/tx_sink.rs index 5edf21b0701..3d764816fe0 100644 --- a/core/node/api_server/src/tx_sender/tx_sink.rs +++ 
b/core/node/api_server/src/tx_sender/tx_sink.rs @@ -1,7 +1,7 @@ use zksync_dal::{transactions_dal::L2TxSubmissionResult, Connection, Core}; +use zksync_multivm::interface::TransactionExecutionMetrics; use zksync_types::{ api::{Transaction, TransactionDetails, TransactionId}, - fee::TransactionExecutionMetrics, l2::L2Tx, Address, Nonce, H256, }; diff --git a/core/node/api_server/src/web3/tests/debug.rs b/core/node/api_server/src/web3/tests/debug.rs index a074c143057..dab53cb4b4d 100644 --- a/core/node/api_server/src/web3/tests/debug.rs +++ b/core/node/api_server/src/web3/tests/debug.rs @@ -1,6 +1,7 @@ //! Tests for the `debug` Web3 namespace. -use zksync_types::{tx::TransactionExecutionResult, vm_trace::Call, BOOTLOADER_ADDRESS}; +use zksync_multivm::interface::TransactionExecutionResult; +use zksync_types::{vm_trace::Call, BOOTLOADER_ADDRESS}; use zksync_web3_decl::{ client::{DynClient, L2}, namespaces::DebugNamespaceClient, diff --git a/core/node/api_server/src/web3/tests/mod.rs b/core/node/api_server/src/web3/tests/mod.rs index d136971734a..3919bbab36e 100644 --- a/core/node/api_server/src/web3/tests/mod.rs +++ b/core/node/api_server/src/web3/tests/mod.rs @@ -17,7 +17,9 @@ use zksync_config::{ GenesisConfig, }; use zksync_dal::{transactions_dal::L2TxSubmissionResult, Connection, ConnectionPool, CoreDal}; -use zksync_multivm::zk_evm_latest::ethereum_types::U256; +use zksync_multivm::interface::{ + TransactionExecutionMetrics, TransactionExecutionResult, TxExecutionStatus, VmExecutionMetrics, +}; use zksync_node_genesis::{insert_genesis_batch, mock_genesis_config, GenesisParams}; use zksync_node_test_utils::{ create_l1_batch, create_l1_batch_metadata, create_l2_block, create_l2_transaction, @@ -26,18 +28,14 @@ use zksync_node_test_utils::{ use zksync_types::{ api, block::L2BlockHeader, - fee::TransactionExecutionMetrics, get_nonce_key, l2::L2Tx, storage::get_code_key, tokens::{TokenInfo, TokenMetadata}, - tx::{ - tx_execution_info::TxExecutionStatus, 
ExecutionMetrics, IncludedTxLocation, - TransactionExecutionResult, - }, + tx::IncludedTxLocation, utils::{storage_key_for_eth_balance, storage_key_for_standard_token_balance}, AccountTreeId, Address, L1BatchNumber, Nonce, ProtocolVersionId, StorageKey, StorageLog, - VmEvent, H256, U64, + VmEvent, H256, U256, U64, }; use zksync_utils::u256_to_h256; use zksync_web3_decl::{ @@ -273,7 +271,7 @@ fn execute_l2_transaction(transaction: L2Tx) -> TransactionExecutionResult { TransactionExecutionResult { hash: transaction.hash(), transaction: transaction.into(), - execution_info: ExecutionMetrics::default(), + execution_info: VmExecutionMetrics::default(), execution_status: TxExecutionStatus::Success, refunded_gas: 0, operator_suggested_refund: 0, diff --git a/core/node/api_server/src/web3/tests/ws.rs b/core/node/api_server/src/web3/tests/ws.rs index cccebdd6ddd..39f991aba04 100644 --- a/core/node/api_server/src/web3/tests/ws.rs +++ b/core/node/api_server/src/web3/tests/ws.rs @@ -8,7 +8,7 @@ use http::StatusCode; use tokio::sync::watch; use zksync_config::configs::chain::NetworkConfig; use zksync_dal::ConnectionPool; -use zksync_types::{api, Address, L1BatchNumber, H160, H2048, H256, U64}; +use zksync_types::{api, Address, Bloom, L1BatchNumber, H160, H256, U64}; use zksync_web3_decl::{ client::{WsClient, L2}, jsonrpsee::{ @@ -318,7 +318,7 @@ impl WsTest for BasicSubscriptionsTest { Some(new_l2_block.base_fee_per_gas.into()) ); assert_eq!(received_block_header.extra_data, Bytes::default()); - assert_eq!(received_block_header.logs_bloom, H2048::default()); + assert_eq!(received_block_header.logs_bloom, Bloom::default()); assert_eq!( received_block_header.timestamp, new_l2_block.timestamp.into() diff --git a/core/node/block_reverter/src/tests.rs b/core/node/block_reverter/src/tests.rs index a2dcae1724f..b29d01af39a 100644 --- a/core/node/block_reverter/src/tests.rs +++ b/core/node/block_reverter/src/tests.rs @@ -66,6 +66,7 @@ async fn setup_storage(storage: &mut Connection<'_, 
Core>, storage_logs: &[Stora protocol_version: Some(ProtocolVersionId::latest()), virtual_blocks: 1, gas_limit: 0, + logs_bloom: Default::default(), }; storage .blocks_dal() diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index 2c6fdc79a52..9cf06b992e8 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -32,7 +32,10 @@ use zksync_node_test_utils::{create_l1_batch_metadata, l1_batch_metadata_to_comm use zksync_state_keeper::{ io::{IoCursor, L1BatchParams, L2BlockParams}, seal_criteria::NoopSealer, - testonly::{fund, l1_transaction, l2_transaction, MockBatchExecutor}, + testonly::{ + fund, l1_transaction, l2_transaction, test_batch_executor::MockReadStorageFactory, + MockBatchExecutor, + }, AsyncRocksdbCache, MainBatchExecutor, OutputHandler, StateKeeperPersistence, TreeWritesPersistence, ZkSyncStateKeeper, }; @@ -631,7 +634,7 @@ impl StateKeeperRunner { .with_handler(Box::new(tree_writes_persistence)) .with_handler(Box::new(self.sync_state.clone())), Arc::new(NoopSealer), - Arc::new(self.pool.0.clone()), + Arc::new(MockReadStorageFactory), ) .run() .await diff --git a/core/node/db_pruner/src/tests.rs b/core/node/db_pruner/src/tests.rs index d4dbe454603..a5458e996e1 100644 --- a/core/node/db_pruner/src/tests.rs +++ b/core/node/db_pruner/src/tests.rs @@ -121,6 +121,7 @@ async fn insert_l2_blocks( protocol_version: Some(Default::default()), virtual_blocks: 0, gas_limit: 0, + logs_bloom: Default::default(), }; conn.blocks_dal() diff --git a/core/node/genesis/src/lib.rs b/core/node/genesis/src/lib.rs index dcb9ba2c012..bbad6b9a222 100644 --- a/core/node/genesis/src/lib.rs +++ b/core/node/genesis/src/lib.rs @@ -20,8 +20,8 @@ use zksync_types::{ protocol_version::{L1VerifierConfig, ProtocolSemanticVersion}, system_contracts::get_system_smart_contracts, web3::{BlockNumber, FilterBuilder}, - AccountTreeId, Address, L1BatchNumber, L1ChainId, L2BlockNumber, L2ChainId, ProtocolVersion, - 
ProtocolVersionId, StorageKey, H256, + AccountTreeId, Address, Bloom, L1BatchNumber, L1ChainId, L2BlockNumber, L2ChainId, + ProtocolVersion, ProtocolVersionId, StorageKey, H256, }; use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; @@ -359,6 +359,7 @@ pub async fn create_genesis_l1_batch( protocol_version: Some(protocol_version.minor), virtual_blocks: 0, gas_limit: 0, + logs_bloom: Bloom::zero(), }; let mut transaction = storage.start_transaction().await?; diff --git a/core/node/logs_bloom_backfill/Cargo.toml b/core/node/logs_bloom_backfill/Cargo.toml new file mode 100644 index 00000000000..5e6ddef6df9 --- /dev/null +++ b/core/node/logs_bloom_backfill/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "zksync_logs_bloom_backfill" +description = "ZKsync logs bloom backfill" +version.workspace = true +edition.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true +keywords.workspace = true +categories.workspace = true + +[dependencies] +zksync_dal.workspace = true +zksync_types.workspace = true + +tokio = { workspace = true, features = ["time"] } +anyhow.workspace = true +tracing.workspace = true diff --git a/core/node/logs_bloom_backfill/src/lib.rs b/core/node/logs_bloom_backfill/src/lib.rs new file mode 100644 index 00000000000..e5a270928e7 --- /dev/null +++ b/core/node/logs_bloom_backfill/src/lib.rs @@ -0,0 +1,233 @@ +use std::time::Duration; + +use anyhow::Context; +use tokio::sync::watch; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; +use zksync_types::{block::build_bloom, BloomInput, L2BlockNumber}; + +#[derive(Debug)] +pub struct LogsBloomBackfill { + connection_pool: ConnectionPool, +} + +#[derive(Debug, PartialEq)] +enum BloomWaitOutcome { + Ok, + Canceled, +} + +impl LogsBloomBackfill { + pub fn new(connection_pool: ConnectionPool) -> Self { + Self { connection_pool } + } + + async fn wait_for_l2_block_with_bloom( + connection: &mut Connection<'_, Core>, + 
stop_receiver: &mut watch::Receiver, + ) -> anyhow::Result { + const INTERVAL: Duration = Duration::from_secs(1); + tracing::debug!("waiting for at least one L2 block in DB with bloom"); + + loop { + if *stop_receiver.borrow() { + return Ok(BloomWaitOutcome::Canceled); + } + + if connection.blocks_dal().has_last_l2_block_bloom().await? { + return Ok(BloomWaitOutcome::Ok); + } + + // We don't check the result: if a stop signal is received, we'll return at the start + // of the next iteration. + tokio::time::timeout(INTERVAL, stop_receiver.changed()) + .await + .ok(); + } + } + + pub async fn run(self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> { + let mut connection = self + .connection_pool + .connection_tagged("logs_bloom_backfill") + .await?; + + if Self::wait_for_l2_block_with_bloom(&mut connection, &mut stop_receiver).await? + == BloomWaitOutcome::Canceled + { + return Ok(()); // Stop signal received + } + + let max_block_without_bloom = connection + .blocks_dal() + .get_max_l2_block_without_bloom() + .await?; + let Some(max_block_without_bloom) = max_block_without_bloom else { + tracing::info!("all blooms are already there, exiting migration"); + return Ok(()); + }; + let first_l2_block = connection + .blocks_dal() + .get_earliest_l2_block_number() + .await? 
+ .context( + "logs_bloom_backfill: missing l2 block in DB after waiting for at least one", + )?; + + tracing::info!("starting blooms backfill from block {max_block_without_bloom}"); + let mut right_bound = max_block_without_bloom.0; + loop { + const WINDOW: u32 = 1000; + + if *stop_receiver.borrow_and_update() { + tracing::info!("received a stop signal; logs bloom backfill is shut down"); + } + + let left_bound = right_bound.saturating_sub(WINDOW - 1).max(first_l2_block.0); + tracing::info!( + "started calculating blooms for block range {left_bound}..={right_bound}" + ); + + let mut bloom_items = connection + .events_dal() + .get_bloom_items_for_l2_blocks( + L2BlockNumber(left_bound)..=L2BlockNumber(right_bound), + ) + .await?; + + let blooms: Vec<_> = (left_bound..=right_bound) + .map(|block| { + let items = bloom_items + .remove(&L2BlockNumber(block)) + .unwrap_or_default(); + let iter = items.iter().map(|v| BloomInput::Raw(v.as_slice())); + build_bloom(iter) + }) + .collect(); + connection + .blocks_dal() + .range_update_logs_bloom(L2BlockNumber(left_bound), &blooms) + .await?; + tracing::info!("filled blooms for block range {left_bound}..={right_bound}"); + + if left_bound == first_l2_block.0 { + break; + } else { + right_bound = left_bound - 1; + } + } + + tracing::info!("logs bloom backfill is finished"); + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use zksync_types::{ + block::L2BlockHeader, tx::IncludedTxLocation, Address, L1BatchNumber, VmEvent, H256, + }; + + use super::*; + + async fn create_l2_block( + conn: &mut Connection<'_, Core>, + l2_block_number: L2BlockNumber, + block_events: &[VmEvent], + ) { + let l2_block_header = L2BlockHeader { + number: l2_block_number, + timestamp: 0, + hash: H256::from_low_u64_be(u64::from(l2_block_number.0)), + l1_tx_count: 0, + l2_tx_count: 0, + fee_account_address: Address::repeat_byte(1), + base_fee_per_gas: 0, + gas_per_pubdata_limit: 0, + batch_fee_input: Default::default(), + base_system_contracts_hashes: 
Default::default(), + protocol_version: Some(Default::default()), + virtual_blocks: 0, + gas_limit: 0, + logs_bloom: Default::default(), + }; + + conn.blocks_dal() + .insert_l2_block(&l2_block_header) + .await + .unwrap(); + + let events_vec: Vec<_> = block_events.iter().collect(); + conn.events_dal() + .save_events( + l2_block_number, + &[( + IncludedTxLocation { + tx_hash: Default::default(), + tx_index_in_l2_block: 0, + tx_initiator_address: Default::default(), + }, + events_vec, + )], + ) + .await + .unwrap(); + } + + #[tokio::test] + async fn test_logs_bloom_backfill() { + let connection_pool = ConnectionPool::::test_pool().await; + let mut connection = connection_pool.connection().await.unwrap(); + connection + .protocol_versions_dal() + .save_protocol_version_with_tx(&Default::default()) + .await + .unwrap(); + + let blocks_count = 5u32; + for block_number in 0..blocks_count { + let event = VmEvent { + location: (L1BatchNumber(0), 0), + address: Address::from_low_u64_be(block_number as u64 + 1), + indexed_topics: Vec::new(), + value: Vec::new(), + }; + create_l2_block(&mut connection, L2BlockNumber(block_number), &[event]).await; + + if block_number + 1 < blocks_count { + // Drop bloom if block is not last. 
+ connection + .blocks_dal() + .drop_l2_block_bloom(L2BlockNumber(block_number)) + .await + .unwrap(); + } + } + let max_block_without_bloom = connection + .blocks_dal() + .get_max_l2_block_without_bloom() + .await + .unwrap(); + assert_eq!( + max_block_without_bloom, + Some(L2BlockNumber(blocks_count) - 2) + ); + + let migration = LogsBloomBackfill::new(connection_pool.clone()); + let (_sender, receiver) = watch::channel(false); + migration.run(receiver).await.unwrap(); + + for block_number in 0..(blocks_count - 1) { + let header = connection + .blocks_dal() + .get_l2_block_header(L2BlockNumber(block_number)) + .await + .unwrap() + .unwrap(); + let address = Address::from_low_u64_be(block_number as u64 + 1); + let contains_address = header + .logs_bloom + .contains_input(BloomInput::Raw(address.as_bytes())); + assert!(contains_address); + } + } +} diff --git a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml index 142d6cfa11a..3a81a578c03 100644 --- a/core/node/node_framework/Cargo.toml +++ b/core/node/node_framework/Cargo.toml @@ -54,6 +54,7 @@ zksync_base_token_adjuster.workspace = true zksync_node_storage_init.workspace = true zksync_external_price_api.workspace = true zksync_external_proof_integration_api.workspace = true +zksync_logs_bloom_backfill.workspace = true pin-project-lite.workspace = true tracing.workspace = true diff --git a/core/node/node_framework/src/implementations/layers/eth_watch.rs b/core/node/node_framework/src/implementations/layers/eth_watch.rs index 13f593644dc..53eeb1c5280 100644 --- a/core/node/node_framework/src/implementations/layers/eth_watch.rs +++ b/core/node/node_framework/src/implementations/layers/eth_watch.rs @@ -56,7 +56,7 @@ impl WiringLayer for EthWatchLayer { } async fn wire(self, input: Self::Input) -> Result { - let main_pool = input.master_pool.get().await.unwrap(); + let main_pool = input.master_pool.get().await?; let client = input.eth_client.0; let eth_client = EthHttpQueryClient::new( diff 
--git a/core/node/node_framework/src/implementations/layers/external_proof_integration_api.rs b/core/node/node_framework/src/implementations/layers/external_proof_integration_api.rs index 7877bc6abbe..6f8805bc5fa 100644 --- a/core/node/node_framework/src/implementations/layers/external_proof_integration_api.rs +++ b/core/node/node_framework/src/implementations/layers/external_proof_integration_api.rs @@ -59,7 +59,7 @@ impl WiringLayer for ExternalProofIntegrationApiLayer { } async fn wire(self, input: Self::Input) -> Result { - let main_pool = input.master_pool.get().await.unwrap(); + let main_pool = input.master_pool.get().await?; let blob_store = input.object_store.0; let task = ProverApiTask { diff --git a/core/node/node_framework/src/implementations/layers/logs_bloom_backfill.rs b/core/node/node_framework/src/implementations/layers/logs_bloom_backfill.rs new file mode 100644 index 00000000000..4e37549a775 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/logs_bloom_backfill.rs @@ -0,0 +1,61 @@ +use zksync_logs_bloom_backfill::LogsBloomBackfill; + +use crate::{ + implementations::resources::pools::{MasterPool, PoolResource}, + service::StopReceiver, + task::{Task, TaskId, TaskKind}, + wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, +}; + +/// Wiring layer for logs bloom backfill. +/// +/// Responsible for initializing and running the [`LogsBloomBackfill`] task, which backfills `logsBloom` for old blocks.
+#[derive(Debug)] +pub struct LogsBloomBackfillLayer; + +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub logs_bloom_backfill: LogsBloomBackfill, +} + +#[async_trait::async_trait] +impl WiringLayer for LogsBloomBackfillLayer { + type Input = Input; + type Output = Output; + + fn layer_name(&self) -> &'static str { + "logs_bloom_backfill_layer" + } + + async fn wire(self, input: Self::Input) -> Result { + let pool = input.master_pool.get_singleton().await?; + let logs_bloom_backfill = LogsBloomBackfill::new(pool); + Ok(Output { + logs_bloom_backfill, + }) + } +} + +#[async_trait::async_trait] +impl Task for LogsBloomBackfill { + fn kind(&self) -> TaskKind { + TaskKind::OneshotTask + } + + fn id(&self) -> TaskId { + "logs_bloom_backfill".into() + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).run(stop_receiver.0).await + } +} diff --git a/core/node/node_framework/src/implementations/layers/mod.rs b/core/node/node_framework/src/implementations/layers/mod.rs index 6256f2d6104..6f3500a82cb 100644 --- a/core/node/node_framework/src/implementations/layers/mod.rs +++ b/core/node/node_framework/src/implementations/layers/mod.rs @@ -15,6 +15,7 @@ pub mod healtcheck_server; pub mod house_keeper; pub mod l1_batch_commitment_mode_validation; pub mod l1_gas; +pub mod logs_bloom_backfill; pub mod main_node_client; pub mod main_node_fee_params_fetcher; pub mod metadata_calculator; diff --git a/core/node/node_framework/src/implementations/layers/proof_data_handler.rs b/core/node/node_framework/src/implementations/layers/proof_data_handler.rs index bcb3cedc6e7..b53ff73c1a0 100644 --- a/core/node/node_framework/src/implementations/layers/proof_data_handler.rs +++ b/core/node/node_framework/src/implementations/layers/proof_data_handler.rs @@ -59,7 +59,7 @@ impl WiringLayer 
for ProofDataHandlerLayer { } async fn wire(self, input: Self::Input) -> Result { - let main_pool = input.master_pool.get().await.unwrap(); + let main_pool = input.master_pool.get().await?; let blob_store = input.object_store.0; let task = ProofDataHandlerTask { diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs b/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs index b0dfe0f1600..a77344f3706 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs @@ -102,7 +102,7 @@ impl WiringLayer for StateKeeperLayer { let state_keeper = StateKeeperTask { io, - batch_executor_base, + batch_executor: batch_executor_base, output_handler, sealer, storage_factory: Arc::new(storage_factory), @@ -125,7 +125,7 @@ impl WiringLayer for StateKeeperLayer { #[derive(Debug)] pub struct StateKeeperTask { io: Box, - batch_executor_base: Box, + batch_executor: Box, output_handler: OutputHandler, sealer: Arc, storage_factory: Arc, @@ -141,7 +141,7 @@ impl Task for StateKeeperTask { let state_keeper = ZkSyncStateKeeper::new( stop_receiver.0, self.io, - self.batch_executor_base, + self.batch_executor, self.output_handler, self.sealer, self.storage_factory, diff --git a/core/node/node_sync/src/tests.rs b/core/node/node_sync/src/tests.rs index e091472ad51..edd8306e72e 100644 --- a/core/node/node_sync/src/tests.rs +++ b/core/node/node_sync/src/tests.rs @@ -13,7 +13,7 @@ use zksync_node_test_utils::{ use zksync_state_keeper::{ io::{L1BatchParams, L2BlockParams}, seal_criteria::NoopSealer, - testonly::test_batch_executor::TestBatchExecutorBuilder, + testonly::test_batch_executor::{MockReadStorageFactory, TestBatchExecutorBuilder}, OutputHandler, StateKeeperPersistence, TreeWritesPersistence, ZkSyncStateKeeper, }; use zksync_types::{ @@ -132,7 +132,7 @@ impl StateKeeperHandles { Box::new(batch_executor_base), output_handler, 
Arc::new(NoopSealer), - Arc::new(pool), + Arc::new(MockReadStorageFactory), ); Self { diff --git a/core/node/state_keeper/Cargo.toml b/core/node/state_keeper/Cargo.toml index 890543bcd91..d1cd88ee277 100644 --- a/core/node/state_keeper/Cargo.toml +++ b/core/node/state_keeper/Cargo.toml @@ -33,7 +33,6 @@ zksync_base_token_adjuster.workspace = true anyhow.workspace = true async-trait.workspace = true -tempfile.workspace = true # used in `testonly` module tokio = { workspace = true, features = ["time"] } thiserror.workspace = true tracing.workspace = true @@ -44,6 +43,7 @@ hex.workspace = true [dev-dependencies] assert_matches.workspace = true +tempfile.workspace = true test-casing.workspace = true futures.workspace = true diff --git a/core/node/state_keeper/src/batch_executor/main_executor.rs b/core/node/state_keeper/src/batch_executor/main_executor.rs index cc05da9235b..b4090460116 100644 --- a/core/node/state_keeper/src/batch_executor/main_executor.rs +++ b/core/node/state_keeper/src/batch_executor/main_executor.rs @@ -6,8 +6,8 @@ use tokio::{runtime::Handle, sync::mpsc}; use zksync_multivm::{ interface::{ storage::{ReadStorage, StorageView}, - ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, SystemEnv, - VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, + CompressedBytecodeInfo, ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, + SystemEnv, VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, }, tracers::CallTracer, vm_latest::HistoryEnabled, @@ -16,7 +16,6 @@ use zksync_multivm::{ use zksync_shared_metrics::{InteractionType, TxStage, APP_METRICS}; use zksync_state::OwnedStorage; use zksync_types::{vm::FastVmMode, vm_trace::Call, Transaction}; -use zksync_utils::bytecode::CompressedBytecodeInfo; use super::{BatchExecutor, BatchExecutorHandle, Command, TxExecutionResult}; use crate::{ @@ -58,7 +57,7 @@ impl MainBatchExecutor { } } -impl BatchExecutor for MainBatchExecutor { +impl BatchExecutor for 
MainBatchExecutor { fn init_batch( &mut self, storage: OwnedStorage, diff --git a/core/node/state_keeper/src/batch_executor/mod.rs b/core/node/state_keeper/src/batch_executor/mod.rs index b6f57694afa..2040328ba79 100644 --- a/core/node/state_keeper/src/batch_executor/mod.rs +++ b/core/node/state_keeper/src/batch_executor/mod.rs @@ -6,12 +6,11 @@ use tokio::{ task::JoinHandle, }; use zksync_multivm::interface::{ - storage::StorageViewCache, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, SystemEnv, - VmExecutionResultAndLogs, + storage::StorageViewCache, CompressedBytecodeInfo, FinishedL1Batch, Halt, L1BatchEnv, + L2BlockEnv, SystemEnv, VmExecutionResultAndLogs, }; use zksync_state::OwnedStorage; use zksync_types::{vm_trace::Call, Transaction}; -use zksync_utils::bytecode::CompressedBytecodeInfo; use crate::{ metrics::{ExecutorCommand, EXECUTOR_METRICS}, @@ -55,10 +54,12 @@ impl TxExecutionResult { /// An abstraction that allows us to create different kinds of batch executors. /// The only requirement is to return a [`BatchExecutorHandle`], which does its work /// by communicating with the externally initialized thread. -pub trait BatchExecutor: 'static + Send + Sync + fmt::Debug { +/// +/// This type is generic over the storage type accepted to create the VM instance, mostly for testing purposes. 
+pub trait BatchExecutor: 'static + Send + Sync + fmt::Debug { fn init_batch( &mut self, - storage: OwnedStorage, + storage: S, l1_batch_params: L1BatchEnv, system_env: SystemEnv, ) -> BatchExecutorHandle; diff --git a/core/node/state_keeper/src/batch_executor/tests/tester.rs b/core/node/state_keeper/src/batch_executor/tests/tester.rs index 6730d427c67..e70c8b06fe0 100644 --- a/core/node/state_keeper/src/batch_executor/tests/tester.rs +++ b/core/node/state_keeper/src/batch_executor/tests/tester.rs @@ -10,6 +10,7 @@ use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractEx use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_multivm::{ interface::{L1BatchEnv, L2BlockEnv, SystemEnv}, + utils::StorageWritesDeduplicator, vm_latest::constants::INITIAL_STORAGE_WRITE_PUBDATA_BYTES, }; use zksync_node_genesis::{create_genesis_l1_batch, GenesisParams}; @@ -21,7 +22,6 @@ use zksync_types::{ ethabi::Token, protocol_version::ProtocolSemanticVersion, snapshots::{SnapshotRecoveryStatus, SnapshotStorageLog}, - storage_writes_deduplicator::StorageWritesDeduplicator, system_contracts::get_system_smart_contracts, utils::storage_key_for_standard_token_balance, vm::FastVmMode, diff --git a/core/node/state_keeper/src/io/common/tests.rs b/core/node/state_keeper/src/io/common/tests.rs index f3b3f6e0fb4..4d2907e8291 100644 --- a/core/node/state_keeper/src/io/common/tests.rs +++ b/core/node/state_keeper/src/io/common/tests.rs @@ -9,14 +9,15 @@ use futures::FutureExt; use zksync_config::GenesisConfig; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{ConnectionPool, Core}; +use zksync_multivm::interface::TransactionExecutionMetrics; use zksync_node_genesis::{insert_genesis_batch, mock_genesis_config, GenesisParams}; use zksync_node_test_utils::{ create_l1_batch, create_l2_block, create_l2_transaction, execute_l2_transaction, prepare_recovery_snapshot, }; use zksync_types::{ - block::L2BlockHasher, fee::TransactionExecutionMetrics, - 
protocol_version::ProtocolSemanticVersion, L2ChainId, ProtocolVersion, ProtocolVersionId, + block::L2BlockHasher, protocol_version::ProtocolSemanticVersion, L2ChainId, ProtocolVersion, + ProtocolVersionId, }; use zksync_vm_utils::storage::L1BatchParamsProvider; diff --git a/core/node/state_keeper/src/io/persistence.rs b/core/node/state_keeper/src/io/persistence.rs index de9ac22e177..4dfb7400ffc 100644 --- a/core/node/state_keeper/src/io/persistence.rs +++ b/core/node/state_keeper/src/io/persistence.rs @@ -352,11 +352,11 @@ mod tests { use assert_matches::assert_matches; use futures::FutureExt; use zksync_dal::CoreDal; - use zksync_multivm::zk_evm_latest::ethereum_types::{H256, U256}; + use zksync_multivm::interface::VmExecutionMetrics; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; use zksync_types::{ - api::TransactionStatus, block::BlockGasCount, tx::ExecutionMetrics, - writes::StateDiffRecord, L1BatchNumber, L2BlockNumber, StorageLogKind, + api::TransactionStatus, block::BlockGasCount, writes::StateDiffRecord, L1BatchNumber, + L2BlockNumber, StorageLogKind, H256, U256, }; use zksync_utils::h256_to_u256; @@ -464,7 +464,7 @@ mod tests { tx_result, vec![], BlockGasCount::default(), - ExecutionMetrics::default(), + VmExecutionMetrics::default(), vec![], ); output_handler.handle_l2_block(&updates).await.unwrap(); diff --git a/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs b/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs index 03495c0d98b..71f711b8c2a 100644 --- a/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs +++ b/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs @@ -367,6 +367,7 @@ impl L2BlockSealSubtask for InsertL2ToL1LogsSubtask { mod tests { use zksync_dal::{ConnectionPool, Core}; use zksync_multivm::{ + interface::{TransactionExecutionResult, TxExecutionStatus}, utils::{get_max_batch_gas_limit, get_max_gas_per_pubdata_byte}, zk_evm_latest::ethereum_types::H256, 
VmVersion, @@ -375,7 +376,6 @@ mod tests { use zksync_types::{ block::L2BlockHeader, l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - tx::{tx_execution_info::TxExecutionStatus, TransactionExecutionResult}, AccountTreeId, Address, L1BatchNumber, ProtocolVersionId, StorageKey, StorageLog, StorageLogKind, StorageLogWithPreviousValue, VmEvent, }; @@ -526,6 +526,7 @@ mod tests { gas_per_pubdata_limit: get_max_gas_per_pubdata_byte(VmVersion::latest()), virtual_blocks: l2_block_seal_command.l2_block.virtual_blocks, gas_limit: get_max_batch_gas_limit(VmVersion::latest()), + logs_bloom: Default::default(), }; connection .protocol_versions_dal() diff --git a/core/node/state_keeper/src/io/seal_logic/mod.rs b/core/node/state_keeper/src/io/seal_logic/mod.rs index 92630015f2a..65d1cc9e208 100644 --- a/core/node/state_keeper/src/io/seal_logic/mod.rs +++ b/core/node/state_keeper/src/io/seal_logic/mod.rs @@ -9,21 +9,23 @@ use std::{ use anyhow::Context as _; use itertools::Itertools; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; -use zksync_multivm::utils::{get_max_batch_gas_limit, get_max_gas_per_pubdata_byte}; +use zksync_multivm::{ + interface::{DeduplicatedWritesMetrics, TransactionExecutionResult}, + utils::{ + get_max_batch_gas_limit, get_max_gas_per_pubdata_byte, ModifiedSlot, + StorageWritesDeduplicator, + }, +}; use zksync_shared_metrics::{BlockStage, L2BlockStage, APP_METRICS}; use zksync_types::{ - block::{L1BatchHeader, L2BlockHeader}, + block::{build_bloom, L1BatchHeader, L2BlockHeader}, event::extract_long_l2_to_l1_messages, helpers::unix_timestamp_ms, l2_to_l1_log::UserL2ToL1Log, - storage_writes_deduplicator::{ModifiedSlot, StorageWritesDeduplicator}, - tx::{ - tx_execution_info::DeduplicatedWritesMetrics, IncludedTxLocation, - TransactionExecutionResult, - }, + tx::IncludedTxLocation, utils::display_timestamp, - Address, ExecuteTransactionCommon, ProtocolVersionId, StorageKey, StorageLog, Transaction, - VmEvent, H256, + Address, BloomInput, 
ExecuteTransactionCommon, ProtocolVersionId, StorageKey, StorageLog, + Transaction, VmEvent, H256, }; use zksync_utils::u256_to_h256; @@ -358,6 +360,17 @@ impl L2BlockSealCommand { // Run sub-tasks in parallel. L2BlockSealProcess::run_subtasks(self, strategy).await?; + let progress = L2_BLOCK_METRICS.start(L2BlockSealStage::CalculateLogsBloom, is_fictive); + let iter = self.l2_block.events.iter().flat_map(|event| { + event + .indexed_topics + .iter() + .map(|topic| BloomInput::Raw(topic.as_bytes())) + .chain([BloomInput::Raw(event.address.as_bytes())]) + }); + let logs_bloom = build_bloom(iter); + progress.observe(Some(self.l2_block.events.len())); + // Seal block header at the last step. let progress = L2_BLOCK_METRICS.start(L2BlockSealStage::InsertL2BlockHeader, is_fictive); let definite_vm_version = self @@ -379,6 +392,7 @@ impl L2BlockSealCommand { gas_per_pubdata_limit: get_max_gas_per_pubdata_byte(definite_vm_version), virtual_blocks: self.l2_block.virtual_blocks, gas_limit: get_max_batch_gas_limit(definite_vm_version), + logs_bloom, }; let mut connection = strategy.connection().await?; diff --git a/core/node/state_keeper/src/io/tests/mod.rs b/core/node/state_keeper/src/io/tests/mod.rs index 7c70607c763..9cc0a9ac98e 100644 --- a/core/node/state_keeper/src/io/tests/mod.rs +++ b/core/node/state_keeper/src/io/tests/mod.rs @@ -4,14 +4,15 @@ use test_casing::test_casing; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_mempool::L2TxFilter; -use zksync_multivm::utils::derive_base_fee_and_gas_per_pubdata; +use zksync_multivm::{ + interface::{TransactionExecutionMetrics, VmExecutionMetrics}, + utils::derive_base_fee_and_gas_per_pubdata, +}; use zksync_node_test_utils::prepare_recovery_snapshot; use zksync_types::{ block::{BlockGasCount, L2BlockHasher}, commitment::L1BatchCommitmentMode, - fee::TransactionExecutionMetrics, fee_model::{BatchFeeInput, PubdataIndependentBatchFeeModelInput}, - 
tx::ExecutionMetrics, AccountTreeId, Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersion, ProtocolVersionId, StorageKey, VmEvent, H256, U256, }; @@ -246,7 +247,7 @@ async fn processing_storage_logs_when_sealing_l2_block() { tx, execution_result, BlockGasCount::default(), - ExecutionMetrics::default(), + VmExecutionMetrics::default(), vec![], vec![], ); @@ -264,7 +265,7 @@ async fn processing_storage_logs_when_sealing_l2_block() { tx, execution_result, BlockGasCount::default(), - ExecutionMetrics::default(), + VmExecutionMetrics::default(), vec![], vec![], ); @@ -353,7 +354,7 @@ async fn processing_events_when_sealing_l2_block() { tx, execution_result, BlockGasCount::default(), - ExecutionMetrics::default(), + VmExecutionMetrics::default(), vec![], vec![], ); @@ -457,7 +458,7 @@ async fn l2_block_processing_after_snapshot_recovery(commitment_mode: L1BatchCom create_execution_result([]), vec![], BlockGasCount::default(), - ExecutionMetrics::default(), + VmExecutionMetrics::default(), vec![], ); diff --git a/core/node/state_keeper/src/io/tests/tester.rs b/core/node/state_keeper/src/io/tests/tester.rs index dc5e5f345d5..2dc45a5eaaa 100644 --- a/core/node/state_keeper/src/io/tests/tester.rs +++ b/core/node/state_keeper/src/io/tests/tester.rs @@ -10,7 +10,10 @@ use zksync_config::{ use zksync_contracts::BaseSystemContracts; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_eth_client::{clients::MockSettlementLayer, BaseFees}; -use zksync_multivm::vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT; +use zksync_multivm::{ + interface::{TransactionExecutionMetrics, TransactionExecutionResult}, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, +}; use zksync_node_fee_model::{ l1_gas_price::{GasAdjuster, GasAdjusterClient}, MainNodeFeeInputProvider, @@ -22,12 +25,10 @@ use zksync_node_test_utils::{ use zksync_types::{ block::L2BlockHeader, commitment::L1BatchCommitmentMode, - fee::TransactionExecutionMetrics, fee_model::{BatchFeeInput, 
FeeModelConfig, FeeModelConfigV1}, l2::L2Tx, protocol_version::{L1VerifierConfig, ProtocolSemanticVersion}, system_contracts::get_system_smart_contracts, - tx::TransactionExecutionResult, L2BlockNumber, L2ChainId, PriorityOpId, ProtocolVersionId, H256, }; diff --git a/core/node/state_keeper/src/keeper.rs b/core/node/state_keeper/src/keeper.rs index 934ed9493f8..a610194ab9c 100644 --- a/core/node/state_keeper/src/keeper.rs +++ b/core/node/state_keeper/src/keeper.rs @@ -1,18 +1,22 @@ use std::{ convert::Infallible, + fmt, sync::Arc, time::{Duration, Instant}, }; use anyhow::Context as _; +use async_trait::async_trait; use tokio::sync::watch; use tracing::{info_span, Instrument}; -use zksync_multivm::interface::{Halt, L1BatchEnv, SystemEnv}; +use zksync_multivm::{ + interface::{Halt, L1BatchEnv, SystemEnv}, + utils::StorageWritesDeduplicator, +}; use zksync_state::ReadStorageFactory; use zksync_types::{ block::L2BlockExecutionData, l2::TransactionType, protocol_upgrade::ProtocolUpgradeTx, - protocol_version::ProtocolVersionId, storage_writes_deduplicator::StorageWritesDeduplicator, - utils::display_timestamp, L1BatchNumber, Transaction, + protocol_version::ProtocolVersionId, utils::display_timestamp, L1BatchNumber, Transaction, }; use super::{ @@ -48,6 +52,45 @@ impl Error { } } +/// Functionality [`BatchExecutor`] + [`ReadStorageFactory`] with an erased storage type. This allows to keep +/// [`ZkSyncStateKeeper`] not parameterized by the storage type, simplifying its dependency injection and usage in tests. +#[async_trait] +trait ErasedBatchExecutor: fmt::Debug + Send { + async fn init_batch( + &mut self, + l1_batch_env: L1BatchEnv, + system_env: SystemEnv, + stop_receiver: &watch::Receiver, + ) -> Result; +} + +/// The only [`ErasedBatchExecutor`] implementation. 
+#[derive(Debug)] +struct ErasedBatchExecutorImpl { + batch_executor: Box>, + storage_factory: Arc>, +} + +#[async_trait] +impl ErasedBatchExecutor for ErasedBatchExecutorImpl { + async fn init_batch( + &mut self, + l1_batch_env: L1BatchEnv, + system_env: SystemEnv, + stop_receiver: &watch::Receiver, + ) -> Result { + let storage = self + .storage_factory + .access_storage(stop_receiver, l1_batch_env.number - 1) + .await + .context("failed creating VM storage")? + .ok_or(Error::Canceled)?; + Ok(self + .batch_executor + .init_batch(storage, l1_batch_env, system_env)) + } +} + /// State keeper represents a logic layer of L1 batch / L2 block processing flow. /// It's responsible for taking all the data from the `StateKeeperIO`, feeding it into `BatchExecutor` objects /// and calling `SealManager` to decide whether an L2 block or L1 batch should be sealed. @@ -62,27 +105,28 @@ pub struct ZkSyncStateKeeper { stop_receiver: watch::Receiver, io: Box, output_handler: OutputHandler, - batch_executor_base: Box, + batch_executor: Box, sealer: Arc, - storage_factory: Arc, } impl ZkSyncStateKeeper { - pub fn new( + pub fn new( stop_receiver: watch::Receiver, sequencer: Box, - batch_executor_base: Box, + batch_executor: Box>, output_handler: OutputHandler, sealer: Arc, - storage_factory: Arc, + storage_factory: Arc>, ) -> Self { Self { stop_receiver, io: sequencer, - batch_executor_base, + batch_executor: Box::new(ErasedBatchExecutorImpl { + batch_executor, + storage_factory, + }), output_handler, sealer, - storage_factory, } } @@ -146,7 +190,12 @@ impl ZkSyncStateKeeper { .await?; let mut batch_executor = self - .create_batch_executor(l1_batch_env.clone(), system_env.clone()) + .batch_executor + .init_batch( + l1_batch_env.clone(), + system_env.clone(), + &self.stop_receiver, + ) .await?; self.restore_state(&mut batch_executor, &mut updates_manager, pending_l2_blocks) .await?; @@ -195,7 +244,12 @@ impl ZkSyncStateKeeper { (system_env, l1_batch_env) = 
self.wait_for_new_batch_env(&next_cursor).await?; updates_manager = UpdatesManager::new(&l1_batch_env, &system_env); batch_executor = self - .create_batch_executor(l1_batch_env.clone(), system_env.clone()) + .batch_executor + .init_batch( + l1_batch_env.clone(), + system_env.clone(), + &self.stop_receiver, + ) .await?; let version_changed = system_env.version != sealed_batch_protocol_version; @@ -208,24 +262,6 @@ impl ZkSyncStateKeeper { Err(Error::Canceled) } - async fn create_batch_executor( - &mut self, - l1_batch_env: L1BatchEnv, - system_env: SystemEnv, - ) -> Result { - let Some(storage) = self - .storage_factory - .access_storage(&self.stop_receiver, l1_batch_env.number - 1) - .await - .context("failed creating VM storage")? - else { - return Err(Error::Canceled); - }; - Ok(self - .batch_executor_base - .init_batch(storage, l1_batch_env, system_env)) - } - /// This function is meant to be called only once during the state-keeper initialization. /// It will check if we should load a protocol upgrade or a `setChainId` transaction, /// perform some checks and return it. 
diff --git a/core/node/state_keeper/src/mempool_actor.rs b/core/node/state_keeper/src/mempool_actor.rs index 5003d75b669..dbe1e4cb977 100644 --- a/core/node/state_keeper/src/mempool_actor.rs +++ b/core/node/state_keeper/src/mempool_actor.rs @@ -158,13 +158,11 @@ async fn get_transaction_nonces( #[cfg(test)] mod tests { + use zksync_multivm::interface::TransactionExecutionMetrics; use zksync_node_fee_model::MockBatchFeeParamsProvider; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; use zksync_node_test_utils::create_l2_transaction; - use zksync_types::{ - fee::TransactionExecutionMetrics, L2BlockNumber, PriorityOpId, ProtocolVersionId, - StorageLog, H256, - }; + use zksync_types::{L2BlockNumber, PriorityOpId, ProtocolVersionId, StorageLog, H256}; use zksync_utils::u256_to_h256; use super::*; diff --git a/core/node/state_keeper/src/metrics.rs b/core/node/state_keeper/src/metrics.rs index 0f9650881b2..1bf314d1b91 100644 --- a/core/node/state_keeper/src/metrics.rs +++ b/core/node/state_keeper/src/metrics.rs @@ -10,9 +10,11 @@ use vise::{ Metrics, }; use zksync_mempool::MempoolStore; -use zksync_multivm::interface::{VmExecutionResultAndLogs, VmRevertReason}; +use zksync_multivm::interface::{ + DeduplicatedWritesMetrics, VmExecutionResultAndLogs, VmRevertReason, +}; use zksync_shared_metrics::InteractionType; -use zksync_types::{tx::tx_execution_info::DeduplicatedWritesMetrics, ProtocolVersionId}; +use zksync_types::ProtocolVersionId; use super::seal_criteria::SealResolution; @@ -346,6 +348,7 @@ pub(super) enum L2BlockSealStage { ExtractL2ToL1Logs, InsertL2ToL1Logs, ReportTxMetrics, + CalculateLogsBloom, } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet)] diff --git a/core/node/state_keeper/src/seal_criteria/criteria/geometry_seal_criteria.rs b/core/node/state_keeper/src/seal_criteria/criteria/geometry_seal_criteria.rs index 264618f5d13..1f3e8d104ce 100644 --- a/core/node/state_keeper/src/seal_criteria/criteria/geometry_seal_criteria.rs 
+++ b/core/node/state_keeper/src/seal_criteria/criteria/geometry_seal_criteria.rs @@ -69,7 +69,7 @@ impl SealCriterion for CircuitsCriterion { } #[cfg(test)] mod tests { - use zksync_types::{circuit::CircuitStatistic, tx::ExecutionMetrics}; + use zksync_multivm::interface::{CircuitStatistic, VmExecutionMetrics}; use super::*; @@ -85,7 +85,7 @@ mod tests { } fn test_no_seal_block_resolution( - block_execution_metrics: ExecutionMetrics, + block_execution_metrics: VmExecutionMetrics, criterion: &dyn SealCriterion, protocol_version: ProtocolVersionId, ) { @@ -105,7 +105,7 @@ mod tests { } fn test_include_and_seal_block_resolution( - block_execution_metrics: ExecutionMetrics, + block_execution_metrics: VmExecutionMetrics, criterion: &dyn SealCriterion, protocol_version: ProtocolVersionId, ) { @@ -125,7 +125,7 @@ mod tests { } fn test_exclude_and_seal_block_resolution( - block_execution_metrics: ExecutionMetrics, + block_execution_metrics: VmExecutionMetrics, criterion: &dyn SealCriterion, protocol_version: ProtocolVersionId, ) { @@ -145,7 +145,7 @@ mod tests { } fn test_unexecutable_tx_resolution( - tx_execution_metrics: ExecutionMetrics, + tx_execution_metrics: VmExecutionMetrics, criterion: &dyn SealCriterion, protocol_version: ProtocolVersionId, ) { @@ -169,12 +169,12 @@ mod tests { fn circuits_seal_criterion() { let config = get_config(); let protocol_version = ProtocolVersionId::latest(); - let block_execution_metrics = ExecutionMetrics { + let block_execution_metrics = VmExecutionMetrics { circuit_statistic: CircuitStatistic { main_vm: (MAX_CIRCUITS_PER_BATCH / 4) as f32, ..CircuitStatistic::default() }, - ..ExecutionMetrics::default() + ..VmExecutionMetrics::default() }; test_no_seal_block_resolution( block_execution_metrics, @@ -182,7 +182,7 @@ mod tests { protocol_version, ); - let block_execution_metrics = ExecutionMetrics { + let block_execution_metrics = VmExecutionMetrics { circuit_statistic: CircuitStatistic { main_vm: (MAX_CIRCUITS_PER_BATCH - 1 @@ -191,7 
+191,7 @@ mod tests { )) as f32, ..CircuitStatistic::default() }, - ..ExecutionMetrics::default() + ..VmExecutionMetrics::default() }; test_include_and_seal_block_resolution( @@ -200,12 +200,12 @@ mod tests { protocol_version, ); - let block_execution_metrics = ExecutionMetrics { + let block_execution_metrics = VmExecutionMetrics { circuit_statistic: CircuitStatistic { main_vm: MAX_CIRCUITS_PER_BATCH as f32, ..CircuitStatistic::default() }, - ..ExecutionMetrics::default() + ..VmExecutionMetrics::default() }; test_exclude_and_seal_block_resolution( @@ -214,14 +214,14 @@ mod tests { protocol_version, ); - let tx_execution_metrics = ExecutionMetrics { + let tx_execution_metrics = VmExecutionMetrics { circuit_statistic: CircuitStatistic { main_vm: MAX_CIRCUITS_PER_BATCH as f32 * config.reject_tx_at_geometry_percentage as f32 + 1.0, ..CircuitStatistic::default() }, - ..ExecutionMetrics::default() + ..VmExecutionMetrics::default() }; test_unexecutable_tx_resolution(tx_execution_metrics, &CircuitsCriterion, protocol_version); diff --git a/core/node/state_keeper/src/seal_criteria/criteria/pubdata_bytes.rs b/core/node/state_keeper/src/seal_criteria/criteria/pubdata_bytes.rs index f575a905891..09fcf2f0fc1 100644 --- a/core/node/state_keeper/src/seal_criteria/criteria/pubdata_bytes.rs +++ b/core/node/state_keeper/src/seal_criteria/criteria/pubdata_bytes.rs @@ -66,7 +66,7 @@ impl SealCriterion for PubDataBytesCriterion { #[cfg(test)] mod tests { - use zksync_types::tx::ExecutionMetrics; + use zksync_multivm::interface::VmExecutionMetrics; use super::*; @@ -84,7 +84,7 @@ mod tests { max_pubdata_per_batch: 100000, }; - let block_execution_metrics = ExecutionMetrics { + let block_execution_metrics = VmExecutionMetrics { l2_l1_long_messages: (config.max_pubdata_per_batch as f64 * config.close_block_at_eth_params_percentage - 1.0 @@ -92,7 +92,7 @@ mod tests { ProtocolVersionId::latest().into(), ) as f64) .round() as usize, - ..ExecutionMetrics::default() + 
..VmExecutionMetrics::default() }; let empty_block_resolution = criterion.should_seal( @@ -108,12 +108,12 @@ mod tests { ); assert_eq!(empty_block_resolution, SealResolution::NoSeal); - let block_execution_metrics = ExecutionMetrics { + let block_execution_metrics = VmExecutionMetrics { l2_l1_long_messages: (config.max_pubdata_per_batch as f64 * config.close_block_at_eth_params_percentage + 1f64) .round() as usize, - ..ExecutionMetrics::default() + ..VmExecutionMetrics::default() }; let full_block_resolution = criterion.should_seal( @@ -129,9 +129,9 @@ mod tests { ); assert_eq!(full_block_resolution, SealResolution::IncludeAndSeal); - let block_execution_metrics = ExecutionMetrics { + let block_execution_metrics = VmExecutionMetrics { l2_l1_long_messages: config.max_pubdata_per_batch as usize + 1, - ..ExecutionMetrics::default() + ..VmExecutionMetrics::default() }; let full_block_resolution = criterion.should_seal( &config, diff --git a/core/node/state_keeper/src/seal_criteria/mod.rs b/core/node/state_keeper/src/seal_criteria/mod.rs index 01be129dde6..e3fe849e802 100644 --- a/core/node/state_keeper/src/seal_criteria/mod.rs +++ b/core/node/state_keeper/src/seal_criteria/mod.rs @@ -13,13 +13,12 @@ use std::fmt; use zksync_config::configs::chain::StateKeeperConfig; -use zksync_multivm::{interface::Halt, vm_latest::TransactionVmExt}; +use zksync_multivm::{ + interface::{DeduplicatedWritesMetrics, Halt, TransactionExecutionMetrics, VmExecutionMetrics}, + vm_latest::TransactionVmExt, +}; use zksync_types::{ - block::BlockGasCount, - fee::TransactionExecutionMetrics, - tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}, - utils::display_timestamp, - ProtocolVersionId, Transaction, + block::BlockGasCount, utils::display_timestamp, ProtocolVersionId, Transaction, }; use zksync_utils::time::millis_since; @@ -159,7 +158,7 @@ impl SealResolution { /// to the entire L2 block / L1 batch. 
#[derive(Debug, Default)] pub struct SealData { - pub(super) execution_metrics: ExecutionMetrics, + pub(super) execution_metrics: VmExecutionMetrics, pub(super) gas_count: BlockGasCount, pub(super) cumulative_size: usize, pub(super) writes_metrics: DeduplicatedWritesMetrics, @@ -174,7 +173,7 @@ impl SealData { tx_metrics: &TransactionExecutionMetrics, protocol_version: ProtocolVersionId, ) -> Self { - let execution_metrics = ExecutionMetrics::from_tx_metrics(tx_metrics); + let execution_metrics = VmExecutionMetrics::from_tx_metrics(tx_metrics); let writes_metrics = DeduplicatedWritesMetrics::from_tx_metrics(tx_metrics); let gas_count = gas_count_from_tx_and_metrics(transaction, &execution_metrics) + gas_count_from_writes(&writes_metrics, protocol_version); @@ -289,7 +288,7 @@ mod tests { create_execution_result([]), vec![], BlockGasCount::default(), - ExecutionMetrics::default(), + VmExecutionMetrics::default(), vec![], ); } diff --git a/core/node/state_keeper/src/testonly/mod.rs b/core/node/state_keeper/src/testonly/mod.rs index 02b0043b97c..d17261a3a0f 100644 --- a/core/node/state_keeper/src/testonly/mod.rs +++ b/core/node/state_keeper/src/testonly/mod.rs @@ -9,7 +9,6 @@ use zksync_multivm::interface::{ storage::StorageViewCache, CurrentExecutionState, ExecutionResult, FinishedL1Batch, L1BatchEnv, Refunds, SystemEnv, VmExecutionLogs, VmExecutionResultAndLogs, VmExecutionStatistics, }; -use zksync_state::OwnedStorage; use zksync_test_account::Account; use zksync_types::{ fee::Fee, utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Execute, @@ -78,10 +77,10 @@ pub(crate) fn storage_view_cache() -> StorageViewCache { #[derive(Debug)] pub struct MockBatchExecutor; -impl BatchExecutor for MockBatchExecutor { +impl BatchExecutor<()> for MockBatchExecutor { fn init_batch( &mut self, - _storage: OwnedStorage, + _storage: (), _l1batch_params: L1BatchEnv, _system_env: SystemEnv, ) -> BatchExecutorHandle { diff --git 
a/core/node/state_keeper/src/testonly/test_batch_executor.rs b/core/node/state_keeper/src/testonly/test_batch_executor.rs index aefc8d50bc7..d8ee36990a1 100644 --- a/core/node/state_keeper/src/testonly/test_batch_executor.rs +++ b/core/node/state_keeper/src/testonly/test_batch_executor.rs @@ -20,7 +20,7 @@ use zksync_multivm::{ vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; use zksync_node_test_utils::create_l2_transaction; -use zksync_state::{OwnedStorage, PgOrRocksdbStorage, ReadStorageFactory, RocksdbStorage}; +use zksync_state::ReadStorageFactory; use zksync_types::{ fee_model::BatchFeeInput, protocol_upgrade::ProtocolUpgradeTx, Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, @@ -207,7 +207,7 @@ impl TestScenario { Box::new(batch_executor_base), output_handler, Arc::new(sealer), - Arc::::default(), + Arc::new(MockReadStorageFactory), ); let sk_thread = tokio::spawn(state_keeper.run()); @@ -410,10 +410,10 @@ impl TestBatchExecutorBuilder { } } -impl BatchExecutor for TestBatchExecutorBuilder { +impl BatchExecutor<()> for TestBatchExecutorBuilder { fn init_batch( &mut self, - _storage: OwnedStorage, + _storage: (), _l1_batch_params: L1BatchEnv, _system_env: SystemEnv, ) -> BatchExecutorHandle { @@ -806,28 +806,15 @@ impl StateKeeperIO for TestIO { /// Storage factory that produces empty VM storage for any batch. Should only be used with a mock batch executor /// that doesn't read from the storage. Prefer using `ConnectionPool` as a factory if it's available. 
#[derive(Debug)] -pub struct MockReadStorageFactory(tempfile::TempDir); - -impl Default for MockReadStorageFactory { - fn default() -> Self { - Self( - tempfile::TempDir::new() - .expect("failed creating temporary directory for `MockReadStorageFactory`"), - ) - } -} +pub struct MockReadStorageFactory; #[async_trait] -impl ReadStorageFactory for MockReadStorageFactory { +impl ReadStorageFactory<()> for MockReadStorageFactory { async fn access_storage( &self, _stop_receiver: &watch::Receiver, _l1_batch_number: L1BatchNumber, - ) -> anyhow::Result> { - let storage = RocksdbStorage::builder(self.0.path()) - .await - .expect("Cannot create mock RocksDB storage") - .build_unchecked(); - Ok(Some(PgOrRocksdbStorage::Rocksdb(storage).into())) + ) -> anyhow::Result> { + Ok(Some(())) } } diff --git a/core/node/state_keeper/src/tests/mod.rs b/core/node/state_keeper/src/tests/mod.rs index a5239f44483..e9a0a57c697 100644 --- a/core/node/state_keeper/src/tests/mod.rs +++ b/core/node/state_keeper/src/tests/mod.rs @@ -11,7 +11,7 @@ use zksync_config::configs::chain::StateKeeperConfig; use zksync_multivm::{ interface::{ ExecutionResult, Halt, L1BatchEnv, L2BlockEnv, Refunds, SystemEnv, TxExecutionMode, - VmExecutionLogs, VmExecutionResultAndLogs, VmExecutionStatistics, + VmExecutionLogs, VmExecutionMetrics, VmExecutionResultAndLogs, VmExecutionStatistics, }, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; @@ -20,7 +20,6 @@ use zksync_types::{ aggregated_operations::AggregatedActionType, block::{BlockGasCount, L2BlockExecutionData, L2BlockHasher}, fee_model::{BatchFeeInput, PubdataIndependentBatchFeeModelInput}, - tx::tx_execution_info::ExecutionMetrics, AccountTreeId, Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, StorageKey, StorageLog, StorageLogKind, StorageLogWithPreviousValue, Transaction, H256, U256, ZKPORTER_IS_AVAILABLE, @@ -210,7 +209,7 @@ async fn sealed_by_gas() { }; let execution_result = 
successful_exec_with_metrics(ExecutionMetricsForCriteria { l1_gas: l1_gas_per_tx, - execution_metrics: ExecutionMetrics::default(), + execution_metrics: VmExecutionMetrics::default(), }); TestScenario::new() @@ -261,7 +260,7 @@ async fn sealed_by_gas_then_by_num_tx() { prove: 0, execute: 0, }, - execution_metrics: ExecutionMetrics::default(), + execution_metrics: VmExecutionMetrics::default(), }); // 1st tx is sealed by gas sealer; 2nd, 3rd, & 4th are sealed by slots sealer. @@ -438,7 +437,7 @@ async fn load_upgrade_tx() { Box::new(batch_executor_base), output_handler, Arc::new(sealer), - Arc::::default(), + Arc::new(MockReadStorageFactory), ); // Since the version hasn't changed, and we are not using shared bridge, we should not load any diff --git a/core/node/state_keeper/src/types.rs b/core/node/state_keeper/src/types.rs index 2606e7d5c7b..e112871a647 100644 --- a/core/node/state_keeper/src/types.rs +++ b/core/node/state_keeper/src/types.rs @@ -5,10 +5,8 @@ use std::{ use zksync_dal::{Connection, Core, CoreDal}; use zksync_mempool::{L2TxFilter, MempoolInfo, MempoolStore}; -use zksync_multivm::interface::VmExecutionResultAndLogs; -use zksync_types::{ - block::BlockGasCount, tx::ExecutionMetrics, Address, Nonce, PriorityOpId, Transaction, -}; +use zksync_multivm::interface::{VmExecutionMetrics, VmExecutionResultAndLogs}; +use zksync_types::{block::BlockGasCount, Address, Nonce, PriorityOpId, Transaction}; use super::{ metrics::StateKeeperGauges, @@ -83,7 +81,7 @@ impl MempoolGuard { #[derive(Debug, Clone, Copy, PartialEq)] pub struct ExecutionMetricsForCriteria { pub l1_gas: BlockGasCount, - pub execution_metrics: ExecutionMetrics, + pub execution_metrics: VmExecutionMetrics, } impl ExecutionMetricsForCriteria { diff --git a/core/node/state_keeper/src/updates/l1_batch_updates.rs b/core/node/state_keeper/src/updates/l1_batch_updates.rs index 7bc2095ff9b..aa2e22cac48 100644 --- a/core/node/state_keeper/src/updates/l1_batch_updates.rs +++ 
b/core/node/state_keeper/src/updates/l1_batch_updates.rs @@ -1,8 +1,6 @@ -use zksync_multivm::interface::FinishedL1Batch; +use zksync_multivm::interface::{FinishedL1Batch, TransactionExecutionResult, VmExecutionMetrics}; use zksync_types::{ - block::BlockGasCount, - priority_op_onchain_data::PriorityOpOnchainData, - tx::{tx_execution_info::ExecutionMetrics, TransactionExecutionResult}, + block::BlockGasCount, priority_op_onchain_data::PriorityOpOnchainData, ExecuteTransactionCommon, L1BatchNumber, }; @@ -13,7 +11,7 @@ pub struct L1BatchUpdates { pub number: L1BatchNumber, pub executed_transactions: Vec, pub priority_ops_onchain_data: Vec, - pub block_execution_metrics: ExecutionMetrics, + pub block_execution_metrics: VmExecutionMetrics, // how much L1 gas will it take to submit this block? pub l1_gas_count: BlockGasCount, pub txs_encoding_size: usize, @@ -76,7 +74,7 @@ mod tests { tx, create_execution_result([]), BlockGasCount::default(), - ExecutionMetrics::default(), + VmExecutionMetrics::default(), vec![], vec![], ); diff --git a/core/node/state_keeper/src/updates/l2_block_updates.rs b/core/node/state_keeper/src/updates/l2_block_updates.rs index 8b3060babad..883db604aad 100644 --- a/core/node/state_keeper/src/updates/l2_block_updates.rs +++ b/core/node/state_keeper/src/updates/l2_block_updates.rs @@ -1,18 +1,20 @@ use std::collections::HashMap; use zksync_multivm::{ - interface::{ExecutionResult, L2BlockEnv, VmExecutionResultAndLogs}, + interface::{ + CompressedBytecodeInfo, ExecutionResult, L2BlockEnv, TransactionExecutionResult, + TxExecutionStatus, VmExecutionMetrics, VmExecutionResultAndLogs, + }, vm_latest::TransactionVmExt, }; use zksync_types::{ block::{BlockGasCount, L2BlockHasher}, event::extract_bytecodes_marked_as_known, l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, - tx::{tx_execution_info::TxExecutionStatus, ExecutionMetrics, TransactionExecutionResult}, vm_trace::Call, L2BlockNumber, ProtocolVersionId, StorageLogWithPreviousValue, Transaction, 
VmEvent, H256, }; -use zksync_utils::bytecode::{hash_bytecode, CompressedBytecodeInfo}; +use zksync_utils::bytecode::hash_bytecode; use crate::metrics::KEEPER_METRICS; @@ -26,7 +28,7 @@ pub struct L2BlockUpdates { pub new_factory_deps: HashMap>, /// How much L1 gas will it take to submit this block? pub l1_gas_count: BlockGasCount, - pub block_execution_metrics: ExecutionMetrics, + pub block_execution_metrics: VmExecutionMetrics, pub txs_encoding_size: usize, pub payload_encoding_size: usize, pub timestamp: u64, @@ -52,7 +54,7 @@ impl L2BlockUpdates { system_l2_to_l1_logs: vec![], new_factory_deps: HashMap::new(), l1_gas_count: BlockGasCount::default(), - block_execution_metrics: ExecutionMetrics::default(), + block_execution_metrics: VmExecutionMetrics::default(), txs_encoding_size: 0, payload_encoding_size: 0, timestamp, @@ -67,7 +69,7 @@ impl L2BlockUpdates { &mut self, result: VmExecutionResultAndLogs, l1_gas_count: BlockGasCount, - execution_metrics: ExecutionMetrics, + execution_metrics: VmExecutionMetrics, ) { self.events.extend(result.logs.events); self.storage_logs.extend(result.logs.storage_logs); @@ -85,7 +87,7 @@ impl L2BlockUpdates { tx: Transaction, tx_execution_result: VmExecutionResultAndLogs, tx_l1_gas_this_tx: BlockGasCount, - execution_metrics: ExecutionMetrics, + execution_metrics: VmExecutionMetrics, compressed_bytecodes: Vec, call_traces: Vec, ) { @@ -204,7 +206,7 @@ mod tests { tx, create_execution_result([]), BlockGasCount::default(), - ExecutionMetrics::default(), + VmExecutionMetrics::default(), vec![], vec![], ); diff --git a/core/node/state_keeper/src/updates/mod.rs b/core/node/state_keeper/src/updates/mod.rs index b1310800d8a..1ac06a6a293 100644 --- a/core/node/state_keeper/src/updates/mod.rs +++ b/core/node/state_keeper/src/updates/mod.rs @@ -1,17 +1,15 @@ use zksync_contracts::BaseSystemContractsHashes; use zksync_multivm::{ interface::{ - storage::StorageViewCache, FinishedL1Batch, L1BatchEnv, SystemEnv, VmExecutionResultAndLogs, + 
storage::StorageViewCache, CompressedBytecodeInfo, FinishedL1Batch, L1BatchEnv, SystemEnv, + VmExecutionMetrics, VmExecutionResultAndLogs, }, - utils::get_batch_base_fee, + utils::{get_batch_base_fee, StorageWritesDeduplicator}, }; use zksync_types::{ - block::BlockGasCount, fee_model::BatchFeeInput, - storage_writes_deduplicator::StorageWritesDeduplicator, - tx::tx_execution_info::ExecutionMetrics, vm_trace::Call, Address, L1BatchNumber, L2BlockNumber, - ProtocolVersionId, Transaction, + block::BlockGasCount, fee_model::BatchFeeInput, vm_trace::Call, Address, L1BatchNumber, + L2BlockNumber, ProtocolVersionId, Transaction, }; -use zksync_utils::bytecode::CompressedBytecodeInfo; pub(crate) use self::{l1_batch_updates::L1BatchUpdates, l2_block_updates::L2BlockUpdates}; use super::{ @@ -112,7 +110,7 @@ impl UpdatesManager { tx_execution_result: VmExecutionResultAndLogs, compressed_bytecodes: Vec, tx_l1_gas_this_tx: BlockGasCount, - execution_metrics: ExecutionMetrics, + execution_metrics: VmExecutionMetrics, call_traces: Vec, ) { let latency = UPDATES_MANAGER_METRICS @@ -188,7 +186,7 @@ impl UpdatesManager { self.l1_batch.l1_gas_count + self.l2_block.l1_gas_count } - pub(crate) fn pending_execution_metrics(&self) -> ExecutionMetrics { + pub(crate) fn pending_execution_metrics(&self) -> VmExecutionMetrics { self.l1_batch.block_execution_metrics + self.l2_block.block_execution_metrics } @@ -236,7 +234,7 @@ mod tests { create_execution_result([]), vec![], new_block_gas_count(), - ExecutionMetrics::default(), + VmExecutionMetrics::default(), vec![], ); diff --git a/core/node/state_keeper/src/utils.rs b/core/node/state_keeper/src/utils.rs index c99bbf51945..4240ad30625 100644 --- a/core/node/state_keeper/src/utils.rs +++ b/core/node/state_keeper/src/utils.rs @@ -1,9 +1,9 @@ +use zksync_multivm::interface::{DeduplicatedWritesMetrics, VmExecutionMetrics}; use zksync_types::{ - aggregated_operations::AggregatedActionType, - block::BlockGasCount, - 
tx::{tx_execution_info::DeduplicatedWritesMetrics, ExecutionMetrics}, - ExecuteTransactionCommon, ProtocolVersionId, Transaction, + aggregated_operations::AggregatedActionType, block::BlockGasCount, ExecuteTransactionCommon, + ProtocolVersionId, Transaction, }; + // TODO(QIT-32): Remove constants(except `L1_OPERATION_EXECUTE_COST`) and logic that use them const L1_BATCH_COMMIT_BASE_COST: u32 = 31_000; const L1_BATCH_PROVE_BASE_COST: u32 = 7_000; @@ -36,7 +36,7 @@ fn base_tx_cost(tx: &Transaction, op: AggregatedActionType) -> u32 { } } -fn additional_pubdata_commit_cost(execution_metrics: &ExecutionMetrics) -> u32 { +fn additional_pubdata_commit_cost(execution_metrics: &VmExecutionMetrics) -> u32 { (execution_metrics.size() as u32) * GAS_PER_BYTE } @@ -57,7 +57,7 @@ pub(super) fn new_block_gas_count() -> BlockGasCount { pub(super) fn gas_count_from_tx_and_metrics( tx: &Transaction, - execution_metrics: &ExecutionMetrics, + execution_metrics: &VmExecutionMetrics, ) -> BlockGasCount { let commit = base_tx_cost(tx, AggregatedActionType::Commit) + additional_pubdata_commit_cost(execution_metrics); @@ -68,7 +68,7 @@ pub(super) fn gas_count_from_tx_and_metrics( } } -pub(super) fn gas_count_from_metrics(execution_metrics: &ExecutionMetrics) -> BlockGasCount { +pub(super) fn gas_count_from_metrics(execution_metrics: &VmExecutionMetrics) -> BlockGasCount { BlockGasCount { commit: additional_pubdata_commit_cost(execution_metrics), prove: 0, diff --git a/core/node/test_utils/src/lib.rs b/core/node/test_utils/src/lib.rs index 614d64805b9..acb65bf1634 100644 --- a/core/node/test_utils/src/lib.rs +++ b/core/node/test_utils/src/lib.rs @@ -5,7 +5,10 @@ use std::collections::HashMap; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{Connection, Core, CoreDal}; use zksync_merkle_tree::{domain::ZkSyncTree, TreeInstruction}; -use zksync_multivm::utils::get_max_gas_per_pubdata_byte; +use zksync_multivm::{ + interface::{TransactionExecutionResult, TxExecutionStatus, 
VmExecutionMetrics}, + utils::get_max_gas_per_pubdata_byte, +}; use zksync_node_genesis::GenesisParams; use zksync_system_constants::{get_intrinsic_constants, ZKPORTER_IS_AVAILABLE}; use zksync_types::{ @@ -21,7 +24,6 @@ use zksync_types::{ protocol_version::ProtocolSemanticVersion, snapshots::{SnapshotRecoveryStatus, SnapshotStorageLog}, transaction_request::PaymasterParams, - tx::{tx_execution_info::TxExecutionStatus, ExecutionMetrics, TransactionExecutionResult}, Address, K256PrivateKey, L1BatchNumber, L2BlockNumber, L2ChainId, Nonce, ProtocolVersion, ProtocolVersionId, StorageLog, H256, U256, }; @@ -42,6 +44,7 @@ pub fn create_l2_block(number: u32) -> L2BlockHeader { protocol_version: Some(ProtocolVersionId::latest()), virtual_blocks: 1, gas_limit: 0, + logs_bloom: Default::default(), } } @@ -157,7 +160,7 @@ pub fn execute_l2_transaction(transaction: L2Tx) -> TransactionExecutionResult { TransactionExecutionResult { hash: transaction.hash(), transaction: transaction.into(), - execution_info: ExecutionMetrics::default(), + execution_info: VmExecutionMetrics::default(), execution_status: TxExecutionStatus::Success, refunded_gas: 0, operator_suggested_refund: 0, @@ -207,6 +210,7 @@ impl Snapshot { protocol_version: Some(genesis_params.minor_protocol_version()), virtual_blocks: 1, gas_limit: 0, + logs_bloom: Default::default(), }; Snapshot { l1_batch, diff --git a/core/node/vm_runner/src/tests/mod.rs b/core/node/vm_runner/src/tests/mod.rs index 4cb2d26f6bd..61f0a5ec3f6 100644 --- a/core/node/vm_runner/src/tests/mod.rs +++ b/core/node/vm_runner/src/tests/mod.rs @@ -5,6 +5,7 @@ use rand::{prelude::SliceRandom, Rng}; use tokio::sync::RwLock; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; +use zksync_multivm::interface::TransactionExecutionMetrics; use zksync_node_test_utils::{ create_l1_batch_metadata, create_l2_block, execute_l2_transaction, l1_batch_metadata_to_commitment_artifacts, @@ -14,7 +15,7 @@ use 
zksync_state_keeper::{StateKeeperOutputHandler, UpdatesManager}; use zksync_test_account::Account; use zksync_types::{ block::{BlockGasCount, L1BatchHeader, L2BlockHasher}, - fee::{Fee, TransactionExecutionMetrics}, + fee::Fee, get_intrinsic_constants, l2::L2Tx, utils::storage_key_for_standard_token_balance, diff --git a/core/tests/ts-integration/tests/api/web3.test.ts b/core/tests/ts-integration/tests/api/web3.test.ts index 569321d548c..c6d0ae40a43 100644 --- a/core/tests/ts-integration/tests/api/web3.test.ts +++ b/core/tests/ts-integration/tests/api/web3.test.ts @@ -44,8 +44,14 @@ describe('web3 API compatibility tests', () => { const blockHash = (await alice.provider.getBlock(blockNumber)).hash!; const blockWithTxsByNumber = await alice.provider.getBlock(blockNumber, true); expect(blockWithTxsByNumber.gasLimit).toBeGreaterThan(0n); - let sumTxGasUsed = 0n; + // `ethers.Block` doesn't include `logsBloom` for some reason. + const blockByNumberFull = await alice.provider.send('eth_getBlockByNumber', [blockNumberHex, false]); + expect(blockByNumberFull.logsBloom).toEqual(expect.stringMatching(HEX_VALUE_REGEX)); + expect(blockByNumberFull.logsBloom.length).toEqual(514); + expect(blockByNumberFull.logsBloom != ethers.zeroPadValue('0x00', 256)).toBeTruthy(); + + let sumTxGasUsed = 0n; for (const tx of blockWithTxsByNumber.prefetchedTransactions) { const receipt = await alice.provider.getTransactionReceipt(tx.hash); sumTxGasUsed = sumTxGasUsed + receipt!.gasUsed; @@ -53,12 +59,21 @@ describe('web3 API compatibility tests', () => { expect(blockWithTxsByNumber.gasUsed).toBeGreaterThanOrEqual(sumTxGasUsed); let expectedReceipts = []; + let expectedBloom = blockByNumberFull.logsBloom.toLowerCase(); + let blockBloomFromReceipts = new Uint8Array(256); for (const tx of blockWithTxsByNumber.prefetchedTransactions) { const receipt = await alice.provider.send('eth_getTransactionReceipt', [tx.hash]); expectedReceipts.push(receipt); + + let receiptBloom = 
ethers.getBytes(receipt.logsBloom); + for (let i = 0; i < blockBloomFromReceipts.length; i++) { + blockBloomFromReceipts[i] = blockBloomFromReceipts[i] | receiptBloom[i]; + } } + expect(ethers.hexlify(blockBloomFromReceipts)).toEqual(expectedBloom); + let receipts = await alice.provider.send('eth_getBlockReceipts', [blockNumberHex]); expect(receipts).toEqual(expectedReceipts); diff --git a/core/tests/vm-benchmark/Cargo.toml b/core/tests/vm-benchmark/Cargo.toml index efbc08a957a..27218d79aaf 100644 --- a/core/tests/vm-benchmark/Cargo.toml +++ b/core/tests/vm-benchmark/Cargo.toml @@ -6,8 +6,11 @@ license.workspace = true publish = false [dependencies] -zksync_vm_benchmark_harness.workspace = true +zksync_types.workspace = true zksync_vlog.workspace = true +zksync_vm_benchmark_harness.workspace = true + +rand.workspace = true vise.workspace = true tokio.workspace = true diff --git a/core/tests/vm-benchmark/benches/criterion.rs b/core/tests/vm-benchmark/benches/criterion.rs index 5becccfab80..9e12fc25f54 100644 --- a/core/tests/vm-benchmark/benches/criterion.rs +++ b/core/tests/vm-benchmark/benches/criterion.rs @@ -1,7 +1,24 @@ -use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use zksync_vm_benchmark_harness::{cut_to_allowed_bytecode_size, get_deploy_tx, BenchmarkingVm}; +use std::time::Duration; + +use criterion::{ + black_box, criterion_group, criterion_main, measurement::WallTime, BatchSize, BenchmarkGroup, + Criterion, +}; +use zksync_types::Transaction; +use zksync_vm_benchmark_harness::{ + cut_to_allowed_bytecode_size, get_deploy_tx, get_heavy_load_test_tx, get_load_test_deploy_tx, + get_load_test_tx, get_realistic_load_test_tx, BenchmarkingVm, BenchmarkingVmFactory, Fast, + Legacy, LoadTestParams, +}; + +const SAMPLE_SIZE: usize = 20; + +fn benches_in_folder(c: &mut Criterion) { + let mut group = c.benchmark_group(VM::LABEL.as_str()); + group + .sample_size(SAMPLE_SIZE) + .measurement_time(Duration::from_secs(10)); -fn 
benches_in_folder(c: &mut Criterion) { for path in std::fs::read_dir("deployment_benchmarks").unwrap() { let path = path.unwrap().path(); @@ -9,12 +26,73 @@ fn benches_in_folder(c: &mut Criterion) { let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); let tx = get_deploy_tx(code); - - c.bench_function(path.file_name().unwrap().to_str().unwrap(), |b| { - b.iter(|| BenchmarkingVm::new().run_transaction(black_box(&tx))) + let file_name = path.file_name().unwrap().to_str().unwrap(); + let full_suffix = if FULL { "/full" } else { "" }; + let bench_name = format!("{file_name}{full_suffix}"); + group.bench_function(bench_name, |bencher| { + if FULL { + // Include VM initialization / drop into the measured time + bencher.iter(|| BenchmarkingVm::::default().run_transaction(black_box(&tx))); + } else { + bencher.iter_batched( + BenchmarkingVm::::default, + |mut vm| { + let result = vm.run_transaction(black_box(&tx)); + (vm, result) + }, + BatchSize::LargeInput, // VM can consume significant amount of RAM, especially the new one + ); + } }); } } -criterion_group!(benches, benches_in_folder); +fn bench_load_test(c: &mut Criterion) { + let mut group = c.benchmark_group(VM::LABEL.as_str()); + group + .sample_size(SAMPLE_SIZE) + .measurement_time(Duration::from_secs(10)); + + // Nonce 0 is used for the deployment transaction + let tx = get_load_test_tx(1, 10_000_000, LoadTestParams::default()); + bench_load_test_transaction::(&mut group, "load_test", &tx); + + let tx = get_realistic_load_test_tx(1); + bench_load_test_transaction::(&mut group, "load_test_realistic", &tx); + + let tx = get_heavy_load_test_tx(1); + bench_load_test_transaction::(&mut group, "load_test_heavy", &tx); +} + +fn bench_load_test_transaction( + group: &mut BenchmarkGroup<'_, WallTime>, + name: &str, + tx: &Transaction, +) { + group.bench_function(name, |bencher| { + bencher.iter_batched( + || { + let mut vm = BenchmarkingVm::::default(); + vm.run_transaction(&get_load_test_deploy_tx()); + vm + 
}, + |mut vm| { + let result = vm.run_transaction(black_box(tx)); + assert!(!result.result.is_failed(), "{:?}", result.result); + (vm, result) + }, + BatchSize::LargeInput, + ); + }); +} + +criterion_group!( + benches, + benches_in_folder::, + benches_in_folder::, + benches_in_folder::, + benches_in_folder::, + bench_load_test::, + bench_load_test:: +); criterion_main!(benches); diff --git a/core/tests/vm-benchmark/benches/fill_bootloader.rs b/core/tests/vm-benchmark/benches/fill_bootloader.rs index fac422c8237..13fa1df0b2f 100644 --- a/core/tests/vm-benchmark/benches/fill_bootloader.rs +++ b/core/tests/vm-benchmark/benches/fill_bootloader.rs @@ -1,23 +1,195 @@ -use std::time::Instant; +//! Benchmarks executing entire batches of transactions with varying size (from 1 to 5,000). +//! +//! - `fill_bootloader_full/*` benches emulate the entire transaction lifecycle including taking a snapshot +//! before a transaction and rolling back to it on halt. They also include VM initialization and drop. +//! In contrast, `fill_bootloader/*` benches only cover transaction execution. +//! - `deploy_simple_contract` benches deploy a simple contract in each transaction. All transactions succeed. +//! - `transfer` benches perform the base token transfer in each transaction. All transactions succeed. +//! - `transfer_with_invalid_nonce` benches are similar to `transfer`, but each transaction with a probability +//! `TX_FAILURE_PROBABILITY` has a previously used nonce and thus halts during validation. +//! - `load_test(|_realistic|_heavy)` execute the load test contract (a mixture of storage reads, writes, emitting events, +//! recursive calls, hashing and deploying new contracts). These 3 categories differ in how many operations of each kind +//! are performed in each transaction. Beware that the first executed transaction is load test contract deployment, +//! which skews results for small-size batches. 
-use criterion::black_box; +use std::{iter, time::Duration}; + +use criterion::{ + black_box, criterion_group, criterion_main, measurement::WallTime, BatchSize, BenchmarkGroup, + BenchmarkId, Criterion, Throughput, +}; +use rand::{rngs::StdRng, Rng, SeedableRng}; +use zksync_types::Transaction; use zksync_vm_benchmark_harness::{ - cut_to_allowed_bytecode_size, get_deploy_tx_with_gas_limit, BenchmarkingVm, + cut_to_allowed_bytecode_size, get_deploy_tx_with_gas_limit, get_heavy_load_test_tx, + get_load_test_deploy_tx, get_load_test_tx, get_realistic_load_test_tx, get_transfer_tx, + BenchmarkingVm, BenchmarkingVmFactory, Fast, Legacy, LoadTestParams, }; -fn main() { - let test_contract = - std::fs::read("deployment_benchmarks/event_spam").expect("failed to read file"); +/// Gas limit for deployment transactions. +const DEPLOY_GAS_LIMIT: u32 = 30_000_000; +/// Tested numbers of transactions in a batch. +const TXS_IN_BATCH: &[usize] = &[1, 10, 50, 100, 200, 500, 1_000, 2_000, 5_000]; + +/// RNG seed used e.g. to randomize failing transactions. +const RNG_SEED: u64 = 123; +/// Probability for a transaction to fail in the `transfer_with_invalid_nonce` benchmarks. 
+const TX_FAILURE_PROBABILITY: f64 = 0.2; + +fn bench_vm( + vm: &mut BenchmarkingVm, + txs: &[Transaction], + expected_failures: &[bool], +) { + for (i, tx) in txs.iter().enumerate() { + let result = if FULL { + vm.run_transaction_full(black_box(tx)) + } else { + vm.run_transaction(black_box(tx)) + }; + let result = &result.result; + let expecting_failure = expected_failures.get(i).copied().unwrap_or(false); + assert_eq!( + result.is_failed(), + expecting_failure, + "{result:?} on tx #{i}" + ); + black_box(result); + } +} + +fn run_vm_expecting_failures( + group: &mut BenchmarkGroup<'_, WallTime>, + name: &str, + txs: &[Transaction], + expected_failures: &[bool], +) { + for txs_in_batch in TXS_IN_BATCH { + if *txs_in_batch > txs.len() { + break; + } + + group.throughput(Throughput::Elements(*txs_in_batch as u64)); + group.bench_with_input( + BenchmarkId::new(name, txs_in_batch), + txs_in_batch, + |bencher, &txs_in_batch| { + if FULL { + // Include VM initialization / drop into the measured time + bencher.iter(|| { + let mut vm = BenchmarkingVm::::default(); + bench_vm::<_, true>(&mut vm, &txs[..txs_in_batch], expected_failures); + }); + } else { + bencher.iter_batched( + BenchmarkingVm::::default, + |mut vm| { + bench_vm::<_, false>(&mut vm, &txs[..txs_in_batch], expected_failures); + vm + }, + BatchSize::LargeInput, // VM can consume significant amount of RAM, especially the new one + ); + } + }, + ); + } +} +fn run_vm( + group: &mut BenchmarkGroup<'_, WallTime>, + name: &str, + txs: &[Transaction], +) { + run_vm_expecting_failures::(group, name, txs, &[]); +} + +fn bench_fill_bootloader(c: &mut Criterion) { + let is_test_mode = !std::env::args().any(|arg| arg == "--bench"); + let txs_in_batch = if is_test_mode { + &TXS_IN_BATCH[..3] // Reduce the number of transactions in a batch so that tests don't take long + } else { + TXS_IN_BATCH + }; + + let mut group = c.benchmark_group(if FULL { + format!("fill_bootloader_full{}", VM::LABEL.as_suffix()) + } else { + 
format!("fill_bootloader{}", VM::LABEL.as_suffix()) + }); + group + .sample_size(10) + .measurement_time(Duration::from_secs(10)); + + // Deploying simple contract + let test_contract = + std::fs::read("deployment_benchmarks/deploy_simple_contract").expect("failed to read file"); let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); - let tx = get_deploy_tx_with_gas_limit(code, 1000); + let max_txs = *txs_in_batch.last().unwrap() as u32; + let txs: Vec<_> = (0..max_txs) + .map(|nonce| get_deploy_tx_with_gas_limit(code, DEPLOY_GAS_LIMIT, nonce)) + .collect(); + run_vm::(&mut group, "deploy_simple_contract", &txs); + drop(txs); + + // Load test with various parameters + let txs = + (1..=max_txs).map(|nonce| get_load_test_tx(nonce, 10_000_000, LoadTestParams::default())); + let txs: Vec<_> = iter::once(get_load_test_deploy_tx()).chain(txs).collect(); + run_vm::(&mut group, "load_test", &txs); + drop(txs); - let start = Instant::now(); + let txs = (1..=max_txs).map(get_realistic_load_test_tx); + let txs: Vec<_> = iter::once(get_load_test_deploy_tx()).chain(txs).collect(); + run_vm::(&mut group, "load_test_realistic", &txs); + drop(txs); - let mut vm = BenchmarkingVm::new(); - for _ in 0..1000 { - vm.run_transaction(black_box(&tx)); + let txs = (1..=max_txs).map(get_heavy_load_test_tx); + let txs: Vec<_> = iter::once(get_load_test_deploy_tx()).chain(txs).collect(); + run_vm::(&mut group, "load_test_heavy", &txs); + drop(txs); + + // Base token transfers + let txs: Vec<_> = (0..max_txs).map(get_transfer_tx).collect(); + run_vm::(&mut group, "transfer", &txs); + + // Halted transactions produced by the following benchmarks *must* be rolled back, + // otherwise the bootloader will process following transactions incorrectly. 
+ if !FULL { + return; } - println!("{:?}", start.elapsed()); + let mut rng = StdRng::seed_from_u64(RNG_SEED); + + let mut txs_with_failures = Vec::with_capacity(txs.len()); + let mut expected_failures = Vec::with_capacity(txs.len()); + txs_with_failures.push(txs[0].clone()); + expected_failures.push(false); + let mut successful_txs = &txs[1..]; + for _ in 1..txs.len() { + let (tx, should_fail) = if rng.gen_bool(TX_FAILURE_PROBABILITY) { + // Since we add the transaction with nonce 0 unconditionally as the first tx to execute, + // all transactions generated here should halt during validation. + (get_transfer_tx(0), true) + } else { + let (tx, remaining_txs) = successful_txs.split_first().unwrap(); + successful_txs = remaining_txs; + (tx.clone(), false) + }; + txs_with_failures.push(tx); + expected_failures.push(should_fail); + } + run_vm_expecting_failures::( + &mut group, + "transfer_with_invalid_nonce", + &txs_with_failures, + &expected_failures, + ); } + +criterion_group!( + benches, + bench_fill_bootloader::, + bench_fill_bootloader::, + bench_fill_bootloader:: +); +criterion_main!(benches); diff --git a/core/tests/vm-benchmark/benches/iai.rs b/core/tests/vm-benchmark/benches/iai.rs index f0ba43f2685..2837a2345a5 100644 --- a/core/tests/vm-benchmark/benches/iai.rs +++ b/core/tests/vm-benchmark/benches/iai.rs @@ -1,33 +1,40 @@ use iai::black_box; -use zksync_vm_benchmark_harness::{cut_to_allowed_bytecode_size, get_deploy_tx, BenchmarkingVm}; +use zksync_vm_benchmark_harness::{ + cut_to_allowed_bytecode_size, get_deploy_tx, BenchmarkingVm, BenchmarkingVmFactory, Fast, + Legacy, +}; -fn run_bytecode(path: &str) { +fn run_bytecode(path: &str) { let test_contract = std::fs::read(path).expect("failed to read file"); let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); let tx = get_deploy_tx(code); - black_box(BenchmarkingVm::new().run_transaction(&tx)); + black_box(BenchmarkingVm::::default().run_transaction(&tx)); } macro_rules! 
make_functions_and_main { - ($($file:ident,)+) => { + ($($file:ident => $legacy_name:ident,)+) => { $( - fn $file() { - run_bytecode(concat!("deployment_benchmarks/", stringify!($file))) - } + fn $file() { + run_bytecode::(concat!("deployment_benchmarks/", stringify!($file))); + } + + fn $legacy_name() { + run_bytecode::(concat!("deployment_benchmarks/", stringify!($file))); + } )+ - iai::main!($($file,)+); + iai::main!($($file, $legacy_name,)+); }; } make_functions_and_main!( - access_memory, - call_far, - decode_shl_sub, - deploy_simple_contract, - finish_eventful_frames, - write_and_decode, - event_spam, - slot_hash_collision, + access_memory => access_memory_legacy, + call_far => call_far_legacy, + decode_shl_sub => decode_shl_sub_legacy, + deploy_simple_contract => deploy_simple_contract_legacy, + finish_eventful_frames => finish_eventful_frames_legacy, + write_and_decode => write_and_decode_legacy, + event_spam => event_spam_legacy, + slot_hash_collision => slot_hash_collision_legacy, ); diff --git a/core/tests/vm-benchmark/harness/Cargo.toml b/core/tests/vm-benchmark/harness/Cargo.toml index acd5f37cbc7..a24d3fa1294 100644 --- a/core/tests/vm-benchmark/harness/Cargo.toml +++ b/core/tests/vm-benchmark/harness/Cargo.toml @@ -14,3 +14,6 @@ zksync_system_constants.workspace = true zksync_contracts.workspace = true zk_evm.workspace = true once_cell.workspace = true + +[dev-dependencies] +assert_matches.workspace = true diff --git a/core/tests/vm-benchmark/harness/src/instruction_counter.rs b/core/tests/vm-benchmark/harness/src/instruction_counter.rs index 28e6d151965..48b1e3527ad 100644 --- a/core/tests/vm-benchmark/harness/src/instruction_counter.rs +++ b/core/tests/vm-benchmark/harness/src/instruction_counter.rs @@ -13,7 +13,7 @@ pub struct InstructionCounter { /// A tracer that counts the number of instructions executed by the VM. 
impl InstructionCounter { - #[allow(dead_code)] // FIXME + #[allow(dead_code)] // FIXME: re-enable instruction counting once new tracers are merged pub fn new(output: Rc>) -> Self { Self { count: 0, output } } diff --git a/core/tests/vm-benchmark/harness/src/lib.rs b/core/tests/vm-benchmark/harness/src/lib.rs index f206728d40b..6460d25a8e8 100644 --- a/core/tests/vm-benchmark/harness/src/lib.rs +++ b/core/tests/vm-benchmark/harness/src/lib.rs @@ -1,15 +1,17 @@ use std::{cell::RefCell, rc::Rc}; use once_cell::sync::Lazy; -use zksync_contracts::{deployer_contract, BaseSystemContracts}; +pub use zksync_contracts::test_contracts::LoadnextContractExecutionParams as LoadTestParams; +use zksync_contracts::{deployer_contract, BaseSystemContracts, TestContract}; use zksync_multivm::{ interface::{ - storage::InMemoryStorage, L2BlockEnv, TxExecutionMode, VmExecutionMode, - VmExecutionResultAndLogs, VmInterface, + storage::{InMemoryStorage, StorageView}, + ExecutionResult, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, }, utils::get_max_gas_per_pubdata_byte, - vm_fast::Vm, - vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, + vm_fast, vm_latest, + vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, HistoryEnabled}, }; use zksync_types::{ block::L2BlockHasher, @@ -18,7 +20,7 @@ use zksync_types::{ fee_model::BatchFeeInput, helpers::unix_timestamp_ms, l2::L2Tx, - utils::storage_key_for_eth_balance, + utils::{deployed_address_create, storage_key_for_eth_balance}, Address, K256PrivateKey, L1BatchNumber, L2BlockNumber, L2ChainId, Nonce, ProtocolVersionId, Transaction, CONTRACT_DEPLOYER_ADDRESS, H256, U256, }; @@ -40,18 +42,24 @@ pub fn cut_to_allowed_bytecode_size(bytes: &[u8]) -> Option<&[u8]> { Some(&bytes[..32 * words]) } +const LOAD_TEST_MAX_READS: usize = 100; + +static LOAD_TEST_CONTRACT_ADDRESS: Lazy
= + Lazy::new(|| deployed_address_create(PRIVATE_KEY.address(), 0.into())); + static STORAGE: Lazy = Lazy::new(|| { let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); - // Give `PRIVATE_KEY` some money + let balance = U256::from(10u32).pow(U256::from(32)); //10^32 wei let key = storage_key_for_eth_balance(&PRIVATE_KEY.address()); - storage.set_value(key, zksync_utils::u256_to_h256(U256([0, 0, 1, 0]))); - + storage.set_value(key, zksync_utils::u256_to_h256(balance)); storage }); static SYSTEM_CONTRACTS: Lazy = Lazy::new(BaseSystemContracts::load_from_disk); +static LOAD_TEST_CONTRACT: Lazy = Lazy::new(zksync_contracts::get_loadnext_contract); + static CREATE_FUNCTION_SIGNATURE: Lazy<[u8; 4]> = Lazy::new(|| { deployer_contract() .function("create") @@ -62,15 +70,92 @@ static CREATE_FUNCTION_SIGNATURE: Lazy<[u8; 4]> = Lazy::new(|| { static PRIVATE_KEY: Lazy = Lazy::new(|| K256PrivateKey::from_bytes(H256([42; 32])).expect("invalid key bytes")); -pub struct BenchmarkingVm(Vm<&'static InMemoryStorage>); +/// VM label used to name `criterion` benchmarks. +#[derive(Debug, Clone, Copy)] +pub enum VmLabel { + Fast, + Legacy, +} -impl BenchmarkingVm { - #[allow(clippy::new_without_default)] - pub fn new() -> Self { - let timestamp = unix_timestamp_ms(); +impl VmLabel { + /// Non-empty name for `criterion` benchmark naming. + pub const fn as_str(self) -> &'static str { + match self { + Self::Fast => "fast", + Self::Legacy => "legacy", + } + } + + /// Optional prefix for `criterion` benchmark naming (including a starting `/`). + pub const fn as_suffix(self) -> &'static str { + match self { + Self::Fast => "", + Self::Legacy => "/legacy", + } + } +} + +/// Factory for VMs used in benchmarking. +pub trait BenchmarkingVmFactory { + /// VM label used to name `criterion` benchmarks. + const LABEL: VmLabel; + + /// Type of the VM instance created by this factory. + type Instance: VmInterfaceHistoryEnabled; + + /// Creates a VM instance. 
+ fn create( + batch_env: L1BatchEnv, + system_env: SystemEnv, + storage: &'static InMemoryStorage, + ) -> Self::Instance; +} - Self(Vm::new( - zksync_multivm::interface::L1BatchEnv { +/// Factory for the new / fast VM. +#[derive(Debug)] +pub struct Fast(()); + +impl BenchmarkingVmFactory for Fast { + const LABEL: VmLabel = VmLabel::Fast; + + type Instance = vm_fast::Vm<&'static InMemoryStorage>; + + fn create( + batch_env: L1BatchEnv, + system_env: SystemEnv, + storage: &'static InMemoryStorage, + ) -> Self::Instance { + vm_fast::Vm::new(batch_env, system_env, storage) + } +} + +/// Factory for the legacy VM (latest version). +#[derive(Debug)] +pub struct Legacy; + +impl BenchmarkingVmFactory for Legacy { + const LABEL: VmLabel = VmLabel::Legacy; + + type Instance = vm_latest::Vm, HistoryEnabled>; + + fn create( + batch_env: L1BatchEnv, + system_env: SystemEnv, + storage: &'static InMemoryStorage, + ) -> Self::Instance { + let storage = StorageView::new(storage).to_rc_ptr(); + vm_latest::Vm::new(batch_env, system_env, storage) + } +} + +#[derive(Debug)] +pub struct BenchmarkingVm(VM::Instance); + +impl Default for BenchmarkingVm { + fn default() -> Self { + let timestamp = unix_timestamp_ms(); + Self(VM::create( + L1BatchEnv { previous_batch_hash: None, number: L1BatchNumber(1), timestamp, @@ -87,7 +172,7 @@ impl BenchmarkingVm { max_virtual_blocks_to_create: 100, }, }, - zksync_multivm::interface::SystemEnv { + SystemEnv { zk_porter_available: false, version: ProtocolVersionId::latest(), base_system_smart_contracts: SYSTEM_CONTRACTS.clone(), @@ -96,33 +181,63 @@ impl BenchmarkingVm { default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, chain_id: L2ChainId::from(270), }, - &*STORAGE, + &STORAGE, )) } +} +impl BenchmarkingVm { pub fn run_transaction(&mut self, tx: &Transaction) -> VmExecutionResultAndLogs { self.0.push_transaction(tx.clone()); self.0.execute(VmExecutionMode::OneTx) } + pub fn run_transaction_full(&mut self, tx: &Transaction) 
-> VmExecutionResultAndLogs { + self.0.make_snapshot(); + let (compression_result, tx_result) = self.0.inspect_transaction_with_bytecode_compression( + Default::default(), + tx.clone(), + true, + ); + compression_result.expect("compressing bytecodes failed"); + + if matches!(tx_result.result, ExecutionResult::Halt { .. }) { + self.0.rollback_to_the_latest_snapshot(); + } else { + self.0.pop_snapshot_no_rollback(); + } + tx_result + } + pub fn instruction_count(&mut self, tx: &Transaction) -> usize { self.0.push_transaction(tx.clone()); - let count = Rc::new(RefCell::new(0)); + self.0.inspect(Default::default(), VmExecutionMode::OneTx); // FIXME: re-enable instruction counting once new tracers are merged + count.take() + } +} - self.0.inspect((), VmExecutionMode::OneTx); +impl BenchmarkingVm { + pub fn new() -> Self { + Self::default() + } +} - count.take() +impl BenchmarkingVm { + pub fn legacy() -> Self { + Self::default() } } pub fn get_deploy_tx(code: &[u8]) -> Transaction { - get_deploy_tx_with_gas_limit(code, 30_000_000) + get_deploy_tx_with_gas_limit(code, 30_000_000, 0) } -pub fn get_deploy_tx_with_gas_limit(code: &[u8], gas_limit: u32) -> Transaction { +pub fn get_deploy_tx_with_gas_limit(code: &[u8], gas_limit: u32, nonce: u32) -> Transaction { + let mut salt = vec![0_u8; 32]; + salt[28..32].copy_from_slice(&nonce.to_be_bytes()); let params = [ - Token::FixedBytes(vec![0u8; 32]), + Token::FixedBytes(salt), Token::FixedBytes(hash_bytecode(code).0.to_vec()), Token::Bytes([].to_vec()), ]; @@ -135,15 +250,8 @@ pub fn get_deploy_tx_with_gas_limit(code: &[u8], gas_limit: u32) -> Transaction let mut signed = L2Tx::new_signed( CONTRACT_DEPLOYER_ADDRESS, calldata, - Nonce(0), - Fee { - gas_limit: U256::from(gas_limit), - max_fee_per_gas: U256::from(250_000_000), - max_priority_fee_per_gas: U256::from(0), - gas_per_pubdata_limit: U256::from(get_max_gas_per_pubdata_byte( - ProtocolVersionId::latest().into(), - )), - }, + Nonce(nonce), + tx_fee(gas_limit), 
U256::zero(), L2ChainId::from(270), &PRIVATE_KEY, @@ -153,13 +261,144 @@ pub fn get_deploy_tx_with_gas_limit(code: &[u8], gas_limit: u32) -> Transaction .expect("should create a signed execute transaction"); signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); + signed.into() +} + +fn tx_fee(gas_limit: u32) -> Fee { + Fee { + gas_limit: U256::from(gas_limit), + max_fee_per_gas: U256::from(250_000_000), + max_priority_fee_per_gas: U256::from(0), + gas_per_pubdata_limit: U256::from(get_max_gas_per_pubdata_byte( + ProtocolVersionId::latest().into(), + )), + } +} +pub fn get_transfer_tx(nonce: u32) -> Transaction { + let mut signed = L2Tx::new_signed( + PRIVATE_KEY.address(), + vec![], // calldata + Nonce(nonce), + tx_fee(1_000_000), + 1_000_000_000.into(), // value + L2ChainId::from(270), + &PRIVATE_KEY, + vec![], // factory deps + Default::default(), // paymaster params + ) + .expect("should create a signed execute transaction"); + + signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); signed.into() } +pub fn get_load_test_deploy_tx() -> Transaction { + let calldata = [Token::Uint(LOAD_TEST_MAX_READS.into())]; + let params = [ + Token::FixedBytes(vec![0_u8; 32]), + Token::FixedBytes(hash_bytecode(&LOAD_TEST_CONTRACT.bytecode).0.to_vec()), + Token::Bytes(encode(&calldata)), + ]; + let create_calldata = CREATE_FUNCTION_SIGNATURE + .iter() + .cloned() + .chain(encode(¶ms)) + .collect(); + + let mut factory_deps = LOAD_TEST_CONTRACT.factory_deps.clone(); + factory_deps.push(LOAD_TEST_CONTRACT.bytecode.clone()); + + let mut signed = L2Tx::new_signed( + CONTRACT_DEPLOYER_ADDRESS, + create_calldata, + Nonce(0), + tx_fee(100_000_000), + U256::zero(), + L2ChainId::from(270), + &PRIVATE_KEY, + factory_deps, + Default::default(), + ) + .expect("should create a signed execute transaction"); + + signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); + signed.into() +} + +pub fn get_load_test_tx(nonce: u32, gas_limit: u32, params: 
LoadTestParams) -> Transaction { + assert!( + params.reads <= LOAD_TEST_MAX_READS, + "Too many reads: {params:?}, should be <={LOAD_TEST_MAX_READS}" + ); + + let execute_function = LOAD_TEST_CONTRACT + .contract + .function("execute") + .expect("no `execute` function in load test contract"); + let calldata = execute_function + .encode_input(&vec![ + Token::Uint(U256::from(params.reads)), + Token::Uint(U256::from(params.writes)), + Token::Uint(U256::from(params.hashes)), + Token::Uint(U256::from(params.events)), + Token::Uint(U256::from(params.recursive_calls)), + Token::Uint(U256::from(params.deploys)), + ]) + .expect("cannot encode `execute` inputs"); + + let mut signed = L2Tx::new_signed( + *LOAD_TEST_CONTRACT_ADDRESS, + calldata, + Nonce(nonce), + tx_fee(gas_limit), + U256::zero(), + L2ChainId::from(270), + &PRIVATE_KEY, + LOAD_TEST_CONTRACT.factory_deps.clone(), + Default::default(), + ) + .expect("should create a signed execute transaction"); + + signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); + signed.into() +} + +pub fn get_realistic_load_test_tx(nonce: u32) -> Transaction { + get_load_test_tx( + nonce, + 10_000_000, + LoadTestParams { + reads: 30, + writes: 2, + events: 5, + hashes: 10, + recursive_calls: 0, + deploys: 0, + }, + ) +} + +pub fn get_heavy_load_test_tx(nonce: u32) -> Transaction { + get_load_test_tx( + nonce, + 10_000_000, + LoadTestParams { + reads: 100, + writes: 5, + events: 20, + hashes: 100, + recursive_calls: 20, + deploys: 5, + }, + ) +} + #[cfg(test)] mod tests { + use assert_matches::assert_matches; use zksync_contracts::read_bytecode; + use zksync_multivm::interface::ExecutionResult; use crate::*; @@ -171,9 +410,44 @@ mod tests { let mut vm = BenchmarkingVm::new(); let res = vm.run_transaction(&get_deploy_tx(&test_contract)); - assert!(matches!( - res.result, - zksync_multivm::interface::ExecutionResult::Success { .. } - )); + assert_matches!(res.result, ExecutionResult::Success { .. 
}); + } + + #[test] + fn can_transfer() { + let mut vm = BenchmarkingVm::new(); + let res = vm.run_transaction(&get_transfer_tx(0)); + assert_matches!(res.result, ExecutionResult::Success { .. }); + } + + #[test] + fn can_load_test() { + let mut vm = BenchmarkingVm::new(); + let res = vm.run_transaction(&get_load_test_deploy_tx()); + assert_matches!(res.result, ExecutionResult::Success { .. }); + + let params = LoadTestParams::default(); + let res = vm.run_transaction(&get_load_test_tx(1, 10_000_000, params)); + assert_matches!(res.result, ExecutionResult::Success { .. }); + } + + #[test] + fn can_load_test_with_realistic_txs() { + let mut vm = BenchmarkingVm::new(); + let res = vm.run_transaction(&get_load_test_deploy_tx()); + assert_matches!(res.result, ExecutionResult::Success { .. }); + + let res = vm.run_transaction(&get_realistic_load_test_tx(1)); + assert_matches!(res.result, ExecutionResult::Success { .. }); + } + + #[test] + fn can_load_test_with_heavy_txs() { + let mut vm = BenchmarkingVm::new(); + let res = vm.run_transaction(&get_load_test_deploy_tx()); + assert_matches!(res.result, ExecutionResult::Success { .. }); + + let res = vm.run_transaction(&get_heavy_load_test_tx(1)); + assert_matches!(res.result, ExecutionResult::Success { .. 
}); } } diff --git a/docs/guides/external-node/building-from-scratch/Dockerfile b/docs/guides/external-node/building-from-scratch/Dockerfile index e0aa07cfa7c..da098df91d5 100644 --- a/docs/guides/external-node/building-from-scratch/Dockerfile +++ b/docs/guides/external-node/building-from-scratch/Dockerfile @@ -21,6 +21,8 @@ RUN cp target/release/zksync_external_node /usr/bin # build contracts RUN git submodule update --init --recursive RUN zk run yarn +RUN zk compiler all || true +RUN rm /root/.cache/hardhat-nodejs/compilers-v2/linux-amd64/solc-*.does.not.work || true RUN zk compiler all RUN zk contract build RUN zk f yarn run l2-contracts build diff --git a/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml b/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml index a3e823b260a..369ce50be0b 100644 --- a/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml +++ b/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml @@ -50,7 +50,7 @@ services: - POSTGRES_PASSWORD=notsecurepassword - PGPORT=5430 external-node: - image: "matterlabs/external-node:2.0-v24.6.0" + image: "matterlabs/external-node:2.0-v24.16.0" depends_on: postgres: condition: service_healthy diff --git a/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml b/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml index e7ebaafb3c4..1417c6cc360 100644 --- a/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml +++ b/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml @@ -50,7 +50,7 @@ services: - POSTGRES_PASSWORD=notsecurepassword - PGPORT=5430 external-node: - image: "matterlabs/external-node:2.0-v24.6.0" + image: "matterlabs/external-node:2.0-v24.16.0" depends_on: postgres: condition: service_healthy diff 
--git a/docs/guides/external-node/prepared_configs/mainnet_consensus_config.yaml b/docs/guides/external-node/prepared_configs/mainnet_consensus_config.yaml index 6d61ef3963e..be37aaf2932 100644 --- a/docs/guides/external-node/prepared_configs/mainnet_consensus_config.yaml +++ b/docs/guides/external-node/prepared_configs/mainnet_consensus_config.yaml @@ -8,3 +8,7 @@ gossip_static_outbound: addr: 'external-node-consensus-mainnet.zksync.dev:3054' - key: 'node:public:ed25519:b521e1bb173d04bc83d46b859d1296378e94a40427a6beb9e7fdd17cbd934c11' addr: 'external-node-moby-consensus-mainnet.zksync.dev:3054' + - key: 'node:public:ed25519:45d23515008b5121484eb774507df63ff4ce9f4b65e6a03b7c9ec4e0474d3044' + addr: 'consensus-mainnet-1.zksync-nodes.com:3054' + - key: 'node:public:ed25519:c278bb0831e8d0dcd3aaf0b7af7c3dca048d50b28c578ceecce61a412986b883' + addr: 'consensus-mainnet-2.zksync-nodes.com:3054' diff --git a/docs/guides/external-node/prepared_configs/testnet_consensus_config.yaml b/docs/guides/external-node/prepared_configs/testnet_consensus_config.yaml index 25461b5dfc4..8d2551c0708 100644 --- a/docs/guides/external-node/prepared_configs/testnet_consensus_config.yaml +++ b/docs/guides/external-node/prepared_configs/testnet_consensus_config.yaml @@ -8,3 +8,7 @@ gossip_static_outbound: addr: 'external-node-consensus-sepolia.zksync.dev:3054' - key: 'node:public:ed25519:cfbbebc74127099680584f07a051a2573e2dd7463abdd000d31aaa44a7985045' addr: 'external-node-moby-consensus-sepolia.zksync.dev:3054' + - key: 'node:public:ed25519:f48616db5965ada49dcbd51b1de11068a27c9886c900d3522607f16dff2e66fc' + addr: 'consensus-sepolia-1.zksync-nodes.com:3054' + - key: 'node:public:ed25519:3789d49293792755a9c1c2a7ed9b0e210e92994606dcf76388b5635d7ed676cb' + addr: 'consensus-sepolia-2.zksync-nodes.com:3054' diff --git a/etc/nix/container-tee_prover.nix b/etc/nix/container-tee_prover.nix index ab2b12c48db..303c91b137c 100644 --- a/etc/nix/container-tee_prover.nix +++ b/etc/nix/container-tee_prover.nix 
@@ -41,7 +41,7 @@ nixsgxLib.mkSGXContainer { sgx = { edmm_enable = false; - enclave_size = "32G"; + enclave_size = "8G"; max_threads = 128; }; }; diff --git a/prover/Cargo.lock b/prover/Cargo.lock index bb9ecfe2707..453cc96cd8b 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -7828,6 +7828,7 @@ dependencies = [ "zksync_system_constants", "zksync_types", "zksync_utils", + "zksync_vm_interface", ] [[package]] @@ -8344,7 +8345,6 @@ dependencies = [ "zksync_contracts", "zksync_system_constants", "zksync_types", - "zksync_utils", ] [[package]] diff --git a/zk_toolbox/crates/common/src/ethereum.rs b/zk_toolbox/crates/common/src/ethereum.rs index e0141e38b09..93393f8a59c 100644 --- a/zk_toolbox/crates/common/src/ethereum.rs +++ b/zk_toolbox/crates/common/src/ethereum.rs @@ -9,7 +9,7 @@ use ethers::{ types::{Address, TransactionRequest, H256}, }; -use crate::wallets::Wallet; +use crate::{logger, wallets::Wallet}; pub fn create_ethers_client( private_key: H256, @@ -79,15 +79,26 @@ pub async fn mint_token( let contract = TokenContract::new(token_address, client); // contract for address in addresses { - contract - .mint(address, amount.into()) - .send() - .await? - // It's safe to set such low number of confirmations and low interval for localhost - .confirmations(1) - .interval(Duration::from_millis(30)) - .await?; + if let Err(err) = mint(&contract, address, amount).await { + logger::warn(format!("Failed to mint {err}")) + } } Ok(()) } + +async fn mint( + contract: &TokenContract, + address: Address, + amount: u128, +) -> anyhow::Result<()> { + contract + .mint(address, amount.into()) + .send() + .await? + // It's safe to set such low number of confirmations and low interval for localhost + .confirmations(1) + .interval(Duration::from_millis(30)) + .await?; + Ok(()) +}