From f3cb39e7010a3d4b7ae4ad23271ea7e0f7ec3fbc Mon Sep 17 00:00:00 2001
From: Rami Khalil
Date: Wed, 24 Jan 2024 17:25:27 +0200
Subject: [PATCH] build on derive

---
 guests/eth-block/src/main.rs           |   2 +-
 guests/op-block/src/main.rs            |   4 +-
 guests/op-compose/Cargo.lock           |  13 +-
 guests/op-compose/Cargo.toml           |   3 +-
 guests/op-derive/src/main.rs           |   2 +-
 host/src/lib.rs                        |  12 +-
 host/src/operations/chains.rs          |   3 +-
 host/src/operations/info.rs            |   7 +-
 host/src/operations/rollups.rs         | 145 +++++++------
 lib/src/builder/execute/optimism.rs    |  11 +-
 lib/src/host/mod.rs                    |  39 ++++
 lib/src/host/preflight.rs              |  47 +++--
 lib/src/host/provider/file_provider.rs |   2 +-
 lib/src/host/provider_db.rs            |   6 +-
 lib/src/host/verify.rs                 |   2 +-
 lib/src/input.rs                       |   2 +-
 lib/src/optimism/composition.rs        |   2 +-
 lib/src/optimism/mod.rs                | 272 ++++++++++++++++---
 primitives/src/transactions/mod.rs     |  16 +-
 primitives/src/trie/mpt.rs             |   9 +
 testing/ef-tests/src/lib.rs            |   4 +-
 testing/ef-tests/testguest/src/main.rs |   4 +-
 testing/ef-tests/tests/evm.rs          |   2 +-
 23 files changed, 383 insertions(+), 226 deletions(-)

diff --git a/guests/eth-block/src/main.rs b/guests/eth-block/src/main.rs
index 8df2b7a7..583fd1a5 100644
--- a/guests/eth-block/src/main.rs
+++ b/guests/eth-block/src/main.rs
@@ -30,7 +30,7 @@ pub fn main() {
     let mut output = EthereumStrategy::build_from(&ETH_MAINNET_CHAIN_SPEC, input)
         .expect("Failed to build the resulting block");
     // Abridge successful construction results
-    if let BlockBuildOutput::SUCCESS { new_block_hash, new_block_head, new_block_state } = &mut output {
+    if let BlockBuildOutput::SUCCESS { new_block_head, new_block_state, .. } = &mut output {
         let trie_root = core::mem::replace(new_block_state, new_block_head.state_root.into());
         // Leak memory, save cycles
         core::mem::forget(trie_root);
diff --git a/guests/op-block/src/main.rs b/guests/op-block/src/main.rs
index 623b2d22..00f5b479 100644
--- a/guests/op-block/src/main.rs
+++ b/guests/op-block/src/main.rs
@@ -27,10 +27,10 @@ pub fn main() {
     // Read the input previous block and transaction data
     let input = env::read();
     // Build the resulting block
-    let output = OptimismStrategy::build_from(&OP_MAINNET_CHAIN_SPEC, input)
+    let mut output = OptimismStrategy::build_from(&OP_MAINNET_CHAIN_SPEC, input)
         .expect("Failed to build the resulting block");
     // Abridge successful construction results
-    if let BlockBuildOutput::SUCCESS { new_block_hash, new_block_head, new_block_state } = &mut output {
+    if let BlockBuildOutput::SUCCESS { new_block_head, new_block_state, ..
} = &mut output { let trie_root = core::mem::replace(new_block_state, new_block_head.state_root.into()); // Leak memory, save cycles core::mem::forget(trie_root); diff --git a/guests/op-compose/Cargo.lock b/guests/op-compose/Cargo.lock index 2ce4a0b5..93ec6a29 100644 --- a/guests/op-compose/Cargo.lock +++ b/guests/op-compose/Cargo.lock @@ -1679,8 +1679,7 @@ dependencies = [ [[package]] name = "k256" version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" +source = "git+https://github.com/risc0/RustCrypto-elliptic-curves?tag=k256/v0.13.3-risczero.0#d4f457a04410397cbb652a67c168b6cd6e9757c4" dependencies = [ "cfg-if", "ecdsa", @@ -2272,8 +2271,9 @@ dependencies = [ [[package]] name = "radium" -version = "0.7.1" -source = "git+https://github.com/ferrilab/radium.git?rev=723bed5#723bed5abd75994ee4b7221b8b12c9f4e77ce408" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" [[package]] name = "rand" @@ -3929,8 +3929,3 @@ dependencies = [ "sha3", "thiserror", ] - -[[patch.unused]] -name = "k256" -version = "0.13.1" -source = "git+https://github.com/risc0/RustCrypto-elliptic-curves?tag=k256/v0.13.1-risc0#44b1fc2b317e76bb150636cf67d0fbdfcac39601" diff --git a/guests/op-compose/Cargo.toml b/guests/op-compose/Cargo.toml index e379e519..735f6c4d 100644 --- a/guests/op-compose/Cargo.toml +++ b/guests/op-compose/Cargo.toml @@ -11,10 +11,9 @@ zeth-lib = { path = "../../lib", default-features = false } zeth-primitives = { path = "../../primitives", default-features = false } [patch.crates-io] -radium = { git = 'https://github.com/ferrilab/radium.git', rev = "723bed5" } # use optimized risc0 circuit crypto-bigint = { git = "https://github.com/risc0/RustCrypto-crypto-bigint", tag = "v0.5.2-risc0" } -k256 = { git = "https://github.com/risc0/RustCrypto-elliptic-curves", tag = "k256/v0.13.1-risc0" } +k256 = { git = "https://github.com/risc0/RustCrypto-elliptic-curves", tag = "k256/v0.13.3-risczero.0" } sha2 = { git = "https://github.com/risc0/RustCrypto-hashes", tag = "sha2/v0.10.6-risc0" } [profile.release] diff --git a/guests/op-derive/src/main.rs b/guests/op-derive/src/main.rs index 4f7c34db..5cca38a9 100644 --- a/guests/op-derive/src/main.rs +++ b/guests/op-derive/src/main.rs @@ -23,7 +23,7 @@ risc0_zkvm::guest::entry!(main); pub fn main() { let derive_input: DeriveInput = env::read(); - let mut derive_machine = DeriveMachine::new(&OPTIMISM_CHAIN_SPEC, derive_input) + let mut derive_machine = DeriveMachine::new(&OPTIMISM_CHAIN_SPEC, derive_input, None) .expect("Could not create derive machine"); let output = derive_machine .derive() diff --git a/host/src/lib.rs b/host/src/lib.rs index 572239b2..f95bae93 100644 --- a/host/src/lib.rs +++ b/host/src/lib.rs @@ -12,23 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::{ - fs, - path::{Path, PathBuf}, -}; +use std::fs; use risc0_zkvm::Receipt; pub mod cli; pub mod operations; -pub fn cache_file_path(cache_path: &Path, network: &str, block_no: u64, ext: &str) -> PathBuf { - cache_path - .join(network) - .join(block_no.to_string()) - .with_extension(ext) -} - pub fn save_receipt(file_reference: &String, receipt: &Receipt, index: Option<&mut usize>) { let receipt_serialized = bincode::serialize(receipt).expect("Failed to serialize receipt!"); let path = if let Some(number) = index { diff --git a/host/src/operations/chains.rs b/host/src/operations/chains.rs index 239d3467..f8c25e3d 100644 --- a/host/src/operations/chains.rs +++ b/host/src/operations/chains.rs @@ -21,13 +21,12 @@ use serde::{Deserialize, Serialize}; use zeth_lib::{ builder::BlockBuilderStrategy, consts::ChainSpec, - host::{preflight::Preflight, verify::Verifier}, + host::{cache_file_path, preflight::Preflight, verify::Verifier}, input::BlockBuildInput, output::BlockBuildOutput, }; use crate::{ - cache_file_path, cli::Cli, operations::{execute, maybe_prove}, }; diff --git a/host/src/operations/info.rs b/host/src/operations/info.rs index 38b3cb03..e70717dd 100644 --- a/host/src/operations/info.rs +++ b/host/src/operations/info.rs @@ -16,11 +16,14 @@ use alloy_sol_types::SolInterface; use log::warn; use zeth_lib::{ consts::Network, - host::provider::{new_provider, BlockQuery}, + host::{ + cache_file_path, + provider::{new_provider, BlockQuery}, + }, optimism::OpSystemInfo, }; -use crate::{cache_file_path, cli::Cli}; +use crate::cli::Cli; pub async fn op_info(cli: Cli) -> anyhow::Result<()> { let core_args = cli.core_args().clone(); diff --git a/host/src/operations/rollups.rs b/host/src/operations/rollups.rs index 95fc37f3..26309039 100644 --- a/host/src/operations/rollups.rs +++ b/host/src/operations/rollups.rs @@ -18,10 +18,9 @@ use anyhow::Context; use log::info; use zeth_guests::*; use zeth_lib::{ - builder::OptimismStrategy, + builder::{BlockBuilderStrategy, OptimismStrategy}, consts::{Network, OP_MAINNET_CHAIN_SPEC}, - host::{preflight::Preflight, rpc_db::RpcDb}, - input::BlockBuildInput, + host::{rpc_db::RpcDb, ProviderFactory}, optimism::{ batcher_db::BatcherDb, composition::{ComposeInput, ComposeInputOperation, ComposeOutputOperation}, @@ -31,61 +30,22 @@ use zeth_lib::{ }; use zeth_primitives::{ block::Header, - transactions::optimism::OptimismTxEssence, tree::{MerkleMountainRange, MerkleProof}, }; use crate::{ - cache_file_path, - cli::{Cli, CoreArgs}, + cli::Cli, operations::{execute, maybe_prove}, }; -async fn fetch_op_blocks( - core_args: &CoreArgs, - block_number: u64, - block_count: u64, -) -> anyhow::Result>> { - let mut op_blocks = vec![]; - for i in 0..block_count { - let block_number = block_number + i; - let rpc_cache = core_args.cache.as_ref().map(|dir| { - cache_file_path(dir, &Network::Optimism.to_string(), block_number, "json.gz") - }); - let rpc_url = core_args.op_rpc_url.clone(); - // Collect block building data - let preflight_result = tokio::task::spawn_blocking(move || { - OptimismStrategy::run_preflight( - OP_MAINNET_CHAIN_SPEC.clone(), - rpc_cache, - rpc_url, - block_number, - ) - }) - .await? 
- .context("preflight failed")?; - - // Create the guest input from [Init] - let input = preflight_result - .clone() - .try_into() - .context("invalid preflight data")?; - - op_blocks.push(input); - } - - Ok(op_blocks) -} - pub async fn derive_rollup_blocks(cli: Cli, file_reference: &String) -> anyhow::Result<()> { info!("Fetching data ..."); let core_args = cli.core_args().clone(); - let op_blocks = fetch_op_blocks( - &core_args, - core_args.block_number + 1, - core_args.block_count, - ) - .await?; + let op_builder_provider_factory = ProviderFactory::new( + core_args.cache.clone(), + Network::Optimism, + core_args.op_rpc_url.clone(), + ); let (derive_input, output) = tokio::task::spawn_blocking(move || { let derive_input = DeriveInput { @@ -96,16 +56,31 @@ pub async fn derive_rollup_blocks(cli: Cli, file_reference: &String) -> anyhow:: ), op_head_block_no: core_args.block_number, op_derive_block_count: core_args.block_count, - op_blocks: op_blocks.clone(), + op_block_outputs: vec![], + builder_image_id: OP_BLOCK_ID, }; - let mut derive_machine = DeriveMachine::new(&OPTIMISM_CHAIN_SPEC, derive_input) - .context("Could not create derive machine")?; + let mut derive_machine = DeriveMachine::new( + &OPTIMISM_CHAIN_SPEC, + derive_input, + Some(op_builder_provider_factory), + ) + .context("Could not create derive machine")?; let derive_output = derive_machine.derive().context("could not derive")?; + let op_block_outputs = derive_output + .op_block_inputs + .clone() + .into_iter() + .map(|input| { + OptimismStrategy::build_from(&OP_MAINNET_CHAIN_SPEC, input) + .expect("Failed to build op block") + }) + .collect(); let derive_input_mem = DeriveInput { db: derive_machine.derive_input.db.get_mem_db(), op_head_block_no: core_args.block_number, op_derive_block_count: core_args.block_count, - op_blocks, + op_block_outputs, + builder_image_id: OP_BLOCK_ID, }; let out: anyhow::Result<_> = Ok((derive_input_mem, derive_output)); out @@ -115,10 +90,21 @@ pub async fn derive_rollup_blocks(cli: Cli, file_reference: &String) -> anyhow:: info!("Running from memory ..."); { - let output_mem = DeriveMachine::new(&OPTIMISM_CHAIN_SPEC, derive_input.clone()) - .context("Could not create derive machine")? - .derive() - .unwrap(); + // todo: run without factory (using outputs) + let core_args = cli.core_args().clone(); + let op_builder_provider_factory = ProviderFactory::new( + core_args.cache.clone(), + Network::Optimism, + core_args.op_rpc_url.clone(), + ); + let output_mem = DeriveMachine::new( + &OPTIMISM_CHAIN_SPEC, + derive_input.clone(), + Some(op_builder_provider_factory), + ) + .context("Could not create derive machine")? 
+ .derive() + .unwrap(); assert_eq!(output, output_mem); } @@ -280,18 +266,26 @@ pub async fn compose_derived_rollup_blocks( core_args.op_rpc_url.clone(), core_args.cache.clone(), ); - let op_head_block_no = core_args.block_number + op_block_index; - let op_blocks = fetch_op_blocks(&core_args, op_head_block_no + 1, composition_size).await?; + let op_builder_provider_factory = ProviderFactory::new( + core_args.cache.clone(), + Network::Optimism, + core_args.op_rpc_url.clone(), + ); let (input, output, chain) = tokio::task::spawn_blocking(move || { let derive_input = DeriveInput { db, op_head_block_no: core_args.block_number + op_block_index, op_derive_block_count: composition_size, - op_blocks: op_blocks.clone(), + op_block_outputs: vec![], + builder_image_id: OP_BLOCK_ID, }; - let mut derive_machine = DeriveMachine::new(&OPTIMISM_CHAIN_SPEC, derive_input) - .expect("Could not create derive machine"); + let mut derive_machine = DeriveMachine::new( + &OPTIMISM_CHAIN_SPEC, + derive_input, + Some(op_builder_provider_factory), + ) + .expect("Could not create derive machine"); let eth_head_no = derive_machine.op_batcher.state.epoch.number; let eth_head = derive_machine .derive_input @@ -322,11 +316,21 @@ pub async fn compose_derived_rollup_blocks( } eth_chain.push(eth_tail); + let op_block_outputs = derive_output + .op_block_inputs + .clone() + .into_iter() + .map(|input| { + OptimismStrategy::build_from(&OP_MAINNET_CHAIN_SPEC, input) + .expect("Failed to build op block") + }) + .collect(); let derive_input_mem = DeriveInput { db: derive_machine.derive_input.db.get_mem_db(), op_head_block_no: core_args.block_number + op_block_index, op_derive_block_count: composition_size, - op_blocks, + op_block_outputs, + builder_image_id: OP_BLOCK_ID, }; let out: anyhow::Result<_> = Ok((derive_input_mem, derive_output, eth_chain)); out @@ -335,10 +339,19 @@ pub async fn compose_derived_rollup_blocks( info!("Deriving ..."); { - let output_mem = DeriveMachine::new(&OPTIMISM_CHAIN_SPEC, input.clone()) - .expect("Could not create derive machine") - .derive() - .context("could not derive")?; + let op_builder_provider_factory = ProviderFactory::new( + core_args.cache.clone(), + Network::Optimism, + core_args.op_rpc_url.clone(), + ); + let output_mem = DeriveMachine::new( + &OPTIMISM_CHAIN_SPEC, + input.clone(), + Some(op_builder_provider_factory), + ) + .expect("Could not create derive machine") + .derive() + .context("could not derive")?; assert_eq!(output, output_mem); } diff --git a/lib/src/builder/execute/optimism.rs b/lib/src/builder/execute/optimism.rs index da6c31df..18ff546a 100644 --- a/lib/src/builder/execute/optimism.rs +++ b/lib/src/builder/execute/optimism.rs @@ -117,20 +117,19 @@ impl TxExecStrategy for OpTxExecStrategy { .into_iter() .enumerate() { - // verify the transaction signature - let tx_from = tx - .recover_from() - .with_context(|| format!("Error recovering address for transaction {}", tx_no))?; - #[cfg(not(target_os = "zkvm"))] { let tx_hash = tx.hash(); debug!("Tx no. 
{} (hash: {})", tx_no, tx_hash); debug!(" Type: {}", tx.essence.tx_type()); - debug!(" Fr: {:?}", tx_from); debug!(" To: {:?}", tx.essence.to().unwrap_or_default()); } + // verify the transaction signature + let tx_from = tx + .recover_from() + .with_context(|| format!("Error recovering address for transaction {}", tx_no))?; + // verify transaction gas let block_available_gas = block_builder.input.gas_limit - cumulative_gas_used; if block_available_gas < tx.essence.gas_limit() { diff --git a/lib/src/host/mod.rs b/lib/src/host/mod.rs index c75d65d3..5208be3f 100644 --- a/lib/src/host/mod.rs +++ b/lib/src/host/mod.rs @@ -12,9 +12,48 @@ // See the License for the specific language governing permissions and // limitations under the License. +use std::path::{Path, PathBuf}; + +use crate::{ + consts::Network, + host::provider::{new_provider, Provider}, +}; + pub mod mpt; pub mod preflight; pub mod provider; pub mod provider_db; pub mod rpc_db; pub mod verify; + +pub fn cache_file_path(cache_path: &Path, network: &str, block_no: u64, ext: &str) -> PathBuf { + cache_path + .join(network) + .join(block_no.to_string()) + .with_extension(ext) +} + +#[derive(Clone)] +pub struct ProviderFactory { + pub dir: Option, + pub network: Network, + pub rpc_url: Option, +} + +impl ProviderFactory { + pub fn new(dir: Option, network: Network, rpc_url: Option) -> Self { + Self { + dir, + network, + rpc_url, + } + } + + pub fn create_provider(&self, block_number: u64) -> anyhow::Result> { + let rpc_cache = self + .dir + .as_ref() + .map(|dir| cache_file_path(dir, &self.network.to_string(), block_number, "json.gz")); + new_provider(rpc_cache, self.rpc_url.clone()) + } +} diff --git a/lib/src/host/preflight.rs b/lib/src/host/preflight.rs index 0446f1d6..e8c75bab 100644 --- a/lib/src/host/preflight.rs +++ b/lib/src/host/preflight.rs @@ -44,7 +44,7 @@ use crate::{ /// The initial data required to build a block as returned by the [Preflight]. #[derive(Debug, Clone)] -pub struct Data { +pub struct BlockBuildPreflightData { pub db: MemDb, pub parent_header: Header, pub parent_proofs: HashMap, @@ -63,7 +63,13 @@ pub trait Preflight { cache_path: Option, rpc_url: Option, block_no: u64, - ) -> Result>; + ) -> Result>; + + fn preflight_input_with_provider_db( + chain_spec: ChainSpec, + provider_db: ProviderDb, + input: BlockBuildInput, + ) -> Result>; } /// Implements the [Preflight] trait for all compatible [BlockBuilderStrategy]s. 
@@ -77,7 +83,7 @@ where cache_path: Option, rpc_url: Option, block_no: u64, - ) -> Result> { + ) -> Result> { let mut provider = new_provider(cache_path, rpc_url)?; // Fetch the parent block @@ -107,10 +113,26 @@ where // Create the input data let input = new_preflight_input(block.clone(), parent_header.clone())?; + + // Create the block builder, run the transactions and extract the DB + Self::preflight_input_with_provider_db(chain_spec, provider_db, input).map( + move |mut headerless_preflight_data| { + headerless_preflight_data.header = block.try_into().expect("invalid block"); + headerless_preflight_data + }, + ) + } + + fn preflight_input_with_provider_db( + chain_spec: ChainSpec, + provider_db: ProviderDb, + input: BlockBuildInput, + ) -> Result> { + let parent_header = input.parent_header.clone(); let transactions = input.transactions.clone(); let withdrawals = input.withdrawals.clone(); - // Create the block builder, run the transactions and extract the DB + // todo: support erroneous preflight let mut builder = BlockBuilder::new(&chain_spec, input) .with_db(provider_db) .prepare_header::()? @@ -129,15 +151,16 @@ where info!("Saving provider cache ..."); // Save the provider cache - provider_db.get_provider().save()?; + provider_db.save_provider()?; info!("Provider-backed execution is Done!"); - Ok(Data { + // Fetch the target block + Ok(BlockBuildPreflightData { db: provider_db.get_initial_db().clone(), parent_header, parent_proofs, - header: block.try_into().context("invalid block")?, + header: Default::default(), transactions, withdrawals, proofs, @@ -177,6 +200,7 @@ where .collect::, _>>()?; let input = BlockBuildInput { + parent_header, beneficiary: from_ethers_h160(block.author.context("author missing")?), gas_limit: from_ethers_u256(block.gas_limit), timestamp: from_ethers_u256(block.timestamp), @@ -187,18 +211,17 @@ where parent_state_trie: Default::default(), parent_storage: Default::default(), contracts: Default::default(), - parent_header, ancestor_headers: Default::default(), }; Ok(input) } -/// Converts the [Data] returned by the [Preflight] into [BlockBuildInput] required by the -/// [BlockBuilder]. -impl TryFrom> for BlockBuildInput { +/// Converts the [BlockBuildPreflightData] returned by the [Preflight] into +/// [BlockBuildInput] required by the [BlockBuilder]. +impl TryFrom> for BlockBuildInput { type Error = anyhow::Error; - fn try_from(data: Data) -> Result> { + fn try_from(data: BlockBuildPreflightData) -> Result> { // collect the code from each account let mut contracts = HashSet::new(); for account in data.db.accounts.values() { diff --git a/lib/src/host/provider/file_provider.rs b/lib/src/host/provider/file_provider.rs index 285bd2db..f67bff5c 100644 --- a/lib/src/host/provider/file_provider.rs +++ b/lib/src/host/provider/file_provider.rs @@ -29,7 +29,7 @@ use serde_with::serde_as; use super::{AccountQuery, BlockQuery, MutProvider, ProofQuery, Provider, StorageQuery}; #[serde_as] -#[derive(Deserialize, Serialize)] +#[derive(Clone, Deserialize, Serialize)] pub struct FileProvider { #[serde(skip)] file_path: PathBuf, diff --git a/lib/src/host/provider_db.rs b/lib/src/host/provider_db.rs index 759547e8..6c487d87 100644 --- a/lib/src/host/provider_db.rs +++ b/lib/src/host/provider_db.rs @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+extern crate alloc; + use std::collections::BTreeSet; use ethers_core::types::{EIP1186ProofResponse, H160, H256}; @@ -48,8 +50,8 @@ impl ProviderDb { } } - pub fn get_provider(&self) -> &dyn Provider { - self.provider.as_ref() + pub fn save_provider(&self) -> anyhow::Result<()> { + self.provider.save() } pub fn get_initial_db(&self) -> &MemDb { diff --git a/lib/src/host/verify.rs b/lib/src/host/verify.rs index 0afac794..c8a76a4c 100644 --- a/lib/src/host/verify.rs +++ b/lib/src/host/verify.rs @@ -63,7 +63,7 @@ pub trait Verifier { } /// Verify using the preflight data. -impl Verifier for preflight::Data { +impl Verifier for preflight::BlockBuildPreflightData { fn verify_block(&self, header: &Header, state: &MptNode) -> Result<()> { let errors = verify_state_trie(state, &self.proofs).context("failed to verify state trie")?; diff --git a/lib/src/input.rs b/lib/src/input.rs index d2ce769c..f360e263 100644 --- a/lib/src/input.rs +++ b/lib/src/input.rs @@ -30,7 +30,7 @@ use zeth_primitives::{ pub type StorageEntry = (MptNode, Vec); /// External block input. -#[derive(Debug, Clone, Default, Deserialize, Serialize)] +#[derive(Debug, Clone, Default, Eq, PartialEq, Deserialize, Serialize)] pub struct BlockBuildInput { /// Previous block header pub parent_header: Header, diff --git a/lib/src/optimism/composition.rs b/lib/src/optimism/composition.rs index eb470c8e..757d4310 100644 --- a/lib/src/optimism/composition.rs +++ b/lib/src/optimism/composition.rs @@ -25,7 +25,7 @@ use zeth_primitives::{ use crate::optimism::DeriveOutput; /// Denotes a zkVM Image ID. -type ImageId = [u32; 8]; +pub type ImageId = [u32; 8]; #[derive(Debug, Clone, Deserialize, Serialize)] pub struct ComposeInput { diff --git a/lib/src/optimism/mod.rs b/lib/src/optimism/mod.rs index 21d741f8..fffcdb95 100644 --- a/lib/src/optimism/mod.rs +++ b/lib/src/optimism/mod.rs @@ -18,6 +18,8 @@ use alloy_sol_types::{sol, SolInterface}; use anyhow::{bail, ensure, Context, Result}; #[cfg(not(target_os = "zkvm"))] use log::info; +#[cfg(target_os = "zkvm")] +use risc0_zkvm::{guest::env, serde::to_vec, sha::Digest}; use serde::{Deserialize, Serialize}; use zeth_primitives::{ batch::Batch, @@ -33,14 +35,22 @@ use zeth_primitives::{ uint, Address, BlockHash, BlockNumber, FixedBytes, RlpBytes, B256, U256, }; +#[cfg(not(target_os = "zkvm"))] +use crate::{ + builder::{BlockBuilderStrategy, OptimismStrategy}, + consts::OP_MAINNET_CHAIN_SPEC, + host::{preflight::Preflight, provider_db::ProviderDb, ProviderFactory}, +}; use crate::{ consts::ONE, input::BlockBuildInput, optimism::{ batcher::{Batcher, BlockId, L2BlockInfo}, batcher_db::BatcherDb, + composition::ImageId, config::ChainConfig, }, + output::BlockBuildOutput, }; pub mod batcher; @@ -83,7 +93,9 @@ pub struct DeriveInput { /// Block count for the operation. pub op_derive_block_count: u64, /// Block building data for execution - pub op_blocks: Vec>, + pub op_block_outputs: Vec, + /// Image id of block builder guest + pub builder_image_id: ImageId, } /// Represents the output of the derivation process. @@ -95,8 +107,15 @@ pub struct DeriveOutput { pub op_head: (BlockNumber, BlockHash), /// Derived Optimism blocks. pub derived_op_blocks: Vec<(BlockNumber, BlockHash)>, + /// Dependent op block inputs + pub op_block_inputs: Vec>, + /// Image id of block builder guest + pub builder_image_id: ImageId, } +#[cfg(target_os = "zkvm")] +type ProviderFactory = (); + /// Implementation of the actual derivation process. pub struct DeriveMachine { /// Input for the derivation process. 
@@ -104,12 +123,19 @@ pub struct DeriveMachine { op_head_block_header: Header, op_block_seq_no: u64, pub op_batcher: Batcher, + pub provider_factory: Option, } impl DeriveMachine { /// Creates a new instance of DeriveMachine. - pub fn new(chain_config: &ChainConfig, mut derive_input: DeriveInput) -> Result { + pub fn new( + chain_config: &ChainConfig, + mut derive_input: DeriveInput, + provider_factory: Option, + ) -> Result { derive_input.db.validate()?; + #[cfg(not(target_os = "zkvm"))] + ensure!(provider_factory.is_some(), "Missing provider factory!"); // read system config from op_head (seq_no/epoch_no..etc) let op_head = derive_input @@ -186,6 +212,7 @@ impl DeriveMachine { op_head_block_header: op_head.block_header, op_block_seq_no, op_batcher, + provider_factory, }) } @@ -200,6 +227,14 @@ impl DeriveMachine { let mut derived_op_blocks = Vec::new(); let mut process_next_eth_block = false; + #[cfg(target_os = "zkvm")] + let mut op_block_output_iter = + core::mem::take(&mut self.derive_input.op_block_outputs).into_iter(); + #[cfg(not(target_os = "zkvm"))] + let mut op_block_inputs = vec![]; + #[cfg(target_os = "zkvm")] + let op_block_inputs = vec![]; + while self.op_head_block_header.number < target_block_no { #[cfg(not(target_os = "zkvm"))] info!( @@ -225,7 +260,6 @@ impl DeriveMachine { // Process batches while let Some(op_batch) = self.op_batcher.read_batch()? { // Process the batch - // self.op_block_no += 1; #[cfg(not(target_os = "zkvm"))] info!( @@ -286,8 +320,8 @@ impl DeriveMachine { tx_trie.insert(&trie_key, tx)?; } - let _new_op_head_input = BlockBuildInput { - parent_header: Default::default(), + let new_op_head_input = BlockBuildInput { + parent_header: self.op_head_block_header.clone(), beneficiary: self.op_batcher.config.sequencer_fee_vault, gas_limit: self.op_batcher.config.system_config.gas_limit, timestamp: U256::from(op_batch.0.timestamp), @@ -295,115 +329,151 @@ impl DeriveMachine { mix_hash: l1_epoch_header_mix_hash, transactions: derived_transactions, withdrawals: vec![], - // TODO + // initializing these fields is not needed here parent_state_trie: Default::default(), parent_storage: Default::default(), contracts: vec![], ancestor_headers: vec![], }; - // in guest: ask for receipt about this (without RLP decoding) - // on host: go run the preflight and queue up the input data (using RLP decoded + // host: go run the preflight and queue up the input data (using RLP decoded // transactions) - - // obtain verified op block header - let new_op_head = { - // load the next op block header - let new_op_head = self - .derive_input - .db - .get_op_block_header(self.op_head_block_header.number + 1) - .context("op block not found")?; - - // Verify that the op block header loaded from the DB matches the payload - // attributes of the batch. 
- ensure!( - new_op_head.parent_hash == self.op_batcher.state.safe_head.hash, - "Invalid op block parent hash" - ); - ensure!( - new_op_head.beneficiary == self.op_batcher.config.sequencer_fee_vault, - "Invalid op block beneficiary" - ); - ensure!( - new_op_head.gas_limit == self.op_batcher.config.system_config.gas_limit, - "Invalid op block gas limit" - ); - ensure!( - new_op_head.timestamp == U256::from(op_batch.0.timestamp), - "Invalid op block timestamp" - ); - ensure!( - new_op_head.extra_data.is_empty(), - "Invalid op block extra data" - ); - - // verify that the new op head mix hash matches the mix hash of the L1 block - ensure!( - new_op_head.mix_hash == l1_epoch_header_mix_hash, - "Invalid op block mix hash" - ); - - // verify that the new op head transactions match the batch transactions - ensure!( - tx_trie.hash() == new_op_head.transactions_root, - "Invalid op block transactions" - ); - - ensure!( - new_op_head.withdrawals_root.is_none(), - "Invalid op block withdrawals" + #[cfg(not(target_os = "zkvm"))] + let op_block_output = { + // Create the provider DB + let provider_db = ProviderDb::new( + self.provider_factory + .as_ref() + .unwrap() + .create_provider(self.op_head_block_header.number)?, + self.op_head_block_header.number, ); - - new_op_head + let preflight_data = OptimismStrategy::preflight_input_with_provider_db( + OP_MAINNET_CHAIN_SPEC.clone(), + provider_db, + new_op_head_input.clone(), + ) + .map(|mut headerless_preflight_data| { + headerless_preflight_data.header = Header { + beneficiary: new_op_head_input.beneficiary, + gas_limit: new_op_head_input.gas_limit, + timestamp: new_op_head_input.timestamp, + extra_data: new_op_head_input.extra_data.clone(), + mix_hash: new_op_head_input.mix_hash, + // unnecessary + parent_hash: Default::default(), + ommers_hash: Default::default(), + state_root: Default::default(), + transactions_root: Default::default(), + receipts_root: Default::default(), + logs_bloom: Default::default(), + difficulty: Default::default(), + number: 0, + gas_used: Default::default(), + nonce: Default::default(), + base_fee_per_gas: Default::default(), + withdrawals_root: None, + }; + headerless_preflight_data + })?; + + let executable_input: BlockBuildInput = + preflight_data.try_into()?; + op_block_inputs.push(executable_input.clone()); + + OptimismStrategy::build_from(&OP_MAINNET_CHAIN_SPEC, executable_input)? 
}; - - let new_op_head_hash = new_op_head.hash(); - - #[cfg(not(target_os = "zkvm"))] - info!( - "Derived Op block {} w/ hash {}", - new_op_head.number, new_op_head_hash - ); - - self.op_batcher.state.safe_head = L2BlockInfo { - hash: new_op_head_hash, - timestamp: new_op_head.timestamp.try_into().unwrap(), - l1_origin: BlockId { - number: self.op_batcher.state.epoch.number, - hash: self.op_batcher.state.epoch.hash, - }, + // guest: ask for receipt about provided block build output + #[cfg(target_os = "zkvm")] + let op_block_output = { + let output = op_block_output_iter.next().unwrap(); + // A valid receipt should be provided for block building results + let builder_journal = + to_vec(&output).expect("Failed to encode builder journal"); + env::verify( + Digest::from(self.derive_input.builder_image_id), + &builder_journal, + ) + .expect("Failed to validate block build output"); + output }; - derived_op_blocks.push((new_op_head.number, new_op_head_hash)); - self.op_head_block_header = new_op_head; - - if self.op_head_block_header.number == target_block_no { - break; - } + match op_block_output { + BlockBuildOutput::SUCCESS { + new_block_hash, + new_block_head, + .. + } => { + // Verify that the built op block matches the payload attributes of the + // batch. + ensure!( + new_block_head.parent_hash == self.op_batcher.state.safe_head.hash, + "Invalid op block parent hash" + ); + ensure!( + new_block_head.beneficiary + == self.op_batcher.config.sequencer_fee_vault, + "Invalid op block beneficiary" + ); + ensure!( + new_block_head.gas_limit + == self.op_batcher.config.system_config.gas_limit, + "Invalid op block gas limit" + ); + ensure!( + new_block_head.timestamp == U256::from(op_batch.0.timestamp), + "Invalid op block timestamp" + ); + ensure!( + new_block_head.extra_data.is_empty(), + "Invalid op block extra data" + ); + ensure!( + new_block_head.mix_hash == l1_epoch_header_mix_hash, + "Invalid op block mix hash" + ); + ensure!( + tx_trie.hash() == new_block_head.transactions_root, + "Invalid op block transactions" + ); + ensure!( + new_block_head.withdrawals_root.is_none(), + "Invalid op block withdrawals" + ); + + // obtain verified op block header + #[cfg(not(target_os = "zkvm"))] + info!( + "Derived Op block {} w/ hash {}", + new_block_head.number, new_block_hash + ); + + self.op_batcher.state.safe_head = L2BlockInfo { + hash: new_block_hash, + timestamp: new_block_head.timestamp.try_into().unwrap(), + l1_origin: BlockId { + number: self.op_batcher.state.epoch.number, + hash: self.op_batcher.state.epoch.hash, + }, + }; + + derived_op_blocks.push((new_block_head.number, new_block_hash)); + self.op_head_block_header = new_block_head; + + if self.op_head_block_header.number == target_block_no { + break; + } + } + BlockBuildOutput::FAILURE { bad_input_hash } => { + ensure!( + new_op_head_input.partial_hash() == bad_input_hash, + "Invalid input partial hash" + ); + } + }; } } - // // Execute transactions to verify valid state transitions - // let op_blocks = mem::take(&mut self.derive_input.op_blocks); - // if op_blocks.len() != derived_op_blocks.len() { - // bail!( - // "Mismatch between number of input op blocks {} and derived block count {}", - // op_blocks.len(), - // derived_op_blocks.len() - // ); - // } - // for (i, input) in op_blocks.into_iter().enumerate() { - // let (header, _) = OptimismStrategy::build_from(&OP_MAINNET_CHAIN_SPEC, input)?; - // if header.hash() != derived_op_blocks[i].1 { - // bail!( - // "Mismatch between built block {} and derived block {}.", - // 
header.number, - // &derived_op_blocks[i].0 - // ) - // } - // } - Ok(DeriveOutput { eth_tail: ( self.op_batcher.state.current_l1_block_number, @@ -414,6 +484,8 @@ impl DeriveMachine { self.op_head_block_header.hash(), ), derived_op_blocks, + op_block_inputs, + builder_image_id: self.derive_input.builder_image_id, }) } diff --git a/primitives/src/transactions/mod.rs b/primitives/src/transactions/mod.rs index 1038b711..7c2f3785 100644 --- a/primitives/src/transactions/mod.rs +++ b/primitives/src/transactions/mod.rs @@ -20,6 +20,8 @@ use self::{ optimism::{OptimismTxEssence, OPTIMISM_DEPOSITED_TX_TYPE}, signature::TxSignature, }; +#[cfg(not(target_os = "zkvm"))] +use crate::RlpBytes; use crate::{keccak::keccak, transactions::ethereum::EthereumTxEssence, U256}; pub mod ethereum; @@ -130,6 +132,10 @@ impl Encodable for Transaction { impl Decodable for Transaction { fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + // sanity check + #[cfg(not(target_os = "zkvm"))] + let buf_clone = Vec::from(*buf); + let essence = E::headerless_decode(buf)?; let signature = if let (Ok(v), Ok(r), Ok(s)) = (u64::decode(buf), U256::decode(buf), U256::decode(buf)) @@ -138,7 +144,15 @@ impl Decodable for Transaction { } else { TxSignature::default() }; - Ok(Self { essence, signature }) + let result = Self { essence, signature }; + + #[cfg(not(target_os = "zkvm"))] + { + let result_rlp = result.to_rlp(); + assert_eq!(result_rlp, buf_clone, "insanity!"); + } + + Ok(result) } } diff --git a/primitives/src/trie/mpt.rs b/primitives/src/trie/mpt.rs index d030e6f6..9d03832b 100644 --- a/primitives/src/trie/mpt.rs +++ b/primitives/src/trie/mpt.rs @@ -71,6 +71,15 @@ pub enum Error { LegacyRlp(#[from] DecoderError), } +impl From for MptNode { + fn from(value: B256) -> Self { + MptNode { + data: MptNodeData::Digest(value), + cached_reference: RefCell::new(None), + } + } +} + /// Represents the various types of data that can be stored within a node in the sparse /// Merkle Patricia Trie (MPT). /// diff --git a/testing/ef-tests/src/lib.rs b/testing/ef-tests/src/lib.rs index 3cb47a8e..49f85e50 100644 --- a/testing/ef-tests/src/lib.rs +++ b/testing/ef-tests/src/lib.rs @@ -22,7 +22,7 @@ use zeth_lib::{ builder::{BlockBuilder, BlockBuilderStrategy, EthereumStrategy}, consts::ChainSpec, host::{ - preflight::Data, + preflight::BlockBuildPreflightData, provider::{AccountQuery, BlockQuery, ProofQuery, Provider, StorageQuery}, provider_db::ProviderDb, }, @@ -359,7 +359,7 @@ pub fn create_input( let proofs = provider_db.get_latest_proofs().unwrap(); let ancestor_headers = provider_db.get_ancestor_headers().unwrap(); - let preflight_data = Data { + let preflight_data = BlockBuildPreflightData { db: provider_db.get_initial_db().clone(), parent_header, parent_proofs, diff --git a/testing/ef-tests/testguest/src/main.rs b/testing/ef-tests/testguest/src/main.rs index ac2552c4..0ac58032 100644 --- a/testing/ef-tests/testguest/src/main.rs +++ b/testing/ef-tests/testguest/src/main.rs @@ -29,10 +29,10 @@ pub fn main() { // Read the input previous block and transaction data let input = env::read(); // Build the resulting block - let output = EthereumStrategy::build_from(&chain_spec, input) + let mut output = EthereumStrategy::build_from(&chain_spec, input) .expect("Failed to build the resulting block"); // Abridge successful construction results - if let BlockBuildOutput::SUCCESS { new_block_hash, new_block_head, new_block_state } = &mut output { + if let BlockBuildOutput::SUCCESS { new_block_head, new_block_state, .. 
} = &mut output { let trie_root = core::mem::replace(new_block_state, new_block_head.state_root.into()); // Leak memory, save cycles core::mem::forget(trie_root); diff --git a/testing/ef-tests/tests/evm.rs b/testing/ef-tests/tests/evm.rs index d068aec7..7f2b066d 100644 --- a/testing/ef-tests/tests/evm.rs +++ b/testing/ef-tests/tests/evm.rs @@ -98,7 +98,7 @@ fn evm( diff::Result::Both(l, _) => println!(" {}", l), } } - assert_eq!(new_block_state, exp_state.hash()); + assert_eq!(new_block_state.hash(), exp_state.hash()); } // the headers should match
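For orientation, the host-side flow this patch sets up — construct a ProviderFactory, hand it to DeriveMachine, then run the derived block inputs through OptimismStrategy::build_from — can be summarized in the sketch below. This is an illustrative sketch only, mirroring host/src/operations/rollups.rs above; it is not code from the patch. The function name, generic bounds, and exact import paths are assumptions; the types, field names, and constructors (ProviderFactory::new, DeriveMachine::new, OptimismStrategy::build_from) are taken from this diff.

```rust
use std::path::PathBuf;

use anyhow::Context;
use zeth_lib::{
    builder::{BlockBuilderStrategy, OptimismStrategy},
    consts::{Network, OP_MAINNET_CHAIN_SPEC},
    host::ProviderFactory,
    optimism::{batcher_db::BatcherDb, config::ChainConfig, DeriveInput, DeriveMachine},
};

// Hypothetical helper showing the host-side derive-then-build flow.
fn derive_and_build<D: BatcherDb>(
    db: D,
    chain_config: &ChainConfig,
    op_head_block_no: u64,
    op_derive_block_count: u64,
    cache_dir: Option<PathBuf>,
    op_rpc_url: Option<String>,
    builder_image_id: [u32; 8], // e.g. OP_BLOCK_ID from zeth_guests
) -> anyhow::Result<()> {
    // On the host, the factory lets DeriveMachine run a block-building
    // preflight for every derived L2 block.
    let factory = ProviderFactory::new(cache_dir, Network::Optimism, op_rpc_url);

    let derive_input = DeriveInput {
        db,
        op_head_block_no,
        op_derive_block_count,
        // Precomputed outputs are only consumed inside the zkVM guest; empty on the host.
        op_block_outputs: vec![],
        builder_image_id,
    };

    let mut machine = DeriveMachine::new(chain_config, derive_input, Some(factory))
        .context("Could not create derive machine")?;
    let derive_output = machine.derive().context("could not derive")?;

    // Each collected block input is then executed with the builder strategy,
    // producing the BlockBuildOutputs that a guest run would verify instead.
    for input in derive_output.op_block_inputs {
        let _output = OptimismStrategy::build_from(&OP_MAINNET_CHAIN_SPEC, input)
            .context("failed to build op block")?;
    }
    Ok(())
}
```

Inside the zkVM guest the factory argument is None: the precomputed op_block_outputs are consumed instead, and each one is first checked with env::verify(Digest::from(builder_image_id), &builder_journal) before its header is matched against the batch's payload attributes, as in lib/src/optimism/mod.rs above.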