diff --git a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml index 46da775003..54a948e04a 100644 --- a/.github/workflows/arbitrator-ci.yml +++ b/.github/workflows/arbitrator-ci.yml @@ -96,6 +96,9 @@ jobs: make -j make install + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1 + - name: Cache cbrotli uses: actions/cache@v3 id: cache-cbrotli @@ -158,4 +161,5 @@ jobs: cd contracts yarn install yarn build + yarn build:forge:yul yarn hardhat --network localhost test test/prover/*.ts diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f2c4fac84c..b27c196a6f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -58,6 +58,9 @@ jobs: with: targets: 'wasm32-unknown-unknown, wasm32-wasi' + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1 + - name: Cache Build Products uses: actions/cache@v3 with: @@ -114,8 +117,7 @@ jobs: skip-pkg-cache: true - name: Custom Lint run: | - go run ./linter/koanf ./... - go run ./linter/pointercheck ./... + go run ./linters ./... - name: Set environment variables run: | diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 8fb9d80c21..8b7ebd0e15 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -75,6 +75,9 @@ jobs: - name: Install rust stable uses: dtolnay/rust-toolchain@stable + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1 + - name: Cache Rust Build Products uses: actions/cache@v3 with: diff --git a/Dockerfile b/Dockerfile index 9f8a5a75df..b23c7f0736 100644 --- a/Dockerfile +++ b/Dockerfile @@ -26,13 +26,14 @@ COPY --from=brotli-library-builder /workspace/install/ / FROM node:16-bookworm-slim as contracts-builder RUN apt-get update && \ - apt-get install -y git python3 make g++ + apt-get install -y git python3 make g++ curl +RUN curl -L https://foundry.paradigm.xyz | bash && . ~/.bashrc && ~/.foundry/bin/foundryup WORKDIR /workspace COPY contracts/package.json contracts/yarn.lock contracts/ RUN cd contracts && yarn install COPY contracts contracts/ COPY Makefile . -RUN NITRO_BUILD_IGNORE_TIMESTAMPS=1 make build-solidity +RUN . ~/.bashrc && NITRO_BUILD_IGNORE_TIMESTAMPS=1 make build-solidity FROM debian:bookworm-20231218 as wasm-base WORKDIR /workspace @@ -181,6 +182,7 @@ COPY fastcache/go.mod fastcache/go.sum fastcache/ RUN go mod download COPY . ./ COPY --from=contracts-builder workspace/contracts/build/ contracts/build/ +COPY --from=contracts-builder workspace/contracts/out/ contracts/out/ COPY --from=contracts-builder workspace/contracts/node_modules/@offchainlabs/upgrade-executor/build/contracts/src/UpgradeExecutor.sol/UpgradeExecutor.json contracts/node_modules/@offchainlabs/upgrade-executor/build/contracts/src/UpgradeExecutor.sol/ COPY --from=contracts-builder workspace/.make/ .make/ COPY --from=prover-header-export / target/ diff --git a/Makefile b/Makefile index 97c47079e7..d03b940726 100644 --- a/Makefile +++ b/Makefile @@ -311,8 +311,7 @@ contracts/test/prover/proofs/%.json: $(arbitrator_cases)/%.wasm $(arbitrator_pro # strategic rules to minimize dependency building .make/lint: $(DEP_PREDICATE) build-node-deps $(ORDER_ONLY_PREDICATE) .make - go run ./linter/koanf ./... - go run ./linter/pointercheck ./... + go run ./linters ./... 
golangci-lint run --fix yarn --cwd contracts solhint @touch $@ @@ -335,6 +334,7 @@ contracts/test/prover/proofs/%.json: $(arbitrator_cases)/%.wasm $(arbitrator_pro .make/solidity: $(DEP_PREDICATE) contracts/src/*/*.sol .make/yarndeps $(ORDER_ONLY_PREDICATE) .make yarn --cwd contracts build + yarn --cwd contracts build:forge:yul @touch $@ .make/yarndeps: $(DEP_PREDICATE) contracts/package.json contracts/yarn.lock $(ORDER_ONLY_PREDICATE) .make diff --git a/arbitrator/jit/src/wavmio.rs b/arbitrator/jit/src/wavmio.rs index a398cb22f5..dfc7f21779 100644 --- a/arbitrator/jit/src/wavmio.rs +++ b/arbitrator/jit/src/wavmio.rs @@ -193,8 +193,8 @@ pub fn resolve_preimage_impl( }; let offset = match u32::try_from(offset) { - Ok(offset) => offset as usize, - Err(_) => error!("bad offset {offset} in {name}"), + Ok(offset) if offset % 32 == 0 => offset as usize, + _ => error!("bad offset {offset} in {name}"), }; let len = std::cmp::min(32, preimage.len().saturating_sub(offset)); diff --git a/arbitrator/prover/src/lib.rs b/arbitrator/prover/src/lib.rs index 8285c011df..c7610ab31f 100644 --- a/arbitrator/prover/src/lib.rs +++ b/arbitrator/prover/src/lib.rs @@ -19,7 +19,6 @@ use crate::machine::{argument_data_to_inbox, Machine}; use arbutil::PreimageType; use eyre::Result; use machine::{get_empty_preimage_resolver, GlobalState, MachineStatus, PreimageResolver}; -use sha3::{Digest, Keccak256}; use static_assertions::const_assert_eq; use std::{ ffi::CStr, @@ -303,13 +302,18 @@ pub unsafe extern "C" fn arbitrator_set_preimage_resolver( return None; } let data = CBytes::from_raw_parts(res.ptr, res.len as usize); - let have_hash = Keccak256::digest(&data); - if have_hash.as_slice() != *hash { - panic!( - "Resolved incorrect data for hash {}: got {}", + #[cfg(debug_assertions)] + match crate::utils::hash_preimage(&data, ty) { + Ok(have_hash) if have_hash.as_slice() == *hash => {} + Ok(got_hash) => panic!( + "Resolved incorrect data for hash {} (rehashed to {})", hash, - hex::encode(data), - ); + Bytes32(got_hash), + ), + Err(err) => panic!( + "Failed to hash preimage from resolver (expecting hash {}): {}", + hash, err, + ), } Some(data) }, diff --git a/arbitrator/prover/test-cases/rust/data/msg0.bin b/arbitrator/prover/test-cases/rust/data/msg0.bin index 5cd813e5c5..7eb0b7fdf9 100644 Binary files a/arbitrator/prover/test-cases/rust/data/msg0.bin and b/arbitrator/prover/test-cases/rust/data/msg0.bin differ diff --git a/arbitrator/prover/test-cases/rust/data/msg1.bin b/arbitrator/prover/test-cases/rust/data/msg1.bin index 2ea3dec3e1..fefa1cc823 100644 Binary files a/arbitrator/prover/test-cases/rust/data/msg1.bin and b/arbitrator/prover/test-cases/rust/data/msg1.bin differ diff --git a/arbitrator/prover/test-cases/rust/src/bin/host-io.rs b/arbitrator/prover/test-cases/rust/src/bin/host-io.rs index d16f60ff50..679ee14486 100644 --- a/arbitrator/prover/test-cases/rust/src/bin/host-io.rs +++ b/arbitrator/prover/test-cases/rust/src/bin/host-io.rs @@ -29,24 +29,29 @@ fn main() { let mut bytebuffer = Bytes32([0x0; 32]); // in delayed inbox - we're skipping the "kind" byte println!("delayed inbox message 0"); + let mut expected_buffer = bytebuffer.0; let len = wavm_read_delayed_inbox_message(0, bytebuffer.0.as_mut_ptr(), DELAYED_HEADER_LEN); - assert_eq!(len, 2); - assert_eq!(bytebuffer.0[1], 0xaa); + assert_eq!(len, 3); + expected_buffer[2] = 0xaa; + assert_eq!(bytebuffer.0, expected_buffer); println!("delayed inbox message 1"); let len = wavm_read_delayed_inbox_message(1, bytebuffer.0.as_mut_ptr(), DELAYED_HEADER_LEN); 
assert_eq!(len, 32); - for j in 1..31 { - assert_eq!(bytebuffer.0[j], (j as u8)); + for j in 1..32 { + assert_eq!(bytebuffer.0[j], (j as u8) - 1); } println!("inbox message 0"); + expected_buffer = bytebuffer.0; let len = wavm_read_inbox_message(0, bytebuffer.0.as_mut_ptr(), INBOX_HEADER_LEN); - assert_eq!(len, 1); - assert_eq!(bytebuffer.0[0], 0xaa); + expected_buffer[0] = 0; + expected_buffer[1] = 0xaa; + assert_eq!(len, 2); + assert_eq!(bytebuffer.0, expected_buffer); println!("inbox message 1"); let len = wavm_read_inbox_message(1, bytebuffer.0.as_mut_ptr(), INBOX_HEADER_LEN); assert_eq!(len, 32); for j in 0..32 { - assert_eq!(bytebuffer.0[j], (j as u8) + 1); + assert_eq!(bytebuffer.0[j], (j as u8)); } let keccak_hash = hex!("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad"); diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index c4fc500d76..e3af0b2afb 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -18,15 +18,19 @@ import ( "github.com/andybalholm/brotli" "github.com/spf13/pflag" - "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbnode/dataposter" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" @@ -40,6 +44,7 @@ import ( "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/util" "github.com/offchainlabs/nitro/util/arbmath" + "github.com/offchainlabs/nitro/util/blobs" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/redisutil" "github.com/offchainlabs/nitro/util/stopwaiter" @@ -48,7 +53,16 @@ import ( var ( batchPosterWalletBalance = metrics.NewRegisteredGaugeFloat64("arb/batchposter/wallet/balanceether", nil) batchPosterGasRefunderBalance = metrics.NewRegisteredGaugeFloat64("arb/batchposter/gasrefunder/balanceether", nil) + + usableBytesInBlob = big.NewInt(int64(len(kzg4844.Blob{}) * 31 / 32)) + blobTxBlobGasPerBlob = big.NewInt(params.BlobTxBlobGasPerBlob) +) + +const ( batchPosterSimpleRedisLockKey = "node.batch-poster.redis-lock.simple-lock-key" + + sequencerBatchPostMethodName = "addSequencerL2BatchFromOrigin0" + sequencerBatchPostWithBlobsMethodName = "addSequencerL2BatchFromBlobs" ) type batchPosterPosition struct { @@ -104,6 +118,8 @@ type BatchPosterConfig struct { DisableDasFallbackStoreDataOnChain bool `koanf:"disable-das-fallback-store-data-on-chain" reload:"hot"` // Max batch size. MaxSize int `koanf:"max-size" reload:"hot"` + // Maximum 4844 blob enabled batch size. + Max4844BatchSize int `koanf:"max-4844-batch-size" reload:"hot"` // Max batch post delay. MaxDelay time.Duration `koanf:"max-delay" reload:"hot"` // Wait for max BatchPost delay. 
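The two sizing constants above rest on EIP-4844 blob geometry: a blob is 4096 field elements of 32 bytes each, and every element must stay below the BLS12-381 scalar modulus, so only about 31/32 of a blob (or, packed more tightly, 254 bits per element) can carry payload. As a sanity check on `usableBytesInBlob` above and on the `Max4844BatchSize` default that appears later in this diff, here is a minimal sketch assuming mainnet Cancun parameters (6 blobs per block); the constant values are illustrative stand-ins for geth's `params` package, not quoted from this diff:

```go
package main

import "fmt"

// Illustrative stand-ins for go-ethereum's params package (mainnet Cancun
// values; assumptions for this sketch, not values taken from the diff).
const (
	blobTxFieldElementsPerBlob = 4096      // field elements per blob
	blobByteSize               = 4096 * 32 // len(kzg4844.Blob{})
	maxBlobGasPerBlock         = 786432
	blobTxBlobGasPerBlob       = 131072
)

func main() {
	// usableBytesInBlob: each 32-byte field element must stay below the
	// BLS12-381 modulus, approximated as 31 usable bytes out of 32.
	fmt.Println(blobByteSize * 31 / 32) // 126976

	// Max4844BatchSize default: tighter 254-bit packing per field element,
	// across a full block's worth of blobs, minus a 1000-byte safety margin.
	blobsPerBlock := maxBlobGasPerBlock / blobTxBlobGasPerBlob  // 6
	fmt.Println(254*blobTxFieldElementsPerBlob/8*blobsPerBlock - 1000) // 779288
}
```

The 1000-byte subtraction is the margin-for-error that the TODO next to the default asks about.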
@@ -119,6 +135,8 @@ type BatchPosterConfig struct { RedisUrl string `koanf:"redis-url"` RedisLock redislock.SimpleCfg `koanf:"redis-lock" reload:"hot"` ExtraBatchGas uint64 `koanf:"extra-batch-gas" reload:"hot"` + Post4844Blobs bool `koanf:"post-4844-blobs" reload:"hot"` + ForcePost4844Blobs bool `koanf:"force-post-4844-blobs" reload:"hot"` ParentChainWallet genericconf.WalletConfig `koanf:"parent-chain-wallet"` L1BlockBound string `koanf:"l1-block-bound" reload:"hot"` L1BlockBoundBypass time.Duration `koanf:"l1-block-bound-bypass" reload:"hot"` @@ -158,6 +176,7 @@ func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Bool(prefix+".enable", DefaultBatchPosterConfig.Enable, "enable posting batches to l1") f.Bool(prefix+".disable-das-fallback-store-data-on-chain", DefaultBatchPosterConfig.DisableDasFallbackStoreDataOnChain, "If unable to batch to DAS, disable fallback storing data on chain") f.Int(prefix+".max-size", DefaultBatchPosterConfig.MaxSize, "maximum batch size") + f.Int(prefix+".max-4844-batch-size", DefaultBatchPosterConfig.Max4844BatchSize, "maximum 4844 blob enabled batch size") f.Duration(prefix+".max-delay", DefaultBatchPosterConfig.MaxDelay, "maximum batch posting delay") f.Bool(prefix+".wait-for-max-delay", DefaultBatchPosterConfig.WaitForMaxDelay, "wait for the max batch delay, even if the batch is full") f.Duration(prefix+".poll-interval", DefaultBatchPosterConfig.PollInterval, "how long to wait after no batches are ready to be posted before checking again") @@ -166,6 +185,8 @@ func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Duration(prefix+".das-retention-period", DefaultBatchPosterConfig.DASRetentionPeriod, "In AnyTrust mode, the period which DASes are requested to retain the stored batches.") f.String(prefix+".gas-refunder-address", DefaultBatchPosterConfig.GasRefunderAddress, "The gas refunder contract address (optional)") f.Uint64(prefix+".extra-batch-gas", DefaultBatchPosterConfig.ExtraBatchGas, "use this much more gas than estimation says is necessary to post batches") + f.Bool(prefix+".post-4844-blobs", DefaultBatchPosterConfig.Post4844Blobs, "if the parent chain supports 4844 blobs and they're well priced, post EIP-4844 blobs") + f.Bool(prefix+".force-post-4844-blobs", DefaultBatchPosterConfig.ForcePost4844Blobs, "if the parent chain supports 4844 blobs and post-4844-blobs is true, post 4844 blobs even if it's not price efficient") f.String(prefix+".redis-url", DefaultBatchPosterConfig.RedisUrl, "if non-empty, the Redis URL to store queued transactions in") f.String(prefix+".l1-block-bound", DefaultBatchPosterConfig.L1BlockBound, "only post messages to batches when they're within the max future block/timestamp as of this L1 block tag (\"safe\", \"finalized\", \"latest\", or \"ignore\" to ignore this check)") f.Duration(prefix+".l1-block-bound-bypass", DefaultBatchPosterConfig.L1BlockBoundBypass, "post batches even if not within the layer 1 future bounds if we're within this margin of the max delay") @@ -179,7 +200,9 @@ var DefaultBatchPosterConfig = BatchPosterConfig{ Enable: false, DisableDasFallbackStoreDataOnChain: false, // This default is overridden for L3 chains in applyChainParameters in cmd/nitro/nitro.go - MaxSize: 100000, + MaxSize: 100000, + // TODO: is 1000 bytes an appropriate margin for error vs blob space efficiency? 
+ Max4844BatchSize: (254 * params.BlobTxFieldElementsPerBlob / 8 * (params.MaxBlobGasPerBlock / params.BlobTxBlobGasPerBlob)) - 1000, PollInterval: time.Second * 10, ErrorDelay: time.Second * 10, MaxDelay: time.Hour, @@ -188,6 +211,8 @@ var DefaultBatchPosterConfig = BatchPosterConfig{ DASRetentionPeriod: time.Hour * 24 * 15, GasRefunderAddress: "", ExtraBatchGas: 50_000, + Post4844Blobs: false, + ForcePost4844Blobs: false, DataPoster: dataposter.DefaultDataPosterConfig, ParentChainWallet: DefaultBatchPosterL1WalletConfig, L1BlockBound: "", @@ -207,6 +232,7 @@ var DefaultBatchPosterL1WalletConfig = genericconf.WalletConfig{ var TestBatchPosterConfig = BatchPosterConfig{ Enable: true, MaxSize: 100000, + Max4844BatchSize: DefaultBatchPosterConfig.Max4844BatchSize, PollInterval: time.Millisecond * 10, ErrorDelay: time.Millisecond * 10, MaxDelay: 0, @@ -215,6 +241,8 @@ var TestBatchPosterConfig = BatchPosterConfig{ DASRetentionPeriod: time.Hour * 24 * 15, GasRefunderAddress: "", ExtraBatchGas: 10_000, + Post4844Blobs: true, + ForcePost4844Blobs: false, DataPoster: dataposter.TestDataPosterConfig, ParentChainWallet: DefaultBatchPosterL1WalletConfig, L1BlockBound: "", @@ -338,10 +366,7 @@ func AccessList(opts *AccessListOpts) types.AccessList { StorageKeys: []common.Hash{ common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), // totalDelayedMessagesRead common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), // bridge - common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"), // maxTimeVariation.delayBlocks - common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000005"), // maxTimeVariation.futureBlocks - common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000006"), // maxTimeVariation.delaySeconds - common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000007"), // maxTimeVariation.futureSeconds + common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000000a"), // maxTimeVariation // ADMIN_SLOT from OpenZeppelin, keccak-256 hash of // "eip1967.proxy.admin" subtracted by 1. 
common.HexToHash("0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103"), @@ -427,7 +452,8 @@ func (b *BatchPoster) checkReverts(ctx context.Context, to int64) (bool, error) if shouldHalt { logLevel = log.Error } - logLevel("Transaction from batch poster reverted", "nonce", tx.Nonce(), "txHash", tx.Hash(), "blockNumber", r.BlockNumber, "blockHash", r.BlockHash) + txErr := arbutil.DetailTxError(ctx, b.l1Reader.Client(), tx, r) + logLevel("Transaction from batch poster reverted", "nonce", tx.Nonce(), "txHash", tx.Hash(), "blockNumber", r.BlockNumber, "blockHash", r.BlockHash, "txErr", txErr) return shouldHalt, nil } } @@ -532,13 +558,20 @@ type buildingBatch struct { startMsgCount arbutil.MessageIndex msgCount arbutil.MessageIndex haveUsefulMessage bool + use4844 bool } -func newBatchSegments(firstDelayed uint64, config *BatchPosterConfig, backlog uint64) *batchSegments { - compressedBuffer := bytes.NewBuffer(make([]byte, 0, config.MaxSize*2)) - if config.MaxSize <= 40 { - panic("MaxBatchSize too small") +func newBatchSegments(firstDelayed uint64, config *BatchPosterConfig, backlog uint64, use4844 bool) *batchSegments { + maxSize := config.MaxSize + if use4844 { + maxSize = config.Max4844BatchSize + } else { + if maxSize <= 40 { + panic("Maximum batch size too small") + } + maxSize -= 40 } + compressedBuffer := bytes.NewBuffer(make([]byte, 0, maxSize*2)) compressionLevel := config.CompressionLevel recompressionLevel := config.CompressionLevel if backlog > 20 { @@ -562,7 +595,7 @@ func newBatchSegments(firstDelayed uint64, config *BatchPosterConfig, backlog ui return &batchSegments{ compressedBuffer: compressedBuffer, compressedWriter: brotli.NewWriterLevel(compressedBuffer, compressionLevel), - sizeLimit: config.MaxSize - 40, // TODO + sizeLimit: maxSize, recompressionLevel: recompressionLevel, rawSegments: make([][]byte, 0, 128), delayedMsg: firstDelayed, @@ -755,30 +788,73 @@ func (s *batchSegments) CloseAndGetBytes() ([]byte, error) { return fullMsg, nil } -func (b *BatchPoster) encodeAddBatch(seqNum *big.Int, prevMsgNum arbutil.MessageIndex, newMsgNum arbutil.MessageIndex, message []byte, delayedMsg uint64) ([]byte, error) { - method, ok := b.seqInboxABI.Methods["addSequencerL2BatchFromOrigin0"] +func (b *BatchPoster) encodeAddBatch( + seqNum *big.Int, + prevMsgNum arbutil.MessageIndex, + newMsgNum arbutil.MessageIndex, + l2MessageData []byte, + delayedMsg uint64, + use4844 bool, +) ([]byte, []kzg4844.Blob, error) { + methodName := sequencerBatchPostMethodName + if use4844 { + methodName = sequencerBatchPostWithBlobsMethodName + } + method, ok := b.seqInboxABI.Methods[methodName] if !ok { - return nil, errors.New("failed to find add batch method") - } - inputData, err := method.Inputs.Pack( - seqNum, - message, - new(big.Int).SetUint64(delayedMsg), - b.config().gasRefunder, - new(big.Int).SetUint64(uint64(prevMsgNum)), - new(big.Int).SetUint64(uint64(newMsgNum)), - ) + return nil, nil, errors.New("failed to find add batch method") + } + var calldata []byte + var kzgBlobs []kzg4844.Blob + var err error + if use4844 { + kzgBlobs, err = blobs.EncodeBlobs(l2MessageData) + if err != nil { + return nil, nil, fmt.Errorf("failed to encode blobs: %w", err) + } + // EIP4844 transactions to the sequencer inbox will not use transaction calldata for L2 info. 
+ calldata, err = method.Inputs.Pack( + seqNum, + new(big.Int).SetUint64(delayedMsg), + b.config().gasRefunder, + new(big.Int).SetUint64(uint64(prevMsgNum)), + new(big.Int).SetUint64(uint64(newMsgNum)), + ) + } else { + calldata, err = method.Inputs.Pack( + seqNum, + l2MessageData, + new(big.Int).SetUint64(delayedMsg), + b.config().gasRefunder, + new(big.Int).SetUint64(uint64(prevMsgNum)), + new(big.Int).SetUint64(uint64(newMsgNum)), + ) + } if err != nil { - return nil, err + return nil, nil, err } - fullData := append([]byte{}, method.ID...) - fullData = append(fullData, inputData...) - return fullData, nil + fullCalldata := append([]byte{}, method.ID...) + fullCalldata = append(fullCalldata, calldata...) + return fullCalldata, kzgBlobs, nil } var ErrNormalGasEstimationFailed = errors.New("normal gas estimation failed") -func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, delayedMessages uint64, realData []byte, realNonce uint64, realAccessList types.AccessList) (uint64, error) { +type estimateGasParams struct { + From common.Address `json:"from"` + To *common.Address `json:"to"` + Data hexutil.Bytes `json:"data"` + AccessList types.AccessList `json:"accessList"` + BlobHashes []common.Hash `json:"blobVersionedHashes,omitempty"` +} + +func estimateGas(client rpc.ClientInterface, ctx context.Context, params estimateGasParams) (uint64, error) { + var gas hexutil.Uint64 + err := client.CallContext(ctx, &gas, "eth_estimateGas", params) + return uint64(gas), err +} + +func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, delayedMessages uint64, realData []byte, realBlobs []kzg4844.Blob, realNonce uint64, realAccessList types.AccessList) (uint64, error) { config := b.config() useNormalEstimation := b.dataPoster.MaxMempoolTransactions() == 1 if !useNormalEstimation { @@ -789,12 +865,18 @@ func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, } useNormalEstimation = latestNonce == realNonce } + rawRpcClient := b.l1Reader.Client().Client() if useNormalEstimation { + _, realBlobHashes, err := blobs.ComputeCommitmentsAndHashes(realBlobs) + if err != nil { + return 0, fmt.Errorf("failed to compute real blob commitments: %w", err) + } // If we're at the latest nonce, we can skip the special future tx estimate stuff - gas, err := b.l1Reader.Client().EstimateGas(ctx, ethereum.CallMsg{ + gas, err := estimateGas(rawRpcClient, ctx, estimateGasParams{ From: b.dataPoster.Sender(), To: &b.seqInboxAddr, Data: realData, + BlobHashes: realBlobHashes, AccessList: realAccessList, }) if err != nil { @@ -807,14 +889,19 @@ func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, // However, we set nextMsgNum to 1 because it is necessary for a correct estimation for the final to be non-zero. // Because we're likely estimating against older state, this might not be the actual next message, // but the gas used should be the same. 
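The `estimateGasParams`/`estimateGas` helper above goes through the raw RPC client rather than ethclient's `EstimateGas`, presumably because `ethereum.CallMsg` had no way to carry `blobVersionedHashes` at this point. A self-contained sketch of the same call pattern; the function name `estimateBlobTxGas` and its arguments are illustrative, not part of this diff:

```go
package example

import (
	"context"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rpc"
)

// Mirrors the params struct in the hunk above: a plain JSON object for
// eth_estimateGas, with blob hashes omitted from the JSON when empty.
type estimateGasParams struct {
	From       common.Address   `json:"from"`
	To         *common.Address  `json:"to"`
	Data       hexutil.Bytes    `json:"data"`
	AccessList types.AccessList `json:"accessList"`
	BlobHashes []common.Hash    `json:"blobVersionedHashes,omitempty"`
}

// estimateBlobTxGas issues eth_estimateGas directly so the blob versioned
// hashes are part of the transaction being estimated.
func estimateBlobTxGas(ctx context.Context, client *rpc.Client, from, to common.Address, data []byte, blobHashes []common.Hash) (uint64, error) {
	var gas hexutil.Uint64
	err := client.CallContext(ctx, &gas, "eth_estimateGas", estimateGasParams{
		From:       from,
		To:         &to,
		Data:       data,
		BlobHashes: blobHashes,
	})
	return uint64(gas), err
}
```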
- data, err := b.encodeAddBatch(abi.MaxUint256, 0, 1, sequencerMessage, delayedMessages) + data, kzgBlobs, err := b.encodeAddBatch(abi.MaxUint256, 0, 1, sequencerMessage, delayedMessages, len(realBlobs) > 0) if err != nil { return 0, err } - gas, err := b.l1Reader.Client().EstimateGas(ctx, ethereum.CallMsg{ - From: b.dataPoster.Sender(), - To: &b.seqInboxAddr, - Data: data, + _, blobHashes, err := blobs.ComputeCommitmentsAndHashes(kzgBlobs) + if err != nil { + return 0, fmt.Errorf("failed to compute blob commitments: %w", err) + } + gas, err := estimateGas(rawRpcClient, ctx, estimateGasParams{ + From: b.dataPoster.Sender(), + To: &b.seqInboxAddr, + Data: data, + BlobHashes: blobHashes, // This isn't perfect because we're probably estimating the batch at a different sequence number, // but it should overestimate rather than underestimate which is fine. AccessList: realAccessList, @@ -862,10 +949,29 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) } if b.building == nil || b.building.startMsgCount != batchPosition.MessageCount { + latestHeader, err := b.l1Reader.LastHeader(ctx) + if err != nil { + return false, err + } + var use4844 bool + config := b.config() + if config.Post4844Blobs && latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil { + if config.ForcePost4844Blobs { + use4844 = true + } else { + blobFeePerByte := eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) + blobFeePerByte.Mul(blobFeePerByte, blobTxBlobGasPerBlob) + blobFeePerByte.Div(blobFeePerByte, usableBytesInBlob) + + calldataFeePerByte := arbmath.BigMulByUint(latestHeader.BaseFee, 16) + use4844 = arbmath.BigLessThan(blobFeePerByte, calldataFeePerByte) + } + } b.building = &buildingBatch{ - segments: newBatchSegments(batchPosition.DelayedMessageCount, b.config(), b.GetBacklogEstimate()), + segments: newBatchSegments(batchPosition.DelayedMessageCount, b.config(), b.GetBacklogEstimate(), use4844), msgCount: batchPosition.MessageCount, startMsgCount: batchPosition.MessageCount, + use4844: use4844, } } msgCount, err := b.streamer.GetMessageCount() @@ -921,22 +1027,22 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) return false, fmt.Errorf("error getting L1 bound block: %w", err) } - maxTimeVariation, err := b.seqInbox.MaxTimeVariation(&bind.CallOpts{ + maxTimeVariationDelayBlocks, maxTimeVariationFutureBlocks, maxTimeVariationDelaySeconds, maxTimeVariationFutureSeconds, err := b.seqInbox.MaxTimeVariation(&bind.CallOpts{ Context: ctx, BlockNumber: l1Bound.Number, }) if err != nil { // This might happen if the latest finalized block is old enough that our L1 node no longer has its state log.Warn("error getting max time variation on L1 bound block; falling back on latest block", "err", err) - maxTimeVariation, err = b.seqInbox.MaxTimeVariation(&bind.CallOpts{Context: ctx}) + maxTimeVariationDelayBlocks, maxTimeVariationFutureBlocks, maxTimeVariationDelaySeconds, maxTimeVariationFutureSeconds, err = b.seqInbox.MaxTimeVariation(&bind.CallOpts{Context: ctx}) if err != nil { return false, fmt.Errorf("error getting max time variation: %w", err) } } l1BoundBlockNumber := arbutil.ParentHeaderToL1BlockNumber(l1Bound) - l1BoundMaxBlockNumber = arbmath.SaturatingUAdd(l1BoundBlockNumber, arbmath.BigToUintSaturating(maxTimeVariation.FutureBlocks)) - l1BoundMaxTimestamp = arbmath.SaturatingUAdd(l1Bound.Time, arbmath.BigToUintSaturating(maxTimeVariation.FutureSeconds)) + l1BoundMaxBlockNumber = 
arbmath.SaturatingUAdd(l1BoundBlockNumber, arbmath.BigToUintSaturating(maxTimeVariationFutureBlocks)) + l1BoundMaxTimestamp = arbmath.SaturatingUAdd(l1Bound.Time, arbmath.BigToUintSaturating(maxTimeVariationFutureSeconds)) if config.L1BlockBoundBypass > 0 { latestHeader, err := b.l1Reader.LastHeader(ctx) @@ -947,8 +1053,8 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) blockNumberWithPadding := arbmath.SaturatingUAdd(latestBlockNumber, uint64(config.L1BlockBoundBypass/ethPosBlockTime)) timestampWithPadding := arbmath.SaturatingUAdd(latestHeader.Time, uint64(config.L1BlockBoundBypass/time.Second)) - l1BoundMinBlockNumber = arbmath.SaturatingUSub(blockNumberWithPadding, arbmath.BigToUintSaturating(maxTimeVariation.DelayBlocks)) - l1BoundMinTimestamp = arbmath.SaturatingUSub(timestampWithPadding, arbmath.BigToUintSaturating(maxTimeVariation.DelaySeconds)) + l1BoundMinBlockNumber = arbmath.SaturatingUSub(blockNumberWithPadding, arbmath.BigToUintSaturating(maxTimeVariationDelayBlocks)) + l1BoundMinTimestamp = arbmath.SaturatingUSub(timestampWithPadding, arbmath.BigToUintSaturating(maxTimeVariationDelaySeconds)) } } @@ -1041,10 +1147,13 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) } } - data, err := b.encodeAddBatch(new(big.Int).SetUint64(batchPosition.NextSeqNum), batchPosition.MessageCount, b.building.msgCount, sequencerMsg, b.building.segments.delayedMsg) + data, kzgBlobs, err := b.encodeAddBatch(new(big.Int).SetUint64(batchPosition.NextSeqNum), batchPosition.MessageCount, b.building.msgCount, sequencerMsg, b.building.segments.delayedMsg, b.building.use4844) if err != nil { return false, err } + if len(kzgBlobs)*params.BlobTxBlobGasPerBlob > params.MaxBlobGasPerBlock { + return false, fmt.Errorf("produced %v blobs for batch but a block can only hold %v", len(kzgBlobs), params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob) + } accessList := b.accessList(int(batchPosition.NextSeqNum), int(b.building.segments.delayedMsg)) // On restart, we may be trying to estimate gas for a batch whose successor has // already made it into pending state, if not latest state. @@ -1053,7 +1162,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) // In theory, this might reduce gas usage, but only by a factor that's already // accounted for in `config.ExtraBatchGas`, as that same factor can appear if a user // posts a new delayed message that we didn't see while gas estimating. 
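For the `use4844` pricing check earlier in this file's changes: both options are reduced to a wei-per-usable-byte price, with calldata charged at 16 gas per byte (every byte conservatively treated as nonzero). A worked example with made-up fee levels; only the per-blob constants come from the diff above:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	blobGasPerBlob := big.NewInt(131072)    // params.BlobTxBlobGasPerBlob
	usableBytesInBlob := big.NewInt(126976) // len(kzg4844.Blob{}) * 31 / 32

	// Hypothetical current fees: 1 wei per blob gas, 20 gwei base fee.
	blobFee := big.NewInt(1)
	baseFee := big.NewInt(20_000_000_000)

	// Wei per usable byte when posting a blob.
	blobFeePerByte := new(big.Int).Mul(blobFee, blobGasPerBlob)
	blobFeePerByte.Div(blobFeePerByte, usableBytesInBlob)

	// Wei per byte when posting calldata: 16 gas per (assumed nonzero) byte.
	calldataFeePerByte := new(big.Int).Mul(baseFee, big.NewInt(16))

	// use4844 when blobs are strictly cheaper per byte.
	fmt.Println(blobFeePerByte.Cmp(calldataFeePerByte) < 0) // true: post blobs
}
```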
-	gasLimit, err := b.estimateGas(ctx, sequencerMsg, lastPotentialMsg.DelayedMessagesRead, data, nonce, accessList)
+	gasLimit, err := b.estimateGas(ctx, sequencerMsg, lastPotentialMsg.DelayedMessagesRead, data, kzgBlobs, nonce, accessList)
 	if err != nil {
 		return false, err
 	}
@@ -1073,6 +1182,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error)
 		data,
 		gasLimit,
 		new(big.Int),
+		kzgBlobs,
 		accessList,
 	)
 	if err != nil {
@@ -1080,12 +1190,13 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error)
 	}
 	log.Info(
 		"BatchPoster: batch sent",
-		"sequence nr.", batchPosition.NextSeqNum,
+		"sequenceNumber", batchPosition.NextSeqNum,
 		"from", batchPosition.MessageCount,
 		"to", b.building.msgCount,
-		"prev delayed", batchPosition.DelayedMessageCount,
-		"current delayed", b.building.segments.delayedMsg,
-		"total segments", len(b.building.segments.rawSegments),
+		"prevDelayed", batchPosition.DelayedMessageCount,
+		"currentDelayed", b.building.segments.delayedMsg,
+		"totalSegments", len(b.building.segments.rawSegments),
+		"numBlobs", len(kzgBlobs),
 	)
 	recentlyHitL1Bounds := time.Since(b.lastHitL1Bounds) < config.PollInterval*3
 	postedMessages := b.building.msgCount - batchPosition.MessageCount
diff --git a/arbnode/blob_reader.go b/arbnode/blob_reader.go
new file mode 100644
index 0000000000..1424285832
--- /dev/null
+++ b/arbnode/blob_reader.go
@@ -0,0 +1,216 @@
+// Copyright 2023, Offchain Labs, Inc.
+// For license information, see https://github.com/nitro/blob/master/LICENSE
+
+package arbnode
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"path"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/crypto/kzg4844"
+	"github.com/offchainlabs/nitro/arbutil"
+	"github.com/offchainlabs/nitro/util/blobs"
+	"github.com/offchainlabs/nitro/util/jsonapi"
+	"github.com/offchainlabs/nitro/util/pretty"
+
+	"github.com/spf13/pflag"
+)
+
+type BlobClient struct {
+	ec         arbutil.L1Interface
+	beaconUrl  *url.URL
+	httpClient *http.Client
+
+	// The genesis time and seconds per slot won't change, so only request them once.
+	cachedGenesisTime    uint64
+	cachedSecondsPerSlot uint64
+}
+
+type BlobClientConfig struct {
+	BeaconChainUrl string `koanf:"beacon-chain-url"`
+}
+
+var DefaultBlobClientConfig = BlobClientConfig{
+	BeaconChainUrl: "",
+}
+
+func BlobClientAddOptions(prefix string, f *pflag.FlagSet) {
+	f.String(prefix+".beacon-chain-url", DefaultBlobClientConfig.BeaconChainUrl, "Beacon Chain URL to use for fetching blobs")
+}
+
+func NewBlobClient(config BlobClientConfig, ec arbutil.L1Interface) (*BlobClient, error) {
+	beaconUrl, err := url.Parse(config.BeaconChainUrl)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse beacon chain URL: %w", err)
+	}
+	return &BlobClient{
+		ec:         ec,
+		beaconUrl:  beaconUrl,
+		httpClient: &http.Client{},
+	}, nil
+}
+
+type fullResult[T any] struct {
+	Data T `json:"data"`
+}
+
+func beaconRequest[T interface{}](b *BlobClient, ctx context.Context, beaconPath string) (T, error) {
+	// Unfortunately, methods on a struct can't be generic.
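The comment above points at a real language restriction: Go permits type parameters on functions but not on methods, so `beaconRequest` takes the `*BlobClient` receiver as an ordinary argument. A compile-checked illustration of the restriction and the workaround (all names here are illustrative):

```go
package main

import "fmt"

type client struct{ base string }

// A method with its own type parameter, such as
//
//	func (c *client) get[T any](path string) (T, error)
//
// does not compile in Go. A generic free function that takes the would-be
// receiver explicitly, as beaconRequest does, is the usual workaround.
func get[T any](c *client, path string) (T, error) {
	var zero T
	// A real implementation would issue the HTTP request here.
	fmt.Println(c.base + path)
	return zero, nil
}

func main() {
	c := &client{base: "https://beacon.example"}
	_, _ = get[[]string](c, "/eth/v1/beacon/blob_sidecars/1")
}
```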
+
+	var empty T
+
+	// not really a deep copy, but copies the Path part we care about
+	url := *b.beaconUrl
+	url.Path = path.Join(url.Path, beaconPath)
+
+	req, err := http.NewRequestWithContext(ctx, "GET", url.String(), http.NoBody)
+	if err != nil {
+		return empty, err
+	}
+
+	resp, err := b.httpClient.Do(req)
+	if err != nil {
+		return empty, err
+	}
+	defer resp.Body.Close()
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return empty, err
+	}
+
+	var full fullResult[T]
+	if err := json.Unmarshal(body, &full); err != nil {
+		return empty, err
+	}
+
+	return full.Data, nil
+}
+
+// Get all the blobs associated with a particular block.
+func (b *BlobClient) GetBlobs(ctx context.Context, blockHash common.Hash, versionedHashes []common.Hash) ([]kzg4844.Blob, error) {
+	header, err := b.ec.HeaderByHash(ctx, blockHash)
+	if err != nil {
+		return nil, err
+	}
+	genesisTime, err := b.genesisTime(ctx)
+	if err != nil {
+		return nil, err
+	}
+	secondsPerSlot, err := b.secondsPerSlot(ctx)
+	if err != nil {
+		return nil, err
+	}
+	slot := (header.Time - genesisTime) / secondsPerSlot
+	return b.blobSidecars(ctx, slot, versionedHashes)
+}
+
+type blobResponseItem struct {
+	BlockRoot       string               `json:"block_root"`
+	Index           jsonapi.Uint64String `json:"index"`
+	Slot            jsonapi.Uint64String `json:"slot"`
+	BlockParentRoot string               `json:"block_parent_root"`
+	ProposerIndex   jsonapi.Uint64String `json:"proposer_index"`
+	Blob            hexutil.Bytes        `json:"blob"`
+	KzgCommitment   hexutil.Bytes        `json:"kzg_commitment"`
+	KzgProof        hexutil.Bytes        `json:"kzg_proof"`
+}
+
+func (b *BlobClient) blobSidecars(ctx context.Context, slot uint64, versionedHashes []common.Hash) ([]kzg4844.Blob, error) {
+	response, err := beaconRequest[[]blobResponseItem](b, ctx, fmt.Sprintf("/eth/v1/beacon/blob_sidecars/%d", slot))
+	if err != nil {
+		return nil, fmt.Errorf("error calling beacon client in blobSidecars: %w", err)
+	}
+
+	if len(response) < len(versionedHashes) {
+		return nil, fmt.Errorf("expected at least %d blobs for slot %d but only got %d", len(versionedHashes), slot, len(response))
+	}
+
+	output := make([]kzg4844.Blob, len(versionedHashes))
+	outputsFound := make([]bool, len(versionedHashes))
+
+	for _, blobItem := range response {
+		var commitment kzg4844.Commitment
+		copy(commitment[:], blobItem.KzgCommitment)
+		versionedHash := blobs.CommitmentToVersionedHash(commitment)
+
+		// The versioned hashes of the blob commitments are produced by hashing each
+		// commitment and overwriting the first byte with the version, presumably in
+		// the order they were added to the tx. The spec is unclear whether the blobs
+		// need to be returned in any particular order from the beacon API, so we put
+		// them back in the order from the tx.
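`blobs.CommitmentToVersionedHash` above presumably implements the standard EIP-4844 mapping: the SHA-256 of the commitment with the first byte overwritten by the version `0x01`. A minimal sketch, assuming that behavior:

```go
package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/ethereum/go-ethereum/crypto/kzg4844"
)

// commitmentToVersionedHash shows what the helper is assumed to compute:
// per EIP-4844, versioned_hash = 0x01 || sha256(commitment)[1:].
func commitmentToVersionedHash(commitment kzg4844.Commitment) [32]byte {
	hash := sha256.Sum256(commitment[:])
	hash[0] = 0x01 // VERSIONED_HASH_VERSION_KZG
	return hash
}

func main() {
	var commitment kzg4844.Commitment // zero commitment, for demonstration only
	fmt.Printf("%x\n", commitmentToVersionedHash(commitment))
}
```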
+ var outputIdx int + var found bool + for outputIdx = range versionedHashes { + if versionedHashes[outputIdx] == versionedHash { + found = true + if outputsFound[outputIdx] { + return nil, fmt.Errorf("found blob with versioned hash %v twice", versionedHash) + } + outputsFound[outputIdx] = true + break + } + } + if !found { + continue + } + + copy(output[outputIdx][:], blobItem.Blob) + + var proof kzg4844.Proof + copy(proof[:], blobItem.KzgProof) + + err = kzg4844.VerifyBlobProof(output[outputIdx], commitment, proof) + if err != nil { + return nil, fmt.Errorf("failed to verify blob proof for blob at slot(%d) at index(%d), blob(%s)", slot, blobItem.Index, pretty.FirstFewChars(blobItem.Blob.String())) + } + } + + for i, found := range outputsFound { + if !found { + return nil, fmt.Errorf("missing blob %v in slot %v, can't reconstruct batch payload", versionedHashes[i], slot) + } + } + + return output, nil +} + +type genesisResponse struct { + GenesisTime jsonapi.Uint64String `json:"genesis_time"` + // don't currently care about other fields, add if needed +} + +func (b *BlobClient) genesisTime(ctx context.Context) (uint64, error) { + if b.cachedGenesisTime > 0 { + return b.cachedGenesisTime, nil + } + gr, err := beaconRequest[genesisResponse](b, ctx, "/eth/v1/beacon/genesis") + if err != nil { + return 0, fmt.Errorf("error calling beacon client in genesisTime: %w", err) + } + b.cachedGenesisTime = uint64(gr.GenesisTime) + return b.cachedGenesisTime, nil +} + +type getSpecResponse struct { + SecondsPerSlot jsonapi.Uint64String `json:"SECONDS_PER_SLOT"` +} + +func (b *BlobClient) secondsPerSlot(ctx context.Context) (uint64, error) { + if b.cachedSecondsPerSlot > 0 { + return b.cachedSecondsPerSlot, nil + } + gr, err := beaconRequest[getSpecResponse](b, ctx, "/eth/v1/config/spec") + if err != nil { + return 0, fmt.Errorf("error calling beacon client in secondsPerSlot: %w", err) + } + b.cachedSecondsPerSlot = uint64(gr.SecondsPerSlot) + return b.cachedSecondsPerSlot, nil + +} diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 09f3e218b1..1415f78140 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -23,7 +23,9 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" @@ -31,12 +33,14 @@ import ( "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/signer/core/apitypes" "github.com/go-redis/redis/v8" + "github.com/holiman/uint256" "github.com/offchainlabs/nitro/arbnode/dataposter/dbstorage" "github.com/offchainlabs/nitro/arbnode/dataposter/noop" "github.com/offchainlabs/nitro/arbnode/dataposter/slice" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/arbmath" + "github.com/offchainlabs/nitro/util/blobs" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/signature" "github.com/offchainlabs/nitro/util/stopwaiter" @@ -64,6 +68,7 @@ type DataPoster struct { metadataRetriever func(ctx context.Context, blockNum *big.Int) ([]byte, error) extraBacklog func() uint64 parentChainID *big.Int + parentChainID256 *uint256.Int // These 
fields are protected by the mutex.
	// TODO: factor out these fields into separate structure, since now one
@@ -177,6 +182,11 @@ func NewDataPoster(ctx context.Context, opts *DataPosterOpts) (*DataPoster, erro
 		extraBacklog: opts.ExtraBacklog,
 		parentChainID: opts.ParentChainID,
 	}
+	var overflow bool
+	dp.parentChainID256, overflow = uint256.FromBig(opts.ParentChainID)
+	if overflow {
+		return nil, fmt.Errorf("parent chain ID %v overflows uint256 (necessary for blob transactions)", opts.ParentChainID)
+	}
 	if dp.extraBacklog == nil {
 		dp.extraBacklog = func() uint64 { return 0 }
 	}
@@ -363,7 +373,7 @@ func (p *DataPoster) getNextNonceAndMaybeMeta(ctx context.Context) (uint64, []by
 		return 0, nil, false, fmt.Errorf("fetching last element from queue: %w", err)
 	}
 	if lastQueueItem != nil {
-		nextNonce := lastQueueItem.Data.Nonce + 1
+		nextNonce := lastQueueItem.FullTx.Nonce() + 1
 		if err := p.canPostWithNonce(ctx, nextNonce); err != nil {
 			return 0, nil, false, err
 		}
@@ -442,27 +452,38 @@ func (p *DataPoster) evalMaxFeeCapExpr(backlogOfBatches uint64, elapsed time.Dur
 var big4 = big.NewInt(4)
 // The dataPosterBacklog argument should *not* include extraBacklog (it's added in within this function)
-func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit uint64, lastFeeCap *big.Int, lastTipCap *big.Int, dataCreatedAt time.Time, dataPosterBacklog uint64) (*big.Int, *big.Int, error) {
+func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit uint64, numBlobs int, lastFeeCap *big.Int, lastTipCap *big.Int, dataCreatedAt time.Time, dataPosterBacklog uint64) (*big.Int, *big.Int, *big.Int, error) {
 	config := p.config()
 	dataPosterBacklog += p.extraBacklog()
 	latestHeader, err := p.headerReader.LastHeader(ctx)
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, nil, err
 	}
 	if latestHeader.BaseFee == nil {
-		return nil, nil, fmt.Errorf("latest parent chain block %v missing BaseFee (either the parent chain does not have EIP-1559 or the parent chain node is not synced)", latestHeader.Number)
+		return nil, nil, nil, fmt.Errorf("latest parent chain block %v missing BaseFee (either the parent chain does not have EIP-1559 or the parent chain node is not synced)", latestHeader.Number)
+	}
+	newBlobFeeCap := big.NewInt(0)
+	if latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil {
+		newBlobFeeCap = eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed))
+		newBlobFeeCap.Mul(newBlobFeeCap, common.Big2)
+	} else if numBlobs > 0 {
+		return nil, nil, nil, fmt.Errorf(
+			"latest parent chain block %v missing ExcessBlobGas or BlobGasUsed but blobs were specified in data poster transaction "+
+				"(either the parent chain node is not synced or EIP-4844 was improperly activated)",
+			latestHeader.Number,
+		)
 	}
 	softConfBlock := arbmath.BigSubByUint(latestHeader.Number, config.NonceRbfSoftConfs)
 	softConfNonce, err := p.client.NonceAt(ctx, p.Sender(), softConfBlock)
 	if err != nil {
-		return nil, nil, fmt.Errorf("failed to get latest nonce %v blocks ago (block %v): %w", config.NonceRbfSoftConfs, softConfBlock, err)
+		return nil, nil, nil, fmt.Errorf("failed to get latest nonce %v blocks ago (block %v): %w", config.NonceRbfSoftConfs, softConfBlock, err)
 	}
-	newFeeCap := new(big.Int).Mul(latestHeader.BaseFee, big.NewInt(2))
+	newFeeCap := new(big.Int).Mul(latestHeader.BaseFee, common.Big2)
 	newFeeCap = arbmath.BigMax(newFeeCap, arbmath.FloatToBig(config.MinFeeCapGwei*params.GWei))
 	newTipCap, err := p.client.SuggestGasTipCap(ctx)
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, nil, err
 	}
 	newTipCap = arbmath.BigMax(newTipCap, arbmath.FloatToBig(config.MinTipCapGwei*params.GWei))
 	newTipCap = arbmath.BigMin(newTipCap, arbmath.FloatToBig(config.MaxTipCapGwei*params.GWei))
@@ -481,10 +502,13 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u
 		newFeeCap = arbmath.BigMax(newFeeCap, arbmath.BigMulByBips(lastFeeCap, minRbfIncrease))
 	}
 
+	// TODO: if we're significantly increasing the blob fee cap, we also need to increase the fee cap by minRbfIncrease
+	// TODO: look more into geth's blob mempool and make sure this behavior conforms (I think minRbfIncrease might be higher there)
+
 	elapsed := time.Since(dataCreatedAt)
 	maxFeeCap, err := p.evalMaxFeeCapExpr(dataPosterBacklog, elapsed)
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, nil, err
 	}
 	if arbmath.BigGreaterThan(newFeeCap, maxFeeCap) {
 		log.Warn(
@@ -496,6 +520,8 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u
 		newFeeCap = maxFeeCap
 	}
 
+	// TODO: also have an expression limiting the max blob fee cap
+
 	latestBalance := p.balance
 	balanceForTx := new(big.Int).Set(latestBalance)
 	if config.AllocateMempoolBalance && !p.usingNoOpStorage {
@@ -525,6 +551,7 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u
 			balanceForTx.Div(balanceForTx, arbmath.UintToBig(config.MaxMempoolTransactions-1))
 		}
 	}
+	// TODO: take into account blob costs
 	balanceFeeCap := arbmath.BigDivByUint(balanceForTx, gasLimit)
 	if arbmath.BigGreaterThan(newFeeCap, balanceFeeCap) {
 		log.Warn(
@@ -550,10 +577,14 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u
 		newTipCap = new(big.Int).Set(newFeeCap)
 	}
 
-	return newFeeCap, newTipCap, nil
+	return newFeeCap, newTipCap, newBlobFeeCap, nil
+}
+
+func (p *DataPoster) PostSimpleTransaction(ctx context.Context, nonce uint64, to common.Address, calldata []byte, gasLimit uint64, value *big.Int) (*types.Transaction, error) {
+	return p.PostTransaction(ctx, time.Now(), nonce, nil, to, calldata, gasLimit, value, nil, nil)
 }
 
-func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Time, nonce uint64, meta []byte, to common.Address, calldata []byte, gasLimit uint64, value *big.Int, accessList types.AccessList) (*types.Transaction, error) {
+func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Time, nonce uint64, meta []byte, to common.Address, calldata []byte, gasLimit uint64, value *big.Int, kzgBlobs []kzg4844.Blob, accessList types.AccessList) (*types.Transaction, error) {
 	p.mutex.Lock()
 	defer p.mutex.Unlock()
@@ -570,27 +601,69 @@ func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Tim
 		return nil, fmt.Errorf("failed to update data poster balance: %w", err)
 	}
 
-	feeCap, tipCap, err := p.feeAndTipCaps(ctx, nonce, gasLimit, nil, nil, dataCreatedAt, 0)
+	feeCap, tipCap, blobFeeCap, err := p.feeAndTipCaps(ctx, nonce, gasLimit, len(kzgBlobs), nil, nil, dataCreatedAt, 0)
 	if err != nil {
 		return nil, err
 	}
-	inner := types.DynamicFeeTx{
-		Nonce:      nonce,
-		GasTipCap:  tipCap,
-		GasFeeCap:  feeCap,
-		Gas:        gasLimit,
-		To:         &to,
-		Value:      value,
-		Data:       calldata,
-		AccessList: accessList,
-		ChainID:    p.parentChainID,
-	}
-	fullTx, err := p.signer(ctx, p.Sender(), types.NewTx(&inner))
+
+	var deprecatedData types.DynamicFeeTx
+	var inner types.TxData
+	if len(kzgBlobs) > 0 {
+		value256, overflow := uint256.FromBig(value)
+		if overflow {
+			return nil, fmt.Errorf("blob transaction callvalue %v overflows uint256", value)
+		}
+		// Intentionally break out-of-date data poster redis clients,
+		// so they don't try to replace-by-fee a tx they don't understand
+		deprecatedData.Nonce = ^uint64(0)
+		commitments, blobHashes, err := blobs.ComputeCommitmentsAndHashes(kzgBlobs)
+		if err != nil {
+			return nil, fmt.Errorf("failed to compute KZG commitments: %w", err)
+		}
+		proofs, err := blobs.ComputeBlobProofs(kzgBlobs, commitments)
+		if err != nil {
+			return nil, fmt.Errorf("failed to compute KZG proofs: %w", err)
+		}
+		inner = &types.BlobTx{
+			Nonce: nonce,
+			Gas:   gasLimit,
+			To:    to,
+			Value: value256,
+			Data:  calldata,
+			Sidecar: &types.BlobTxSidecar{
+				Blobs:       kzgBlobs,
+				Commitments: commitments,
+				Proofs:      proofs,
+			},
+			BlobHashes: blobHashes,
+			AccessList: accessList,
+			ChainID:    p.parentChainID256,
+		}
+		// reuse the code to convert gas fee and tip caps to uint256s
+		err = updateTxDataGasCaps(inner, feeCap, tipCap, blobFeeCap)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		deprecatedData = types.DynamicFeeTx{
+			Nonce:      nonce,
+			GasFeeCap:  feeCap,
+			GasTipCap:  tipCap,
+			Gas:        gasLimit,
+			To:         &to,
+			Value:      value,
+			Data:       calldata,
+			AccessList: accessList,
+			ChainID:    p.parentChainID,
+		}
+		inner = &deprecatedData
+	}
+	fullTx, err := p.signer(ctx, p.Sender(), types.NewTx(inner))
 	if err != nil {
 		return nil, fmt.Errorf("signing transaction: %w", err)
 	}
 	queuedTx := storage.QueuedTransaction{
-		Data:   inner,
+		DeprecatedData: deprecatedData,
 		FullTx: fullTx,
 		Meta:   meta,
 		Sent:   false,
@@ -603,8 +676,8 @@ func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Tim
 // the mutex must be held by the caller
 func (p *DataPoster) saveTx(ctx context.Context, prevTx, newTx *storage.QueuedTransaction) error {
 	if prevTx != nil {
-		if prevTx.Data.Nonce != newTx.Data.Nonce {
-			return fmt.Errorf("prevTx nonce %v doesn't match newTx nonce %v", prevTx.Data.Nonce, newTx.Data.Nonce)
+		if prevTx.FullTx.Nonce() != newTx.FullTx.Nonce() {
+			return fmt.Errorf("prevTx nonce %v doesn't match newTx nonce %v", prevTx.FullTx.Nonce(), newTx.FullTx.Nonce())
 		}
 
 		// Check if prevTx is the same as newTx and we don't need to do anything
@@ -621,7 +694,7 @@ func (p *DataPoster) saveTx(ctx context.Context, prevTx, newTx *storage.QueuedTr
 			return nil
 		}
 	}
-	if err := p.queue.Put(ctx, newTx.Data.Nonce, prevTx, newTx); err != nil {
+	if err := p.queue.Put(ctx, newTx.FullTx.Nonce(), prevTx, newTx); err != nil {
 		return fmt.Errorf("putting new tx in the queue: %w", err)
 	}
 	return nil
@@ -645,22 +718,58 @@ func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransacti
 	return p.saveTx(ctx, newTx, &newerTx)
 }
 
+func updateTxDataGasCaps(data types.TxData, newFeeCap, newTipCap, newBlobFeeCap *big.Int) error {
+	switch data := data.(type) {
+	case *types.DynamicFeeTx:
+		data.GasFeeCap = newFeeCap
+		data.GasTipCap = newTipCap
+		return nil
+	case *types.BlobTx:
+		var overflow bool
+		data.GasFeeCap, overflow = uint256.FromBig(newFeeCap)
+		if overflow {
+			return fmt.Errorf("blob tx fee cap %v exceeds uint256", newFeeCap)
+		}
+		data.GasTipCap, overflow = uint256.FromBig(newTipCap)
+		if overflow {
+			return fmt.Errorf("blob tx tip cap %v exceeds uint256", newTipCap)
+		}
+		data.BlobFeeCap, overflow = uint256.FromBig(newBlobFeeCap)
+		if overflow {
+			return fmt.Errorf("blob tx blob fee cap %v exceeds uint256", newBlobFeeCap)
+		}
+		return nil
+	default:
+		return fmt.Errorf("unexpected transaction data type %T", data)
+	}
+}
+
+func updateGasCaps(tx *types.Transaction, newFeeCap, newTipCap, newBlobFeeCap
*big.Int) (*types.Transaction, error) { + data := tx.GetInner() + err := updateTxDataGasCaps(data, newFeeCap, newTipCap, newBlobFeeCap) + if err != nil { + return nil, err + } + return types.NewTx(data), nil +} + // The mutex must be held by the caller. func (p *DataPoster) replaceTx(ctx context.Context, prevTx *storage.QueuedTransaction, backlogOfBatches uint64) error { - newFeeCap, newTipCap, err := p.feeAndTipCaps(ctx, prevTx.Data.Nonce, prevTx.Data.Gas, prevTx.Data.GasFeeCap, prevTx.Data.GasTipCap, prevTx.Created, backlogOfBatches) + newFeeCap, newTipCap, newBlobFeeCap, err := p.feeAndTipCaps(ctx, prevTx.FullTx.Nonce(), prevTx.FullTx.Gas(), len(prevTx.FullTx.BlobHashes()), prevTx.FullTx.GasFeeCap(), prevTx.FullTx.GasTipCap(), prevTx.Created, backlogOfBatches) if err != nil { return err } - minNewFeeCap := arbmath.BigMulByBips(prevTx.Data.GasFeeCap, minRbfIncrease) + minNewFeeCap := arbmath.BigMulByBips(prevTx.FullTx.GasFeeCap(), minRbfIncrease) newTx := *prevTx + // TODO: also look at the blob fee cap if newFeeCap.Cmp(minNewFeeCap) < 0 { log.Debug( "no need to replace by fee transaction", - "nonce", prevTx.Data.Nonce, - "lastFeeCap", prevTx.Data.GasFeeCap, + "nonce", prevTx.FullTx.Nonce(), + "lastFeeCap", prevTx.FullTx.GasFeeCap(), "recommendedFeeCap", newFeeCap, - "lastTipCap", prevTx.Data.GasTipCap, + "lastTipCap", prevTx.FullTx.GasTipCap(), "recommendedTipCap", newTipCap, ) newTx.NextReplacement = time.Now().Add(time.Minute) @@ -676,9 +785,13 @@ func (p *DataPoster) replaceTx(ctx context.Context, prevTx *storage.QueuedTransa break } newTx.Sent = false - newTx.Data.GasFeeCap = newFeeCap - newTx.Data.GasTipCap = newTipCap - newTx.FullTx, err = p.signer(ctx, p.Sender(), types.NewTx(&newTx.Data)) + newTx.DeprecatedData.GasFeeCap = newFeeCap + newTx.DeprecatedData.GasTipCap = newTipCap + unsignedTx, err := updateGasCaps(newTx.FullTx, newFeeCap, newTipCap, newBlobFeeCap) + if err != nil { + return err + } + newTx.FullTx, err = p.signer(ctx, p.Sender(), unsignedTx) if err != nil { return err } @@ -750,7 +863,7 @@ func (p *DataPoster) updateBalance(ctx context.Context) error { const maxConsecutiveIntermittentErrors = 10 func (p *DataPoster) maybeLogError(err error, tx *storage.QueuedTransaction, msg string) { - nonce := tx.Data.Nonce + nonce := tx.FullTx.Nonce() if err == nil { delete(p.errorCount, nonce) return @@ -764,7 +877,7 @@ func (p *DataPoster) maybeLogError(err error, tx *storage.QueuedTransaction, msg } else { delete(p.errorCount, nonce) } - logLevel(msg, "err", err, "nonce", nonce, "feeCap", tx.Data.GasFeeCap, "tipCap", tx.Data.GasTipCap, "gas", tx.Data.Gas) + logLevel(msg, "err", err, "nonce", nonce, "feeCap", tx.FullTx.GasFeeCap(), "tipCap", tx.FullTx.GasTipCap(), "gas", tx.FullTx.Gas()) } const minWait = time.Second * 10 diff --git a/arbnode/dataposter/storage/storage.go b/arbnode/dataposter/storage/storage.go index a9e78fcc58..9586b9c9a9 100644 --- a/arbnode/dataposter/storage/storage.go +++ b/arbnode/dataposter/storage/storage.go @@ -27,7 +27,7 @@ var ( type QueuedTransaction struct { FullTx *types.Transaction - Data types.DynamicFeeTx + DeprecatedData types.DynamicFeeTx // FullTx should be used instead Meta []byte Sent bool Created time.Time // may be earlier than the tx was given to the tx poster @@ -46,7 +46,7 @@ type queuedTransactionForEncoding struct { func (qt *QueuedTransaction) EncodeRLP(w io.Writer) error { return rlp.Encode(w, queuedTransactionForEncoding{ FullTx: qt.FullTx, - Data: qt.Data, + Data: qt.DeprecatedData, Meta: qt.Meta, Sent: qt.Sent, Created: 
(RlpTime)(qt.Created), @@ -60,7 +60,7 @@ func (qt *QueuedTransaction) DecodeRLP(s *rlp.Stream) error { return err } qt.FullTx = qtEnc.FullTx - qt.Data = qtEnc.Data + qt.DeprecatedData = qtEnc.Data qt.Meta = qtEnc.Meta qt.Sent = qtEnc.Sent qt.Created = time.Time(qtEnc.Created) @@ -107,7 +107,7 @@ func LegacyToQueuedTransaction(legacyQT *LegacyQueuedTransaction) (*QueuedTransa } return &QueuedTransaction{ FullTx: legacyQT.FullTx, - Data: legacyQT.Data, + DeprecatedData: legacyQT.Data, Meta: meta, Sent: legacyQT.Sent, Created: legacyQT.Created, @@ -127,7 +127,7 @@ func QueuedTransactionToLegacy(qt *QueuedTransaction) (*LegacyQueuedTransaction, } return &LegacyQueuedTransaction{ FullTx: qt.FullTx, - Data: qt.Data, + Data: qt.DeprecatedData, Meta: meta, Sent: qt.Sent, Created: qt.Created, diff --git a/arbnode/dataposter/storage_test.go b/arbnode/dataposter/storage_test.go index cf9918941e..f98c120f38 100644 --- a/arbnode/dataposter/storage_test.go +++ b/arbnode/dataposter/storage_test.go @@ -84,7 +84,7 @@ func valueOf(t *testing.T, i int) *storage.QueuedTransaction { big.NewInt(int64(i)), []byte{byte(i)}), Meta: meta, - Data: types.DynamicFeeTx{ + DeprecatedData: types.DynamicFeeTx{ ChainID: big.NewInt(int64(i)), Nonce: uint64(i), GasTipCap: big.NewInt(int64(i)), diff --git a/arbnode/delayed_seq_reorg_test.go b/arbnode/delayed_seq_reorg_test.go index a28eebb5dc..beb2656e2b 100644 --- a/arbnode/delayed_seq_reorg_test.go +++ b/arbnode/delayed_seq_reorg_test.go @@ -19,7 +19,7 @@ func TestSequencerReorgFromDelayed(t *testing.T) { defer cancel() exec, streamer, db, _ := NewTransactionStreamerForTest(t, common.Address{}) - tracker, err := NewInboxTracker(db, streamer, nil) + tracker, err := NewInboxTracker(db, streamer, nil, nil) Require(t, err) err = streamer.Start(ctx) @@ -61,7 +61,7 @@ func TestSequencerReorgFromDelayed(t *testing.T) { AfterInboxAcc: [32]byte{1}, AfterDelayedAcc: initMsgDelayed.AfterInboxAcc(), AfterDelayedCount: 1, - TimeBounds: bridgegen.ISequencerInboxTimeBounds{}, + TimeBounds: bridgegen.IBridgeTimeBounds{}, rawLog: types.Log{}, dataLocation: 0, bridgeAddress: [20]byte{}, @@ -77,7 +77,7 @@ func TestSequencerReorgFromDelayed(t *testing.T) { AfterInboxAcc: [32]byte{2}, AfterDelayedAcc: userDelayed.AfterInboxAcc(), AfterDelayedCount: 2, - TimeBounds: bridgegen.ISequencerInboxTimeBounds{}, + TimeBounds: bridgegen.IBridgeTimeBounds{}, rawLog: types.Log{}, dataLocation: 0, bridgeAddress: [20]byte{}, @@ -91,7 +91,7 @@ func TestSequencerReorgFromDelayed(t *testing.T) { AfterInboxAcc: [32]byte{3}, AfterDelayedAcc: userDelayed.AfterInboxAcc(), AfterDelayedCount: 2, - TimeBounds: bridgegen.ISequencerInboxTimeBounds{}, + TimeBounds: bridgegen.IBridgeTimeBounds{}, rawLog: types.Log{}, dataLocation: 0, bridgeAddress: [20]byte{}, @@ -130,7 +130,7 @@ func TestSequencerReorgFromDelayed(t *testing.T) { AfterInboxAcc: [32]byte{2}, AfterDelayedAcc: initMsgDelayed.AfterInboxAcc(), AfterDelayedCount: 1, - TimeBounds: bridgegen.ISequencerInboxTimeBounds{}, + TimeBounds: bridgegen.IBridgeTimeBounds{}, rawLog: types.Log{}, dataLocation: 0, bridgeAddress: [20]byte{}, diff --git a/arbnode/inbox_reader.go b/arbnode/inbox_reader.go index 7a2ffa505c..c7af96c86a 100644 --- a/arbnode/inbox_reader.go +++ b/arbnode/inbox_reader.go @@ -402,7 +402,8 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { } log.Warn("missing mentioned batch in L1 message lookup", "batch", batchNum) } - return r.GetSequencerMessageBytes(ctx, batchNum) + data, _, err := r.GetSequencerMessageBytes(ctx, batchNum) + 
return data, err }) if err != nil { return err @@ -571,24 +572,25 @@ func (r *InboxReader) getNextBlockToRead() (*big.Int, error) { return msgBlock, nil } -func (r *InboxReader) GetSequencerMessageBytes(ctx context.Context, seqNum uint64) ([]byte, error) { +func (r *InboxReader) GetSequencerMessageBytes(ctx context.Context, seqNum uint64) ([]byte, common.Hash, error) { metadata, err := r.tracker.GetBatchMetadata(seqNum) if err != nil { - return nil, err + return nil, common.Hash{}, err } blockNum := arbmath.UintToBig(metadata.ParentChainBlock) seqBatches, err := r.sequencerInbox.LookupBatchesInRange(ctx, blockNum, blockNum) if err != nil { - return nil, err + return nil, common.Hash{}, err } var seenBatches []uint64 for _, batch := range seqBatches { if batch.SequenceNumber == seqNum { - return batch.Serialize(ctx, r.client) + data, err := batch.Serialize(ctx, r.client) + return data, batch.BlockHash, err } seenBatches = append(seenBatches, batch.SequenceNumber) } - return nil, fmt.Errorf("sequencer batch %v not found in L1 block %v (found batches %v)", seqNum, metadata.ParentChainBlock, seenBatches) + return nil, common.Hash{}, fmt.Errorf("sequencer batch %v not found in L1 block %v (found batches %v)", seqNum, metadata.ParentChainBlock, seenBatches) } func (r *InboxReader) GetLastReadBlockAndBatchCount() (uint64, uint64) { diff --git a/arbnode/inbox_test.go b/arbnode/inbox_test.go index 92d216f765..e979979dea 100644 --- a/arbnode/inbox_test.go +++ b/arbnode/inbox_test.go @@ -144,12 +144,13 @@ func TestTransactionStreamer(t *testing.T) { } else { dest = state.accounts[rand.Int()%len(state.accounts)] } + destHash := common.BytesToHash(dest.Bytes()) var gas uint64 = 100000 var l2Message []byte l2Message = append(l2Message, arbos.L2MessageKind_ContractTx) l2Message = append(l2Message, arbmath.Uint64ToU256Bytes(gas)...) l2Message = append(l2Message, arbmath.Uint64ToU256Bytes(l2pricing.InitialBaseFeeWei)...) - l2Message = append(l2Message, dest.Hash().Bytes()...) + l2Message = append(l2Message, destHash.Bytes()...) l2Message = append(l2Message, arbmath.U256Bytes(value)...) 
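The test above assembles a raw L2 `ContractTx` message by hand: one kind byte followed by 32-byte big-endian words for gas limit, max fee per gas (the test uses `l2pricing.InitialBaseFeeWei`), the destination address left-padded to 32 bytes (hence `common.BytesToHash(dest.Bytes())`), and callvalue, with the request id appended afterwards. A sketch of that layout; the kind value `1` and the field naming are illustrative stand-ins (e.g. for `arbos.L2MessageKind_ContractTx`), not confirmed by this diff:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// buildContractTx mirrors the appends in the test above, with byte offsets
// made explicit. u256 matches arbmath.Uint64ToU256Bytes: a 32-byte
// big-endian word.
func buildContractTx(kind byte, gas, maxFeePerGas uint64, dest [32]byte, value uint64) []byte {
	u256 := func(v uint64) []byte {
		var word [32]byte
		binary.BigEndian.PutUint64(word[24:], v)
		return word[:]
	}
	msg := []byte{kind}                      // offset 0: message kind
	msg = append(msg, u256(gas)...)          // offset 1: gas limit
	msg = append(msg, u256(maxFeePerGas)...) // offset 33: max fee per gas
	msg = append(msg, dest[:]...)            // offset 65: destination, address left-padded to 32 bytes
	msg = append(msg, u256(value)...)        // offset 97: callvalue
	return msg
}

func main() {
	var dest [32]byte
	// 1 is a hypothetical stand-in for arbos.L2MessageKind_ContractTx.
	fmt.Println(len(buildContractTx(1, 100000, 0, dest, 0))) // 129 bytes before the request id
}
```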
var requestId common.Hash binary.BigEndian.PutUint64(requestId.Bytes()[:8], uint64(i)) diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index 51f74cbeb4..eaf863bffc 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -38,12 +38,13 @@ type InboxTracker struct { mutex sync.Mutex validator *staker.BlockValidator das arbstate.DataAvailabilityReader + blobReader arbstate.BlobReader batchMetaMutex sync.Mutex batchMeta *containers.LruCache[uint64, BatchMetadata] } -func NewInboxTracker(db ethdb.Database, txStreamer *TransactionStreamer, das arbstate.DataAvailabilityReader) (*InboxTracker, error) { +func NewInboxTracker(db ethdb.Database, txStreamer *TransactionStreamer, das arbstate.DataAvailabilityReader, blobReader arbstate.BlobReader) (*InboxTracker, error) { // We support a nil txStreamer for the pruning code if txStreamer != nil && txStreamer.chainConfig.ArbitrumChainParams.DataAvailabilityCommittee && das == nil { return nil, errors.New("data availability service required but unconfigured") @@ -52,6 +53,7 @@ func NewInboxTracker(db ethdb.Database, txStreamer *TransactionStreamer, das arb db: db, txStreamer: txStreamer, das: das, + blobReader: blobReader, batchMeta: containers.NewLruCache[uint64, BatchMetadata](1000), } return tracker, nil @@ -504,11 +506,12 @@ type multiplexerBackend struct { inbox *InboxTracker } -func (b *multiplexerBackend) PeekSequencerInbox() ([]byte, error) { +func (b *multiplexerBackend) PeekSequencerInbox() ([]byte, common.Hash, error) { if len(b.batches) == 0 { - return nil, errors.New("read past end of specified sequencer batches") + return nil, common.Hash{}, errors.New("read past end of specified sequencer batches") } - return b.batches[0].Serialize(b.ctx, b.client) + bytes, err := b.batches[0].Serialize(b.ctx, b.client) + return bytes, b.batches[0].BlockHash, err } func (b *multiplexerBackend) GetSequencerInboxPosition() uint64 { @@ -603,7 +606,8 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L ctx: ctx, client: client, } - multiplexer := arbstate.NewInboxMultiplexer(backend, prevbatchmeta.DelayedMessageCount, t.das, arbstate.KeysetValidate) + + multiplexer := arbstate.NewInboxMultiplexer(backend, prevbatchmeta.DelayedMessageCount, t.das, t.blobReader, arbstate.KeysetValidate) batchMessageCounts := make(map[uint64]arbutil.MessageIndex) currentpos := prevbatchmeta.MessageCount + 1 for { diff --git a/arbnode/node.go b/arbnode/node.go index f92dcefe7c..de9745f2a8 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -26,6 +26,7 @@ import ( "github.com/offchainlabs/nitro/arbnode/dataposter" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/offchainlabs/nitro/arbnode/resourcemanager" + "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/broadcastclient" "github.com/offchainlabs/nitro/broadcastclients" @@ -65,10 +66,10 @@ func GenerateRollupConfig(prod bool, wasmModuleRoot common.Hash, rollupOwner com // TODO could the ChainConfig be just []byte? 
ChainConfig: string(serializedChainConfig), SequencerInboxMaxTimeVariation: rollupgen.ISequencerInboxMaxTimeVariation{ - DelayBlocks: big.NewInt(60 * 60 * 24 / 15), - FutureBlocks: big.NewInt(12), - DelaySeconds: big.NewInt(60 * 60 * 24), - FutureSeconds: big.NewInt(60 * 60), + DelayBlocks: 60 * 60 * 24 / 15, + FutureBlocks: 12, + DelaySeconds: 60 * 60 * 24, + FutureSeconds: 60 * 60, }, } } @@ -85,6 +86,7 @@ type Config struct { Staker staker.L1ValidatorConfig `koanf:"staker" reload:"hot"` SeqCoordinator SeqCoordinatorConfig `koanf:"seq-coordinator"` DataAvailability das.DataAvailabilityConfig `koanf:"data-availability"` + BlobClient BlobClientConfig `koanf:"blob-client"` SyncMonitor SyncMonitorConfig `koanf:"sync-monitor"` Dangerous DangerousConfig `koanf:"dangerous"` TransactionStreamer TransactionStreamerConfig `koanf:"transaction-streamer" reload:"hot"` @@ -142,6 +144,7 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet, feedInputEnable bool, feed staker.L1ValidatorConfigAddOptions(prefix+".staker", f) SeqCoordinatorConfigAddOptions(prefix+".seq-coordinator", f) das.DataAvailabilityConfigAddNodeOptions(prefix+".data-availability", f) + BlobClientAddOptions(prefix+".blob-client", f) SyncMonitorConfigAddOptions(prefix+".sync-monitor", f) DangerousConfigAddOptions(prefix+".dangerous", f) TransactionStreamerConfigAddOptions(prefix+".transaction-streamer", f) @@ -512,7 +515,15 @@ func createNodeImpl( return nil, errors.New("a data availability service is required for this chain, but it was not configured") } - inboxTracker, err := NewInboxTracker(arbDb, txStreamer, daReader) + var blobReader arbstate.BlobReader + if config.BlobClient.BeaconChainUrl != "" { + blobReader, err = NewBlobClient(config.BlobClient, l1client) + if err != nil { + return nil, err + } + } + + inboxTracker, err := NewInboxTracker(arbDb, txStreamer, daReader, blobReader) if err != nil { return nil, err } @@ -531,6 +542,7 @@ func createNodeImpl( exec, rawdb.NewTable(arbDb, storage.BlockValidatorPrefix), daReader, + blobReader, func() *staker.BlockValidatorConfig { return &configFetcher.Get().BlockValidator }, stack, ) diff --git a/arbnode/sequencer_inbox.go b/arbnode/sequencer_inbox.go index 2adfcb60b3..b743bf0ef9 100644 --- a/arbnode/sequencer_inbox.go +++ b/arbnode/sequencer_inbox.go @@ -15,6 +15,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/solgen/go/bridgegen" @@ -33,6 +34,7 @@ const ( batchDataTxInput batchDataLocation = iota batchDataSeparateEvent batchDataNone + batchDataBlobHashes ) func init() { @@ -102,7 +104,7 @@ type SequencerInboxBatch struct { AfterInboxAcc common.Hash AfterDelayedAcc common.Hash AfterDelayedCount uint64 - TimeBounds bridgegen.ISequencerInboxTimeBounds + TimeBounds bridgegen.IBridgeTimeBounds rawLog types.Log dataLocation batchDataLocation bridgeAddress common.Address @@ -149,6 +151,19 @@ func (m *SequencerInboxBatch) getSequencerData(ctx context.Context, client arbut case batchDataNone: // No data when in a force inclusion batch return nil, nil + case batchDataBlobHashes: + tx, err := arbutil.GetLogTransaction(ctx, client, m.rawLog) + if err != nil { + return nil, err + } + if len(tx.BlobHashes()) == 0 { + return nil, fmt.Errorf("blob batch transaction %v has no blobs", tx.Hash()) + } + data := []byte{arbstate.BlobHashesHeaderFlag} + for _, h := range tx.BlobHashes() { 
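+			// Each versioned hash is 32 bytes, so the payload is the header flag followed by the packed hashes.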
+ data = append(data, h[:]...) + } + return data, nil default: return nil, fmt.Errorf("batch has invalid data location %v", m.dataLocation) } diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index 24ef2a7cc4..7e9cf1dbad 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -820,7 +820,7 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil return nil } -func (s *TransactionStreamer) FetchBatch(batchNum uint64) ([]byte, error) { +func (s *TransactionStreamer) FetchBatch(batchNum uint64) ([]byte, common.Hash, error) { return s.inboxReader.GetSequencerMessageBytes(context.TODO(), batchNum) } @@ -968,8 +968,16 @@ func (s *TransactionStreamer) executeNextMsg(ctx context.Context, exec execution log.Error("feedOneMsg failed to readMessage", "err", err, "pos", pos) return false } - err = s.exec.DigestMessage(pos, msg) - if err != nil { + var msgForPrefetch *arbostypes.MessageWithMetadata + if pos+1 < msgCount { + msg, err := s.GetMessage(pos + 1) + if err != nil { + log.Error("feedOneMsg failed to readMessage", "err", err, "pos", pos+1) + return false + } + msgForPrefetch = msg + } + if err = s.exec.DigestMessage(pos, msg, msgForPrefetch); err != nil { logger := log.Warn if prevMessageCount < msgCount { logger = log.Debug diff --git a/arbos/arbosState/arbosstate.go b/arbos/arbosState/arbosstate.go index caf44e2a99..7fdb61aba2 100644 --- a/arbos/arbosState/arbosstate.go +++ b/arbos/arbosState/arbosstate.go @@ -277,14 +277,13 @@ func (state *ArbosState) UpgradeArbosVersion( } } - switch state.arbosVersion { - case 1: - ensure(state.l1PricingState.SetLastSurplus(common.Big0, 1)) + nextArbosVersion := state.arbosVersion + 1 + switch nextArbosVersion { case 2: + ensure(state.l1PricingState.SetLastSurplus(common.Big0, 1)) + case 3: ensure(state.l1PricingState.SetPerBatchGasCost(0)) ensure(state.l1PricingState.SetAmortizedCostCapBips(math.MaxUint64)) - case 3: - // no state changes needed case 4: // no state changes needed case 5: @@ -296,10 +295,12 @@ func (state *ArbosState) UpgradeArbosVersion( case 8: // no state changes needed case 9: + // no state changes needed + case 10: ensure(state.l1PricingState.SetL1FeesAvailable(stateDB.GetBalance( l1pricing.L1PricerFundsPoolAddress, ))) - case 10: + case 11: // Update the PerBatchGasCost to a more accurate value compared to the old v6 default. ensure(state.l1PricingState.SetPerBatchGasCost(l1pricing.InitialPerBatchGasCostV12)) @@ -316,26 +317,29 @@ func (state *ArbosState) UpgradeArbosVersion( ensure(state.chainOwners.ClearList()) } // ArbOS versions 12 through 19 are left to Orbit chains for custom upgrades. - // TODO: currently you can't get to ArbOS 20 without hitting the default case. - case 19: + case 20: if !chainConfig.DebugMode() { // This upgrade isn't finalized so we only want to support it for testing return fmt.Errorf( "the chain is upgrading to unsupported ArbOS version %v, %w", - state.arbosVersion+1, + nextArbosVersion, ErrFatalNodeOutOfDate, ) } // Update Brotli compression level for fast compression from 0 to 1 ensure(state.SetBrotliCompressionLevel(1)) default: - return fmt.Errorf( - "the chain is upgrading to unsupported ArbOS version %v, %w", - state.arbosVersion+1, - ErrFatalNodeOutOfDate, - ) + if nextArbosVersion >= 12 && state.arbosVersion < 20 { + // ArbOS versions 12 through 19 are left to Orbit chains for custom upgrades. 
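+			// No state changes needed; the version counter still advances below.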
+ } else { + return fmt.Errorf( + "the chain is upgrading to unsupported ArbOS version %v, %w", + nextArbosVersion, + ErrFatalNodeOutOfDate, + ) + } } - state.arbosVersion++ + state.arbosVersion = nextArbosVersion } if firstTime && upgradeTo >= 6 { diff --git a/arbos/l1pricing/l1pricing.go b/arbos/l1pricing/l1pricing.go index 27ecae8b85..f2312c46d4 100644 --- a/arbos/l1pricing/l1pricing.go +++ b/arbos/l1pricing/l1pricing.go @@ -146,10 +146,6 @@ func (ps *L1PricingState) SetPayRewardsTo(addr common.Address) error { return ps.payRewardsTo.Set(addr) } -func (ps *L1PricingState) GetRewardsRecepient() (common.Address, error) { - return ps.payRewardsTo.Get() -} - func (ps *L1PricingState) EquilibrationUnits() (*big.Int, error) { return ps.equilibrationUnits.Get() } @@ -174,10 +170,6 @@ func (ps *L1PricingState) SetPerUnitReward(weiPerUnit uint64) error { return ps.perUnitReward.Set(weiPerUnit) } -func (ps *L1PricingState) GetRewardsRate() (uint64, error) { - return ps.perUnitReward.Get() -} - func (ps *L1PricingState) LastUpdateTime() (uint64, error) { return ps.lastUpdateTime.Get() } diff --git a/arbstate/das_reader.go b/arbstate/das_reader.go index a6d351b49e..46d01b7bb1 100644 --- a/arbstate/das_reader.go +++ b/arbstate/das_reader.go @@ -40,19 +40,30 @@ const L1AuthenticatedMessageHeaderFlag byte = 0x40 // ZeroheavyMessageHeaderFlag indicates that this message is zeroheavy-encoded. const ZeroheavyMessageHeaderFlag byte = 0x20 +// BlobHashesHeaderFlag indicates that this message contains EIP-4844 versioned hashes of the commitments calculated over the blob data for the batch. +const BlobHashesHeaderFlag byte = L1AuthenticatedMessageHeaderFlag | 0x10 // 0x50 + // BrotliMessageHeaderByte indicates that the message is brotli-compressed. const BrotliMessageHeaderByte byte = 0 +func hasBits(checking byte, bits byte) bool { + return (checking & bits) == bits +} + func IsDASMessageHeaderByte(header byte) bool { - return (DASMessageHeaderFlag & header) > 0 + return hasBits(header, DASMessageHeaderFlag) } func IsTreeDASMessageHeaderByte(header byte) bool { - return (TreeDASMessageHeaderFlag & header) > 0 + return hasBits(header, TreeDASMessageHeaderFlag) } func IsZeroheavyEncodedHeaderByte(header byte) bool { - return (ZeroheavyMessageHeaderFlag & header) > 0 + return hasBits(header, ZeroheavyMessageHeaderFlag) +} + +func IsBlobHashesHeaderByte(header byte) bool { + return hasBits(header, BlobHashesHeaderFlag) } func IsBrotliMessageHeaderByte(b uint8) bool { diff --git a/arbstate/inbox.go b/arbstate/inbox.go index 3995bcf308..fcb1c1ebcb 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -8,11 +8,13 @@ import ( "context" "encoding/binary" "errors" + "fmt" "io" "math/big" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" @@ -21,11 +23,12 @@ import ( "github.com/offchainlabs/nitro/arbos/l1pricing" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/das/dastree" + "github.com/offchainlabs/nitro/util/blobs" "github.com/offchainlabs/nitro/zeroheavy" ) type InboxBackend interface { - PeekSequencerInbox() ([]byte, error) + PeekSequencerInbox() ([]byte, common.Hash, error) GetSequencerInboxPosition() uint64 AdvanceSequencerInbox() @@ -36,6 +39,14 @@ type InboxBackend interface { ReadDelayedInbox(seqNum uint64) (*arbostypes.L1IncomingMessage, error) } +type BlobReader interface { + GetBlobs( + ctx context.Context, + 
batchBlockHash common.Hash, + versionedHashes []common.Hash, + ) ([]kzg4844.Blob, error) +} + type sequencerMessage struct { minTimestamp uint64 maxTimestamp uint64 @@ -50,7 +61,7 @@ const maxZeroheavyDecompressedLen = 101*MaxDecompressedLen/100 + 64 const MaxSegmentsPerSequencerMessage = 100 * 1024 const MinLifetimeSecondsForDataAvailabilityCert = 7 * 24 * 60 * 60 // one week -func parseSequencerMessage(ctx context.Context, batchNum uint64, data []byte, dasReader DataAvailabilityReader, keysetValidationMode KeysetValidationMode) (*sequencerMessage, error) { +func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash common.Hash, data []byte, dasReader DataAvailabilityReader, blobReader BlobReader, keysetValidationMode KeysetValidationMode) (*sequencerMessage, error) { if len(data) < 40 { return nil, errors.New("sequencer message missing L1 header") } @@ -64,6 +75,9 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, data []byte, da } payload := data[40:] + // Stage 1: Extract the payload from any data availability header. + // It's important that multiple DAS strategies can't both be invoked in the same batch, + // as these headers are validated by the sequencer inbox and not other DASs. if len(payload) > 0 && IsDASMessageHeaderByte(payload[0]) { if dasReader == nil { log.Error("No DAS Reader configured, but sequencer message found with DAS header") @@ -77,8 +91,32 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, data []byte, da return parsedMsg, nil } } + } else if len(payload) > 0 && IsBlobHashesHeaderByte(payload[0]) { + blobHashes := payload[1:] + if len(blobHashes)%len(common.Hash{}) != 0 { + return nil, fmt.Errorf("blob batch data is not a list of hashes as expected") + } + versionedHashes := make([]common.Hash, len(blobHashes)/len(common.Hash{})) + for i := 0; i*32 < len(blobHashes); i += 1 { + copy(versionedHashes[i][:], blobHashes[i*32:(i+1)*32]) + } + + if blobReader == nil { + return nil, errors.New("blob batch payload was encountered but no BlobReader was configured") + } + + kzgBlobs, err := blobReader.GetBlobs(ctx, batchBlockHash, versionedHashes) + if err != nil { + return nil, fmt.Errorf("failed to get blobs: %w", err) + } + payload, err = blobs.DecodeBlobs(kzgBlobs) + if err != nil { + log.Warn("Failed to decode blobs", "batchBlockHash", batchBlockHash, "versionedHashes", versionedHashes, "err", err) + return parsedMsg, nil + } } + // Stage 2: If enabled, decode the zero heavy payload (saves gas based on calldata charging). if len(payload) > 0 && IsZeroheavyEncodedHeaderByte(payload[0]) { pl, err := io.ReadAll(io.LimitReader(zeroheavy.NewZeroheavyDecoder(bytes.NewReader(payload[1:])), int64(maxZeroheavyDecompressedLen))) if err != nil { @@ -88,6 +126,7 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, data []byte, da payload = pl } + // Stage 3: Decompress the brotli payload and fill the parsedMsg.segments list. 
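+	// Decompression is capped at MaxDecompressedLen to bound memory use.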
if len(payload) > 0 && IsBrotliMessageHeaderByte(payload[0]) { decompressed, err := arbcompress.Decompress(payload[1:], MaxDecompressedLen) if err == nil { @@ -242,6 +281,7 @@ type inboxMultiplexer struct { backend InboxBackend delayedMessagesRead uint64 dasReader DataAvailabilityReader + blobReader BlobReader cachedSequencerMessage *sequencerMessage cachedSequencerMessageNum uint64 cachedSegmentNum uint64 @@ -251,11 +291,12 @@ type inboxMultiplexer struct { keysetValidationMode KeysetValidationMode } -func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, dasReader DataAvailabilityReader, keysetValidationMode KeysetValidationMode) arbostypes.InboxMultiplexer { +func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, dasReader DataAvailabilityReader, blobReader BlobReader, keysetValidationMode KeysetValidationMode) arbostypes.InboxMultiplexer { return &inboxMultiplexer{ backend: backend, delayedMessagesRead: delayedMessagesRead, dasReader: dasReader, + blobReader: blobReader, keysetValidationMode: keysetValidationMode, } } @@ -270,13 +311,14 @@ const BatchSegmentKindAdvanceL1BlockNumber uint8 = 4 // Note: this does *not* return parse errors, those are transformed into invalid messages func (r *inboxMultiplexer) Pop(ctx context.Context) (*arbostypes.MessageWithMetadata, error) { if r.cachedSequencerMessage == nil { - bytes, realErr := r.backend.PeekSequencerInbox() + // Note: batchBlockHash will be zero in the replay binary, but that's fine + bytes, batchBlockHash, realErr := r.backend.PeekSequencerInbox() if realErr != nil { return nil, realErr } r.cachedSequencerMessageNum = r.backend.GetSequencerInboxPosition() var err error - r.cachedSequencerMessage, err = parseSequencerMessage(ctx, r.cachedSequencerMessageNum, bytes, r.dasReader, r.keysetValidationMode) + r.cachedSequencerMessage, err = parseSequencerMessage(ctx, r.cachedSequencerMessageNum, batchBlockHash, bytes, r.dasReader, r.blobReader, r.keysetValidationMode) if err != nil { return nil, err } diff --git a/arbstate/inbox_fuzz_test.go b/arbstate/inbox_fuzz_test.go index fcb80cbd73..dcf43fd0da 100644 --- a/arbstate/inbox_fuzz_test.go +++ b/arbstate/inbox_fuzz_test.go @@ -9,6 +9,7 @@ import ( "errors" "testing" + "github.com/ethereum/go-ethereum/common" "github.com/offchainlabs/nitro/arbos/arbostypes" ) @@ -19,11 +20,11 @@ type multiplexerBackend struct { positionWithinMessage uint64 } -func (b *multiplexerBackend) PeekSequencerInbox() ([]byte, error) { +func (b *multiplexerBackend) PeekSequencerInbox() ([]byte, common.Hash, error) { if b.batchSeqNum != 0 { - return nil, errors.New("reading unknown sequencer batch") + return nil, common.Hash{}, errors.New("reading unknown sequencer batch") } - return b.batch, nil + return b.batch, common.Hash{}, nil } func (b *multiplexerBackend) GetSequencerInboxPosition() uint64 { @@ -66,7 +67,7 @@ func FuzzInboxMultiplexer(f *testing.F) { delayedMessage: delayedMsg, positionWithinMessage: 0, } - multiplexer := NewInboxMultiplexer(backend, 0, nil, KeysetValidate) + multiplexer := NewInboxMultiplexer(backend, 0, nil, nil, KeysetValidate) _, err := multiplexer.Pop(context.TODO()) if err != nil { panic(err) diff --git a/arbutil/transaction_data.go b/arbutil/transaction_data.go index 7741af6e9b..8270a628bd 100644 --- a/arbutil/transaction_data.go +++ b/arbutil/transaction_data.go @@ -10,8 +10,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" ) -// GetLogEmitterTxData requires that the tx's data is at least 4 bytes long -func GetLogEmitterTxData(ctx 
context.Context, client L1Interface, log types.Log) ([]byte, error) { +func GetLogTransaction(ctx context.Context, client L1Interface, log types.Log) (*types.Transaction, error) { tx, err := client.TransactionInBlock(ctx, log.BlockHash, log.TxIndex) if err != nil { return nil, err @@ -19,6 +18,15 @@ func GetLogEmitterTxData(ctx context.Context, client L1Interface, log types.Log) if tx.Hash() != log.TxHash { return nil, fmt.Errorf("L1 client returned unexpected transaction hash %v when looking up block %v transaction %v with expected hash %v", tx.Hash(), log.BlockHash, log.TxIndex, log.TxHash) } + return tx, nil +} + +// GetLogEmitterTxData requires that the tx's data is at least 4 bytes long +func GetLogEmitterTxData(ctx context.Context, client L1Interface, log types.Log) ([]byte, error) { + tx, err := GetLogTransaction(ctx, client, log) + if err != nil { + return nil, err + } if len(tx.Data()) < 4 { return nil, fmt.Errorf("log emitting transaction %v unexpectedly does not have enough data", tx.Hash()) } diff --git a/arbutil/wait_for_l1.go b/arbutil/wait_for_l1.go index a80502610b..180ce1c67e 100644 --- a/arbutil/wait_for_l1.go +++ b/arbutil/wait_for_l1.go @@ -14,6 +14,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/rpc" ) type L1Interface interface { @@ -26,6 +27,7 @@ type L1Interface interface { CallContractAtHash(ctx context.Context, msg ethereum.CallMsg, blockHash common.Hash) ([]byte, error) PendingCallContract(ctx context.Context, msg ethereum.CallMsg) ([]byte, error) ChainID(ctx context.Context) (*big.Int, error) + Client() rpc.ClientInterface } func SendTxAsCall(ctx context.Context, client L1Interface, tx *types.Transaction, from common.Address, blockNum *big.Int, unlimitedGas bool) ([]byte, error) { diff --git a/cmd/pruning/pruning.go b/cmd/pruning/pruning.go index 68d89302f0..da015ac52c 100644 --- a/cmd/pruning/pruning.go +++ b/cmd/pruning/pruning.go @@ -189,7 +189,7 @@ func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node return nil, fmt.Errorf("failed to get finalized block: %w", err) } l1BlockNum := l1Block.NumberU64() - tracker, err := arbnode.NewInboxTracker(arbDb, nil, nil) + tracker, err := arbnode.NewInboxTracker(arbDb, nil, nil, nil) if err != nil { return nil, err } diff --git a/cmd/replay/main.go b/cmd/replay/main.go index 2fb13ceed8..dd8a0fd1f7 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -17,6 +17,7 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" @@ -62,11 +63,12 @@ func (c WavmChainContext) GetHeader(hash common.Hash, num uint64) *types.Header type WavmInbox struct{} -func (i WavmInbox) PeekSequencerInbox() ([]byte, error) { +func (i WavmInbox) PeekSequencerInbox() ([]byte, common.Hash, error) { pos := wavmio.GetInboxPosition() res := wavmio.ReadInboxMessage(pos) log.Info("PeekSequencerInbox", "pos", pos, "res[:8]", res[:8]) - return res, nil + // Our BlobPreimageReader doesn't need the block hash + return res, common.Hash{}, nil } func (i WavmInbox) GetSequencerInboxPosition() uint64 { @@ -117,6 +119,30 @@ func (dasReader *PreimageDASReader) ExpirationPolicy(ctx context.Context) (arbst return arbstate.DiscardImmediately, nil } +type 
BlobPreimageReader struct { +} + +func (r *BlobPreimageReader) GetBlobs( + ctx context.Context, + batchBlockHash common.Hash, + versionedHashes []common.Hash, +) ([]kzg4844.Blob, error) { + var blobs []kzg4844.Blob + for _, h := range versionedHashes { + var blob kzg4844.Blob + preimage, err := wavmio.ResolveTypedPreimage(arbutil.EthVersionedHashPreimageType, h) + if err != nil { + return nil, err + } + if len(preimage) != len(blob) { + return nil, fmt.Errorf("for blob %v got back preimage of length %v but expected blob length %v", h, len(preimage), len(blob)) + } + copy(blob[:], preimage) + blobs = append(blobs, blob) + } + return blobs, nil +} + // To generate: // key, _ := crypto.HexToECDSA("0000000000000000000000000000000000000000000000000000000000000001") // sig, _ := crypto.Sign(make([]byte, 32), key) @@ -180,7 +206,7 @@ func main() { if backend.GetPositionWithinMessage() > 0 { keysetValidationMode = arbstate.KeysetDontValidate } - inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, dasReader, keysetValidationMode) + inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, dasReader, &BlobPreimageReader{}, keysetValidationMode) ctx := context.Background() message, err := inboxMultiplexer.Pop(ctx) if err != nil { diff --git a/contracts b/contracts index 154e95bf5e..9a6bfad236 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 154e95bf5e805b433af5265987e5752c05fee9b4 +Subproject commit 9a6bfad2363322099d399698751551ff044c7a72 diff --git a/deploy/deploy.go b/deploy/deploy.go index bd2f2ec329..59760e2c21 100644 --- a/deploy/deploy.go +++ b/deploy/deploy.go @@ -16,6 +16,7 @@ import ( "github.com/offchainlabs/nitro/solgen/go/ospgen" "github.com/offchainlabs/nitro/solgen/go/rollupgen" "github.com/offchainlabs/nitro/solgen/go/upgrade_executorgen" + "github.com/offchainlabs/nitro/solgen/go/yulgen" "github.com/offchainlabs/nitro/util/headerreader" ) @@ -40,7 +41,12 @@ func deployBridgeCreator(ctx context.Context, l1Reader *headerreader.HeaderReade return common.Address{}, fmt.Errorf("bridge deploy error: %w", err) } - seqInboxTemplate, tx, _, err := bridgegen.DeploySequencerInbox(auth, client, maxDataSize) + reader4844, tx, _, err := yulgen.DeployReader4844(auth, client) + err = andTxSucceeded(ctx, l1Reader, tx, err) + if err != nil { + return common.Address{}, fmt.Errorf("blob basefee reader deploy error: %w", err) + } + seqInboxTemplate, tx, _, err := bridgegen.DeploySequencerInbox(auth, client, maxDataSize, reader4844) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { return common.Address{}, fmt.Errorf("sequencer inbox deploy error: %w", err) diff --git a/execution/gethexec/block_recorder.go b/execution/gethexec/block_recorder.go index a0f6d837e4..d7e702f3c1 100644 --- a/execution/gethexec/block_recorder.go +++ b/execution/gethexec/block_recorder.go @@ -123,13 +123,14 @@ func (r *BlockRecorder) RecordBlockCreation( var readBatchInfo []validator.BatchInfo if msg != nil { batchFetcher := func(batchNum uint64) ([]byte, error) { - data, err := r.execEngine.streamer.FetchBatch(batchNum) + data, blockHash, err := r.execEngine.streamer.FetchBatch(batchNum) if err != nil { return nil, err } readBatchInfo = append(readBatchInfo, validator.BatchInfo{ - Number: batchNum, - Data: data, + Number: batchNum, + BlockHash: blockHash, + Data: data, }) return data, nil } diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index 58e91a197e..003159589a 100644 --- 
a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -41,6 +41,8 @@ type ExecutionEngine struct { nextScheduledVersionCheck time.Time // protected by the createBlocksMutex reorgSequencing bool + + prefetchBlock bool } func NewExecutionEngine(bc *core.BlockChain) (*ExecutionEngine, error) { @@ -71,6 +73,16 @@ func (s *ExecutionEngine) EnableReorgSequencing() { s.reorgSequencing = true } +func (s *ExecutionEngine) EnablePrefetchBlock() { + if s.Started() { + panic("trying to enable prefetch block after start") + } + if s.prefetchBlock { + panic("trying to enable prefetch block when already set") + } + s.prefetchBlock = true +} + func (s *ExecutionEngine) SetTransactionStreamer(streamer execution.TransactionStreamer) { if s.Started() { panic("trying to set transaction streamer after start") @@ -107,7 +119,11 @@ func (s *ExecutionEngine) Reorg(count arbutil.MessageIndex, newMessages []arbost return err } for i := range newMessages { - err := s.digestMessageWithBlockMutex(count+arbutil.MessageIndex(i), &newMessages[i]) + var msgForPrefetch *arbostypes.MessageWithMetadata + if i < len(newMessages)-1 { + msgForPrefetch = &newMessages[i] + } + err := s.digestMessageWithBlockMutex(count+arbutil.MessageIndex(i), &newMessages[i], msgForPrefetch) if err != nil { return err } @@ -449,7 +465,10 @@ func (s *ExecutionEngine) createBlockFromNextMessage(msg *arbostypes.MessageWith statedb, s.bc, s.bc.Config(), - s.streamer.FetchBatch, + func(batchNum uint64) ([]byte, error) { + data, _, err := s.streamer.FetchBatch(batchNum) + return data, err + }, ) return block, statedb, receipts, err @@ -486,15 +505,20 @@ func (s *ExecutionEngine) ResultAtPos(pos arbutil.MessageIndex) (*execution.Mess return s.resultFromHeader(s.bc.GetHeaderByNumber(s.MessageIndexToBlockNumber(pos))) } -func (s *ExecutionEngine) DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata) error { +// DigestMessage creates a block by executing msg against the latest state and stores it. +// In parallel, it also executes msgForPrefetch (msg+1) against the latest state, without storing the resulting block. +// This helps fill the caches, so that the next block creation is faster.
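+// msgForPrefetch may be nil, in which case no speculative block creation is attempted.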
+func (s *ExecutionEngine) DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) error { if !s.createBlocksMutex.TryLock() { return errors.New("createBlock mutex held") } defer s.createBlocksMutex.Unlock() - return s.digestMessageWithBlockMutex(num, msg) + return s.digestMessageWithBlockMutex(num, msg, msgForPrefetch) } -func (s *ExecutionEngine) digestMessageWithBlockMutex(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata) error { +func (s *ExecutionEngine) digestMessageWithBlockMutex(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) error { currentHeader, err := s.getCurrentHeader() if err != nil { return err @@ -508,11 +532,23 @@ func (s *ExecutionEngine) digestMessageWithBlockMutex(num arbutil.MessageIndex, } startTime := time.Now() + var wg sync.WaitGroup + if s.prefetchBlock && msgForPrefetch != nil { + wg.Add(1) + go func() { + defer wg.Done() + _, _, _, err := s.createBlockFromNextMessage(msgForPrefetch) + if err != nil { + return + } + }() + } + block, statedb, receipts, err := s.createBlockFromNextMessage(msg) if err != nil { return err } - + wg.Wait() err = s.appendBlock(block, statedb, receipts, time.Since(startTime)) if err != nil { return err diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go index 00337cc355..1ad73febe7 100644 --- a/execution/gethexec/node.go +++ b/execution/gethexec/node.go @@ -311,8 +311,8 @@ func (n *ExecutionNode) StopAndWait() { // } } -func (n *ExecutionNode) DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata) error { - return n.ExecEngine.DigestMessage(num, msg) +func (n *ExecutionNode) DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) error { + return n.ExecEngine.DigestMessage(num, msg, msgForPrefetch) } func (n *ExecutionNode) Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata, oldMessages []*arbostypes.MessageWithMetadata) error { return n.ExecEngine.Reorg(count, newMessages, oldMessages) diff --git a/execution/gethexec/sequencer.go b/execution/gethexec/sequencer.go index 5db38cbb4d..9bc6f4378d 100644 --- a/execution/gethexec/sequencer.go +++ b/execution/gethexec/sequencer.go @@ -66,6 +66,7 @@ type SequencerConfig struct { MaxTxDataSize int `koanf:"max-tx-data-size" reload:"hot"` NonceFailureCacheSize int `koanf:"nonce-failure-cache-size" reload:"hot"` NonceFailureCacheExpiry time.Duration `koanf:"nonce-failure-cache-expiry" reload:"hot"` + EnablePrefetchBlock bool `koanf:"enable-prefetch-block"` } func (c *SequencerConfig) Validate() error { @@ -97,6 +98,7 @@ var DefaultSequencerConfig = SequencerConfig{ MaxTxDataSize: 95000, NonceFailureCacheSize: 1024, NonceFailureCacheExpiry: time.Second, + EnablePrefetchBlock: false, } var TestSequencerConfig = SequencerConfig{ @@ -112,6 +114,7 @@ var TestSequencerConfig = SequencerConfig{ MaxTxDataSize: 95000, NonceFailureCacheSize: 1024, NonceFailureCacheExpiry: time.Second, + EnablePrefetchBlock: false, } func SequencerConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -127,6 +130,7 @@ func SequencerConfigAddOptions(prefix string, f *flag.FlagSet) { f.Int(prefix+".max-tx-data-size", DefaultSequencerConfig.MaxTxDataSize, "maximum transaction size the sequencer will accept") f.Int(prefix+".nonce-failure-cache-size", DefaultSequencerConfig.NonceFailureCacheSize, "number of transactions with too high of a nonce to keep in 
memory while waiting for their predecessor") f.Duration(prefix+".nonce-failure-cache-expiry", DefaultSequencerConfig.NonceFailureCacheExpiry, "maximum amount of time to wait for a predecessor before rejecting a tx with nonce too high") + f.Bool(prefix+".enable-prefetch-block", DefaultSequencerConfig.EnablePrefetchBlock, "enable prefetching of blocks") } type txQueueItem struct { @@ -324,6 +328,9 @@ func NewSequencer(execEngine *ExecutionEngine, l1Reader *headerreader.HeaderRead } s.Pause() execEngine.EnableReorgSequencing() + if config.EnablePrefetchBlock { + execEngine.EnablePrefetchBlock() + } return s, nil } diff --git a/execution/gethexec/tx_pre_checker.go b/execution/gethexec/tx_pre_checker.go index 51ba88fec8..cff8b04d32 100644 --- a/execution/gethexec/tx_pre_checker.go +++ b/execution/gethexec/tx_pre_checker.go @@ -116,6 +116,11 @@ func PreCheckTx(bc *core.BlockChain, chainConfig *params.ChainConfig, header *ty if tx.Gas() < params.TxGas { return core.ErrIntrinsicGas } + if tx.Type() >= types.ArbitrumDepositTxType || tx.Type() == types.BlobTxType { + // Should be unreachable for Arbitrum types due to UnmarshalBinary not accepting Arbitrum internal txs + // and we want to disallow BlobTxType since Arbitrum doesn't support EIP-4844 txs yet. + return types.ErrTxTypeNotSupported + } sender, err := types.Sender(types.MakeSigner(chainConfig, header.Number, header.Time), tx) if err != nil { return err diff --git a/execution/interface.go b/execution/interface.go index ef9409b9c1..6761011a77 100644 --- a/execution/interface.go +++ b/execution/interface.go @@ -28,7 +28,7 @@ var ErrSequencerInsertLockTaken = errors.New("insert lock taken") // always needed type ExecutionClient interface { - DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata) error + DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) error Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata, oldMessages []*arbostypes.MessageWithMetadata) error HeadMessageNumber() (arbutil.MessageIndex, error) HeadMessageNumberSync(t *testing.T) (arbutil.MessageIndex, error) @@ -73,7 +73,7 @@ type FullExecutionClient interface { // not implemented in execution, used as input type BatchFetcher interface { - FetchBatch(batchNum uint64) ([]byte, error) + FetchBatch(batchNum uint64) ([]byte, common.Hash, error) } type TransactionStreamer interface { diff --git a/go-ethereum b/go-ethereum index 5a266a5b68..1acd9c64ac 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 5a266a5b6853a9ddd76f25c3e02ac2265fef7996 +Subproject commit 1acd9c64ac5804729475ef60aa578b4ec52fa0e6 diff --git a/go.mod b/go.mod index 1d76b3997d..2d689f4891 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.10 github.com/aws/aws-sdk-go-v2/service/s3 v1.26.9 github.com/cavaliergopher/grab/v3 v3.0.1 - github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 + github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 github.com/codeclysm/extract/v3 v3.0.2 github.com/dgraph-io/badger/v3 v3.2103.2 github.com/enescakir/emoji v1.0.0 @@ -43,7 +43,7 @@ require ( golang.org/x/crypto v0.17.0 golang.org/x/sys v0.15.0 golang.org/x/term v0.15.0 - golang.org/x/tools v0.9.1 + golang.org/x/tools v0.13.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) @@ -83,13 +83,14 @@ require ( github.com/cockroachdb/errors v1.9.1 // indirect github.com/cockroachdb/logtags 
v0.0.0-20230118201751-21c54148d20b // indirect github.com/cockroachdb/redact v1.1.3 // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/consensys/bavard v0.1.13 // indirect github.com/consensys/gnark-crypto v0.12.1 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect - github.com/crate-crypto/go-kzg-4844 v0.3.0 // indirect + github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect github.com/cskr/pubsub v1.0.2 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect github.com/deckarep/golang-set/v2 v2.1.0 // indirect @@ -102,7 +103,7 @@ require ( github.com/dustin/go-humanize v1.0.0 // indirect github.com/elastic/gosigar v0.14.2 // indirect github.com/emirpasic/gods v1.18.1 // indirect - github.com/ethereum/c-kzg-4844 v0.3.1 // indirect + github.com/ethereum/c-kzg-4844 v0.4.0 // indirect github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 // indirect github.com/flynn/noise v1.0.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect @@ -252,7 +253,7 @@ require ( github.com/samber/lo v1.36.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/supranational/blst v0.3.11 // indirect - github.com/urfave/cli/v2 v2.24.1 // indirect + github.com/urfave/cli/v2 v2.25.7 // indirect github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc // indirect @@ -280,8 +281,8 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.24.0 // indirect go4.org v0.0.0-20200411211856-f5505b9728dd // indirect - golang.org/x/exp v0.0.0-20230810033253-352e893a4cad // indirect - golang.org/x/mod v0.11.0 // indirect + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect + golang.org/x/mod v0.12.0 // indirect golang.org/x/net v0.17.0 // indirect golang.org/x/sync v0.3.0 // indirect golang.org/x/text v0.14.0 // indirect diff --git a/go.sum b/go.sum index 872afcafbf..a2b483c100 100644 --- a/go.sum +++ b/go.sum @@ -45,7 +45,7 @@ github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOv github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno= github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo= @@ -226,16 +226,18 @@ github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= 
github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= -github.com/cockroachdb/datadriven v1.0.3-0.20230801171734-e384cf455877 h1:1MLK4YpFtIEo3ZtMA5C795Wtv5VuUnrXX7mQG+aHg6o= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8= github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk= github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 h1:T+Np/xtzIjYM/P5NAw0e2Rf1FGvzDau1h54MKvx8G7w= -github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06/go.mod h1:bynZ3gvVyhlvjLI7PT6dmZ7g76xzJ7HpxfjgkzCGz6s= +github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 h1:aPEJyR4rPBvDmeyi+l/FS/VtA00IWvjeFvjen1m1l1A= +github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593/go.mod h1:6hk1eMY/u5t+Cf18q5lFMUA1Rc+Sm5I6Ra1QuPyxXCo= github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/codeclysm/extract/v3 v3.0.2 h1:sB4LcE3Php7LkhZwN0n2p8GCwZe92PEQutdbGURf5xc= github.com/codeclysm/extract/v3 v3.0.2/go.mod h1:NKsw+hqua9H+Rlwy/w/3Qgt9jDonYEgB6wJu+25eOKw= @@ -264,8 +266,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHH github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg= github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= -github.com/crate-crypto/go-kzg-4844 v0.3.0 h1:UBlWE0CgyFqqzTI+IFyCzA7A3Zw4iip6uzRv5NIXG0A= -github.com/crate-crypto/go-kzg-4844 v0.3.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4= +github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA= +github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= @@ -338,8 +340,8 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go. 
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= -github.com/ethereum/c-kzg-4844 v0.3.1 h1:sR65+68+WdnMKxseNWxSJuAv2tsUrihTpVBTfM/U5Zg= -github.com/ethereum/c-kzg-4844 v0.3.1/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= +github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY= +github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 h1:BBso6MBKW8ncyZLv37o+KNyy0HrrHgfnOaGQC2qvN+A= github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:JpoxHjuQauoxiFMl1ie8Xc/7TfLuMZ5eOCONd1sUBHg= github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= @@ -1621,8 +1623,8 @@ github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= -github.com/urfave/cli/v2 v2.24.1 h1:/QYYr7g0EhwXEML8jO+8OYt5trPnLHS0p3mrgExJ5NU= -github.com/urfave/cli/v2 v2.24.1/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc= +github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= +github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= @@ -1803,8 +1805,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230810033253-352e893a4cad h1:g0bG7Z4uG+OgH2QDODnjp6ggkk1bJDsINcuWmJN1iJU= -golang.org/x/exp v0.0.0-20230810033253-352e893a4cad/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1829,8 +1831,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod 
h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= -golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180406214816-61147c48b25b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2102,8 +2104,8 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= -golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= +golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/linter/koanf/handlers.go b/linters/koanf/handlers.go similarity index 99% rename from linter/koanf/handlers.go rename to linters/koanf/handlers.go index 5826004014..5ee3b80f9f 100644 --- a/linter/koanf/handlers.go +++ b/linters/koanf/handlers.go @@ -1,4 +1,4 @@ -package main +package koanf import ( "fmt" diff --git a/linter/koanf/koanf.go b/linters/koanf/koanf.go similarity index 92% rename from linter/koanf/koanf.go rename to linters/koanf/koanf.go index d6780760e7..e53064b6b3 100644 --- a/linter/koanf/koanf.go +++ b/linters/koanf/koanf.go @@ -1,4 +1,4 @@ -package main +package koanf import ( "errors" @@ -8,7 +8,6 @@ import ( "reflect" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/singlechecker" ) var ( @@ -18,10 +17,6 @@ var ( errIncorrectFlag = errors.New("mismatching flag initialization") ) -func New(conf any) ([]*analysis.Analyzer, error) { - return []*analysis.Analyzer{Analyzer}, nil -} - var Analyzer = &analysis.Analyzer{ Name: "koanfcheck", Doc: "check for koanf misconfigurations", @@ -101,7 +96,3 @@ func run(dryRun bool, pass *analysis.Pass) (interface{}, error) { } return ret, nil } - -func main() { - singlechecker.Main(Analyzer) -} diff --git a/linter/koanf/koanf_test.go b/linters/koanf/koanf_test.go similarity index 95% rename from linter/koanf/koanf_test.go rename to linters/koanf/koanf_test.go index 064ae533c4..9029951dfa 100644 --- a/linter/koanf/koanf_test.go +++ b/linters/koanf/koanf_test.go @@ -1,4 +1,4 @@ -package main +package koanf import ( "errors" @@ -20,7 +20,7 @@ func testData(t *testing.T) string { t.Helper() wd, err := os.Getwd() if err != nil { - t.Fatalf("Failed to get wd: %s", err) + t.Fatalf("Failed to get working directory: %v", err) } return filepath.Join(filepath.Dir(wd), "testdata") } diff --git a/linters/linters.go b/linters/linters.go new file mode 100644 index 
0000000000..a6c9f6d55e --- /dev/null +++ b/linters/linters.go @@ -0,0 +1,18 @@ +package main + +import ( + "github.com/offchainlabs/nitro/linters/koanf" + "github.com/offchainlabs/nitro/linters/pointercheck" + "github.com/offchainlabs/nitro/linters/rightshift" + "github.com/offchainlabs/nitro/linters/structinit" + "golang.org/x/tools/go/analysis/multichecker" +) + +func main() { + multichecker.Main( + koanf.Analyzer, + pointercheck.Analyzer, + rightshift.Analyzer, + structinit.Analyzer, + ) +} diff --git a/linter/pointercheck/pointer.go b/linters/pointercheck/pointercheck.go similarity index 91% rename from linter/pointercheck/pointer.go rename to linters/pointercheck/pointercheck.go index 6500b01222..682ebd9357 100644 --- a/linter/pointercheck/pointer.go +++ b/linters/pointercheck/pointercheck.go @@ -1,4 +1,4 @@ -package main +package pointercheck import ( "fmt" @@ -8,13 +8,8 @@ import ( "reflect" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/singlechecker" ) -func New(conf any) ([]*analysis.Analyzer, error) { - return []*analysis.Analyzer{Analyzer}, nil -} - var Analyzer = &analysis.Analyzer{ Name: "pointercheck", Doc: "check for pointer comparison", @@ -94,7 +89,3 @@ func ptrIdent(pass *analysis.Pass, e ast.Expr) bool { } return false } - -func main() { - singlechecker.Main(Analyzer) -} diff --git a/linter/pointercheck/pointer_test.go b/linters/pointercheck/pointercheck_test.go similarity index 88% rename from linter/pointercheck/pointer_test.go rename to linters/pointercheck/pointercheck_test.go index 290e3826de..24f4534bca 100644 --- a/linter/pointercheck/pointer_test.go +++ b/linters/pointercheck/pointercheck_test.go @@ -1,4 +1,4 @@ -package main +package pointercheck import ( "os" @@ -11,7 +11,7 @@ import ( func TestAll(t *testing.T) { wd, err := os.Getwd() if err != nil { - t.Fatalf("Failed to get wd: %s", err) + t.Fatalf("Failed to get working directory: %v", err) } testdata := filepath.Join(filepath.Dir(wd), "testdata") res := analysistest.Run(t, testdata, analyzerForTests, "pointercheck") diff --git a/linters/rightshift/rightshift.go b/linters/rightshift/rightshift.go new file mode 100644 index 0000000000..d6fcbfec6c --- /dev/null +++ b/linters/rightshift/rightshift.go @@ -0,0 +1,71 @@ +package rightshift + +import ( + "go/ast" + "go/token" + "reflect" + + "golang.org/x/tools/go/analysis" +) + +var Analyzer = &analysis.Analyzer{ + Name: "rightshift", + Doc: "check for 1 >> x operation", + Run: func(p *analysis.Pass) (interface{}, error) { return run(false, p) }, + ResultType: reflect.TypeOf(Result{}), +} + +var analyzerForTests = &analysis.Analyzer{ + Name: "testrightshift", + Doc: "check for 1 >> x operation (for tests)", + Run: func(p *analysis.Pass) (interface{}, error) { return run(true, p) }, + ResultType: reflect.TypeOf(Result{}), +} + +// rightShiftError indicates the position of a rightshift ('1 >> x') operation. +type rightShiftError struct { + Pos token.Position + Message string +} + +// Result is returned from the run function, and holds all rightshift +// operations. +type Result struct { + Errors []rightShiftError +} + +func run(dryRun bool, pass *analysis.Pass) (interface{}, error) { + var ret Result + for _, f := range pass.Files { + ast.Inspect(f, func(node ast.Node) bool { + be, ok := node.(*ast.BinaryExpr) + if !ok { + return true + } + // Check if the expression is '1 >> x'. 
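+			// token.SHR is Go's '>>' operator; only expressions whose left operand is the literal 1 are flagged.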
+ if be.Op == token.SHR && isOne(be.X) { + err := rightShiftError{ + Pos: pass.Fset.Position(be.Pos()), + Message: "found rightshift ('1 >> x') expression, did you mean '1 << x' ?", + } + ret.Errors = append(ret.Errors, err) + if !dryRun { + pass.Report(analysis.Diagnostic{ + Pos: pass.Fset.File(f.Pos()).Pos(err.Pos.Offset), + Message: err.Message, + Category: "rightshift", + }) + } + } + return true + }, + ) + } + return ret, nil +} + +// isOne checks if the expression is a constant 1. +func isOne(expr ast.Expr) bool { + bl, ok := expr.(*ast.BasicLit) + return ok && bl.Kind == token.INT && bl.Value == "1" +} diff --git a/linters/rightshift/rightshift_test.go b/linters/rightshift/rightshift_test.go new file mode 100644 index 0000000000..3640d79975 --- /dev/null +++ b/linters/rightshift/rightshift_test.go @@ -0,0 +1,36 @@ +package rightshift + +import ( + "os" + "path/filepath" + "testing" + + "github.com/google/go-cmp/cmp" + "golang.org/x/tools/go/analysis/analysistest" +) + +func TestAll(t *testing.T) { + wd, err := os.Getwd() + if err != nil { + t.Fatalf("Failed to get working directory: %v", err) + } + testdata := filepath.Join(filepath.Dir(wd), "testdata") + res := analysistest.Run(t, testdata, analyzerForTests, "rightshift") + want := []int{6, 11, 12} + got := errorLines(res) + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("analysistest.Run() unexpected diff in error lines:\n%s\n", diff) + } +} + +func errorLines(errs []*analysistest.Result) []int { + var ret []int + for _, e := range errs { + if r, ok := e.Result.(Result); ok { + for _, err := range r.Errors { + ret = append(ret, err.Pos.Line) + } + } + } + return ret +} diff --git a/linter/structinit/structinit.go b/linters/structinit/structinit.go similarity index 93% rename from linter/structinit/structinit.go rename to linters/structinit/structinit.go index e4e65bc3fc..236b8747b2 100644 --- a/linter/structinit/structinit.go +++ b/linters/structinit/structinit.go @@ -1,4 +1,4 @@ -package main +package structinit import ( "fmt" @@ -8,7 +8,6 @@ import ( "strings" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/singlechecker" ) // Tip for linter that struct that has this comment should be included in the @@ -16,10 +15,6 @@ import ( // Note: comment should be directly line above the struct definition. const linterTip = "// lint:require-exhaustive-initialization" -func New(conf any) ([]*analysis.Analyzer, error) { - return []*analysis.Analyzer{Analyzer}, nil -} - // Analyzer implements struct analyzer for structs that are annotated with // `linterTip`; it checks that every instantiation initializes all the fields. 
var Analyzer = &analysis.Analyzer{ @@ -116,7 +111,3 @@ type position struct { fileName string line int } - -func main() { - singlechecker.Main(Analyzer) -} diff --git a/linter/structinit/structinit_test.go b/linters/structinit/structinit_test.go similarity index 89% rename from linter/structinit/structinit_test.go rename to linters/structinit/structinit_test.go index db3676e185..57dfc2b000 100644 --- a/linter/structinit/structinit_test.go +++ b/linters/structinit/structinit_test.go @@ -1,4 +1,4 @@ -package main +package structinit import ( "os" @@ -12,7 +12,7 @@ func testData(t *testing.T) string { t.Helper() wd, err := os.Getwd() if err != nil { - t.Fatalf("Failed to get wd: %s", err) + t.Fatalf("Failed to get working directory: %v", err) } return filepath.Join(filepath.Dir(wd), "testdata") } diff --git a/linter/testdata/src/koanf/a/a.go b/linters/testdata/src/koanf/a/a.go similarity index 100% rename from linter/testdata/src/koanf/a/a.go rename to linters/testdata/src/koanf/a/a.go diff --git a/linter/testdata/src/koanf/b/b.go b/linters/testdata/src/koanf/b/b.go similarity index 100% rename from linter/testdata/src/koanf/b/b.go rename to linters/testdata/src/koanf/b/b.go diff --git a/linter/testdata/src/pointercheck/pointercheck.go b/linters/testdata/src/pointercheck/pointercheck.go similarity index 100% rename from linter/testdata/src/pointercheck/pointercheck.go rename to linters/testdata/src/pointercheck/pointercheck.go diff --git a/linters/testdata/src/rightshift/rightshift.go b/linters/testdata/src/rightshift/rightshift.go new file mode 100644 index 0000000000..3ad6d95980 --- /dev/null +++ b/linters/testdata/src/rightshift/rightshift.go @@ -0,0 +1,14 @@ +package rightshift + +import "fmt" + +func doThing(v int) int { + return 1 >> v // Error: Ln: 6 +} + +func calc() { + val := 10 + fmt.Printf("%v", 1>>val) // Error: Ln 11 + _ = doThing(1 >> val) // Error: Ln 12 + fmt.Printf("%v", 1<= version { + return 0, 0, nil + } + return version, timestamp, nil +} diff --git a/precompiles/precompile.go b/precompiles/precompile.go index 330eb9a2e4..5d2ecce745 100644 --- a/precompiles/precompile.go +++ b/precompiles/precompile.go @@ -538,6 +538,11 @@ func Precompiles() map[addr]ArbosPrecompile { ArbGasInfo.methodsByName["GetL1FeesAvailable"].arbosVersion = 10 ArbGasInfo.methodsByName["GetL1RewardRate"].arbosVersion = 11 ArbGasInfo.methodsByName["GetL1RewardRecipient"].arbosVersion = 11 + ArbGasInfo.methodsByName["GetL1PricingEquilibrationUnits"].arbosVersion = 20 + ArbGasInfo.methodsByName["GetLastL1PricingUpdateTime"].arbosVersion = 20 + ArbGasInfo.methodsByName["GetL1PricingFundsDueForRewards"].arbosVersion = 20 + ArbGasInfo.methodsByName["GetL1PricingUnitsSinceUpdate"].arbosVersion = 20 + ArbGasInfo.methodsByName["GetLastL1PricingSurplus"].arbosVersion = 20 insert(MakePrecompile(templates.ArbAggregatorMetaData, &ArbAggregator{Address: hex("6d")})) insert(MakePrecompile(templates.ArbStatisticsMetaData, &ArbStatistics{Address: hex("6f")})) diff --git a/precompiles/precompile_test.go b/precompiles/precompile_test.go index b19b2d4353..02d962f0b4 100644 --- a/precompiles/precompile_test.go +++ b/precompiles/precompile_test.go @@ -4,7 +4,6 @@ package precompiles import ( - "bytes" "math/big" "testing" @@ -35,16 +34,14 @@ func TestEvents(t *testing.T) { } } - zeroHash := crypto.Keccak256([]byte{0x00}) - trueHash := common.Hash{}.Bytes() - falseHash := common.Hash{}.Bytes() - trueHash[31] = 0x01 + zeroHash := crypto.Keccak256Hash([]byte{0x00}) + falseHash := common.Hash{} var data []byte payload := 
diff --git a/precompiles/precompile_test.go b/precompiles/precompile_test.go
index b19b2d4353..02d962f0b4 100644
--- a/precompiles/precompile_test.go
+++ b/precompiles/precompile_test.go
@@ -4,7 +4,6 @@
 package precompiles
 
 import (
- "bytes"
 "math/big"
 "testing"
@@ -35,16 +34,14 @@ func TestEvents(t *testing.T) {
 }
 }
 
- zeroHash := crypto.Keccak256([]byte{0x00})
- trueHash := common.Hash{}.Bytes()
- falseHash := common.Hash{}.Bytes()
- trueHash[31] = 0x01
+ zeroHash := crypto.Keccak256Hash([]byte{0x00})
+ falseHash := common.Hash{}
 
 var data []byte
 payload := [][]byte{
 method.template.ID, // select the `Events` method
- falseHash, // set the flag to false
- zeroHash, // set the value to something known
+ falseHash.Bytes(), // set the flag to false
+ zeroHash.Bytes(), // set the value to something known
 }
 for _, bytes := range payload {
 data = append(data, bytes...)
 }
@@ -100,13 +97,13 @@ func TestEvents(t *testing.T) {
 basicTopics := logs[0].Topics
 mixedTopics := logs[1].Topics
 
- if !bytes.Equal(basicTopics[1].Bytes(), zeroHash) || !bytes.Equal(mixedTopics[2].Bytes(), zeroHash) {
+ if basicTopics[1] != zeroHash || mixedTopics[2] != zeroHash {
 Fail(t, "indexing a bytes32 didn't work")
 }
- if !bytes.Equal(mixedTopics[1].Bytes(), falseHash) {
+ if mixedTopics[1] != falseHash {
 Fail(t, "indexing a bool didn't work")
 }
- if !bytes.Equal(mixedTopics[3].Bytes(), caller.Hash().Bytes()) {
+ if mixedTopics[3] != common.BytesToHash(caller.Bytes()) {
 Fail(t, "indexing an address didn't work")
 }
@@ -117,10 +114,10 @@ func TestEvents(t *testing.T) {
 Fail(t, "failed to parse event logs", "\nprecompile:", cerr, "\nbasic:", berr, "\nmixed:", merr)
 }
 
- if basic.Flag != true || !bytes.Equal(basic.Value[:], zeroHash) {
+ if basic.Flag != true || basic.Value != zeroHash {
 Fail(t, "event Basic's data isn't correct")
 }
- if mixed.Flag != false || mixed.Not != true || !bytes.Equal(mixed.Value[:], zeroHash) {
+ if mixed.Flag != false || mixed.Not != true || mixed.Value != zeroHash {
 Fail(t, "event Mixed's data isn't correct")
 }
 if mixed.Conn != debugContractAddr || mixed.Caller != caller {
diff --git a/solgen/gen.go b/solgen/gen.go
index 5d43946fa5..770fa08571 100644
--- a/solgen/gen.go
+++ b/solgen/gen.go
@@ -23,6 +23,15 @@ type HardHatArtifact struct {
 Bytecode string `json:"bytecode"`
 }
 
+type FoundryBytecode struct {
+ Object string `json:"object"`
+}
+
+type FoundryArtifact struct {
+ Abi []interface{} `json:"abi"`
+ Bytecode FoundryBytecode `json:"bytecode"`
+}
+
 type moduleInfo struct {
 contractNames []string
 abis []string
@@ -96,6 +105,35 @@ func main() {
 modInfo.addArtifact(artifact)
 }
 
+ yulFilePaths, err := filepath.Glob(filepath.Join(parent, "contracts", "out", "yul", "*", "*.json"))
+ if err != nil {
+ log.Fatal(err)
+ }
+ yulModInfo := modules["yulgen"]
+ if yulModInfo == nil {
+ yulModInfo = &moduleInfo{}
+ modules["yulgen"] = yulModInfo
+ }
+ for _, path := range yulFilePaths {
+ _, file := filepath.Split(path)
+ name := file[:len(file)-5]
+
+ data, err := os.ReadFile(path)
+ if err != nil {
+ log.Fatal("could not read", path, "for contract", name, err)
+ }
+
+ artifact := FoundryArtifact{}
+ if err := json.Unmarshal(data, &artifact); err != nil {
+ log.Fatal("failed to parse contract", name, err)
+ }
+ yulModInfo.addArtifact(HardHatArtifact{
+ ContractName: name,
+ Abi: artifact.Abi,
+ Bytecode: artifact.Bytecode.Object,
+ })
+ }
+
 // add upgrade executor module which is not compiled locally, but imported from 'nitro-contracts' dependencies
 upgExecutorPath := filepath.Join(parent, "contracts", "node_modules", "@offchainlabs", "upgrade-executor", "build", "contracts", "src", "UpgradeExecutor.sol", "UpgradeExecutor.json")
 _, err = os.Stat(upgExecutorPath)
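For reference, Foundry artifacts carry the ABI alongside a nested bytecode object, which is why the loop above re-wraps each `FoundryArtifact` as a `HardHatArtifact` before handing it to the existing generator. A self-contained sketch of that adaptation (the sample JSON is illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type FoundryBytecode struct {
	Object string `json:"object"`
}

type FoundryArtifact struct {
	Abi      []interface{}   `json:"abi"`
	Bytecode FoundryBytecode `json:"bytecode"`
}

func main() {
	// Shaped like contracts/out/yul/<name>/<name>.json after `yarn build:forge:yul`.
	raw := []byte(`{"abi":[],"bytecode":{"object":"0x60806040"}}`)
	var artifact FoundryArtifact
	if err := json.Unmarshal(raw, &artifact); err != nil {
		panic(err)
	}
	fmt.Println("bytecode:", artifact.Bytecode.Object) // bytecode: 0x60806040
}
```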
diff --git a/staker/block_validator.go b/staker/block_validator.go
index 352335a5db..fad5e9c39a 100644
--- a/staker/block_validator.go
+++ b/staker/block_validator.go
@@ -45,11 +45,12 @@ type BlockValidator struct {
 chainCaughtUp bool
 
 // can only be accessed from creation thread or if holding reorg-write
- nextCreateBatch []byte
- nextCreateBatchMsgCount arbutil.MessageIndex
- nextCreateBatchReread bool
- nextCreateStartGS validator.GoGlobalState
- nextCreatePrevDelayed uint64
+ nextCreateBatch []byte
+ nextCreateBatchBlockHash common.Hash
+ nextCreateBatchMsgCount arbutil.MessageIndex
+ nextCreateBatchReread bool
+ nextCreateStartGS validator.GoGlobalState
+ nextCreatePrevDelayed uint64
 
 // can only be accessed from validation thread or if holding reorg-write
 lastValidGS validator.GoGlobalState
@@ -455,23 +456,23 @@ func (v *BlockValidator) SetCurrentWasmModuleRoot(hash common.Hash) error {
 )
 }
 
-func (v *BlockValidator) readBatch(ctx context.Context, batchNum uint64) (bool, []byte, arbutil.MessageIndex, error) {
+func (v *BlockValidator) readBatch(ctx context.Context, batchNum uint64) (bool, []byte, common.Hash, arbutil.MessageIndex, error) {
 batchCount, err := v.inboxTracker.GetBatchCount()
 if err != nil {
- return false, nil, 0, err
+ return false, nil, common.Hash{}, 0, err
 }
 if batchCount <= batchNum {
- return false, nil, 0, nil
+ return false, nil, common.Hash{}, 0, nil
 }
 batchMsgCount, err := v.inboxTracker.GetBatchMessageCount(batchNum)
 if err != nil {
- return false, nil, 0, err
+ return false, nil, common.Hash{}, 0, err
 }
- batch, err := v.inboxReader.GetSequencerMessageBytes(ctx, batchNum)
+ batch, batchBlockHash, err := v.inboxReader.GetSequencerMessageBytes(ctx, batchNum)
 if err != nil {
- return false, nil, 0, err
+ return false, nil, common.Hash{}, 0, err
 }
- return true, batch, batchMsgCount, nil
+ return true, batch, batchBlockHash, batchMsgCount, nil
 }
 
 func (v *BlockValidator) createNextValidationEntry(ctx context.Context) (bool, error) {
@@ -500,11 +501,12 @@ func (v *BlockValidator) createNextValidationEntry(ctx context.Context) (bool, e
 }
 if v.nextCreateStartGS.PosInBatch == 0 || v.nextCreateBatchReread {
 // new batch
- found, batch, count, err := v.readBatch(ctx, v.nextCreateStartGS.Batch)
+ found, batch, batchBlockHash, count, err := v.readBatch(ctx, v.nextCreateStartGS.Batch)
 if !found {
 return false, err
 }
 v.nextCreateBatch = batch
+ v.nextCreateBatchBlockHash = batchBlockHash
 v.nextCreateBatchMsgCount = count
 validatorMsgCountCurrentBatch.Update(int64(count))
 v.nextCreateBatchReread = false
@@ -522,7 +524,7 @@
 } else {
 return false, fmt.Errorf("illegal batch msg count %d pos %d batch %d", v.nextCreateBatchMsgCount, pos, endGS.Batch)
 }
- entry, err := newValidationEntry(pos, v.nextCreateStartGS, endGS, msg, v.nextCreateBatch, v.nextCreatePrevDelayed)
+ entry, err := newValidationEntry(pos, v.nextCreateStartGS, endGS, msg, v.nextCreateBatch, v.nextCreateBatchBlockHash, v.nextCreatePrevDelayed)
 if err != nil {
 return false, err
 }
diff --git a/staker/rollup_watcher.go b/staker/rollup_watcher.go
index c9fa3b132d..118ce15b44 100644
--- a/staker/rollup_watcher.go
+++ b/staker/rollup_watcher.go
@@ -176,16 +176,18 @@ func (r *RollupWatcher) LookupNodeChildren(ctx context.Context, nodeNum uint64,
 if node.NodeHash != nodeHash {
 return nil, fmt.Errorf("got unexpected node hash %v looking for node number %v with expected hash %v (reorg?)", node.NodeHash, nodeNum, nodeHash)
 }
- latestChild, err := r.RollupUserLogic.GetNode(r.getCallOpts(ctx), node.LatestChildNumber)
- if err != nil {
- return nil, err
- }
 var query = ethereum.FilterQuery{
- FromBlock: new(big.Int).SetUint64(node.CreatedAtBlock),
- ToBlock: new(big.Int).SetUint64(latestChild.CreatedAtBlock),
 Addresses: []common.Address{r.address},
 Topics: [][]common.Hash{{nodeCreatedID}, nil, {nodeHash}},
 }
+ query.FromBlock, err =
r.getNodeCreationBlock(ctx, nodeNum) + if err != nil { + return nil, err + } + query.ToBlock, err = r.getNodeCreationBlock(ctx, node.LatestChildNumber) + if err != nil { + return nil, err + } logs, err := r.client.FilterLogs(ctx, query) if err != nil { return nil, err diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index acd86f8627..13b16e42cd 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -39,6 +39,7 @@ type StatelessBlockValidator struct { streamer TransactionStreamerInterface db ethdb.Database daService arbstate.DataAvailabilityReader + blobReader arbstate.BlobReader moduleMutex sync.Mutex currentWasmModuleRoot common.Hash @@ -67,7 +68,7 @@ type TransactionStreamerInterface interface { } type InboxReaderInterface interface { - GetSequencerMessageBytes(ctx context.Context, seqNum uint64) ([]byte, error) + GetSequencerMessageBytes(ctx context.Context, seqNum uint64) ([]byte, common.Hash, error) } type GlobalStatePosition struct { @@ -186,11 +187,13 @@ func newValidationEntry( end validator.GoGlobalState, msg *arbostypes.MessageWithMetadata, batch []byte, + batchBlockHash common.Hash, prevDelayed uint64, ) (*validationEntry, error) { batchInfo := validator.BatchInfo{ - Number: start.Batch, - Data: batch, + Number: start.Batch, + BlockHash: batchBlockHash, + Data: batch, } hasDelayed := false var delayedNum uint64 @@ -219,6 +222,7 @@ func NewStatelessBlockValidator( recorder execution.ExecutionRecorder, arbdb ethdb.Database, das arbstate.DataAvailabilityReader, + blobReader arbstate.BlobReader, config func() *BlockValidatorConfig, stack *node.Node, ) (*StatelessBlockValidator, error) { @@ -235,6 +239,7 @@ func NewStatelessBlockValidator( streamer: streamer, db: arbdb, daService: das, + blobReader: blobReader, } return validator, nil } @@ -284,17 +289,36 @@ func (v *StatelessBlockValidator) ValidationEntryRecord(ctx context.Context, e * if len(batch.Data) <= 40 { continue } - if !arbstate.IsDASMessageHeaderByte(batch.Data[40]) { - continue - } - if v.daService == nil { - log.Warn("No DAS configured, but sequencer message found with DAS header") - } else { - _, err := arbstate.RecoverPayloadFromDasBatch( - ctx, batch.Number, batch.Data, v.daService, e.Preimages, arbstate.KeysetValidate, - ) + if arbstate.IsBlobHashesHeaderByte(batch.Data[40]) { + payload := batch.Data[41:] + if len(payload)%len(common.Hash{}) != 0 { + return fmt.Errorf("blob batch data is not a list of hashes as expected") + } + versionedHashes := make([]common.Hash, len(payload)/len(common.Hash{})) + for i := 0; i*32 < len(payload); i += 1 { + copy(versionedHashes[i][:], payload[i*32:(i+1)*32]) + } + blobs, err := v.blobReader.GetBlobs(ctx, batch.BlockHash, versionedHashes) if err != nil { - return err + return fmt.Errorf("failed to get blobs: %w", err) + } + if e.Preimages[arbutil.EthVersionedHashPreimageType] == nil { + e.Preimages[arbutil.EthVersionedHashPreimageType] = make(map[common.Hash][]byte) + } + for i, blob := range blobs { + e.Preimages[arbutil.EthVersionedHashPreimageType][versionedHashes[i]] = blob[:] + } + } + if arbstate.IsDASMessageHeaderByte(batch.Data[40]) { + if v.daService == nil { + log.Warn("No DAS configured, but sequencer message found with DAS header") + } else { + _, err := arbstate.RecoverPayloadFromDasBatch( + ctx, batch.Number, batch.Data, v.daService, e.Preimages, arbstate.KeysetValidate, + ) + if err != nil { + return err + } } } } @@ -359,11 +383,11 @@ func (v *StatelessBlockValidator) 
CreateReadyValidationEntry(ctx context.Context } start := buildGlobalState(*prevResult, startPos) end := buildGlobalState(*result, endPos) - seqMsg, err := v.inboxReader.GetSequencerMessageBytes(ctx, startPos.BatchNumber) + seqMsg, batchBlockHash, err := v.inboxReader.GetSequencerMessageBytes(ctx, startPos.BatchNumber) if err != nil { return nil, err } - entry, err := newValidationEntry(pos, start, end, msg, seqMsg, prevDelayed) + entry, err := newValidationEntry(pos, start, end, msg, seqMsg, batchBlockHash, prevDelayed) if err != nil { return nil, err } diff --git a/staker/validatorwallet/contract.go b/staker/validatorwallet/contract.go index 774e9ab407..0ef190e703 100644 --- a/staker/validatorwallet/contract.go +++ b/staker/validatorwallet/contract.go @@ -10,7 +10,6 @@ import ( "math/big" "strings" "sync/atomic" - "time" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" @@ -177,7 +176,7 @@ func (v *Contract) executeTransaction(ctx context.Context, tx *types.Transaction if err != nil { return nil, fmt.Errorf("getting gas for tx data: %w", err) } - return v.dataPoster.PostTransaction(ctx, time.Now(), auth.Nonce.Uint64(), nil, *v.Address(), data, gas, auth.Value, nil) + return v.dataPoster.PostSimpleTransaction(ctx, auth.Nonce.Uint64(), *v.Address(), data, gas, auth.Value) } func (v *Contract) populateWallet(ctx context.Context, createIfMissing bool) error { @@ -288,7 +287,7 @@ func (v *Contract) ExecuteTransactions(ctx context.Context, builder *txbuilder.B if err != nil { return nil, fmt.Errorf("getting gas for tx data: %w", err) } - arbTx, err := v.dataPoster.PostTransaction(ctx, time.Now(), auth.Nonce.Uint64(), nil, *v.Address(), txData, gas, auth.Value, nil) + arbTx, err := v.dataPoster.PostSimpleTransaction(ctx, auth.Nonce.Uint64(), *v.Address(), txData, gas, auth.Value) if err != nil { return nil, err } @@ -338,7 +337,7 @@ func (v *Contract) TimeoutChallenges(ctx context.Context, challenges []uint64) ( if err != nil { return nil, fmt.Errorf("getting gas for tx data: %w", err) } - return v.dataPoster.PostTransaction(ctx, time.Now(), auth.Nonce.Uint64(), nil, *v.Address(), data, gas, auth.Value, nil) + return v.dataPoster.PostSimpleTransaction(ctx, auth.Nonce.Uint64(), *v.Address(), data, gas, auth.Value) } // gasForTxData returns auth.GasLimit if it's nonzero, otherwise returns estimate. 
@@ -420,7 +419,7 @@ func GetValidatorWalletContract( FromBlock: big.NewInt(fromBlock), ToBlock: nil, Addresses: []common.Address{validatorWalletFactoryAddr}, - Topics: [][]common.Hash{{walletCreatedID}, nil, {transactAuth.From.Hash()}}, + Topics: [][]common.Hash{{walletCreatedID}, nil, {common.BytesToHash(transactAuth.From.Bytes())}}, } logs, err := client.FilterLogs(ctx, query) if err != nil { diff --git a/staker/validatorwallet/eoa.go b/staker/validatorwallet/eoa.go index 44af5e2b60..3ae305b36c 100644 --- a/staker/validatorwallet/eoa.go +++ b/staker/validatorwallet/eoa.go @@ -6,7 +6,6 @@ package validatorwallet import ( "context" "fmt" - "time" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -95,7 +94,7 @@ func (w *EOA) postTransaction(ctx context.Context, baseTx *types.Transaction) (* return nil, err } gas := baseTx.Gas() + w.getExtraGas() - newTx, err := w.dataPoster.PostTransaction(ctx, time.Now(), nonce, nil, *baseTx.To(), baseTx.Data(), gas, baseTx.Value(), nil) + newTx, err := w.dataPoster.PostSimpleTransaction(ctx, nonce, *baseTx.To(), baseTx.Data(), gas, baseTx.Value()) if err != nil { return nil, fmt.Errorf("post transaction: %w", err) } diff --git a/system_tests/batch_poster_test.go b/system_tests/batch_poster_test.go index f7bf74f699..cacbe3cee4 100644 --- a/system_tests/batch_poster_test.go +++ b/system_tests/batch_poster_test.go @@ -180,7 +180,7 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { } lastTxHash := txs[len(txs)-1].Hash() - for i := 90; i > 0; i-- { + for i := 90; i >= 0; i-- { builder.L1.SendWaitTestTransactions(t, []*types.Transaction{ builder.L1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), }) diff --git a/system_tests/block_validator_test.go b/system_tests/block_validator_test.go index e2e4227bf6..1fcf2bab34 100644 --- a/system_tests/block_validator_test.go +++ b/system_tests/block_validator_test.go @@ -93,6 +93,7 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops contractCode = append(contractCode, byte(vm.PUSH0)) contractCode = append(contractCode, byte(vm.CODECOPY)) contractCode = append(contractCode, byte(vm.PUSH0)) + contractCode = append(contractCode, byte(vm.BLOBHASH)) contractCode = append(contractCode, byte(vm.RETURN)) basefee := builder.L2.GetBaseFee(t) var err error diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 2e17a50ede..a950ebd7ca 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -43,6 +43,9 @@ import ( "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/filters" + "github.com/ethereum/go-ethereum/eth/tracers" + _ "github.com/ethereum/go-ethereum/eth/tracers/js" + _ "github.com/ethereum/go-ethereum/eth/tracers/native" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" @@ -474,9 +477,10 @@ func createStackConfigForTest(dataDir string) *node.Config { stackConf.DataDir = dataDir stackConf.UseLightweightKDF = true stackConf.WSPort = 0 + stackConf.WSModules = append(stackConf.WSModules, "eth", "debug") stackConf.HTTPPort = 0 stackConf.HTTPHost = "" - stackConf.HTTPModules = append(stackConf.HTTPModules, "eth") + stackConf.HTTPModules = append(stackConf.HTTPModules, "eth", "debug") stackConf.P2P.NoDiscovery = true stackConf.P2P.NoDial = true stackConf.P2P.ListenAddr = "" @@ -605,6 +609,7 @@ func createTestL1BlockChainWithConfig(t 
*testing.T, l1info info, stackConfig *no Namespace: "eth", Service: filters.NewFilterAPI(filters.NewFilterSystem(l1backend.APIBackend, filters.Config{}), false), }}) + stack.RegisterAPIs(tracers.APIs(l1backend.APIBackend)) Require(t, stack.Start()) Require(t, l1backend.StartMining()) diff --git a/system_tests/estimation_test.go b/system_tests/estimation_test.go index eda7fb449f..6f47c14f1f 100644 --- a/system_tests/estimation_test.go +++ b/system_tests/estimation_test.go @@ -12,6 +12,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/solgen/go/mocksgen" @@ -180,6 +181,22 @@ func TestDifficultyForArbOSTen(t *testing.T) { } } +func TestBlobBasefeeReverts(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() + + _, err := builder.L2.Client.CallContract(ctx, ethereum.CallMsg{ + Data: []byte{byte(vm.BLOBBASEFEE)}, + }, nil) + if err == nil { + t.Error("Expected BLOBBASEFEE to revert") + } +} + func TestComponentEstimate(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index 118d17ec81..b8f891e3e7 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -35,6 +35,7 @@ import ( "github.com/offchainlabs/nitro/solgen/go/challengegen" "github.com/offchainlabs/nitro/solgen/go/mocksgen" "github.com/offchainlabs/nitro/solgen/go/ospgen" + "github.com/offchainlabs/nitro/solgen/go/yulgen" "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/validator" "github.com/offchainlabs/nitro/validator/server_common" @@ -199,11 +200,15 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha Require(t, err) _, err = EnsureTxSucceeded(ctx, l1Client, tx) Require(t, err) + reader4844, tx, _, err := yulgen.DeployReader4844(&txOpts, l1Client) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, l1Client, tx) + Require(t, err) timeBounds := mocksgen.ISequencerInboxMaxTimeVariation{ - DelayBlocks: big.NewInt(10000), - FutureBlocks: big.NewInt(10000), - DelaySeconds: big.NewInt(10000), - FutureSeconds: big.NewInt(10000), + DelayBlocks: 10000, + FutureBlocks: 10000, + DelaySeconds: 10000, + FutureSeconds: 10000, } seqInboxAddr, tx, seqInbox, err := mocksgen.DeploySequencerInboxStub( &txOpts, @@ -212,6 +217,7 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha l1Info.GetAddress("sequencer"), timeBounds, big.NewInt(117964), + reader4844, ) Require(t, err) _, err = EnsureTxSucceeded(ctx, l1Client, tx) @@ -379,7 +385,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall confirmLatestBlock(ctx, t, l1Info, l1Backend) - asserterValidator, err := staker.NewStatelessBlockValidator(asserterL2.InboxReader, asserterL2.InboxTracker, asserterL2.TxStreamer, asserterExec.Recorder, asserterL2ArbDb, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) + asserterValidator, err := staker.NewStatelessBlockValidator(asserterL2.InboxReader, asserterL2.InboxTracker, asserterL2.TxStreamer, asserterExec.Recorder, asserterL2ArbDb, nil, nil, 
StaticFetcherFrom(t, &conf.BlockValidator), valStack)
 if err != nil {
 Fatal(t, err)
 }
@@ -396,7 +402,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall
 if err != nil {
 Fatal(t, err)
 }
- challengerValidator, err := staker.NewStatelessBlockValidator(challengerL2.InboxReader, challengerL2.InboxTracker, challengerL2.TxStreamer, challengerExec.Recorder, challengerL2ArbDb, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack)
+ challengerValidator, err := staker.NewStatelessBlockValidator(challengerL2.InboxReader, challengerL2.InboxTracker, challengerL2.TxStreamer, challengerExec.Recorder, challengerL2ArbDb, nil, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack)
 if err != nil {
 Fatal(t, err)
 }
diff --git a/system_tests/initialization_test.go b/system_tests/initialization_test.go
index 6707df1c64..f0797404a9 100644
--- a/system_tests/initialization_test.go
+++ b/system_tests/initialization_test.go
@@ -24,11 +24,11 @@ func InitOneContract(prand *testhelpers.PseudoRandomDataSource) (*statetransfer.
 numCells := int(prand.GetUint64() % 1000)
 for i := 0; i < numCells; i++ {
 storageAddr := prand.GetHash()
- storageVal := prand.GetAddress().Hash() // 20 bytes so sum won't overflow
- code = append(code, 0x7f) // PUSH32
- code = append(code, storageAddr[:]...) // storageAdr
- code = append(code, 0x54) // SLOAD
- code = append(code, 0x01) // ADD
+ storageVal := common.BytesToHash(prand.GetAddress().Bytes()) // 20 bytes so sum won't overflow
+ code = append(code, 0x7f) // PUSH32
+ code = append(code, storageAddr[:]...) // storageAddr
+ code = append(code, 0x54) // SLOAD
+ code = append(code, 0x01) // ADD
 storageMap[storageAddr] = storageVal
 sum.Add(sum, storageVal.Big())
 }
diff --git a/system_tests/meaningless_reorg_test.go b/system_tests/meaningless_reorg_test.go
index f09f68041a..e1715dc635 100644
--- a/system_tests/meaningless_reorg_test.go
+++ b/system_tests/meaningless_reorg_test.go
@@ -95,7 +95,7 @@ func TestMeaninglessBatchReorg(t *testing.T) {
 time.Sleep(10 * time.Millisecond)
 }
 
- _, err = builder.L2.ConsensusNode.InboxReader.GetSequencerMessageBytes(ctx, 1)
+ _, _, err = builder.L2.ConsensusNode.InboxReader.GetSequencerMessageBytes(ctx, 1)
 Require(t, err)
 
 l2Header, err := builder.L2.Client.HeaderByNumber(ctx, l2Receipt.BlockNumber)
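A recurring mechanical change in this diff (here, and in `precompile_test.go` and `validatorwallet/contract.go` above) is replacing `addr.Hash()` with `common.BytesToHash(addr.Bytes())`, which spells out the 20-to-32-byte widening that indexed log topics use. A small demonstration:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	addr := common.HexToAddress("0x000000000000000000000000000000000000006c")
	// BytesToHash left-pads the 20-byte address to 32 bytes, matching how an
	// address appears when used as an indexed topic in a log filter.
	h := common.BytesToHash(addr.Bytes())
	fmt.Println(h.Hex()) // 0x000000000000000000000000000000000000000000000000000000000000006c
}
```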
diff --git a/system_tests/precompile_test.go b/system_tests/precompile_test.go
index 10db09275b..e0a9c2ce78 100644
--- a/system_tests/precompile_test.go
+++ b/system_tests/precompile_test.go
@@ -120,3 +120,54 @@ func TestPrecompileErrorGasLeft(t *testing.T) {
 Require(t, err)
 assertNotAllGasConsumed(common.HexToAddress("0xff"), arbDebug.Methods["legacyError"].ID)
 }
+
+func TestScheduleArbosUpgrade(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ builder := NewNodeBuilder(ctx).DefaultConfig(t, false)
+ cleanup := builder.Build(t)
+ defer cleanup()
+
+ auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx)
+
+ arbOwnerPublic, err := precompilesgen.NewArbOwnerPublic(common.HexToAddress("0x6b"), builder.L2.Client)
+ Require(t, err, "could not bind ArbOwnerPublic contract")
+
+ arbOwner, err := precompilesgen.NewArbOwner(common.HexToAddress("0x70"), builder.L2.Client)
+ Require(t, err, "could not bind ArbOwner contract")
+
+ callOpts := &bind.CallOpts{Context: ctx}
+ scheduled, err := arbOwnerPublic.GetScheduledUpgrade(callOpts)
+ Require(t, err, "failed to call GetScheduledUpgrade before scheduling upgrade")
+ if scheduled.ArbosVersion != 0 || scheduled.ScheduledForTimestamp != 0 {
+ t.Errorf("expected no upgrade to be scheduled, got version %v timestamp %v", scheduled.ArbosVersion, scheduled.ScheduledForTimestamp)
+ }
+
+ // Schedule a noop upgrade, which should test GetScheduledUpgrade in the same way an already completed upgrade would.
+ tx, err := arbOwner.ScheduleArbOSUpgrade(&auth, 1, 1)
+ Require(t, err)
+ _, err = builder.L2.EnsureTxSucceeded(tx)
+ Require(t, err)
+
+ scheduled, err = arbOwnerPublic.GetScheduledUpgrade(callOpts)
+ Require(t, err, "failed to call GetScheduledUpgrade after scheduling noop upgrade")
+ if scheduled.ArbosVersion != 0 || scheduled.ScheduledForTimestamp != 0 {
+ t.Errorf("expected completed scheduled upgrade to be ignored, got version %v timestamp %v", scheduled.ArbosVersion, scheduled.ScheduledForTimestamp)
+ }
+
+ // TODO: Once we have an ArbOS 30, test a real upgrade with it
+ // We can't test 11 -> 20 because 11 doesn't have the GetScheduledUpgrade method we want to test
+ var testVersion uint64 = 100
+ var testTimestamp uint64 = 1 << 62
+ tx, err = arbOwner.ScheduleArbOSUpgrade(&auth, testVersion, testTimestamp)
+ Require(t, err)
+ _, err = builder.L2.EnsureTxSucceeded(tx)
+ Require(t, err)
+
+ scheduled, err = arbOwnerPublic.GetScheduledUpgrade(callOpts)
+ Require(t, err, "failed to call GetScheduledUpgrade after scheduling upgrade")
+ if scheduled.ArbosVersion != testVersion || scheduled.ScheduledForTimestamp != testTimestamp {
+ t.Errorf("expected upgrade to be scheduled for version %v timestamp %v, got version %v timestamp %v", testVersion, testTimestamp, scheduled.ArbosVersion, scheduled.ScheduledForTimestamp)
+ }
+}
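The noop case above works because of how a scheduled upgrade is reported: once the chain's current ArbOS version has caught up to the scheduled version, the getter returns zeros. A hedged sketch of that rule in isolation (`visibleUpgrade` is an illustrative stand-in, not the precompile itself):

```go
package main

import "fmt"

// visibleUpgrade mirrors the reporting rule the test exercises: an upgrade is
// only reported while it is still ahead of the chain's current ArbOS version.
func visibleUpgrade(current, scheduled, timestamp uint64) (uint64, uint64) {
	if current >= scheduled {
		return 0, 0 // noop or already-applied upgrade: nothing to report
	}
	return scheduled, timestamp
}

func main() {
	fmt.Println(visibleUpgrade(20, 1, 1))       // 0 0 (the noop upgrade above is invisible)
	fmt.Println(visibleUpgrade(20, 100, 1<<62)) // 100 4611686018427387904
}
```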
diff --git a/system_tests/pruning_test.go b/system_tests/pruning_test.go
index ef82c0466e..e9e99dffcc 100644
--- a/system_tests/pruning_test.go
+++ b/system_tests/pruning_test.go
@@ -3,12 +3,14 @@ package arbtest
 import (
 "context"
 "testing"
+ "time"
 
 "github.com/ethereum/go-ethereum/common"
 "github.com/ethereum/go-ethereum/core/rawdb"
 "github.com/ethereum/go-ethereum/core/types"
 "github.com/ethereum/go-ethereum/ethdb"
 "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/trie"
 "github.com/offchainlabs/nitro/cmd/conf"
 "github.com/offchainlabs/nitro/cmd/pruning"
 "github.com/offchainlabs/nitro/execution/gethexec"
@@ -32,35 +34,34 @@ func TestPruning(t *testing.T) {
 ctx, cancel := context.WithCancel(context.Background())
 defer cancel()
 
- var dataDir string
-
- func() {
- builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
- _ = builder.Build(t)
- dataDir = builder.dataDir
- l2cleanupDone := false
- defer func() {
- if !l2cleanupDone {
- builder.L2.cleanup()
- }
- builder.L1.cleanup()
- }()
- builder.L2Info.GenerateAccount("User2")
- var txs []*types.Transaction
- for i := uint64(0); i < 200; i++ {
- tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil)
- txs = append(txs, tx)
- err := builder.L2.Client.SendTransaction(ctx, tx)
- Require(t, err)
- }
- for _, tx := range txs {
- _, err := builder.L2.EnsureTxSucceeded(tx)
- Require(t, err)
+ builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
+ _ = builder.Build(t)
+ l2cleanupDone := false
+ defer func() {
+ if !l2cleanupDone {
+ builder.L2.cleanup()
 }
- l2cleanupDone = true
- builder.L2.cleanup()
- t.Log("stopped l2 node")
+ builder.L1.cleanup()
+ }()
+ builder.L2Info.GenerateAccount("User2")
+ var txs []*types.Transaction
+ for i := uint64(0); i < 200; i++ {
+ tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil)
+ txs = append(txs, tx)
+ err := builder.L2.Client.SendTransaction(ctx, tx)
+ Require(t, err)
+ }
+ for _, tx := range txs {
+ _, err := builder.L2.EnsureTxSucceeded(tx)
+ Require(t, err)
+ }
+ lastBlock, err := builder.L2.Client.BlockNumber(ctx)
+ Require(t, err)
+ l2cleanupDone = true
+ builder.L2.cleanup()
+ t.Log("stopped l2 node")
+
 func() {
 stack, err := node.New(builder.l2StackConfig)
 Require(t, err)
 defer stack.Close()
@@ -105,15 +106,44 @@ func TestPruning(t *testing.T) {
 Fatal(t, "The db doesn't have fewer entries after pruning than before. Before:", chainDbEntriesBeforePruning, "After:", chainDbEntriesAfterPruning)
 }
 }()
- builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
- builder.dataDir = dataDir
- cancel = builder.Build(t)
- defer cancel()
- builder.L2Info.GenerateAccount("User2")
+ testClient, cleanup := builder.Build2ndNode(t, &SecondNodeParams{stackConfig: builder.l2StackConfig})
+ defer cleanup()
+
+ currentBlock := uint64(0)
+ // wait for the chain to catch up
+ for currentBlock < lastBlock {
+ currentBlock, err = testClient.Client.BlockNumber(ctx)
+ Require(t, err)
+ time.Sleep(20 * time.Millisecond)
+ }
+
+ currentBlock, err = testClient.Client.BlockNumber(ctx)
+ Require(t, err)
+ bc := testClient.ExecNode.Backend.ArbInterface().BlockChain()
+ triedb := bc.StateCache().TrieDB()
+ var start uint64
+ if currentBlock+1 >= builder.execConfig.Caching.BlockCount {
+ start = currentBlock + 1 - builder.execConfig.Caching.BlockCount
+ } else {
+ start = 0
+ }
+ for i := start; i <= currentBlock; i++ {
+ header := bc.GetHeaderByNumber(i)
+ _, err := bc.StateAt(header.Root)
+ Require(t, err)
+ tr, err := trie.New(trie.TrieID(header.Root), triedb)
+ Require(t, err)
+ it, err := tr.NodeIterator(nil)
+ Require(t, err)
+ for it.Next(true) {
+ }
+ Require(t, it.Error())
+ }
+
 tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil)
- err := builder.L2.Client.SendTransaction(ctx, tx)
+ err = testClient.Client.SendTransaction(ctx, tx)
 Require(t, err)
- _, err = builder.L2.EnsureTxSucceeded(tx)
+ _, err = testClient.EnsureTxSucceeded(tx)
 Require(t, err)
 }
diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go
index 6e3ffd6125..62e89ff782 100644
--- a/system_tests/staker_test.go
+++ b/system_tests/staker_test.go
@@ -208,6 +208,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool)
 execNodeA,
 l2nodeA.ArbDB,
 nil,
+ nil,
 StaticFetcherFrom(t, &blockValidatorConfig),
 valStack,
 )
@@ -260,6 +261,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool)
 execNodeB,
 l2nodeB.ArbDB,
 nil,
+ nil,
 StaticFetcherFrom(t, &blockValidatorConfig),
 valStack,
 )
diff --git a/system_tests/state_fuzz_test.go b/system_tests/state_fuzz_test.go
index b14215fbf0..28bcbec9b4 100644
--- a/system_tests/state_fuzz_test.go
+++ b/system_tests/state_fuzz_test.go
@@ -41,7 +41,7 @@ func BuildBlock(
 if lastBlockHeader != nil {
 delayedMessagesRead = lastBlockHeader.Nonce.Uint64()
 }
- inboxMultiplexer := arbstate.NewInboxMultiplexer(inbox, delayedMessagesRead, nil, arbstate.KeysetValidate)
+ inboxMultiplexer := arbstate.NewInboxMultiplexer(inbox, delayedMessagesRead, nil, nil, arbstate.KeysetValidate)
 
 ctx := context.Background()
 message, err := inboxMultiplexer.Pop(ctx)
@@ -69,11 +69,11 @@ type inboxBackend struct {
 delayedMessages [][]byte
 }
 
-func (b *inboxBackend) PeekSequencerInbox() ([]byte, error) {
+func (b *inboxBackend) PeekSequencerInbox() ([]byte, common.Hash, error) {
 if len(b.batches) == 0 {
- return nil, errors.New("read past end of specified sequencer batches")
+ return nil, common.Hash{}, errors.New("read past end of specified sequencer batches")
 }
- return b.batches[0], nil
+ return b.batches[0], common.Hash{}, nil
 }
 
 func (b *inboxBackend) GetSequencerInboxPosition() uint64 {
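The extra `common.Hash` threaded through `PeekSequencerInbox` here (and through `GetSequencerMessageBytes` earlier) is the hash of the L1 block in which the batch was found; a blob-carrying batch needs it so the payload can be fetched from that block's EIP-4844 blobs. A sketch of the resulting backend shape (the interface name is illustrative; the real definition lives in arbstate):

```go
package inboxsketch

import "github.com/ethereum/go-ethereum/common"

// inboxBackend sketches the consumer-side contract after this change: every
// batch now travels with the L1 block hash it was posted in, so the
// multiplexer can hand that hash to a BlobReader when the batch header says
// the payload lives in blobs.
type inboxBackend interface {
	PeekSequencerInbox() (batch []byte, l1BlockHash common.Hash, err error)
}
```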
diff --git a/util/blobs/blobs.go b/util/blobs/blobs.go
new file mode 100644
index 0000000000..2852f2b29f
--- /dev/null
+++ b/util/blobs/blobs.go
@@ -0,0 +1,140 @@
+// Copyright 2023-2024, Offchain Labs, Inc.
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE
+
+package blobs
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto/kzg4844"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+func fillBlobBytes(blob []byte, data []byte) []byte {
+ for fieldElement := 0; fieldElement < params.BlobTxFieldElementsPerBlob; fieldElement++ {
+ startIdx := fieldElement*32 + 1
+ copy(blob[startIdx:startIdx+31], data)
+ if len(data) <= 31 {
+ return nil
+ }
+ data = data[31:]
+ }
+ return data
+}
+
+// The number of bits in a BLS scalar that aren't part of a whole byte.
+const spareBlobBits = 6 // = math.floor(math.log2(BLS_MODULUS)) % 8
+
+func fillBlobBits(blob []byte, data []byte) ([]byte, error) {
+ var acc uint16
+ accBits := 0
+ for fieldElement := 0; fieldElement < params.BlobTxFieldElementsPerBlob; fieldElement++ {
+ if accBits < spareBlobBits && len(data) > 0 {
+ acc |= uint16(data[0]) << accBits
+ accBits += 8
+ data = data[1:]
+ }
+ blob[fieldElement*32] = uint8(acc & ((1 << spareBlobBits) - 1))
+ accBits -= spareBlobBits
+ if accBits < 0 {
+ // We're out of data
+ break
+ }
+ acc >>= spareBlobBits
+ }
+ if accBits > 0 {
+ return nil, fmt.Errorf("somehow ended up with %v spare accBits", accBits)
+ }
+ return data, nil
+}
+
+// EncodeBlobs converts raw byte data into the blobs used by EIP-4844 (KZG commitment)
+// transactions on Ethereum.
+func EncodeBlobs(data []byte) ([]kzg4844.Blob, error) {
+ data, err := rlp.EncodeToBytes(data)
+ if err != nil {
+ return nil, err
+ }
+ var blobs []kzg4844.Blob
+ for len(data) > 0 {
+ var b kzg4844.Blob
+ data = fillBlobBytes(b[:], data)
+ data, err = fillBlobBits(b[:], data)
+ if err != nil {
+ return nil, err
+ }
+ blobs = append(blobs, b)
+ }
+ return blobs, nil
+}
+
+// DecodeBlobs decodes blobs into the batch data encoded in them.
+func DecodeBlobs(blobs []kzg4844.Blob) ([]byte, error) {
+ var rlpData []byte
+ for _, blob := range blobs {
+ for fieldIndex := 0; fieldIndex < params.BlobTxFieldElementsPerBlob; fieldIndex++ {
+ rlpData = append(rlpData, blob[fieldIndex*32+1:(fieldIndex+1)*32]...)
+ }
+ var acc uint16
+ accBits := 0
+ for fieldIndex := 0; fieldIndex < params.BlobTxFieldElementsPerBlob; fieldIndex++ {
+ acc |= uint16(blob[fieldIndex*32]) << accBits
+ accBits += spareBlobBits
+ if accBits >= 8 {
+ rlpData = append(rlpData, uint8(acc))
+ acc >>= 8
+ accBits -= 8
+ }
+ }
+ if accBits != 0 {
+ return nil, fmt.Errorf("somehow ended up with %v spare accBits", accBits)
+ }
+ }
+ var outputData []byte
+ err := rlp.Decode(bytes.NewReader(rlpData), &outputData)
+ return outputData, err
+}
+
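Taken together, `EncodeBlobs` and `DecodeBlobs` are inverses: the data is RLP-wrapped, then packed 31 bytes plus 6 spare bits per field element so that every 32-byte element stays below the BLS modulus, and unpacked the same way. A round-trip usage sketch against this package's own API:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/offchainlabs/nitro/util/blobs"
)

func main() {
	payload := []byte("batch data destined for an EIP-4844 blob")
	encoded, err := blobs.EncodeBlobs(payload)
	if err != nil {
		panic(err)
	}
	decoded, err := blobs.DecodeBlobs(encoded)
	if err != nil {
		panic(err)
	}
	// A short payload fits in a single blob and survives the round trip.
	fmt.Println(len(encoded), bytes.Equal(payload, decoded)) // 1 true
}
```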
+func CommitmentToVersionedHash(commitment kzg4844.Commitment) common.Hash {
+ // As per the EIP-4844 spec, the versioned hash is the SHA-256 hash of the commitment with the first byte set to 1.
+ hash := sha256.Sum256(commitment[:])
+ hash[0] = 1
+ return hash
+}
+
+// ComputeCommitmentsAndHashes returns the KZG commitments and versioned hashes that correspond to these blobs.
+func ComputeCommitmentsAndHashes(blobs []kzg4844.Blob) ([]kzg4844.Commitment, []common.Hash, error) {
+ commitments := make([]kzg4844.Commitment, len(blobs))
+ versionedHashes := make([]common.Hash, len(blobs))
+
+ for i := range blobs {
+ var err error
+ commitments[i], err = kzg4844.BlobToCommitment(blobs[i])
+ if err != nil {
+ return nil, nil, err
+ }
+ versionedHashes[i] = CommitmentToVersionedHash(commitments[i])
+ }
+
+ return commitments, versionedHashes, nil
+}
+
+func ComputeBlobProofs(blobs []kzg4844.Blob, commitments []kzg4844.Commitment) ([]kzg4844.Proof, error) {
+ if len(blobs) != len(commitments) {
+ return nil, fmt.Errorf("ComputeBlobProofs got %v blobs but %v commitments", len(blobs), len(commitments))
+ }
+ proofs := make([]kzg4844.Proof, len(blobs))
+ for i := range blobs {
+ var err error
+ proofs[i], err = kzg4844.ComputeBlobProof(blobs[i], commitments[i])
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return proofs, nil
+}
diff --git a/util/blobs/blobs_test.go b/util/blobs/blobs_test.go
new file mode 100644
index 0000000000..753b50a489
--- /dev/null
+++ b/util/blobs/blobs_test.go
@@ -0,0 +1,52 @@
+// Copyright 2024, Offchain Labs, Inc.
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE
+
+package blobs
+
+import (
+ "bytes"
+ "math/big"
+ "math/rand"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/params"
+)
+
+const bytesEncodedPerBlob = 254 * 4096 / 8
+
+var blsModulus, _ = new(big.Int).SetString("52435875175126190479447740508185965837690552500527637822603658699938581184513", 10)
+
+func TestBlobEncoding(t *testing.T) {
+ r := rand.New(rand.NewSource(1))
+outer:
+ for i := 0; i < 40; i++ {
+ data := make([]byte, r.Int()%bytesEncodedPerBlob*3)
+ _, err := r.Read(data)
+ if err != nil {
+ t.Fatalf("failed to generate random bytes: %v", err)
+ }
+ enc, err := EncodeBlobs(data)
+ if err != nil {
+ t.Errorf("failed to encode blobs for length %v: %v", len(data), err)
+ continue
+ }
+ for _, b := range enc {
+ for fieldElement := 0; fieldElement < params.BlobTxFieldElementsPerBlob; fieldElement++ {
+ bigInt := new(big.Int).SetBytes(b[fieldElement*32 : (fieldElement+1)*32])
+ if bigInt.Cmp(blsModulus) >= 0 {
+ t.Errorf("for length %v blob %v has field element %v value %v >= modulus %v", len(data), b, fieldElement, bigInt, blsModulus)
+ continue outer
+ }
+ }
+ }
+ dec, err := DecodeBlobs(enc)
+ if err != nil {
+ t.Errorf("failed to decode blobs for length %v: %v", len(data), err)
+ continue
+ }
+ if !bytes.Equal(data, dec) {
+ t.Errorf("got different decoding for length %v", len(data))
+ continue
+ }
+ }
+}
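Downstream, a blob transaction needs all three pieces: the versioned hashes go in the transaction body, while the blobs, commitments, and proofs travel in the sidecar. A hedged sketch of pairing the helpers above (the real wiring lives in the batch poster):

```go
package main

import (
	"fmt"

	"github.com/offchainlabs/nitro/util/blobs"
)

func main() {
	encoded, err := blobs.EncodeBlobs([]byte("example batch payload"))
	if err != nil {
		panic(err)
	}
	commitments, versionedHashes, err := blobs.ComputeCommitmentsAndHashes(encoded)
	if err != nil {
		panic(err)
	}
	proofs, err := blobs.ComputeBlobProofs(encoded, commitments)
	if err != nil {
		panic(err)
	}
	// One commitment, versioned hash, and proof per blob.
	fmt.Println(len(commitments), len(versionedHashes), len(proofs)) // 1 1 1
}
```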
diff --git a/util/jsonapi/uint64_string.go b/util/jsonapi/uint64_string.go
new file mode 100644
index 0000000000..980b97a949
--- /dev/null
+++ b/util/jsonapi/uint64_string.go
@@ -0,0 +1,38 @@
+// Copyright 2024, Offchain Labs, Inc.
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE
+
+package jsonapi
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+)
+
+// Uint64String is a uint64 that JSON marshals and unmarshals as a decimal string
+type Uint64String uint64
+
+func (u *Uint64String) UnmarshalJSON(b []byte) error {
+ jsonString := string(b)
+ if jsonString == "null" {
+ return nil
+ }
+
+ var s string
+ err := json.Unmarshal(b, &s)
+ if err != nil {
+ return err
+ }
+
+ value, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return err
+ }
+
+ *u = Uint64String(value)
+ return nil
+}
+
+func (u Uint64String) MarshalJSON() ([]byte, error) {
+ return []byte(fmt.Sprintf("\"%d\"", uint64(u))), nil
+}
diff --git a/validator/validation_entry.go b/validator/validation_entry.go
index fed1940f1f..8bb021335e 100644
--- a/validator/validation_entry.go
+++ b/validator/validation_entry.go
@@ -6,8 +6,9 @@ import (
 )
 
 type BatchInfo struct {
- Number uint64
- Data []byte
+ Number uint64
+ BlockHash common.Hash
+ Data []byte
 }
 
 type ValidationInput struct {