diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index b27c196a6f..9f9591b222 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -130,13 +130,13 @@ jobs:
         if: matrix.test-mode == 'defaults'
         run: |
           packages=`go list ./...`
-          gotestsum --format short-verbose --packages="$packages" --rerun-fails=1 -- -coverprofile=coverage.txt -covermode=atomic -coverpkg=./...,./go-ethereum/...
+          gotestsum --format short-verbose --packages="$packages" --rerun-fails=1 -- -coverprofile=coverage.txt -covermode=atomic -coverpkg=./...,./go-ethereum/... -timeout 20m
       - name: run tests with race detection
         if: matrix.test-mode == 'race'
         run: |
           packages=`go list ./...`
-          gotestsum --format short-verbose --packages="$packages" --rerun-fails=1 -- -race
+          gotestsum --format short-verbose --packages="$packages" --rerun-fails=1 -- -race -timeout 30m
       - name: run redis tests
         if: matrix.test-mode == 'defaults'
diff --git a/arbitrator/wasm-libraries/go-stub/src/value.rs b/arbitrator/wasm-libraries/go-stub/src/value.rs
index 3a015bbf70..22c1ed6a86 100644
--- a/arbitrator/wasm-libraries/go-stub/src/value.rs
+++ b/arbitrator/wasm-libraries/go-stub/src/value.rs
@@ -164,9 +164,9 @@ pub unsafe fn get_field(source: u32, field: &[u8]) -> GoValue {
         }
     } else if source == GO_ID {
         if field == b"_pendingEvent" {
-            if let Some(event) = &PENDING_EVENT {
+            if let Some(event) = PENDING_EVENT.clone() {
                 let id = DynamicObjectPool::singleton()
-                    .insert(DynamicObject::PendingEvent(event.clone()));
+                    .insert(DynamicObject::PendingEvent(event));
                 return GoValue::Object(id);
             } else {
                 return GoValue::Null;
diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go
index 14d5affa08..e9cfe1dd3a 100644
--- a/arbnode/batch_poster.go
+++ b/arbnode/batch_poster.go
@@ -18,6 +18,7 @@ import (
 	"github.com/andybalholm/brotli"
 	"github.com/spf13/pflag"
+	"github.com/ethereum/go-ethereum"
 	"github.com/ethereum/go-ethereum/accounts/abi"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/common"
@@ -130,20 +131,21 @@ type BatchPosterConfig struct {
 	// Batch post polling interval.
 	PollInterval time.Duration `koanf:"poll-interval" reload:"hot"`
 	// Batch posting error delay.
-	ErrorDelay time.Duration `koanf:"error-delay" reload:"hot"`
-	CompressionLevel int `koanf:"compression-level" reload:"hot"`
-	DASRetentionPeriod time.Duration `koanf:"das-retention-period" reload:"hot"`
-	GasRefunderAddress string `koanf:"gas-refunder-address" reload:"hot"`
-	DataPoster dataposter.DataPosterConfig `koanf:"data-poster" reload:"hot"`
-	RedisUrl string `koanf:"redis-url"`
-	RedisLock redislock.SimpleCfg `koanf:"redis-lock" reload:"hot"`
-	ExtraBatchGas uint64 `koanf:"extra-batch-gas" reload:"hot"`
-	Post4844Blobs bool `koanf:"post-4844-blobs" reload:"hot"`
-	IgnoreBlobPrice bool `koanf:"ignore-blob-price" reload:"hot"`
-	ParentChainWallet genericconf.WalletConfig `koanf:"parent-chain-wallet"`
-	L1BlockBound string `koanf:"l1-block-bound" reload:"hot"`
-	L1BlockBoundBypass time.Duration `koanf:"l1-block-bound-bypass" reload:"hot"`
-	UseAccessLists bool `koanf:"use-access-lists" reload:"hot"`
+	ErrorDelay time.Duration `koanf:"error-delay" reload:"hot"`
+	CompressionLevel int `koanf:"compression-level" reload:"hot"`
+	DASRetentionPeriod time.Duration `koanf:"das-retention-period" reload:"hot"`
+	GasRefunderAddress string `koanf:"gas-refunder-address" reload:"hot"`
+	DataPoster dataposter.DataPosterConfig `koanf:"data-poster" reload:"hot"`
+	RedisUrl string `koanf:"redis-url"`
+	RedisLock redislock.SimpleCfg `koanf:"redis-lock" reload:"hot"`
+	ExtraBatchGas uint64 `koanf:"extra-batch-gas" reload:"hot"`
+	Post4844Blobs bool `koanf:"post-4844-blobs" reload:"hot"`
+	IgnoreBlobPrice bool `koanf:"ignore-blob-price" reload:"hot"`
+	ParentChainWallet genericconf.WalletConfig `koanf:"parent-chain-wallet"`
+	L1BlockBound string `koanf:"l1-block-bound" reload:"hot"`
+	L1BlockBoundBypass time.Duration `koanf:"l1-block-bound-bypass" reload:"hot"`
+	UseAccessLists bool `koanf:"use-access-lists" reload:"hot"`
+	GasEstimateBaseFeeMultipleBips arbmath.Bips `koanf:"gas-estimate-base-fee-multiple-bips"`
 	gasRefunder common.Address
 	l1BlockBound l1BlockBound
@@ -194,6 +196,7 @@ func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) {
 	f.String(prefix+".l1-block-bound", DefaultBatchPosterConfig.L1BlockBound, "only post messages to batches when they're within the max future block/timestamp as of this L1 block tag (\"safe\", \"finalized\", \"latest\", or \"ignore\" to ignore this check)")
 	f.Duration(prefix+".l1-block-bound-bypass", DefaultBatchPosterConfig.L1BlockBoundBypass, "post batches even if not within the layer 1 future bounds if we're within this margin of the max delay")
 	f.Bool(prefix+".use-access-lists", DefaultBatchPosterConfig.UseAccessLists, "post batches with access lists to reduce gas usage (disabled for L3s)")
+	f.Uint64(prefix+".gas-estimate-base-fee-multiple-bips", uint64(DefaultBatchPosterConfig.GasEstimateBaseFeeMultipleBips), "for gas estimation, use this multiple of the basefee (measured in basis points) as the max fee per gas")
 	redislock.AddConfigOptions(prefix+".redis-lock", f)
 	dataposter.DataPosterConfigAddOptions(prefix+".data-poster", f, dataposter.DefaultDataPosterConfig)
 	genericconf.WalletConfigAddOptions(prefix+".parent-chain-wallet", f, DefaultBatchPosterConfig.ParentChainWallet.Pathname)
@@ -205,23 +208,24 @@ var DefaultBatchPosterConfig = BatchPosterConfig{
 	// This default is overridden for L3 chains in applyChainParameters in cmd/nitro/nitro.go
 	MaxSize: 100000,
 	// TODO: is 1000 bytes an appropriate margin for error vs blob space efficiency?
-	Max4844BatchSize: blobs.BlobEncodableData*(params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob) - 1000,
-	PollInterval: time.Second * 10,
-	ErrorDelay: time.Second * 10,
-	MaxDelay: time.Hour,
-	WaitForMaxDelay: false,
-	CompressionLevel: brotli.BestCompression,
-	DASRetentionPeriod: time.Hour * 24 * 15,
-	GasRefunderAddress: "",
-	ExtraBatchGas: 50_000,
-	Post4844Blobs: false,
-	IgnoreBlobPrice: false,
-	DataPoster: dataposter.DefaultDataPosterConfig,
-	ParentChainWallet: DefaultBatchPosterL1WalletConfig,
-	L1BlockBound: "",
-	L1BlockBoundBypass: time.Hour,
-	UseAccessLists: true,
-	RedisLock: redislock.DefaultCfg,
+	Max4844BatchSize: blobs.BlobEncodableData*(params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob) - 1000,
+	PollInterval: time.Second * 10,
+	ErrorDelay: time.Second * 10,
+	MaxDelay: time.Hour,
+	WaitForMaxDelay: false,
+	CompressionLevel: brotli.BestCompression,
+	DASRetentionPeriod: time.Hour * 24 * 15,
+	GasRefunderAddress: "",
+	ExtraBatchGas: 50_000,
+	Post4844Blobs: false,
+	IgnoreBlobPrice: false,
+	DataPoster: dataposter.DefaultDataPosterConfig,
+	ParentChainWallet: DefaultBatchPosterL1WalletConfig,
+	L1BlockBound: "",
+	L1BlockBoundBypass: time.Hour,
+	UseAccessLists: true,
+	RedisLock: redislock.DefaultCfg,
+	GasEstimateBaseFeeMultipleBips: arbmath.OneInBips * 3 / 2,
 }
 var DefaultBatchPosterL1WalletConfig = genericconf.WalletConfig{
@@ -233,24 +237,25 @@ var DefaultBatchPosterL1WalletConfig = genericconf.WalletConfig{
 }
 var TestBatchPosterConfig = BatchPosterConfig{
-	Enable: true,
-	MaxSize: 100000,
-	Max4844BatchSize: DefaultBatchPosterConfig.Max4844BatchSize,
-	PollInterval: time.Millisecond * 10,
-	ErrorDelay: time.Millisecond * 10,
-	MaxDelay: 0,
-	WaitForMaxDelay: false,
-	CompressionLevel: 2,
-	DASRetentionPeriod: time.Hour * 24 * 15,
-	GasRefunderAddress: "",
-	ExtraBatchGas: 10_000,
-	Post4844Blobs: true,
-	IgnoreBlobPrice: false,
-	DataPoster: dataposter.TestDataPosterConfig,
-	ParentChainWallet: DefaultBatchPosterL1WalletConfig,
-	L1BlockBound: "",
-	L1BlockBoundBypass: time.Hour,
-	UseAccessLists: true,
+	Enable: true,
+	MaxSize: 100000,
+	Max4844BatchSize: DefaultBatchPosterConfig.Max4844BatchSize,
+	PollInterval: time.Millisecond * 10,
+	ErrorDelay: time.Millisecond * 10,
+	MaxDelay: 0,
+	WaitForMaxDelay: false,
+	CompressionLevel: 2,
+	DASRetentionPeriod: time.Hour * 24 * 15,
+	GasRefunderAddress: "",
+	ExtraBatchGas: 10_000,
+	Post4844Blobs: true,
+	IgnoreBlobPrice: false,
+	DataPoster: dataposter.TestDataPosterConfig,
+	ParentChainWallet: DefaultBatchPosterL1WalletConfig,
+	L1BlockBound: "",
+	L1BlockBoundBypass: time.Hour,
+	UseAccessLists: true,
+	GasEstimateBaseFeeMultipleBips: arbmath.OneInBips * 3 / 2,
 }
 type BatchPosterOpts struct {
@@ -427,6 +432,35 @@ func AccessList(opts *AccessListOpts) types.AccessList {
 	return l
 }
+type txInfo struct {
+	Hash common.Hash `json:"hash"`
+	Nonce hexutil.Uint64 `json:"nonce"`
+	From common.Address `json:"from"`
+	To *common.Address `json:"to"`
+	Gas hexutil.Uint64 `json:"gas"`
+	GasPrice *hexutil.Big `json:"gasPrice"`
+	GasFeeCap *hexutil.Big `json:"maxFeePerGas,omitempty"`
+	GasTipCap *hexutil.Big `json:"maxPriorityFeePerGas,omitempty"`
+	Input hexutil.Bytes `json:"input"`
+	Value *hexutil.Big `json:"value"`
+	Accesses *types.AccessList `json:"accessList,omitempty"`
+}
+
+// getTxsInfoByBlock fetches all the transactions in the block with the given number via JSON-RPC
+// and returns them as a slice of txInfo, which holds the fields needed to check for batch reverts
+func (b *BatchPoster) 
getTxsInfoByBlock(ctx context.Context, number int64) ([]txInfo, error) { + blockNrStr := rpc.BlockNumber(number).String() + rawRpcClient := b.l1Reader.Client().Client() + var blk struct { + Transactions []txInfo `json:"transactions"` + } + err := rawRpcClient.CallContext(ctx, &blk, "eth_getBlockByNumber", blockNrStr, true) + if err != nil { + return nil, fmt.Errorf("error fetching block %d : %w", number, err) + } + return blk.Transactions, nil +} + // checkRevert checks blocks with number in range [from, to] whether they // contain reverted batch_poster transaction. // It returns true if it finds batch posting needs to halt, which is true if a batch reverts @@ -436,20 +470,15 @@ func (b *BatchPoster) checkReverts(ctx context.Context, to int64) (bool, error) return false, fmt.Errorf("wrong range, from: %d > to: %d", b.nextRevertCheckBlock, to) } for ; b.nextRevertCheckBlock <= to; b.nextRevertCheckBlock++ { - number := big.NewInt(b.nextRevertCheckBlock) - block, err := b.l1Reader.Client().BlockByNumber(ctx, number) + txs, err := b.getTxsInfoByBlock(ctx, b.nextRevertCheckBlock) if err != nil { - return false, fmt.Errorf("getting block: %v by number: %w", number, err) + return false, fmt.Errorf("error getting transactions data of block %d: %w", b.nextRevertCheckBlock, err) } - for idx, tx := range block.Transactions() { - from, err := b.l1Reader.Client().TransactionSender(ctx, tx, block.Hash(), uint(idx)) - if err != nil { - return false, fmt.Errorf("getting sender of transaction tx: %v, %w", tx.Hash(), err) - } - if from == b.dataPoster.Sender() { - r, err := b.l1Reader.Client().TransactionReceipt(ctx, tx.Hash()) + for _, tx := range txs { + if tx.From == b.dataPoster.Sender() { + r, err := b.l1Reader.Client().TransactionReceipt(ctx, tx.Hash) if err != nil { - return false, fmt.Errorf("getting a receipt for transaction: %v, %w", tx.Hash(), err) + return false, fmt.Errorf("getting a receipt for transaction: %v, %w", tx.Hash, err) } if r.Status == types.ReceiptStatusFailed { shouldHalt := !b.config().DataPoster.UseNoOpStorage @@ -457,8 +486,22 @@ func (b *BatchPoster) checkReverts(ctx context.Context, to int64) (bool, error) if shouldHalt { logLevel = log.Error } - txErr := arbutil.DetailTxError(ctx, b.l1Reader.Client(), tx, r) - logLevel("Transaction from batch poster reverted", "nonce", tx.Nonce(), "txHash", tx.Hash(), "blockNumber", r.BlockNumber, "blockHash", r.BlockHash, "txErr", txErr) + al := types.AccessList{} + if tx.Accesses != nil { + al = *tx.Accesses + } + txErr := arbutil.DetailTxErrorUsingCallMsg(ctx, b.l1Reader.Client(), tx.Hash, r, ethereum.CallMsg{ + From: tx.From, + To: tx.To, + Gas: uint64(tx.Gas), + GasPrice: tx.GasPrice.ToInt(), + GasFeeCap: tx.GasFeeCap.ToInt(), + GasTipCap: tx.GasTipCap.ToInt(), + Value: tx.Value.ToInt(), + Data: tx.Input, + AccessList: al, + }) + logLevel("Transaction from batch poster reverted", "nonce", tx.Nonce, "txHash", tx.Hash, "blockNumber", r.BlockNumber, "blockHash", r.BlockHash, "txErr", txErr) return shouldHalt, nil } } @@ -846,11 +889,12 @@ func (b *BatchPoster) encodeAddBatch( var ErrNormalGasEstimationFailed = errors.New("normal gas estimation failed") type estimateGasParams struct { - From common.Address `json:"from"` - To *common.Address `json:"to"` - Data hexutil.Bytes `json:"data"` - AccessList types.AccessList `json:"accessList"` - BlobHashes []common.Hash `json:"blobVersionedHashes,omitempty"` + From common.Address `json:"from"` + To *common.Address `json:"to"` + Data hexutil.Bytes `json:"data"` + MaxFeePerGas *hexutil.Big 
`json:"maxFeePerGas"` + AccessList types.AccessList `json:"accessList"` + BlobHashes []common.Hash `json:"blobVersionedHashes,omitempty"` } func estimateGas(client rpc.ClientInterface, ctx context.Context, params estimateGasParams) (uint64, error) { @@ -861,16 +905,22 @@ func estimateGas(client rpc.ClientInterface, ctx context.Context, params estimat func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, delayedMessages uint64, realData []byte, realBlobs []kzg4844.Blob, realNonce uint64, realAccessList types.AccessList) (uint64, error) { config := b.config() + rpcClient := b.l1Reader.Client() + rawRpcClient := rpcClient.Client() useNormalEstimation := b.dataPoster.MaxMempoolTransactions() == 1 if !useNormalEstimation { // Check if we can use normal estimation anyways because we're at the latest nonce - latestNonce, err := b.l1Reader.Client().NonceAt(ctx, b.dataPoster.Sender(), nil) + latestNonce, err := rpcClient.NonceAt(ctx, b.dataPoster.Sender(), nil) if err != nil { return 0, err } useNormalEstimation = latestNonce == realNonce } - rawRpcClient := b.l1Reader.Client().Client() + latestHeader, err := rpcClient.HeaderByNumber(ctx, nil) + if err != nil { + return 0, err + } + maxFeePerGas := arbmath.BigMulByBips(latestHeader.BaseFee, config.GasEstimateBaseFeeMultipleBips) if useNormalEstimation { _, realBlobHashes, err := blobs.ComputeCommitmentsAndHashes(realBlobs) if err != nil { @@ -878,11 +928,12 @@ func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, } // If we're at the latest nonce, we can skip the special future tx estimate stuff gas, err := estimateGas(rawRpcClient, ctx, estimateGasParams{ - From: b.dataPoster.Sender(), - To: &b.seqInboxAddr, - Data: realData, - BlobHashes: realBlobHashes, - AccessList: realAccessList, + From: b.dataPoster.Sender(), + To: &b.seqInboxAddr, + Data: realData, + MaxFeePerGas: (*hexutil.Big)(maxFeePerGas), + BlobHashes: realBlobHashes, + AccessList: realAccessList, }) if err != nil { return 0, fmt.Errorf("%w: %w", ErrNormalGasEstimationFailed, err) @@ -903,10 +954,11 @@ func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, return 0, fmt.Errorf("failed to compute blob commitments: %w", err) } gas, err := estimateGas(rawRpcClient, ctx, estimateGasParams{ - From: b.dataPoster.Sender(), - To: &b.seqInboxAddr, - Data: data, - BlobHashes: blobHashes, + From: b.dataPoster.Sender(), + To: &b.seqInboxAddr, + Data: data, + MaxFeePerGas: (*hexutil.Big)(maxFeePerGas), + BlobHashes: blobHashes, // This isn't perfect because we're probably estimating the batch at a different sequence number, // but it should overestimate rather than underestimate which is fine. 
AccessList: realAccessList, @@ -959,7 +1011,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) } var use4844 bool config := b.config() - if config.Post4844Blobs && latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil { + if config.Post4844Blobs && b.daWriter == nil && latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil { arbOSVersion, err := b.arbOSVersionGetter.ArbOSVersionForMessageNumber(arbutil.MessageIndex(arbmath.SaturatingUSub(uint64(batchPosition.MessageCount), 1))) if err != nil { return false, err diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index e22d5b0581..c74672e08f 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -40,6 +40,7 @@ import ( "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/blobs" "github.com/offchainlabs/nitro/util/headerreader" + "github.com/offchainlabs/nitro/util/rpcclient" "github.com/offchainlabs/nitro/util/signature" "github.com/offchainlabs/nitro/util/stopwaiter" "github.com/spf13/pflag" @@ -360,7 +361,7 @@ func (p *DataPoster) canPostWithNonce(ctx context.Context, nextNonce uint64, thi weightDiff := arbmath.MinInt(newCumulativeWeight-confirmedWeight, (nextNonce-unconfirmedNonce)*params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob) if weightDiff > cfg.MaxMempoolWeight { - return fmt.Errorf("%w: transaction nonce: %d, transaction cumulative weight: %d, unconfirmed nonce: %d, confirmed weight: %d, new mempool weight: %d, max mempool weight: %d", ErrExceedsMaxMempoolSize, nextNonce, newCumulativeWeight, unconfirmedNonce, confirmedWeight, weightDiff, cfg.MaxMempoolTransactions) + return fmt.Errorf("%w: transaction nonce: %d, transaction cumulative weight: %d, unconfirmed nonce: %d, confirmed weight: %d, new mempool weight: %d, max mempool weight: %d", ErrExceedsMaxMempoolSize, nextNonce, newCumulativeWeight, unconfirmedNonce, confirmedWeight, weightDiff, cfg.MaxMempoolWeight) } } return nil @@ -465,13 +466,10 @@ func (p *DataPoster) evalMaxFeeCapExpr(backlogOfBatches uint64, elapsed time.Dur var big4 = big.NewInt(4) // The dataPosterBacklog argument should *not* include extraBacklog (it's added in in this function) -func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit uint64, numBlobs uint64, lastTx *types.Transaction, dataCreatedAt time.Time, dataPosterBacklog uint64) (*big.Int, *big.Int, *big.Int, error) { +func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit uint64, numBlobs uint64, lastTx *types.Transaction, dataCreatedAt time.Time, dataPosterBacklog uint64, latestHeader *types.Header) (*big.Int, *big.Int, *big.Int, error) { config := p.config() dataPosterBacklog += p.extraBacklog() - latestHeader, err := p.headerReader.LastHeader(ctx) - if err != nil { - return nil, nil, nil, err - } + if latestHeader.BaseFee == nil { return nil, nil, nil, fmt.Errorf("latest parent chain block %v missing BaseFee (either the parent chain does not have EIP-1559 or the parent chain node is not synced)", latestHeader.Number) } @@ -597,6 +595,20 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u newBlobFeeCap = arbmath.BigDivByUint(newBlobCost, blobGasUsed) } + if config.MaxFeeBidMultipleBips > 0 { + // Limit the fee caps to be no greater than max(MaxFeeBidMultipleBips, minRbf) + maxNonBlobFee := arbmath.BigMulByBips(currentNonBlobFee, config.MaxFeeBidMultipleBips) + if lastTx != nil { + maxNonBlobFee = 
arbmath.BigMax(maxNonBlobFee, arbmath.BigMulByBips(lastTx.GasFeeCap(), minRbfIncrease)) + } + maxBlobFee := arbmath.BigMulByBips(currentBlobFee, config.MaxFeeBidMultipleBips) + if lastTx != nil && lastTx.BlobGasFeeCap() != nil { + maxBlobFee = arbmath.BigMax(maxBlobFee, arbmath.BigMulByBips(lastTx.BlobGasFeeCap(), minRbfIncrease)) + } + newBaseFeeCap = arbmath.BigMin(newBaseFeeCap, maxNonBlobFee) + newBlobFeeCap = arbmath.BigMin(newBlobFeeCap, maxBlobFee) + } + if arbmath.BigGreaterThan(newTipCap, newBaseFeeCap) { log.Info( "reducing new tip cap to new basefee cap", @@ -688,7 +700,12 @@ func (p *DataPoster) postTransaction(ctx context.Context, dataCreatedAt time.Tim return nil, fmt.Errorf("failed to update data poster balance: %w", err) } - feeCap, tipCap, blobFeeCap, err := p.feeAndTipCaps(ctx, nonce, gasLimit, uint64(len(kzgBlobs)), nil, dataCreatedAt, 0) + latestHeader, err := p.headerReader.LastHeader(ctx) + if err != nil { + return nil, err + } + + feeCap, tipCap, blobFeeCap, err := p.feeAndTipCaps(ctx, nonce, gasLimit, uint64(len(kzgBlobs)), nil, dataCreatedAt, 0, latestHeader) if err != nil { return nil, err } @@ -823,7 +840,7 @@ func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransacti return err } if err := p.client.SendTransaction(ctx, newTx.FullTx); err != nil { - if !strings.Contains(err.Error(), "already known") && !strings.Contains(err.Error(), "nonce too low") { + if !rpcclient.IsAlreadyKnownError(err) && !strings.Contains(err.Error(), "nonce too low") { log.Warn("DataPoster failed to send transaction", "err", err, "nonce", newTx.FullTx.Nonce(), "feeCap", newTx.FullTx.GasFeeCap(), "tipCap", newTx.FullTx.GasTipCap(), "blobFeeCap", newTx.FullTx.BlobGasFeeCap(), "gas", newTx.FullTx.Gas()) return err } @@ -873,7 +890,12 @@ func updateGasCaps(tx *types.Transaction, newFeeCap, newTipCap, newBlobFeeCap *b // The mutex must be held by the caller. 
func (p *DataPoster) replaceTx(ctx context.Context, prevTx *storage.QueuedTransaction, backlogWeight uint64) error { - newFeeCap, newTipCap, newBlobFeeCap, err := p.feeAndTipCaps(ctx, prevTx.FullTx.Nonce(), prevTx.FullTx.Gas(), uint64(len(prevTx.FullTx.BlobHashes())), prevTx.FullTx, prevTx.Created, backlogWeight) + latestHeader, err := p.headerReader.LastHeader(ctx) + if err != nil { + return err + } + + newFeeCap, newTipCap, newBlobFeeCap, err := p.feeAndTipCaps(ctx, prevTx.FullTx.Nonce(), prevTx.FullTx.Gas(), uint64(len(prevTx.FullTx.BlobHashes())), prevTx.FullTx, prevTx.Created, backlogWeight, latestHeader) if err != nil { return err } @@ -1127,6 +1149,7 @@ type DataPosterConfig struct { MinBlobTxTipCapGwei float64 `koanf:"min-blob-tx-tip-cap-gwei" reload:"hot"` MaxTipCapGwei float64 `koanf:"max-tip-cap-gwei" reload:"hot"` MaxBlobTxTipCapGwei float64 `koanf:"max-blob-tx-tip-cap-gwei" reload:"hot"` + MaxFeeBidMultipleBips arbmath.Bips `koanf:"max-fee-bid-multiple-bips" reload:"hot"` NonceRbfSoftConfs uint64 `koanf:"nonce-rbf-soft-confs" reload:"hot"` AllocateMempoolBalance bool `koanf:"allocate-mempool-balance" reload:"hot"` UseDBStorage bool `koanf:"use-db-storage"` @@ -1181,6 +1204,7 @@ func DataPosterConfigAddOptions(prefix string, f *pflag.FlagSet, defaultDataPost f.Float64(prefix+".min-blob-tx-tip-cap-gwei", defaultDataPosterConfig.MinBlobTxTipCapGwei, "the minimum tip cap to post EIP-4844 blob carrying transactions at") f.Float64(prefix+".max-tip-cap-gwei", defaultDataPosterConfig.MaxTipCapGwei, "the maximum tip cap to post transactions at") f.Float64(prefix+".max-blob-tx-tip-cap-gwei", defaultDataPosterConfig.MaxBlobTxTipCapGwei, "the maximum tip cap to post EIP-4844 blob carrying transactions at") + f.Uint64(prefix+".max-fee-bid-multiple-bips", uint64(defaultDataPosterConfig.MaxFeeBidMultipleBips), "the maximum multiple of the current price to bid for a transaction's fees (may be exceeded due to min rbf increase, 0 = unlimited)") f.Uint64(prefix+".nonce-rbf-soft-confs", defaultDataPosterConfig.NonceRbfSoftConfs, "the maximum probable reorg depth, used to determine when a transaction will no longer likely need replaced-by-fee") f.Bool(prefix+".allocate-mempool-balance", defaultDataPosterConfig.AllocateMempoolBalance, "if true, don't put transactions in the mempool that spend a total greater than the batch poster's balance") f.Bool(prefix+".use-db-storage", defaultDataPosterConfig.UseDBStorage, "uses database storage when enabled") @@ -1222,6 +1246,7 @@ var DefaultDataPosterConfig = DataPosterConfig{ MinBlobTxTipCapGwei: 1, // default geth minimum, and relays aren't likely to accept lower values given propagation time MaxTipCapGwei: 5, MaxBlobTxTipCapGwei: 1, // lower than normal because 4844 rbf is a minimum of a 2x + MaxFeeBidMultipleBips: arbmath.OneInBips * 10, NonceRbfSoftConfs: 1, AllocateMempoolBalance: true, UseDBStorage: true, @@ -1255,6 +1280,7 @@ var TestDataPosterConfig = DataPosterConfig{ MinBlobTxTipCapGwei: 1, MaxTipCapGwei: 5, MaxBlobTxTipCapGwei: 1, + MaxFeeBidMultipleBips: arbmath.OneInBips * 10, NonceRbfSoftConfs: 1, AllocateMempoolBalance: true, UseDBStorage: false, diff --git a/arbnode/dataposter/dataposter_test.go b/arbnode/dataposter/dataposter_test.go index 06e3144ed1..a8e2e110a0 100644 --- a/arbnode/dataposter/dataposter_test.go +++ b/arbnode/dataposter/dataposter_test.go @@ -9,13 +9,18 @@ import ( "time" "github.com/Knetic/govaluate" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + 
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rpc" "github.com/google/go-cmp/cmp" "github.com/holiman/uint256" "github.com/offchainlabs/nitro/arbnode/dataposter/externalsigner" "github.com/offchainlabs/nitro/arbnode/dataposter/externalsignertest" + "github.com/offchainlabs/nitro/util/arbmath" ) func TestParseReplacementTimes(t *testing.T) { @@ -187,3 +192,293 @@ func TestMaxFeeCapFormulaCalculation(t *testing.T) { t.Fatalf("Unexpected result. Got: %d, want: >0", result) } } + +type stubL1Client struct { + senderNonce uint64 + suggestedGasTipCap *big.Int + + // Define most of the required methods that aren't used by feeAndTipCaps + backends.SimulatedBackend +} + +func (c *stubL1Client) NonceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (uint64, error) { + return c.senderNonce, nil +} + +func (c *stubL1Client) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { + return c.suggestedGasTipCap, nil +} + +// Not used but we need to define +func (c *stubL1Client) BlockNumber(ctx context.Context) (uint64, error) { + return 0, nil +} + +func (c *stubL1Client) CallContractAtHash(ctx context.Context, msg ethereum.CallMsg, blockHash common.Hash) ([]byte, error) { + return []byte{}, nil +} + +func (c *stubL1Client) ChainID(ctx context.Context) (*big.Int, error) { + return nil, nil +} + +func (c *stubL1Client) Client() rpc.ClientInterface { + return nil +} + +func (c *stubL1Client) TransactionSender(ctx context.Context, tx *types.Transaction, block common.Hash, index uint) (common.Address, error) { + return common.Address{}, nil +} + +func TestFeeAndTipCaps_EnoughBalance_NoBacklog_NoUnconfirmed_BlobTx(t *testing.T) { + conf := func() *DataPosterConfig { + // Set only the fields that are used by feeAndTipCaps + // Start with defaults, maybe change for test. 
+ return &DataPosterConfig{ + MaxMempoolTransactions: 18, + MaxMempoolWeight: 18, + MinTipCapGwei: 0.05, + MinBlobTxTipCapGwei: 1, + MaxTipCapGwei: 5, + MaxBlobTxTipCapGwei: 10, + MaxFeeBidMultipleBips: arbmath.OneInBips * 10, + AllocateMempoolBalance: true, + + UrgencyGwei: 2., + ElapsedTimeBase: 10 * time.Minute, + ElapsedTimeImportance: 10, + TargetPriceGwei: 60., + } + } + expression, err := govaluate.NewEvaluableExpression(DefaultDataPosterConfig.MaxFeeCapFormula) + if err != nil { + t.Fatalf("error creating govaluate evaluable expression: %v", err) + } + + p := DataPoster{ + config: conf, + extraBacklog: func() uint64 { return 0 }, + balance: big.NewInt(0).Mul(big.NewInt(params.Ether), big.NewInt(10)), + usingNoOpStorage: false, + client: &stubL1Client{ + senderNonce: 1, + suggestedGasTipCap: big.NewInt(2 * params.GWei), + }, + auth: &bind.TransactOpts{ + From: common.Address{}, + }, + maxFeeCapExpression: expression, + } + + ctx := context.Background() + var nonce uint64 = 1 + var gasLimit uint64 = 300_000 // reasonable upper bound for mainnet blob batches + var numBlobs uint64 = 6 + var lastTx *types.Transaction // PostTransaction leaves this nil, used when replacing + dataCreatedAt := time.Now() + var dataPosterBacklog uint64 = 0 // Zero backlog for PostTransaction + var blobGasUsed uint64 = 0xc0000 // 6 blobs of gas + var excessBlobGas uint64 = 0 // typical current mainnet conditions + latestHeader := types.Header{ + Number: big.NewInt(1), + BaseFee: big.NewInt(1_000_000_000), + BlobGasUsed: &blobGasUsed, + ExcessBlobGas: &excessBlobGas, + } + + newGasFeeCap, newTipCap, newBlobFeeCap, err := p.feeAndTipCaps(ctx, nonce, gasLimit, numBlobs, lastTx, dataCreatedAt, dataPosterBacklog, &latestHeader) + if err != nil { + t.Fatalf("%s", err) + } + + // There is no backlog and almost no time elapses since the batch data was + // created to when it was posted so the maxNormalizedFeeCap is ~60.01 gwei. + // This is multiplied with the normalizedGas to get targetMaxCost. + // This is greatly in excess of currentTotalCost * MaxFeeBidMultipleBips, + // so targetMaxCost is reduced to the current base fee + suggested tip cap + + // current blob fee multipled by MaxFeeBidMultipleBips (factor of 10). + // The blob and non blob factors are then proportionally split out and so + // the newGasFeeCap is set to (current base fee + suggested tip cap) * 10 + // and newBlobFeeCap is set to current blob gas base fee (1 wei + // since there is no excess blob gas) * 10. + expectedGasFeeCap := big.NewInt(30 * params.GWei) + expectedBlobFeeCap := big.NewInt(10) + if !arbmath.BigEquals(expectedGasFeeCap, newGasFeeCap) { + t.Fatalf("feeAndTipCaps didn't return expected gas fee cap. Was: %d, expected: %d", expectedGasFeeCap, newGasFeeCap) + } + if !arbmath.BigEquals(expectedBlobFeeCap, newBlobFeeCap) { + t.Fatalf("feeAndTipCaps didn't return expected blob gas fee cap. Was: %d, expected: %d", expectedBlobFeeCap, newBlobFeeCap) + } + + // 2 gwei is the amount suggested by the L1 client, so that is the value + // returned because it doesn't exceed the configured bounds, there is no + // lastTx to scale against with rbf, and it is not bigger than the computed + // gasFeeCap. + expectedTipCap := big.NewInt(2 * params.GWei) + if !arbmath.BigEquals(expectedTipCap, newTipCap) { + t.Fatalf("feeAndTipCaps didn't return expected tip cap. 
Was: %d, expected: %d", expectedTipCap, newTipCap) + } + + lastBlobTx := &types.BlobTx{} + err = updateTxDataGasCaps(lastBlobTx, newGasFeeCap, newTipCap, newBlobFeeCap) + if err != nil { + t.Fatal(err) + } + lastTx = types.NewTx(lastBlobTx) + // Make creation time go backwards so elapsed time increases + retconnedCreationTime := dataCreatedAt.Add(-time.Minute) + // Base fee needs to have increased to simulate conditions to not include prev tx + latestHeader = types.Header{ + Number: big.NewInt(2), + BaseFee: big.NewInt(32_000_000_000), + BlobGasUsed: &blobGasUsed, + ExcessBlobGas: &excessBlobGas, + } + + newGasFeeCap, newTipCap, newBlobFeeCap, err = p.feeAndTipCaps(ctx, nonce, gasLimit, numBlobs, lastTx, retconnedCreationTime, dataPosterBacklog, &latestHeader) + _, _, _, _ = newGasFeeCap, newTipCap, newBlobFeeCap, err + /* + // I think we expect an increase by *2 due to rbf rules for blob txs, + // currently appears to be broken since the increase exceeds the + // current cost (based on current basefees and tip) * config.MaxFeeBidMultipleBips + // since the previous attempt to send the tx was already using the current cost scaled by + // the multiple (* 10 bips). + expectedGasFeeCap = expectedGasFeeCap.Mul(expectedGasFeeCap, big.NewInt(2)) + expectedBlobFeeCap = expectedBlobFeeCap.Mul(expectedBlobFeeCap, big.NewInt(2)) + expectedTipCap = expectedTipCap.Mul(expectedTipCap, big.NewInt(2)) + + t.Log("newGasFeeCap", newGasFeeCap, "newTipCap", newTipCap, "newBlobFeeCap", newBlobFeeCap, "err", err) + if !arbmath.BigEquals(expectedGasFeeCap, newGasFeeCap) { + t.Fatalf("feeAndTipCaps didn't return expected gas fee cap. Was: %d, expected: %d", expectedGasFeeCap, newGasFeeCap) + } + if !arbmath.BigEquals(expectedBlobFeeCap, newBlobFeeCap) { + t.Fatalf("feeAndTipCaps didn't return expected blob gas fee cap. Was: %d, expected: %d", expectedBlobFeeCap, newBlobFeeCap) + } + if !arbmath.BigEquals(expectedTipCap, newTipCap) { + t.Fatalf("feeAndTipCaps didn't return expected tip cap. Was: %d, expected: %d", expectedTipCap, newTipCap) + } + */ + +} + +func TestFeeAndTipCaps_RBF_RisingBlobFee_FallingBaseFee(t *testing.T) { + conf := func() *DataPosterConfig { + // Set only the fields that are used by feeAndTipCaps + // Start with defaults, maybe change for test. 
+ return &DataPosterConfig{ + MaxMempoolTransactions: 18, + MaxMempoolWeight: 18, + MinTipCapGwei: 0.05, + MinBlobTxTipCapGwei: 1, + MaxTipCapGwei: 5, + MaxBlobTxTipCapGwei: 10, + MaxFeeBidMultipleBips: arbmath.OneInBips * 10, + AllocateMempoolBalance: true, + + UrgencyGwei: 2., + ElapsedTimeBase: 10 * time.Minute, + ElapsedTimeImportance: 10, + TargetPriceGwei: 60., + } + } + expression, err := govaluate.NewEvaluableExpression(DefaultDataPosterConfig.MaxFeeCapFormula) + if err != nil { + t.Fatalf("error creating govaluate evaluable expression: %v", err) + } + + p := DataPoster{ + config: conf, + extraBacklog: func() uint64 { return 0 }, + balance: big.NewInt(0).Mul(big.NewInt(params.Ether), big.NewInt(10)), + usingNoOpStorage: false, + client: &stubL1Client{ + senderNonce: 1, + suggestedGasTipCap: big.NewInt(2 * params.GWei), + }, + auth: &bind.TransactOpts{ + From: common.Address{}, + }, + maxFeeCapExpression: expression, + } + + ctx := context.Background() + var nonce uint64 = 1 + var gasLimit uint64 = 300_000 // reasonable upper bound for mainnet blob batches + var numBlobs uint64 = 6 + var lastTx *types.Transaction // PostTransaction leaves this nil, used when replacing + dataCreatedAt := time.Now() + var dataPosterBacklog uint64 = 0 // Zero backlog for PostTransaction + var blobGasUsed uint64 = 0xc0000 // 6 blobs of gas + var excessBlobGas uint64 = 0 // typical current mainnet conditions + latestHeader := types.Header{ + Number: big.NewInt(1), + BaseFee: big.NewInt(1_000_000_000), + BlobGasUsed: &blobGasUsed, + ExcessBlobGas: &excessBlobGas, + } + + newGasFeeCap, newTipCap, newBlobFeeCap, err := p.feeAndTipCaps(ctx, nonce, gasLimit, numBlobs, lastTx, dataCreatedAt, dataPosterBacklog, &latestHeader) + if err != nil { + t.Fatalf("%s", err) + } + + // There is no backlog and almost no time elapses since the batch data was + // created to when it was posted so the maxNormalizedFeeCap is ~60.01 gwei. + // This is multiplied with the normalizedGas to get targetMaxCost. + // This is greatly in excess of currentTotalCost * MaxFeeBidMultipleBips, + // so targetMaxCost is reduced to the current base fee + suggested tip cap + + // current blob fee multipled by MaxFeeBidMultipleBips (factor of 10). + // The blob and non blob factors are then proportionally split out and so + // the newGasFeeCap is set to (current base fee + suggested tip cap) * 10 + // and newBlobFeeCap is set to current blob gas base fee (1 wei + // since there is no excess blob gas) * 10. + expectedGasFeeCap := big.NewInt(30 * params.GWei) + expectedBlobFeeCap := big.NewInt(10) + if !arbmath.BigEquals(expectedGasFeeCap, newGasFeeCap) { + t.Fatalf("feeAndTipCaps didn't return expected gas fee cap. Was: %d, expected: %d", expectedGasFeeCap, newGasFeeCap) + } + if !arbmath.BigEquals(expectedBlobFeeCap, newBlobFeeCap) { + t.Fatalf("feeAndTipCaps didn't return expected blob gas fee cap. Was: %d, expected: %d", expectedBlobFeeCap, newBlobFeeCap) + } + + // 2 gwei is the amount suggested by the L1 client, so that is the value + // returned because it doesn't exceed the configured bounds, there is no + // lastTx to scale against with rbf, and it is not bigger than the computed + // gasFeeCap. + expectedTipCap := big.NewInt(2 * params.GWei) + if !arbmath.BigEquals(expectedTipCap, newTipCap) { + t.Fatalf("feeAndTipCaps didn't return expected tip cap. 
Was: %d, expected: %d", expectedTipCap, newTipCap) + } + + lastBlobTx := &types.BlobTx{} + err = updateTxDataGasCaps(lastBlobTx, newGasFeeCap, newTipCap, newBlobFeeCap) + if err != nil { + t.Fatal(err) + } + lastTx = types.NewTx(lastBlobTx) + // Make creation time go backwards so elapsed time increases + retconnedCreationTime := dataCreatedAt.Add(-time.Minute) + // Base fee has decreased but blob fee has increased + blobGasUsed = 0xc0000 // 6 blobs of gas + excessBlobGas = 8295804 // this should set blob fee to 12 wei + latestHeader = types.Header{ + Number: big.NewInt(2), + BaseFee: big.NewInt(100_000_000), + BlobGasUsed: &blobGasUsed, + ExcessBlobGas: &excessBlobGas, + } + + newGasFeeCap, newTipCap, newBlobFeeCap, err = p.feeAndTipCaps(ctx, nonce, gasLimit, numBlobs, lastTx, retconnedCreationTime, dataPosterBacklog, &latestHeader) + + t.Log("newGasFeeCap", newGasFeeCap, "newTipCap", newTipCap, "newBlobFeeCap", newBlobFeeCap, "err", err) + if arbmath.BigEquals(expectedGasFeeCap, newGasFeeCap) { + t.Fatalf("feeAndTipCaps didn't return expected gas fee cap. Was: %d, expected NOT: %d", expectedGasFeeCap, newGasFeeCap) + } + if arbmath.BigEquals(expectedBlobFeeCap, newBlobFeeCap) { + t.Fatalf("feeAndTipCaps didn't return expected blob gas fee cap. Was: %d, expected NOT: %d", expectedBlobFeeCap, newBlobFeeCap) + } + if arbmath.BigEquals(expectedTipCap, newTipCap) { + t.Fatalf("feeAndTipCaps didn't return expected tip cap. Was: %d, expected NOT: %d", expectedTipCap, newTipCap) + } + +} diff --git a/arbnode/inbox_reader.go b/arbnode/inbox_reader.go index 72881b52fd..a1f1a1a930 100644 --- a/arbnode/inbox_reader.go +++ b/arbnode/inbox_reader.go @@ -10,7 +10,6 @@ import ( "math" "math/big" "strings" - "sync" "sync/atomic" "time" @@ -99,10 +98,6 @@ type InboxReader struct { // Atomic lastSeenBatchCount uint64 - - // Behind the mutex - lastReadMutex sync.RWMutex - lastReadBlock uint64 lastReadBatchCount uint64 } @@ -396,10 +391,7 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { // There's nothing to do from = arbmath.BigAddByUint(currentHeight, 1) blocksToFetch = config.DefaultBlocksToRead - r.lastReadMutex.Lock() - r.lastReadBlock = currentHeight.Uint64() - r.lastReadBatchCount = checkingBatchCount - r.lastReadMutex.Unlock() + atomic.StoreUint64(&r.lastReadBatchCount, checkingBatchCount) storeSeenBatchCount() if !r.caughtUp && readMode == "latest" { r.caughtUp = true @@ -531,10 +523,7 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { } if len(sequencerBatches) > 0 { readAnyBatches = true - r.lastReadMutex.Lock() - r.lastReadBlock = to.Uint64() - r.lastReadBatchCount = sequencerBatches[len(sequencerBatches)-1].SequenceNumber + 1 - r.lastReadMutex.Unlock() + atomic.StoreUint64(&r.lastReadBatchCount, sequencerBatches[len(sequencerBatches)-1].SequenceNumber+1) storeSeenBatchCount() } } @@ -561,10 +550,7 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { } if !readAnyBatches { - r.lastReadMutex.Lock() - r.lastReadBlock = currentHeight.Uint64() - r.lastReadBatchCount = checkingBatchCount - r.lastReadMutex.Unlock() + atomic.StoreUint64(&r.lastReadBatchCount, checkingBatchCount) storeSeenBatchCount() } } @@ -635,10 +621,8 @@ func (r *InboxReader) GetSequencerMessageBytes(ctx context.Context, seqNum uint6 return nil, common.Hash{}, fmt.Errorf("sequencer batch %v not found in L1 block %v (found batches %v)", seqNum, metadata.ParentChainBlock, seenBatches) } -func (r *InboxReader) GetLastReadBlockAndBatchCount() (uint64, uint64) { - 
r.lastReadMutex.RLock() - defer r.lastReadMutex.RUnlock() - return r.lastReadBlock, r.lastReadBatchCount +func (r *InboxReader) GetLastReadBatchCount() uint64 { + return atomic.LoadUint64(&r.lastReadBatchCount) } // GetLastSeenBatchCount returns how many sequencer batches the inbox reader has read in from L1. diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index f98f93a3eb..b758e95e62 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -204,6 +204,11 @@ func (t *InboxTracker) GetBatchMessageCount(seqNum uint64) (arbutil.MessageIndex return metadata.MessageCount, err } +func (t *InboxTracker) GetBatchParentChainBlock(seqNum uint64) (uint64, error) { + metadata, err := t.GetBatchMetadata(seqNum) + return metadata.ParentChainBlock, err +} + // GetBatchAcc is a convenience function wrapping GetBatchMetadata func (t *InboxTracker) GetBatchAcc(seqNum uint64) (common.Hash, error) { metadata, err := t.GetBatchMetadata(seqNum) @@ -223,6 +228,54 @@ func (t *InboxTracker) GetBatchCount() (uint64, error) { return count, nil } +// err will return unexpected/internal errors +// bool will be false if batch not found (meaning, block not yet posted on a batch) +func (t *InboxTracker) FindInboxBatchContainingMessage(pos arbutil.MessageIndex) (uint64, bool, error) { + batchCount, err := t.GetBatchCount() + if err != nil { + return 0, false, err + } + low := uint64(0) + high := batchCount - 1 + lastBatchMessageCount, err := t.GetBatchMessageCount(high) + if err != nil { + return 0, false, err + } + if lastBatchMessageCount <= pos { + return 0, false, nil + } + // Iteration preconditions: + // - high >= low + // - msgCount(low - 1) <= pos implies low <= target + // - msgCount(high) > pos implies high >= target + // Therefore, if low == high, then low == high == target + for { + // Due to integer rounding, mid >= low && mid < high + mid := (low + high) / 2 + count, err := t.GetBatchMessageCount(mid) + if err != nil { + return 0, false, err + } + if count < pos { + // Must narrow as mid >= low, therefore mid + 1 > low, therefore newLow > oldLow + // Keeps low precondition as msgCount(mid) < pos + low = mid + 1 + } else if count == pos { + return mid + 1, true, nil + } else if count == pos+1 || mid == low { // implied: count > pos + return mid, true, nil + } else { + // implied: count > pos + 1 + // Must narrow as mid < high, therefore newHigh < oldHigh + // Keeps high precondition as msgCount(mid) > pos + high = mid + } + if high == low { + return high, true, nil + } + } +} + func (t *InboxTracker) PopulateFeedBacklog(broadcastServer *broadcaster.Broadcaster) error { batchCount, err := t.GetBatchCount() if err != nil { diff --git a/arbnode/node.go b/arbnode/node.go index b7045b6e80..43af8a44a9 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -30,6 +30,7 @@ import ( "github.com/offchainlabs/nitro/arbnode/dataposter" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/offchainlabs/nitro/arbnode/resourcemanager" + "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/broadcastclient" @@ -205,6 +206,7 @@ func ConfigDefaultL1NonSequencerTest() *Config { config.BatchPoster.Enable = false config.SeqCoordinator.Enable = false config.BlockValidator = staker.TestBlockValidatorConfig + config.SyncMonitor = TestSyncMonitorConfig config.Staker = staker.TestL1ValidatorConfig config.Staker.Enable = false config.BlockValidator.ValidationServerConfigs = 
[]rpcclient.ClientConfig{{URL: ""}} @@ -222,6 +224,7 @@ func ConfigDefaultL2Test() *Config { config.SeqCoordinator.Signer.ECDSA.AcceptSequencer = false config.SeqCoordinator.Signer.ECDSA.Dangerous.AcceptMissing = true config.Staker = staker.TestL1ValidatorConfig + config.SyncMonitor = TestSyncMonitorConfig config.Staker.Enable = false config.BlockValidator.ValidationServerConfigs = []rpcclient.ClientConfig{{URL: ""}} config.TransactionStreamer = DefaultTransactionStreamerConfig @@ -274,7 +277,6 @@ type Node struct { SeqCoordinator *SeqCoordinator MaintenanceRunner *MaintenanceRunner DASLifecycleManager *das.LifecycleManager - ClassicOutboxRetriever *ClassicOutboxRetriever SyncMonitor *SyncMonitor configFetcher ConfigFetcher ctx context.Context @@ -390,17 +392,10 @@ func createNodeImpl( l2ChainId := l2Config.ChainID.Uint64() - syncMonitor := NewSyncMonitor(&config.SyncMonitor) - var classicOutbox *ClassicOutboxRetriever - classicMsgDb, err := stack.OpenDatabase("classic-msg", 0, 0, "", true) - if err != nil { - if l2Config.ArbitrumChainParams.GenesisBlockNum > 0 { - log.Warn("Classic Msg Database not found", "err", err) - } - classicOutbox = nil - } else { - classicOutbox = NewClassicOutboxRetriever(classicMsgDb) + syncConfigFetcher := func() *SyncMonitorConfig { + return &configFetcher.Get().SyncMonitor } + syncMonitor := NewSyncMonitor(syncConfigFetcher) var l1Reader *headerreader.HeaderReader if config.ParentChainReader.Enable { @@ -497,7 +492,6 @@ func createNodeImpl( SeqCoordinator: coordinator, MaintenanceRunner: maintenanceRunner, DASLifecycleManager: nil, - ClassicOutboxRetriever: classicOutbox, SyncMonitor: syncMonitor, configFetcher: configFetcher, ctx: ctx, @@ -802,7 +796,6 @@ func createNodeImpl( SeqCoordinator: coordinator, MaintenanceRunner: maintenanceRunner, DASLifecycleManager: dasLifecycleManager, - ClassicOutboxRetriever: classicOutbox, SyncMonitor: syncMonitor, configFetcher: configFetcher, ctx: ctx, @@ -865,16 +858,19 @@ func (n *Node) Start(ctx context.Context) error { execClient = nil } if execClient != nil { - err := execClient.Initialize(ctx, n, n.SyncMonitor) + err := execClient.Initialize(ctx) if err != nil { return fmt.Errorf("error initializing exec client: %w", err) } } - n.SyncMonitor.Initialize(n.InboxReader, n.TxStreamer, n.SeqCoordinator, n.Execution) + n.SyncMonitor.Initialize(n.InboxReader, n.TxStreamer, n.SeqCoordinator) err := n.Stack.Start() if err != nil { return fmt.Errorf("error starting geth stack: %w", err) } + if execClient != nil { + execClient.SetConsensusClient(n) + } err = n.Execution.Start(ctx) if err != nil { return fmt.Errorf("error starting exec client: %w", err) @@ -987,6 +983,7 @@ func (n *Node) Start(ctx context.Context) error { if n.configFetcher != nil { n.configFetcher.Start(ctx) } + n.SyncMonitor.Start(ctx) return nil } @@ -1040,6 +1037,7 @@ func (n *Node) StopAndWait() { // Just stops the redis client (most other stuff was stopped earlier) n.SeqCoordinator.StopAndWait() } + n.SyncMonitor.StopAndWait() if n.DASLifecycleManager != nil { n.DASLifecycleManager.StopAndWaitUntil(2 * time.Second) } @@ -1050,3 +1048,51 @@ func (n *Node) StopAndWait() { log.Error("error on stack close", "err", err) } } + +func (n *Node) FetchBatch(ctx context.Context, batchNum uint64) ([]byte, common.Hash, error) { + return n.InboxReader.GetSequencerMessageBytes(ctx, batchNum) +} + +func (n *Node) FindInboxBatchContainingMessage(message arbutil.MessageIndex) (uint64, bool, error) { + return n.InboxTracker.FindInboxBatchContainingMessage(message) +} + +func 
(n *Node) GetBatchParentChainBlock(seqNum uint64) (uint64, error) { + return n.InboxTracker.GetBatchParentChainBlock(seqNum) +} + +func (n *Node) FullSyncProgressMap() map[string]interface{} { + return n.SyncMonitor.FullSyncProgressMap() +} + +func (n *Node) Synced() bool { + return n.SyncMonitor.Synced() +} + +func (n *Node) SyncTargetMessageCount() arbutil.MessageIndex { + return n.SyncMonitor.SyncTargetMessageCount() +} + +// TODO: switch from pulling to pushing safe/finalized +func (n *Node) GetSafeMsgCount(ctx context.Context) (arbutil.MessageIndex, error) { + return n.InboxReader.GetSafeMsgCount(ctx) +} + +func (n *Node) GetFinalizedMsgCount(ctx context.Context) (arbutil.MessageIndex, error) { + return n.InboxReader.GetFinalizedMsgCount(ctx) +} + +func (n *Node) WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata) error { + return n.TxStreamer.WriteMessageFromSequencer(pos, msgWithMeta) +} + +func (n *Node) ExpectChosenSequencer() error { + return n.TxStreamer.ExpectChosenSequencer() +} + +func (n *Node) ValidatedMessageCount() (arbutil.MessageIndex, error) { + if n.BlockValidator == nil { + return 0, errors.New("validator not set up") + } + return n.BlockValidator.GetValidated(), nil +} diff --git a/arbnode/sync_monitor.go b/arbnode/sync_monitor.go index 99a66abde2..d3b9a7e1c6 100644 --- a/arbnode/sync_monitor.go +++ b/arbnode/sync_monitor.go @@ -2,120 +2,146 @@ package arbnode import ( "context" - "errors" - "sync/atomic" + "sync" + "time" + "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbutil" - "github.com/offchainlabs/nitro/execution" + "github.com/offchainlabs/nitro/util/stopwaiter" flag "github.com/spf13/pflag" ) type SyncMonitor struct { - config *SyncMonitorConfig + stopwaiter.StopWaiter + config func() *SyncMonitorConfig inboxReader *InboxReader txStreamer *TransactionStreamer coordinator *SeqCoordinator - exec execution.FullExecutionClient initialized bool + + syncTargetLock sync.Mutex + nextSyncTarget arbutil.MessageIndex + syncTarget arbutil.MessageIndex } -func NewSyncMonitor(config *SyncMonitorConfig) *SyncMonitor { +func NewSyncMonitor(config func() *SyncMonitorConfig) *SyncMonitor { return &SyncMonitor{ config: config, } } type SyncMonitorConfig struct { - BlockBuildLag uint64 `koanf:"block-build-lag"` - BlockBuildSequencerInboxLag uint64 `koanf:"block-build-sequencer-inbox-lag"` - CoordinatorMsgLag uint64 `koanf:"coordinator-msg-lag"` - SafeBlockWaitForBlockValidator bool `koanf:"safe-block-wait-for-block-validator"` - FinalizedBlockWaitForBlockValidator bool `koanf:"finalized-block-wait-for-block-validator"` + MsgLag time.Duration `koanf:"msg-lag"` } var DefaultSyncMonitorConfig = SyncMonitorConfig{ - BlockBuildLag: 20, - BlockBuildSequencerInboxLag: 0, - CoordinatorMsgLag: 15, - SafeBlockWaitForBlockValidator: false, - FinalizedBlockWaitForBlockValidator: false, + MsgLag: time.Second, +} + +var TestSyncMonitorConfig = SyncMonitorConfig{ + MsgLag: time.Millisecond * 10, } func SyncMonitorConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Uint64(prefix+".block-build-lag", DefaultSyncMonitorConfig.BlockBuildLag, "allowed lag between messages read and blocks built") - f.Uint64(prefix+".block-build-sequencer-inbox-lag", DefaultSyncMonitorConfig.BlockBuildSequencerInboxLag, "allowed lag between messages read from sequencer inbox and blocks built") - f.Uint64(prefix+".coordinator-msg-lag", DefaultSyncMonitorConfig.CoordinatorMsgLag, "allowed lag between local and remote messages") - 
f.Bool(prefix+".safe-block-wait-for-block-validator", DefaultSyncMonitorConfig.SafeBlockWaitForBlockValidator, "wait for block validator to complete before returning safe block number") - f.Bool(prefix+".finalized-block-wait-for-block-validator", DefaultSyncMonitorConfig.FinalizedBlockWaitForBlockValidator, "wait for block validator to complete before returning finalized block number") + f.Duration(prefix+".msg-lag", DefaultSyncMonitorConfig.MsgLag, "allowed msg lag while still considered in sync") } -func (s *SyncMonitor) Initialize(inboxReader *InboxReader, txStreamer *TransactionStreamer, coordinator *SeqCoordinator, exec execution.FullExecutionClient) { +func (s *SyncMonitor) Initialize(inboxReader *InboxReader, txStreamer *TransactionStreamer, coordinator *SeqCoordinator) { s.inboxReader = inboxReader s.txStreamer = txStreamer s.coordinator = coordinator - s.exec = exec s.initialized = true } -func (s *SyncMonitor) SyncProgressMap() map[string]interface{} { - syncing := false - res := make(map[string]interface{}) +func (s *SyncMonitor) updateSyncTarget(ctx context.Context) time.Duration { + nextSyncTarget, err := s.maxMessageCount() + if err != nil { + log.Warn("failed readin max msg count", "err", err) + return s.config().MsgLag + } + s.syncTargetLock.Lock() + defer s.syncTargetLock.Unlock() + s.syncTarget = s.nextSyncTarget + s.nextSyncTarget = nextSyncTarget + return s.config().MsgLag +} - if !s.initialized { - res["err"] = "uninitialized" - return res +func (s *SyncMonitor) SyncTargetMessageCount() arbutil.MessageIndex { + s.syncTargetLock.Lock() + defer s.syncTargetLock.Unlock() + return s.syncTarget +} + +func (s *SyncMonitor) maxMessageCount() (arbutil.MessageIndex, error) { + msgCount, err := s.txStreamer.GetMessageCount() + if err != nil { + return 0, err } - broadcasterQueuedMessagesPos := atomic.LoadUint64(&(s.txStreamer.broadcasterQueuedMessagesPos)) + pending := s.txStreamer.FeedPendingMessageCount() + if pending > msgCount { + msgCount = pending + } - if broadcasterQueuedMessagesPos != 0 { // unprocessed feed - syncing = true + if s.inboxReader != nil { + batchProcessed := s.inboxReader.GetLastReadBatchCount() + + if batchProcessed > 0 { + batchMsgCount, err := s.inboxReader.Tracker().GetBatchMessageCount(batchProcessed - 1) + if err != nil { + return msgCount, err + } + if batchMsgCount > msgCount { + msgCount = batchMsgCount + } + } } - res["broadcasterQueuedMessagesPos"] = broadcasterQueuedMessagesPos - builtMessageCount, err := s.exec.HeadMessageNumber() - if err != nil { - res["builtMessageCountError"] = err.Error() - syncing = true - builtMessageCount = 0 - } else { - blockNum := s.exec.MessageIndexToBlockNumber(builtMessageCount) - res["blockNum"] = blockNum - builtMessageCount++ - res["messageOfLastBlock"] = builtMessageCount + if s.coordinator != nil { + coordinatorMessageCount, err := s.coordinator.GetRemoteMsgCount() //NOTE: this creates a remote call + if err != nil { + return msgCount, err + } + if coordinatorMessageCount > msgCount { + msgCount = coordinatorMessageCount + } } + return msgCount, nil +} + +func (s *SyncMonitor) FullSyncProgressMap() map[string]interface{} { + res := make(map[string]interface{}) + + if !s.initialized { + res["err"] = "uninitialized" + return res + } + + syncTarget := s.SyncTargetMessageCount() + res["syncTargetMsgCount"] = syncTarget + msgCount, err := s.txStreamer.GetMessageCount() if err != nil { res["msgCountError"] = err.Error() - syncing = true - } else { - res["msgCount"] = msgCount - if 
builtMessageCount+arbutil.MessageIndex(s.config.BlockBuildLag) < msgCount { - syncing = true - } + return res } + res["msgCount"] = msgCount + + res["feedPendingMessageCount"] = s.txStreamer.FeedPendingMessageCount() if s.inboxReader != nil { batchSeen := s.inboxReader.GetLastSeenBatchCount() - _, batchProcessed := s.inboxReader.GetLastReadBlockAndBatchCount() - - if (batchSeen == 0) || // error or not yet read inbox - (batchProcessed < batchSeen) { // unprocessed inbox messages - syncing = true - } res["batchSeen"] = batchSeen + + batchProcessed := s.inboxReader.GetLastReadBatchCount() res["batchProcessed"] = batchProcessed - processedMetadata, err := s.inboxReader.Tracker().GetBatchMetadata(batchProcessed - 1) + processedBatchMsgs, err := s.inboxReader.Tracker().GetBatchMessageCount(batchProcessed - 1) if err != nil { res["batchMetadataError"] = err.Error() - syncing = true } else { - res["messageOfProcessedBatch"] = processedMetadata.MessageCount - if builtMessageCount+arbutil.MessageIndex(s.config.BlockBuildSequencerInboxLag) < processedMetadata.MessageCount { - syncing = true - } + res["messageOfProcessedBatch"] = processedBatchMsgs } l1reader := s.inboxReader.l1Reader @@ -135,73 +161,55 @@ func (s *SyncMonitor) SyncProgressMap() map[string]interface{} { coordinatorMessageCount, err := s.coordinator.GetRemoteMsgCount() //NOTE: this creates a remote call if err != nil { res["coordinatorMsgCountError"] = err.Error() - syncing = true } else { res["coordinatorMessageCount"] = coordinatorMessageCount - if msgCount+arbutil.MessageIndex(s.config.CoordinatorMsgLag) < coordinatorMessageCount { - syncing = true - } } } - if !syncing { - return make(map[string]interface{}) - } - return res } -func (s *SyncMonitor) SafeBlockNumber(ctx context.Context) (uint64, error) { - if s.inboxReader == nil || !s.initialized { - return 0, errors.New("not set up for safeblock") - } - msg, err := s.inboxReader.GetSafeMsgCount(ctx) - if err != nil { - return 0, err - } - // If SafeBlockWaitForBlockValidator is true, we want to wait for the block validator to finish - if s.config.SafeBlockWaitForBlockValidator { - latestValidatedCount, err := s.getLatestValidatedCount() - if err != nil { - return 0, err - } - if msg > latestValidatedCount { - msg = latestValidatedCount - } +func (s *SyncMonitor) SyncProgressMap() map[string]interface{} { + if s.Synced() { + return make(map[string]interface{}) } - block := s.exec.MessageIndexToBlockNumber(msg - 1) - return block, nil + + return s.FullSyncProgressMap() } -func (s *SyncMonitor) getLatestValidatedCount() (arbutil.MessageIndex, error) { - if s.txStreamer.validator == nil { - return 0, errors.New("validator not set up") - } - return s.txStreamer.validator.GetValidated(), nil +func (s *SyncMonitor) Start(ctx_in context.Context) { + s.StopWaiter.Start(ctx_in, s) + s.CallIteratively(s.updateSyncTarget) } -func (s *SyncMonitor) FinalizedBlockNumber(ctx context.Context) (uint64, error) { - if s.inboxReader == nil || !s.initialized { - return 0, errors.New("not set up for safeblock") +func (s *SyncMonitor) Synced() bool { + if !s.initialized { + return false } - msg, err := s.inboxReader.GetFinalizedMsgCount(ctx) + if !s.Started() { + return false + } + syncTarget := s.SyncTargetMessageCount() + + msgCount, err := s.txStreamer.GetMessageCount() if err != nil { - return 0, err + return false } - // If FinalizedBlockWaitForBlockValidator is true, we want to wait for the block validator to finish - if s.config.FinalizedBlockWaitForBlockValidator { - latestValidatedCount, err := 
s.getLatestValidatedCount() - if err != nil { - return 0, err + + if syncTarget > msgCount { + return false + } + + if s.inboxReader != nil { + batchSeen := s.inboxReader.GetLastSeenBatchCount() + if batchSeen == 0 { + return false } - if msg > latestValidatedCount { - msg = latestValidatedCount + batchProcessed := s.inboxReader.GetLastReadBatchCount() + + if batchProcessed < batchSeen { + return false } } - block := s.exec.MessageIndexToBlockNumber(msg - 1) - return block, nil -} - -func (s *SyncMonitor) Synced() bool { - return len(s.SyncProgressMap()) == 0 + return true } diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index 7e9cf1dbad..fa161db6c5 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -113,7 +113,6 @@ func NewTransactionStreamer( fatalErrChan: fatalErrChan, config: config, } - streamer.exec.SetTransactionStreamer(streamer) err := streamer.cleanupInconsistentState() if err != nil { return nil, err @@ -427,6 +426,21 @@ func (s *TransactionStreamer) AddMessages(pos arbutil.MessageIndex, messagesAreC return s.AddMessagesAndEndBatch(pos, messagesAreConfirmed, messages, nil) } +func (s *TransactionStreamer) FeedPendingMessageCount() arbutil.MessageIndex { + pos := atomic.LoadUint64(&s.broadcasterQueuedMessagesPos) + if pos == 0 { + return 0 + } + + s.insertionMutex.Lock() + defer s.insertionMutex.Unlock() + pos = atomic.LoadUint64(&s.broadcasterQueuedMessagesPos) + if pos == 0 { + return 0 + } + return arbutil.MessageIndex(pos + uint64(len(s.broadcasterQueuedMessages))) +} + func (s *TransactionStreamer) AddBroadcastMessages(feedMessages []*m.BroadcastFeedMessage) error { if len(feedMessages) == 0 { return nil @@ -820,10 +834,6 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil return nil } -func (s *TransactionStreamer) FetchBatch(batchNum uint64) ([]byte, common.Hash, error) { - return s.inboxReader.GetSequencerMessageBytes(context.TODO(), batchNum) -} - // The caller must hold the insertionMutex func (s *TransactionStreamer) ExpectChosenSequencer() error { if s.coordinator != nil { @@ -865,10 +875,6 @@ func (s *TransactionStreamer) WriteMessageFromSequencer(pos arbutil.MessageIndex return nil } -func (s *TransactionStreamer) GenesisBlockNumber() uint64 { - return s.chainConfig.ArbitrumChainParams.GenesisBlockNum -} - // PauseReorgs until a matching call to ResumeReorgs (may be called concurrently) func (s *TransactionStreamer) PauseReorgs() { s.reorgMutex.RLock() diff --git a/arbutil/wait_for_l1.go b/arbutil/wait_for_l1.go index 180ce1c67e..cfe24cf636 100644 --- a/arbutil/wait_for_l1.go +++ b/arbutil/wait_for_l1.go @@ -96,3 +96,25 @@ func DetailTxError(ctx context.Context, client L1Interface, tx *types.Transactio } return fmt.Errorf("SendTxAsCall got: %w for tx hash %v", err, tx.Hash()) } + +func DetailTxErrorUsingCallMsg(ctx context.Context, client L1Interface, txHash common.Hash, txRes *types.Receipt, callMsg ethereum.CallMsg) error { + // Re-execute the transaction as a call to get a better error + if ctx.Err() != nil { + return ctx.Err() + } + if txRes == nil { + return errors.New("expected receipt") + } + if txRes.Status == types.ReceiptStatusSuccessful { + return nil + } + var err error + if _, err = client.CallContract(ctx, callMsg, txRes.BlockNumber); err == nil { + return fmt.Errorf("tx failed but call succeeded for tx hash %v", txHash) + } + callMsg.Gas = 0 + if _, err = client.CallContract(ctx, callMsg, txRes.BlockNumber); err == nil { + return fmt.Errorf("%w for tx 
hash %v", vm.ErrOutOfGas, txHash) + } + return fmt.Errorf("SendTxAsCall got: %w for tx hash %v", err, txHash) +} diff --git a/blocks_reexecutor/blocks_reexecutor.go b/blocks_reexecutor/blocks_reexecutor.go index 42bd1428dc..bb6de00cad 100644 --- a/blocks_reexecutor/blocks_reexecutor.go +++ b/blocks_reexecutor/blocks_reexecutor.go @@ -68,7 +68,7 @@ type BlocksReExecutor struct { stopwaiter.StopWaiter config *Config blockchain *core.BlockChain - stateFor func(header *types.Header) (*state.StateDB, error) + stateFor arbitrum.StateForHeaderFunction done chan struct{} fatalErrChan chan error startBlock uint64 @@ -110,7 +110,10 @@ func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *Block startBlock: start, done: make(chan struct{}, c.Room), fatalErrChan: fatalErrChan, - stateFor: func(header *types.Header) (*state.StateDB, error) { return blockchain.StateAt(header.Root) }, + stateFor: func(header *types.Header) (*state.StateDB, arbitrum.StateReleaseFunc, error) { + state, err := blockchain.StateAt(header.Root) + return state, arbitrum.NoopStateRelease, err + }, } } @@ -120,7 +123,9 @@ func (s *BlocksReExecutor) LaunchBlocksReExecution(ctx context.Context, currentB if start < s.startBlock { start = s.startBlock } - startState, startHeader, err := arbitrum.FindLastAvailableState(ctx, s.blockchain, s.stateFor, s.blockchain.GetHeaderByNumber(start), nil, -1) + // we don't use state release pattern here + // TODO do we want to use release pattern here? + startState, startHeader, _, err := arbitrum.FindLastAvailableState(ctx, s.blockchain, s.stateFor, s.blockchain.GetHeaderByNumber(start), nil, -1) if err != nil { s.fatalErrChan <- fmt.Errorf("blocksReExecutor failed to get last available state while searching for state at %d, err: %w", start, err) return s.startBlock diff --git a/broadcaster/broadcaster.go b/broadcaster/broadcaster.go index ed3088ca2e..242b8f9eeb 100644 --- a/broadcaster/broadcaster.go +++ b/broadcaster/broadcaster.go @@ -7,6 +7,7 @@ import ( "context" "errors" "net" + "runtime/debug" "github.com/gobwas/ws" @@ -60,7 +61,7 @@ func (b *Broadcaster) NewBroadcastFeedMessage(message arbostypes.MessageWithMeta func (b *Broadcaster) BroadcastSingle(msg arbostypes.MessageWithMetadata, seq arbutil.MessageIndex) (err error) { defer func() { if r := recover(); r != nil { - log.Error("recovered error in BroadcastSingle", "recover", r) + log.Error("recovered error in BroadcastSingle", "recover", r, "backtrace", string(debug.Stack())) err = errors.New("panic in BroadcastSingle") } }() @@ -84,7 +85,7 @@ func (b *Broadcaster) BroadcastSingleFeedMessage(bfm *m.BroadcastFeedMessage) { func (b *Broadcaster) BroadcastMessages(messages []arbostypes.MessageWithMetadata, seq arbutil.MessageIndex) (err error) { defer func() { if r := recover(); r != nil { - log.Error("recovered error in BroadcastMessages", "recover", r) + log.Error("recovered error in BroadcastMessages", "recover", r, "backtrace", string(debug.Stack())) err = errors.New("panic in BroadcastMessages") } }() diff --git a/cmd/nitro-val/nitro_val.go b/cmd/nitro-val/nitro_val.go index fea95cbb15..3671c7ea8d 100644 --- a/cmd/nitro-val/nitro_val.go +++ b/cmd/nitro-val/nitro_val.go @@ -20,7 +20,7 @@ import ( "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/cmd/util/confighelpers" - _ "github.com/offchainlabs/nitro/nodeInterface" + _ "github.com/offchainlabs/nitro/execution/nodeInterface" "github.com/offchainlabs/nitro/validator/valnode" ) diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index 
ebc57b13b8..72c767d00f 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -35,7 +35,6 @@ import ( "github.com/offchainlabs/nitro/cmd/ipfshelper" "github.com/offchainlabs/nitro/cmd/pruning" "github.com/offchainlabs/nitro/cmd/staterecovery" - "github.com/offchainlabs/nitro/cmd/util" "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/statetransfer" "github.com/offchainlabs/nitro/util/arbmath" @@ -284,14 +283,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo if err != nil { return chainDb, nil, err } - combinedL2ChainInfoFiles := config.Chain.InfoFiles - if config.Chain.InfoIpfsUrl != "" { - l2ChainInfoIpfsFile, err := util.GetL2ChainInfoIpfsFile(ctx, config.Chain.InfoIpfsUrl, config.Chain.InfoIpfsDownloadPath) - if err != nil { - log.Error("error getting l2 chain info file from ipfs", "err", err) - } - combinedL2ChainInfoFiles = append(combinedL2ChainInfoFiles, l2ChainInfoIpfsFile) - } + combinedL2ChainInfoFiles := aggregateL2ChainInfoFiles(ctx, config.Chain.InfoFiles, config.Chain.InfoIpfsUrl, config.Chain.InfoIpfsDownloadPath) chainConfig, err = chaininfo.GetChainConfig(new(big.Int).SetUint64(config.Chain.ID), config.Chain.Name, genesisBlockNr, combinedL2ChainInfoFiles, config.Chain.InfoJson) if err != nil { return chainDb, nil, err diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 1e1e79bbb3..9a24a5cfb8 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -51,7 +51,7 @@ import ( "github.com/offchainlabs/nitro/cmd/util" "github.com/offchainlabs/nitro/cmd/util/confighelpers" "github.com/offchainlabs/nitro/execution/gethexec" - _ "github.com/offchainlabs/nitro/nodeInterface" + _ "github.com/offchainlabs/nitro/execution/nodeInterface" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/solgen/go/rollupgen" @@ -306,14 +306,7 @@ func mainImpl() int { } } - combinedL2ChainInfoFile := nodeConfig.Chain.InfoFiles - if nodeConfig.Chain.InfoIpfsUrl != "" { - l2ChainInfoIpfsFile, err := util.GetL2ChainInfoIpfsFile(ctx, nodeConfig.Chain.InfoIpfsUrl, nodeConfig.Chain.InfoIpfsDownloadPath) - if err != nil { - log.Error("error getting chain info file from ipfs", "err", err) - } - combinedL2ChainInfoFile = append(combinedL2ChainInfoFile, l2ChainInfoIpfsFile) - } + combinedL2ChainInfoFile := aggregateL2ChainInfoFiles(ctx, nodeConfig.Chain.InfoFiles, nodeConfig.Chain.InfoIpfsUrl, nodeConfig.Chain.InfoIpfsDownloadPath) if nodeConfig.Node.Staker.Enable { if !nodeConfig.Node.ParentChainReader.Enable { @@ -518,9 +511,19 @@ func mainImpl() int { return 0 } - if l2BlockChain.Config().ArbitrumChainParams.DataAvailabilityCommittee && !nodeConfig.Node.DataAvailability.Enable { + chainInfo, err := chaininfo.ProcessChainInfo(nodeConfig.Chain.ID, nodeConfig.Chain.Name, combinedL2ChainInfoFile, nodeConfig.Chain.InfoJson) + if err != nil { + log.Error("error processing l2 chain info", "err", err) + return 1 + } + if err := validateBlockChain(l2BlockChain, chainInfo.ChainConfig); err != nil { + log.Error("user provided chain config is not compatible with onchain chain config", "err", err) + return 1 + } + + if l2BlockChain.Config().ArbitrumChainParams.DataAvailabilityCommittee != nodeConfig.Node.DataAvailability.Enable { flag.Usage() - log.Error("a data availability service must be configured for this chain (see the --node.data-availability family of options)") + log.Error(fmt.Sprintf("data availability service usage for this chain is set to %v but 
--node.data-availability.enable is set to %v", l2BlockChain.Config().ArbitrumChainParams.DataAvailabilityCommittee, nodeConfig.Node.DataAvailability.Enable)) return 1 } @@ -916,15 +919,19 @@ func ParseNode(ctx context.Context, args []string) (*NodeConfig, *genericconf.Wa return &nodeConfig, &l1Wallet, &l2DevWallet, nil } -func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, chainName string, l2ChainInfoFiles []string, l2ChainInfoJson string, l2ChainInfoIpfsUrl string, l2ChainInfoIpfsDownloadPath string) error { - combinedL2ChainInfoFiles := l2ChainInfoFiles +func aggregateL2ChainInfoFiles(ctx context.Context, l2ChainInfoFiles []string, l2ChainInfoIpfsUrl string, l2ChainInfoIpfsDownloadPath string) []string { if l2ChainInfoIpfsUrl != "" { l2ChainInfoIpfsFile, err := util.GetL2ChainInfoIpfsFile(ctx, l2ChainInfoIpfsUrl, l2ChainInfoIpfsDownloadPath) if err != nil { log.Error("error getting l2 chain info file from ipfs", "err", err) } - combinedL2ChainInfoFiles = append(combinedL2ChainInfoFiles, l2ChainInfoIpfsFile) + l2ChainInfoFiles = append(l2ChainInfoFiles, l2ChainInfoIpfsFile) } + return l2ChainInfoFiles +} + +func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, chainName string, l2ChainInfoFiles []string, l2ChainInfoJson string, l2ChainInfoIpfsUrl string, l2ChainInfoIpfsDownloadPath string) error { + combinedL2ChainInfoFiles := aggregateL2ChainInfoFiles(ctx, l2ChainInfoFiles, l2ChainInfoIpfsUrl, l2ChainInfoIpfsDownloadPath) chainInfo, err := chaininfo.ProcessChainInfo(chainId, chainName, combinedL2ChainInfoFiles, l2ChainInfoJson) if err != nil { return err diff --git a/contracts b/contracts index 7c46876077..1cab72ff3d 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 7c46876077c6353c7ebdf9cd364710d357fa3914 +Subproject commit 1cab72ff3dfcfe06ceed371a9db7a54a527e3bfb diff --git a/das/bigcache_storage_service.go b/das/bigcache_storage_service.go deleted file mode 100644 index f8421bed1d..0000000000 --- a/das/bigcache_storage_service.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2022, Offchain Labs, Inc. 
-// For license information, see https://github.com/nitro/blob/master/LICENSE - -package das - -import ( - "context" - "fmt" - "time" - - "github.com/allegro/bigcache" - "github.com/offchainlabs/nitro/arbstate" - "github.com/offchainlabs/nitro/das/dastree" - "github.com/offchainlabs/nitro/util/pretty" - flag "github.com/spf13/pflag" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" -) - -type BigCacheConfig struct { - // TODO add other config information like HardMaxCacheSize - Enable bool `koanf:"enable"` - Expiration time.Duration `koanf:"expiration"` - MaxEntriesInWindow int -} - -var DefaultBigCacheConfig = BigCacheConfig{ - Expiration: time.Hour, -} - -var TestBigCacheConfig = BigCacheConfig{ - Enable: true, - Expiration: time.Hour, - MaxEntriesInWindow: 1000, -} - -func BigCacheConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Bool(prefix+".enable", DefaultBigCacheConfig.Enable, "Enable local in-memory caching of sequencer batch data") - f.Duration(prefix+".expiration", DefaultBigCacheConfig.Expiration, "Expiration time for in-memory cached sequencer batches") -} - -type BigCacheStorageService struct { - baseStorageService StorageService - bigCacheConfig BigCacheConfig - bigCache *bigcache.BigCache -} - -func NewBigCacheStorageService(bigCacheConfig BigCacheConfig, baseStorageService StorageService) (StorageService, error) { - conf := bigcache.DefaultConfig(bigCacheConfig.Expiration) - if bigCacheConfig.MaxEntriesInWindow > 0 { - conf.MaxEntriesInWindow = bigCacheConfig.MaxEntriesInWindow - } - bigCache, err := bigcache.NewBigCache(conf) - if err != nil { - return nil, err - } - return &BigCacheStorageService{ - baseStorageService: baseStorageService, - bigCacheConfig: bigCacheConfig, - bigCache: bigCache, - }, nil -} - -func (bcs *BigCacheStorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) { - log.Trace("das.BigCacheStorageService.GetByHash", "key", pretty.PrettyHash(key), "this", bcs) - - ret, err := bcs.bigCache.Get(string(key.Bytes())) - if err != nil { - ret, err = bcs.baseStorageService.GetByHash(ctx, key) - if err != nil { - return nil, err - } - - err = bcs.bigCache.Set(string(key.Bytes()), ret) - if err != nil { - return nil, err - } - return ret, err - } - - return ret, err -} - -func (bcs *BigCacheStorageService) Put(ctx context.Context, value []byte, timeout uint64) error { - logPut("das.BigCacheStorageService.Put", value, timeout, bcs) - err := bcs.baseStorageService.Put(ctx, value, timeout) - if err != nil { - return err - } - return bcs.bigCache.Set(string(dastree.HashBytes(value)), value) -} - -func (bcs *BigCacheStorageService) Sync(ctx context.Context) error { - return bcs.baseStorageService.Sync(ctx) -} - -func (bcs *BigCacheStorageService) Close(ctx context.Context) error { - err := bcs.bigCache.Close() - if err != nil { - return err - } - return bcs.baseStorageService.Close(ctx) -} - -func (bcs *BigCacheStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { - return bcs.baseStorageService.ExpirationPolicy(ctx) -} - -func (bcs *BigCacheStorageService) String() string { - return fmt.Sprintf("BigCacheStorageService(%+v)", bcs.bigCacheConfig) -} - -func (bcs *BigCacheStorageService) HealthCheck(ctx context.Context) error { - return bcs.baseStorageService.HealthCheck(ctx) -} diff --git a/das/cache_storage_service.go b/das/cache_storage_service.go new file mode 100644 index 0000000000..13bdb189d3 --- /dev/null +++ b/das/cache_storage_service.go @@ -0,0 +1,95 @@ +// 
Copyright 2022, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package das + +import ( + "context" + "fmt" + + "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/das/dastree" + "github.com/offchainlabs/nitro/util/pretty" + flag "github.com/spf13/pflag" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/lru" + "github.com/ethereum/go-ethereum/log" +) + +type CacheConfig struct { + Enable bool `koanf:"enable"` + Capacity int `koanf:"capacity"` +} + +var DefaultCacheConfig = CacheConfig{ + Capacity: 20_000, +} + +var TestCacheConfig = CacheConfig{ + Capacity: 1_000, +} + +func CacheConfigAddOptions(prefix string, f *flag.FlagSet) { + f.Bool(prefix+".enable", DefaultCacheConfig.Enable, "Enable local in-memory caching of sequencer batch data") + f.Int(prefix+".capacity", DefaultCacheConfig.Capacity, "Maximum number of entries (up to 64KB each) to store in the cache.") +} + +type CacheStorageService struct { + baseStorageService StorageService + cache *lru.Cache[common.Hash, []byte] +} + +func NewCacheStorageService(cacheConfig CacheConfig, baseStorageService StorageService) *CacheStorageService { + return &CacheStorageService{ + baseStorageService: baseStorageService, + cache: lru.NewCache[common.Hash, []byte](cacheConfig.Capacity), + } +} + +func (c *CacheStorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) { + log.Trace("das.CacheStorageService.GetByHash", "key", pretty.PrettyHash(key), "this", c) + + if val, wasCached := c.cache.Get(key); wasCached { + return val, nil + } + + val, err := c.baseStorageService.GetByHash(ctx, key) + if err != nil { + return nil, err + } + + c.cache.Add(key, val) + + return val, nil +} + +func (c *CacheStorageService) Put(ctx context.Context, value []byte, timeout uint64) error { + logPut("das.CacheStorageService.Put", value, timeout, c) + err := c.baseStorageService.Put(ctx, value, timeout) + if err != nil { + return err + } + c.cache.Add(common.Hash(dastree.Hash(value)), value) + return nil +} + +func (c *CacheStorageService) Sync(ctx context.Context) error { + return c.baseStorageService.Sync(ctx) +} + +func (c *CacheStorageService) Close(ctx context.Context) error { + return c.baseStorageService.Close(ctx) +} + +func (c *CacheStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { + return c.baseStorageService.ExpirationPolicy(ctx) +} + +func (c *CacheStorageService) String() string { + return fmt.Sprintf("CacheStorageService(size:%+v)", len(c.cache.Keys())) +} + +func (c *CacheStorageService) HealthCheck(ctx context.Context) error { + return c.baseStorageService.HealthCheck(ctx) +} diff --git a/das/bigcache_storage_service_test.go b/das/cache_storage_service_test.go similarity index 57% rename from das/bigcache_storage_service_test.go rename to das/cache_storage_service_test.go index 5fd0cf68d2..8b4203dab5 100644 --- a/das/bigcache_storage_service_test.go +++ b/das/cache_storage_service_test.go @@ -8,42 +8,32 @@ import ( "context" "errors" "testing" - "time" - "github.com/allegro/bigcache" "github.com/offchainlabs/nitro/das/dastree" ) -func TestBigCacheStorageService(t *testing.T) { +func TestCacheStorageService(t *testing.T) { ctx := context.Background() - timeout := uint64(time.Now().Add(time.Hour).Unix()) baseStorageService := NewMemoryBackedStorageService(ctx) - bigCache, err := bigcache.NewBigCache(bigcache.DefaultConfig(TestBigCacheConfig.Expiration)) - Require(t, err) - 
bigCacheService := &BigCacheStorageService{ - baseStorageService: baseStorageService, - bigCacheConfig: TestBigCacheConfig, - bigCache: bigCache, - } - Require(t, err) + cacheService := NewCacheStorageService(TestCacheConfig, baseStorageService) val1 := []byte("The first value") val1CorrectKey := dastree.Hash(val1) val1IncorrectKey := dastree.Hash(append(val1, 0)) - _, err = bigCacheService.GetByHash(ctx, val1CorrectKey) + _, err := cacheService.GetByHash(ctx, val1CorrectKey) if !errors.Is(err, ErrNotFound) { t.Fatal(err) } - err = bigCacheService.Put(ctx, val1, timeout) + err = cacheService.Put(ctx, val1, 1) Require(t, err) - _, err = bigCacheService.GetByHash(ctx, val1IncorrectKey) + _, err = cacheService.GetByHash(ctx, val1IncorrectKey) if !errors.Is(err, ErrNotFound) { t.Fatal(err) } - val, err := bigCacheService.GetByHash(ctx, val1CorrectKey) + val, err := cacheService.GetByHash(ctx, val1CorrectKey) Require(t, err) if !bytes.Equal(val, val1) { t.Fatal(val, val1) @@ -54,14 +44,14 @@ func TestBigCacheStorageService(t *testing.T) { val2CorrectKey := dastree.Hash(val2) val2IncorrectKey := dastree.Hash(append(val2, 0)) - err = baseStorageService.Put(ctx, val2, timeout) + err = baseStorageService.Put(ctx, val2, 1) Require(t, err) - _, err = bigCacheService.GetByHash(ctx, val2IncorrectKey) + _, err = cacheService.GetByHash(ctx, val2IncorrectKey) if !errors.Is(err, ErrNotFound) { t.Fatal(err) } - val, err = bigCacheService.GetByHash(ctx, val2CorrectKey) + val, err = cacheService.GetByHash(ctx, val2CorrectKey) Require(t, err) if !bytes.Equal(val, val2) { t.Fatal(val, val2) @@ -69,19 +59,18 @@ func TestBigCacheStorageService(t *testing.T) { // For Case where the value is present in the cache storage but not present in the base. emptyBaseStorageService := NewMemoryBackedStorageService(ctx) - bigCacheServiceWithEmptyBaseStorage := &BigCacheStorageService{ + cacheServiceWithEmptyBaseStorage := &CacheStorageService{ baseStorageService: emptyBaseStorageService, - bigCacheConfig: TestBigCacheConfig, - bigCache: bigCache, + cache: cacheService.cache, } - val, err = bigCacheServiceWithEmptyBaseStorage.GetByHash(ctx, val1CorrectKey) + val, err = cacheServiceWithEmptyBaseStorage.GetByHash(ctx, val1CorrectKey) Require(t, err) if !bytes.Equal(val, val1) { t.Fatal(val, val1) } // Closes the base storage properly. 
- err = bigCacheService.Close(ctx) + err = cacheService.Close(ctx) Require(t, err) _, err = baseStorageService.GetByHash(ctx, val1CorrectKey) if !errors.Is(err, ErrClosed) { diff --git a/das/das.go b/das/das.go index 910e511083..dd8e43a34d 100644 --- a/das/das.go +++ b/das/das.go @@ -40,8 +40,8 @@ type DataAvailabilityConfig struct { RequestTimeout time.Duration `koanf:"request-timeout"` - LocalCache BigCacheConfig `koanf:"local-cache"` - RedisCache RedisConfig `koanf:"redis-cache"` + LocalCache CacheConfig `koanf:"local-cache"` + RedisCache RedisConfig `koanf:"redis-cache"` LocalDBStorage LocalDBStorageConfig `koanf:"local-db-storage"` LocalFileStorage LocalFileStorageConfig `koanf:"local-file-storage"` @@ -109,7 +109,7 @@ func dataAvailabilityConfigAddOptions(prefix string, f *flag.FlagSet, r role) { f.Bool(prefix+".disable-signature-checking", DefaultDataAvailabilityConfig.DisableSignatureChecking, "disables signature checking on Data Availability Store requests (DANGEROUS, FOR TESTING ONLY)") // Cache options - BigCacheConfigAddOptions(prefix+".local-cache", f) + CacheConfigAddOptions(prefix+".local-cache", f) RedisConfigAddOptions(prefix+".redis-cache", f) // Storage options diff --git a/das/das_test.go b/das/das_test.go index 416744535b..4377dc4dce 100644 --- a/das/das_test.go +++ b/das/das_test.go @@ -30,6 +30,10 @@ func testDASStoreRetrieveMultipleInstances(t *testing.T, storageType string) { Fail(t, "unknown storage type") } + dbConfig := DefaultLocalDBStorageConfig + dbConfig.Enable = enableDbStorage + dbConfig.DataDir = dbPath + config := DataAvailabilityConfig{ Enable: true, Key: KeyConfig{ @@ -39,10 +43,7 @@ func testDASStoreRetrieveMultipleInstances(t *testing.T, storageType string) { Enable: enableFileStorage, DataDir: dbPath, }, - LocalDBStorage: LocalDBStorageConfig{ - Enable: enableDbStorage, - DataDir: dbPath, - }, + LocalDBStorage: dbConfig, ParentChainNodeURL: "none", } @@ -122,6 +123,10 @@ func testDASMissingMessage(t *testing.T, storageType string) { Fail(t, "unknown storage type") } + dbConfig := DefaultLocalDBStorageConfig + dbConfig.Enable = enableDbStorage + dbConfig.DataDir = dbPath + config := DataAvailabilityConfig{ Enable: true, Key: KeyConfig{ @@ -131,10 +136,7 @@ func testDASMissingMessage(t *testing.T, storageType string) { Enable: enableFileStorage, DataDir: dbPath, }, - LocalDBStorage: LocalDBStorageConfig{ - Enable: enableDbStorage, - DataDir: dbPath, - }, + LocalDBStorage: dbConfig, ParentChainNodeURL: "none", } diff --git a/das/db_storage_service.go b/das/db_storage_service.go index 6a98e3af1d..33d21942b2 100644 --- a/das/db_storage_service.go +++ b/das/db_storage_service.go @@ -25,9 +25,32 @@ type LocalDBStorageConfig struct { DiscardAfterTimeout bool `koanf:"discard-after-timeout"` SyncFromStorageService bool `koanf:"sync-from-storage-service"` SyncToStorageService bool `koanf:"sync-to-storage-service"` + + // BadgerDB options + NumMemtables int `koanf:"num-memtables"` + NumLevelZeroTables int `koanf:"num-level-zero-tables"` + NumLevelZeroTablesStall int `koanf:"num-level-zero-tables-stall"` + NumCompactors int `koanf:"num-compactors"` + BaseTableSize int64 `koanf:"base-table-size"` + ValueLogFileSize int64 `koanf:"value-log-file-size"` } -var DefaultLocalDBStorageConfig = LocalDBStorageConfig{} +var badgerDefaultOptions = badger.DefaultOptions("") + +var DefaultLocalDBStorageConfig = LocalDBStorageConfig{ + Enable: false, + DataDir: "", + DiscardAfterTimeout: false, + SyncFromStorageService: false, + SyncToStorageService: false, + + NumMemtables: 
badgerDefaultOptions.NumMemtables, + NumLevelZeroTables: badgerDefaultOptions.NumLevelZeroTables, + NumLevelZeroTablesStall: badgerDefaultOptions.NumLevelZeroTablesStall, + NumCompactors: badgerDefaultOptions.NumCompactors, + BaseTableSize: badgerDefaultOptions.BaseTableSize, + ValueLogFileSize: badgerDefaultOptions.ValueLogFileSize, +} func LocalDBStorageConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultLocalDBStorageConfig.Enable, "enable storage/retrieval of sequencer batch data from a database on the local filesystem") @@ -35,6 +58,14 @@ func LocalDBStorageConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".discard-after-timeout", DefaultLocalDBStorageConfig.DiscardAfterTimeout, "discard data after its expiry timeout") f.Bool(prefix+".sync-from-storage-service", DefaultLocalDBStorageConfig.SyncFromStorageService, "enable db storage to be used as a source for regular sync storage") f.Bool(prefix+".sync-to-storage-service", DefaultLocalDBStorageConfig.SyncToStorageService, "enable db storage to be used as a sink for regular sync storage") + + f.Int(prefix+".num-memtables", DefaultLocalDBStorageConfig.NumMemtables, "BadgerDB option: sets the maximum number of tables to keep in memory before stalling") + f.Int(prefix+".num-level-zero-tables", DefaultLocalDBStorageConfig.NumLevelZeroTables, "BadgerDB option: sets the maximum number of Level 0 tables before compaction starts") + f.Int(prefix+".num-level-zero-tables-stall", DefaultLocalDBStorageConfig.NumLevelZeroTablesStall, "BadgerDB option: sets the number of Level 0 tables that once reached causes the DB to stall until compaction succeeds") + f.Int(prefix+".num-compactors", DefaultLocalDBStorageConfig.NumCompactors, "BadgerDB option: Sets the number of compaction workers to run concurrently") + f.Int64(prefix+".base-table-size", DefaultLocalDBStorageConfig.BaseTableSize, "BadgerDB option: sets the maximum size in bytes for LSM table or file in the base level") + f.Int64(prefix+".value-log-file-size", DefaultLocalDBStorageConfig.ValueLogFileSize, "BadgerDB option: sets the maximum size of a single log file") + } type DBStorageService struct { @@ -44,16 +75,23 @@ type DBStorageService struct { stopWaiter stopwaiter.StopWaiterSafe } -func NewDBStorageService(ctx context.Context, dirPath string, discardAfterTimeout bool) (StorageService, error) { - db, err := badger.Open(badger.DefaultOptions(dirPath)) +func NewDBStorageService(ctx context.Context, config *LocalDBStorageConfig) (StorageService, error) { + options := badger.DefaultOptions(config.DataDir). + WithNumMemtables(config.NumMemtables). + WithNumLevelZeroTables(config.NumLevelZeroTables). + WithNumLevelZeroTablesStall(config.NumLevelZeroTablesStall). + WithNumCompactors(config.NumCompactors). + WithBaseTableSize(config.BaseTableSize). 
+ WithValueLogFileSize(config.ValueLogFileSize) + db, err := badger.Open(options) if err != nil { return nil, err } ret := &DBStorageService{ db: db, - discardAfterTimeout: discardAfterTimeout, - dirPath: dirPath, + discardAfterTimeout: config.DiscardAfterTimeout, + dirPath: config.DataDir, } if err := ret.stopWaiter.Start(ctx, ret); err != nil { return nil, err diff --git a/das/factory.go b/das/factory.go index 0e6b292005..a459d1a464 100644 --- a/das/factory.go +++ b/das/factory.go @@ -28,7 +28,7 @@ func CreatePersistentStorageService( storageServices := make([]StorageService, 0, 10) var lifecycleManager LifecycleManager if config.LocalDBStorage.Enable { - s, err := NewDBStorageService(ctx, config.LocalDBStorage.DataDir, config.LocalDBStorage.DiscardAfterTimeout) + s, err := NewDBStorageService(ctx, &config.LocalDBStorage) if err != nil { return nil, nil, err } @@ -112,7 +112,7 @@ func WrapStorageWithCache( return nil, nil } - // Enable caches, Redis and (local) BigCache. Local is the outermost, so it will be tried first. + // Enable caches, Redis and (local) Cache. Local is the outermost, so it will be tried first. var err error if config.RedisCache.Enable { storageService, err = NewRedisStorageService(config.RedisCache, storageService) @@ -130,11 +130,8 @@ func WrapStorageWithCache( } } if config.LocalCache.Enable { - storageService, err = NewBigCacheStorageService(config.LocalCache, storageService) + storageService = NewCacheStorageService(config.LocalCache, storageService) lifecycleManager.Register(storageService) - if err != nil { - return nil, err - } } return storageService, nil } diff --git a/execution/gethexec/arb_interface.go b/execution/gethexec/arb_interface.go index 50d7dfb891..dbf9c24015 100644 --- a/execution/gethexec/arb_interface.go +++ b/execution/gethexec/arb_interface.go @@ -21,30 +21,31 @@ type TransactionPublisher interface { } type ArbInterface struct { - exec *ExecutionEngine + blockchain *core.BlockChain + node *ExecutionNode txPublisher TransactionPublisher - arbNode interface{} } -func NewArbInterface(exec *ExecutionEngine, txPublisher TransactionPublisher) (*ArbInterface, error) { +func NewArbInterface(blockchain *core.BlockChain, txPublisher TransactionPublisher) (*ArbInterface, error) { return &ArbInterface{ - exec: exec, + blockchain: blockchain, txPublisher: txPublisher, }, nil } -func (a *ArbInterface) Initialize(arbnode interface{}) { - a.arbNode = arbnode +func (a *ArbInterface) Initialize(node *ExecutionNode) { + a.node = node } func (a *ArbInterface) PublishTransaction(ctx context.Context, tx *types.Transaction, options *arbitrum_types.ConditionalOptions) error { return a.txPublisher.PublishTransaction(ctx, tx, options) } +// might be used before Initialize func (a *ArbInterface) BlockChain() *core.BlockChain { - return a.exec.bc + return a.blockchain } func (a *ArbInterface) ArbNode() interface{} { - return a.arbNode + return a.node } diff --git a/execution/gethexec/block_recorder.go b/execution/gethexec/block_recorder.go index 06ee45cffe..8f5072ab4c 100644 --- a/execution/gethexec/block_recorder.go +++ b/execution/gethexec/block_recorder.go @@ -123,7 +123,7 @@ func (r *BlockRecorder) RecordBlockCreation( var readBatchInfo []validator.BatchInfo if msg != nil { batchFetcher := func(batchNum uint64) ([]byte, error) { - data, blockHash, err := r.execEngine.streamer.FetchBatch(batchNum) + data, blockHash, err := r.execEngine.consensus.FetchBatch(ctx, batchNum) if err != nil { return nil, err } diff --git a/arbnode/classicMessage.go 
b/execution/gethexec/classicMessage.go similarity index 99% rename from arbnode/classicMessage.go rename to execution/gethexec/classicMessage.go index f03ef5bd45..df749b98b4 100644 --- a/arbnode/classicMessage.go +++ b/execution/gethexec/classicMessage.go @@ -1,7 +1,7 @@ // Copyright 2022, Offchain Labs, Inc. // For license information, see https://github.com/nitro/blob/master/LICENSE -package arbnode +package gethexec import ( "encoding/binary" diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index 03dfd1825d..9ce3084dbc 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -14,6 +14,7 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos" "github.com/offchainlabs/nitro/arbos/arbosState" @@ -21,16 +22,24 @@ import ( "github.com/offchainlabs/nitro/arbos/l1pricing" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/execution" + "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/sharedmetrics" "github.com/offchainlabs/nitro/util/stopwaiter" ) +var ( + baseFeeGauge = metrics.NewRegisteredGauge("arb/block/basefee", nil) + blockGasUsedHistogram = metrics.NewRegisteredHistogram("arb/block/gasused", nil, metrics.NewBoundedHistogramSample()) + txCountHistogram = metrics.NewRegisteredHistogram("arb/block/transactions/count", nil, metrics.NewBoundedHistogramSample()) + txGasUsedHistogram = metrics.NewRegisteredHistogram("arb/block/transactions/gasused", nil, metrics.NewBoundedHistogramSample()) +) + type ExecutionEngine struct { stopwaiter.StopWaiter - bc *core.BlockChain - streamer execution.TransactionStreamer - recorder *BlockRecorder + bc *core.BlockChain + consensus execution.FullConsensusClient + recorder *BlockRecorder resequenceChan chan []*arbostypes.MessageWithMetadata createBlocksMutex sync.Mutex @@ -105,14 +114,18 @@ func (s *ExecutionEngine) EnablePrefetchBlock() { s.prefetchBlock = true } -func (s *ExecutionEngine) SetTransactionStreamer(streamer execution.TransactionStreamer) { +func (s *ExecutionEngine) SetConsensus(consensus execution.FullConsensusClient) { if s.Started() { - panic("trying to set transaction streamer after start") + panic("trying to set transaction consensus after start") } - if s.streamer != nil { - panic("trying to set transaction streamer when already set") + if s.consensus != nil { + panic("trying to set transaction consensus when already set") } - s.streamer = streamer + s.consensus = consensus +} + +func (s *ExecutionEngine) GetBatchFetcher() execution.BatchFetcher { + return s.consensus } func (s *ExecutionEngine) Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata, oldMessages []*arbostypes.MessageWithMetadata) error { @@ -288,7 +301,7 @@ func (s *ExecutionEngine) sequencerWrapper(sequencerFunc func() (*types.Block, e } // We got SequencerInsertLockTaken // option 1: there was a race, we are no longer main sequencer - chosenErr := s.streamer.ExpectChosenSequencer() + chosenErr := s.consensus.ExpectChosenSequencer() if chosenErr != nil { return nil, chosenErr } @@ -375,7 +388,7 @@ func (s *ExecutionEngine) sequenceTransactionsWithBlockMutex(header *arbostypes. 
return nil, err } - err = s.streamer.WriteMessageFromSequencer(pos, msgWithMeta) + err = s.consensus.WriteMessageFromSequencer(pos, msgWithMeta) if err != nil { return nil, err } @@ -419,7 +432,7 @@ func (s *ExecutionEngine) sequenceDelayedMessageWithBlockMutex(message *arbostyp DelayedMessagesRead: delayedSeqNum + 1, } - err = s.streamer.WriteMessageFromSequencer(lastMsg+1, messageWithMeta) + err = s.consensus.WriteMessageFromSequencer(lastMsg+1, messageWithMeta) if err != nil { return nil, err } @@ -485,6 +498,11 @@ func (s *ExecutionEngine) createBlockFromNextMessage(msg *arbostypes.MessageWith opts = append(opts, arbos.WithEvilProduction()) opts = append(opts, arbos.WithInterceptDepositSize(s.interceptDepositGweiAmount)) } + batchFetcher := func(num uint64) ([]byte, error) { + data, _, err := s.consensus.FetchBatch(s.GetContext(), num) + return data, err + } + block, receipts, err := arbos.ProduceBlock( msg.Message, msg.DelayedMessagesRead, @@ -496,6 +514,7 @@ func (s *ExecutionEngine) createBlockFromNextMessage(msg *arbostypes.MessageWith data, _, err := s.streamer.FetchBatch(batchNum) return data, err }, + batchFetcher, opts..., ) @@ -515,6 +534,15 @@ func (s *ExecutionEngine) appendBlock(block *types.Block, statedb *state.StateDB if status == core.SideStatTy { return errors.New("geth rejected block as non-canonical") } + baseFeeGauge.Update(block.BaseFee().Int64()) + txCountHistogram.Update(int64(len(block.Transactions()) - 1)) + var blockGasused uint64 + for i := 1; i < len(receipts); i++ { + val := arbmath.SaturatingUSub(receipts[i].GasUsed, receipts[i].GasUsedForL1) + txGasUsedHistogram.Update(int64(val)) + blockGasused += val + } + blockGasUsedHistogram.Update(int64(blockGasused)) return nil } diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go index 49c667946e..62e273592f 100644 --- a/execution/gethexec/node.go +++ b/execution/gethexec/node.go @@ -55,6 +55,7 @@ type Config struct { TxLookupLimit uint64 `koanf:"tx-lookup-limit"` Dangerous DangerousConfig `koanf:"dangerous"` EnablePrefetchBlock bool `koanf:"enable-prefetch-block"` + SyncMonitor SyncMonitorConfig `koanf:"sync-monitor"` forwardingTarget string } @@ -87,6 +88,7 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet) { AddOptionsForNodeForwarderConfig(prefix+".forwarder", f) TxPreCheckerConfigAddOptions(prefix+".tx-pre-checker", f) CachingConfigAddOptions(prefix+".caching", f) + SyncMonitorConfigAddOptions(prefix+".sync-monitor", f) f.Uint64(prefix+".tx-lookup-limit", ConfigDefault.TxLookupLimit, "retain the ability to lookup transactions by hash for the past N blocks (0 = all blocks)") DangerousConfigAddOptions(prefix+".dangerous", f) f.Bool(prefix+".enable-prefetch-block", ConfigDefault.EnablePrefetchBlock, "enable prefetching of blocks") @@ -123,8 +125,8 @@ func ConfigDefaultNonSequencerTest() *Config { func ConfigDefaultTest() *Config { config := ConfigDefault config.Sequencer = TestSequencerConfig - config.ForwardingTarget = "null" config.ParentChainReader = headerreader.TestConfig + config.ForwardingTarget = "null" _ = config.Validate() @@ -143,7 +145,9 @@ type ExecutionNode struct { Sequencer *Sequencer // either nil or same as TxPublisher TxPublisher TransactionPublisher ConfigFetcher ConfigFetcher + SyncMonitor *SyncMonitor ParentChainReader *headerreader.HeaderReader + ClassicOutbox *ClassicOutboxRetriever started atomic.Bool } @@ -179,6 +183,8 @@ func CreateExecutionNode( if err != nil { return nil, err } + } else if config.Sequencer.Enable { + log.Warn("sequencer enabled without l1 
client") } if config.Sequencer.Enable { @@ -202,7 +208,7 @@ func CreateExecutionNode( txprecheckConfigFetcher := func() *TxPreCheckerConfig { return &configFetcher().TxPreChecker } txPublisher = NewTxPreChecker(txPublisher, l2BlockChain, txprecheckConfigFetcher) - arbInterface, err := NewArbInterface(execEngine, txPublisher) + arbInterface, err := NewArbInterface(l2BlockChain, txPublisher) if err != nil { return nil, err } @@ -215,6 +221,20 @@ func CreateExecutionNode( return nil, err } + syncMon := NewSyncMonitor(&config.SyncMonitor, execEngine) + + var classicOutbox *ClassicOutboxRetriever + + if l2BlockChain.Config().ArbitrumChainParams.GenesisBlockNum > 0 { + classicMsgDb, err := stack.OpenDatabase("classic-msg", 0, 0, "", true) + if err != nil { + log.Warn("Classic Msg Database not found", "err", err) + classicOutbox = nil + } else { + classicOutbox = NewClassicOutboxRetriever(classicMsgDb) + } + } + apis := []rpc.API{{ Namespace: "arb", Version: "1.0", @@ -258,13 +278,15 @@ func CreateExecutionNode( Sequencer: sequencer, TxPublisher: txPublisher, ConfigFetcher: configFetcher, + SyncMonitor: syncMon, ParentChainReader: parentChainReader, + ClassicOutbox: classicOutbox, }, nil } -func (n *ExecutionNode) Initialize(ctx context.Context, arbnode interface{}, sync arbitrum.SyncProgressBackend) error { - n.ArbInterface.Initialize(arbnode) +func (n *ExecutionNode) Initialize(ctx context.Context) error { + n.ArbInterface.Initialize(n) err := n.Backend.Start() if err != nil { return fmt.Errorf("error starting geth backend: %w", err) @@ -273,7 +295,7 @@ func (n *ExecutionNode) Initialize(ctx context.Context, arbnode interface{}, syn if err != nil { return fmt.Errorf("error initializing transaction publisher: %w", err) } - err = n.Backend.APIBackend().SetSyncBackend(sync) + err = n.Backend.APIBackend().SetSyncBackend(n.SyncMonitor) if err != nil { return fmt.Errorf("error setting sync backend: %w", err) } @@ -371,11 +393,13 @@ func (n *ExecutionNode) Pause() { n.Sequencer.Pause() } } + func (n *ExecutionNode) Activate() { if n.Sequencer != nil { n.Sequencer.Activate() } } + func (n *ExecutionNode) ForwardTo(url string) error { if n.Sequencer != nil { return n.Sequencer.ForwardTo(url) @@ -383,9 +407,12 @@ func (n *ExecutionNode) ForwardTo(url string) error { return errors.New("forwardTo not supported - sequencer not active") } } -func (n *ExecutionNode) SetTransactionStreamer(streamer execution.TransactionStreamer) { - n.ExecEngine.SetTransactionStreamer(streamer) + +func (n *ExecutionNode) SetConsensusClient(consensus execution.FullConsensusClient) { + n.ExecEngine.SetConsensus(consensus) + n.SyncMonitor.SetConsensusInfo(consensus) } + func (n *ExecutionNode) MessageIndexToBlockNumber(messageNum arbutil.MessageIndex) uint64 { return n.ExecEngine.MessageIndexToBlockNumber(messageNum) } diff --git a/execution/gethexec/sequencer.go b/execution/gethexec/sequencer.go index 5db38cbb4d..5befe3c374 100644 --- a/execution/gethexec/sequencer.go +++ b/execution/gethexec/sequencer.go @@ -481,7 +481,7 @@ func (s *Sequencer) CheckHealth(ctx context.Context) error { if pauseChan != nil { return nil } - return s.execEngine.streamer.ExpectChosenSequencer() + return s.execEngine.consensus.ExpectChosenSequencer() } func (s *Sequencer) ForwardTarget() string { diff --git a/execution/gethexec/sync_monitor.go b/execution/gethexec/sync_monitor.go new file mode 100644 index 0000000000..84f45083e9 --- /dev/null +++ b/execution/gethexec/sync_monitor.go @@ -0,0 +1,113 @@ +package gethexec + +import ( + "context" + + 
"github.com/offchainlabs/nitro/execution" + "github.com/pkg/errors" + flag "github.com/spf13/pflag" +) + +type SyncMonitorConfig struct { + SafeBlockWaitForBlockValidator bool `koanf:"safe-block-wait-for-block-validator"` + FinalizedBlockWaitForBlockValidator bool `koanf:"finalized-block-wait-for-block-validator"` +} + +var DefaultSyncMonitorConfig = SyncMonitorConfig{ + SafeBlockWaitForBlockValidator: false, + FinalizedBlockWaitForBlockValidator: false, +} + +func SyncMonitorConfigAddOptions(prefix string, f *flag.FlagSet) { + f.Bool(prefix+".safe-block-wait-for-block-validator", DefaultSyncMonitorConfig.SafeBlockWaitForBlockValidator, "wait for block validator to complete before returning safe block number") + f.Bool(prefix+".finalized-block-wait-for-block-validator", DefaultSyncMonitorConfig.FinalizedBlockWaitForBlockValidator, "wait for block validator to complete before returning finalized block number") +} + +type SyncMonitor struct { + config *SyncMonitorConfig + consensus execution.ConsensusInfo + exec *ExecutionEngine +} + +func NewSyncMonitor(config *SyncMonitorConfig, exec *ExecutionEngine) *SyncMonitor { + return &SyncMonitor{ + config: config, + exec: exec, + } +} + +func (s *SyncMonitor) FullSyncProgressMap() map[string]interface{} { + res := s.consensus.FullSyncProgressMap() + consensusSyncTarget := s.consensus.SyncTargetMessageCount() + + built, err := s.exec.HeadMessageNumber() + if err != nil { + res["headMsgNumberError"] = err + } + + res["builtBlock"] = built + res["consensusSyncTarget"] = consensusSyncTarget + + return res +} + +func (s *SyncMonitor) SyncProgressMap() map[string]interface{} { + if s.consensus.Synced() { + built, err := s.exec.HeadMessageNumber() + consensusSyncTarget := s.consensus.SyncTargetMessageCount() + if err != nil && built+1 >= consensusSyncTarget { + return make(map[string]interface{}) + } + } + return s.FullSyncProgressMap() +} + +func (s *SyncMonitor) SafeBlockNumber(ctx context.Context) (uint64, error) { + if s.consensus == nil { + return 0, errors.New("not set up for safeblock") + } + msg, err := s.consensus.GetSafeMsgCount(ctx) + if err != nil { + return 0, err + } + if s.config.SafeBlockWaitForBlockValidator { + latestValidatedCount, err := s.consensus.ValidatedMessageCount() + if err != nil { + return 0, err + } + if msg > latestValidatedCount { + msg = latestValidatedCount + } + } + block := s.exec.MessageIndexToBlockNumber(msg - 1) + return block, nil +} + +func (s *SyncMonitor) FinalizedBlockNumber(ctx context.Context) (uint64, error) { + if s.consensus == nil { + return 0, errors.New("not set up for safeblock") + } + msg, err := s.consensus.GetFinalizedMsgCount(ctx) + if err != nil { + return 0, err + } + if s.config.FinalizedBlockWaitForBlockValidator { + latestValidatedCount, err := s.consensus.ValidatedMessageCount() + if err != nil { + return 0, err + } + if msg > latestValidatedCount { + msg = latestValidatedCount + } + } + block := s.exec.MessageIndexToBlockNumber(msg - 1) + return block, nil +} + +func (s *SyncMonitor) Synced() bool { + return len(s.SyncProgressMap()) == 0 +} + +func (s *SyncMonitor) SetConsensusInfo(consensus execution.ConsensusInfo) { + s.consensus = consensus +} diff --git a/execution/interface.go b/execution/interface.go index 2cbbf550ad..b0817aeac4 100644 --- a/execution/interface.go +++ b/execution/interface.go @@ -54,7 +54,6 @@ type ExecutionSequencer interface { ForwardTo(url string) error SequenceDelayedMessage(message *arbostypes.L1IncomingMessage, delayedSeqNum uint64) error 
NextDelayedMessageNumber() (uint64, error) - SetTransactionStreamer(streamer TransactionStreamer) } type FullExecutionClient interface { @@ -67,19 +66,35 @@ type FullExecutionClient interface { Maintenance() error - // TODO: only used to get safe/finalized block numbers - MessageIndexToBlockNumber(messageNum arbutil.MessageIndex) uint64 - ArbOSVersionForMessageNumber(messageNum arbutil.MessageIndex) (uint64, error) } // not implemented in execution, used as input +// BatchFetcher is required for any execution node type BatchFetcher interface { - FetchBatch(batchNum uint64) ([]byte, common.Hash, error) + FetchBatch(ctx context.Context, batchNum uint64) ([]byte, common.Hash, error) + FindInboxBatchContainingMessage(message arbutil.MessageIndex) (uint64, bool, error) + GetBatchParentChainBlock(seqNum uint64) (uint64, error) } -type TransactionStreamer interface { - BatchFetcher +type ConsensusInfo interface { + Synced() bool + FullSyncProgressMap() map[string]interface{} + SyncTargetMessageCount() arbutil.MessageIndex + + // TODO: switch from pulling to pushing safe/finalized + GetSafeMsgCount(ctx context.Context) (arbutil.MessageIndex, error) + GetFinalizedMsgCount(ctx context.Context) (arbutil.MessageIndex, error) + ValidatedMessageCount() (arbutil.MessageIndex, error) +} + +type ConsensusSequencer interface { WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata) error ExpectChosenSequencer() error } + +type FullConsensusClient interface { + BatchFetcher + ConsensusInfo + ConsensusSequencer +} diff --git a/nodeInterface/NodeInterface.go b/execution/nodeInterface/NodeInterface.go similarity index 88% rename from nodeInterface/NodeInterface.go rename to execution/nodeInterface/NodeInterface.go index bdcfb569f4..7e524731d0 100644 --- a/nodeInterface/NodeInterface.go +++ b/execution/nodeInterface/NodeInterface.go @@ -20,14 +20,12 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" - "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos" "github.com/offchainlabs/nitro/arbos/l1pricing" "github.com/offchainlabs/nitro/arbos/retryables" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/solgen/go/node_interfacegen" - "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/merkletree" ) @@ -53,90 +51,129 @@ var merkleTopic common.Hash var l2ToL1TxTopic common.Hash var l2ToL1TransactionTopic common.Hash -var blockInGenesis = errors.New("") -var blockAfterLatestBatch = errors.New("") - func (n NodeInterface) NitroGenesisBlock(c ctx) (huge, error) { block := n.backend.ChainConfig().ArbitrumChainParams.GenesisBlockNum return arbmath.UintToBig(block), nil } +// bool will be false but no error if behind genesis +func (n NodeInterface) blockNumToMessageIndex(blockNum uint64) (arbutil.MessageIndex, bool, error) { + node, err := gethExecFromNodeInterfaceBackend(n.backend) + if err != nil { + return 0, false, err + } + blockchain, err := blockchainFromNodeInterfaceBackend(n.backend) + if err != nil { + return 0, false, err + } + if blockNum < blockchain.Config().ArbitrumChainParams.GenesisBlockNum { + return 0, true, nil + } + msgIndex, err := node.ExecEngine.BlockNumberToMessageIndex(blockNum) + if err != nil { + return 0, false, err + } + return msgIndex, true, nil +} + +func (n NodeInterface) msgNumToInboxBatch(msgIndex arbutil.MessageIndex) 
(uint64, bool, error) { + node, err := gethExecFromNodeInterfaceBackend(n.backend) + if err != nil { + return 0, false, err + } + fetcher := node.ExecEngine.GetBatchFetcher() + if fetcher == nil { + return 0, false, errors.New("batch fetcher not set") + } + return fetcher.FindInboxBatchContainingMessage(msgIndex) +} + func (n NodeInterface) FindBatchContainingBlock(c ctx, evm mech, blockNum uint64) (uint64, error) { - node, err := arbNodeFromNodeInterfaceBackend(n.backend) + msgIndex, found, err := n.blockNumToMessageIndex(blockNum) if err != nil { return 0, err } - return findBatchContainingBlock(node, node.TxStreamer.GenesisBlockNumber(), blockNum) + if !found { + return 0, fmt.Errorf("block %v is part of genesis", blockNum) + } + res, found, err := n.msgNumToInboxBatch(msgIndex) + if err == nil && !found { + return 0, errors.New("block not yet found on any batch") + } + return res, err } func (n NodeInterface) GetL1Confirmations(c ctx, evm mech, blockHash bytes32) (uint64, error) { - node, err := arbNodeFromNodeInterfaceBackend(n.backend) + node, err := gethExecFromNodeInterfaceBackend(n.backend) if err != nil { return 0, err } - if node.InboxReader == nil { - return 0, nil - } - bc, err := blockchainFromNodeInterfaceBackend(n.backend) + blockchain, err := blockchainFromNodeInterfaceBackend(n.backend) if err != nil { return 0, err } - header := bc.GetHeaderByHash(blockHash) + header := blockchain.GetHeaderByHash(blockHash) if header == nil { return 0, errors.New("unknown block hash") } blockNum := header.Number.Uint64() - genesis := node.TxStreamer.GenesisBlockNumber() - batch, err := findBatchContainingBlock(node, genesis, blockNum) + + // blocks behind genesis are treated as belonging to batch 0 + msgNum, _, err := n.blockNumToMessageIndex(blockNum) if err != nil { - if errors.Is(err, blockInGenesis) { - batch = 0 - } else if errors.Is(err, blockAfterLatestBatch) { - return 0, nil - } else { - return 0, err - } + return 0, err + } + // batches not yet posted have 0 confirmations but no error + batchNum, found, err := n.msgNumToInboxBatch(msgNum) + if err != nil { + return 0, err + } + if !found { + return 0, nil } - meta, err := node.InboxTracker.GetBatchMetadata(batch) + parentChainBlockNum, err := node.ExecEngine.GetBatchFetcher().GetBatchParentChainBlock(batchNum) if err != nil { return 0, err } - if node.L1Reader.IsParentChainArbitrum() { - parentChainClient := node.L1Reader.Client() + + if node.ParentChainReader.IsParentChainArbitrum() { + parentChainClient := node.ParentChainReader.Client() parentNodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, parentChainClient) if err != nil { return 0, err } - parentChainBlock, err := parentChainClient.BlockByNumber(n.context, new(big.Int).SetUint64(meta.ParentChainBlock)) + parentChainBlock, err := parentChainClient.BlockByNumber(n.context, new(big.Int).SetUint64(parentChainBlockNum)) if err != nil { // Hide the parent chain RPC error from the client in case it contains sensitive information. // Likely though, this error is just "not found" because the block got reorg'd. 
- return 0, fmt.Errorf("failed to get parent chain block %v containing batch", meta.ParentChainBlock) + return 0, fmt.Errorf("failed to get parent chain block %v containing batch", parentChainBlockNum) } confs, err := parentNodeInterface.GetL1Confirmations(&bind.CallOpts{Context: n.context}, parentChainBlock.Hash()) if err != nil { log.Warn( "Failed to get L1 confirmations from parent chain", - "blockNumber", meta.ParentChainBlock, + "blockNumber", parentChainBlockNum, "blockHash", parentChainBlock.Hash(), "err", err, ) return 0, fmt.Errorf("failed to get L1 confirmations from parent chain for block %v", parentChainBlock.Hash()) } return confs, nil } - latestL1Block, latestBatchCount := node.InboxReader.GetLastReadBlockAndBatchCount() - if latestBatchCount <= batch { - return 0, nil // batch was reorg'd out? - } - if latestL1Block < meta.ParentChainBlock || arbutil.BlockNumberToMessageCount(blockNum, genesis) > meta.MessageCount { + if node.ParentChainReader == nil { return 0, nil } - canonicalHash := bc.GetCanonicalHash(header.Number.Uint64()) - if canonicalHash != header.Hash() { - return 0, errors.New("block hash is non-canonical") + latestHeader, err := node.ParentChainReader.LastHeaderWithError() + if err != nil { + return 0, err + } + if latestHeader == nil { + return 0, errors.New("no headers read from l1") } - confs := (latestL1Block - meta.ParentChainBlock) + 1 + node.InboxReader.GetDelayBlocks() - return confs, nil + latestBlockNum := latestHeader.Number.Uint64() + if latestBlockNum < parentChainBlockNum { + return 0, nil + } + return (latestBlockNum - parentChainBlockNum), nil } func (n NodeInterface) EstimateRetryableTicket( @@ -561,42 +598,18 @@ func (n NodeInterface) GasEstimateComponents( return total, gasForL1, baseFee, l1BaseFeeEstimate, nil } -func findBatchContainingBlock(node *arbnode.Node, genesis uint64, block uint64) (uint64, error) { - if block <= genesis { - return 0, fmt.Errorf("%wblock %v is part of genesis", blockInGenesis, block) - } - pos := arbutil.BlockNumberToMessageCount(block, genesis) - 1 - high, err := node.InboxTracker.GetBatchCount() - if err != nil { - return 0, err - } - high-- - latestCount, err := node.InboxTracker.GetBatchMessageCount(high) - if err != nil { - return 0, err - } - latestBlock := arbutil.MessageCountToBlockNumber(latestCount, genesis) - if int64(block) > latestBlock { - return 0, fmt.Errorf( - "%wrequested block %v is after latest on-chain block %v published in batch %v", - blockAfterLatestBatch, block, latestBlock, high, - ) - } - return staker.FindBatchContainingMessageIndex(node.InboxTracker, pos, high) -} - func (n NodeInterface) LegacyLookupMessageBatchProof(c ctx, evm mech, batchNum huge, index uint64) ( proof []bytes32, path huge, l2Sender addr, l1Dest addr, l2Block huge, l1Block huge, timestamp huge, amount huge, calldataForL1 []byte, err error) { - node, err := arbNodeFromNodeInterfaceBackend(n.backend) + node, err := gethExecFromNodeInterfaceBackend(n.backend) if err != nil { return } - if node.ClassicOutboxRetriever == nil { + if node.ClassicOutbox == nil { err = errors.New("this node doesnt support classicLookupMessageBatchProof") return } - msg, err := node.ClassicOutboxRetriever.GetMsg(batchNum, index) + msg, err := node.ClassicOutbox.GetMsg(batchNum, index) if err != nil { return } diff --git a/nodeInterface/NodeInterfaceDebug.go b/execution/nodeInterface/NodeInterfaceDebug.go similarity index 100% rename from nodeInterface/NodeInterfaceDebug.go rename to execution/nodeInterface/NodeInterfaceDebug.go diff --git 
a/nodeInterface/virtual-contracts.go b/execution/nodeInterface/virtual-contracts.go similarity index 96% rename from nodeInterface/virtual-contracts.go rename to execution/nodeInterface/virtual-contracts.go index b35381a77a..3a863e31b5 100644 --- a/nodeInterface/virtual-contracts.go +++ b/execution/nodeInterface/virtual-contracts.go @@ -15,10 +15,10 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos" "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/l1pricing" + "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/gethhook" "github.com/offchainlabs/nitro/precompiles" "github.com/offchainlabs/nitro/solgen/go/node_interfacegen" @@ -173,16 +173,16 @@ func init() { merkleTopic = arbSys.Events["SendMerkleUpdate"].ID } -func arbNodeFromNodeInterfaceBackend(backend BackendAPI) (*arbnode.Node, error) { +func gethExecFromNodeInterfaceBackend(backend BackendAPI) (*gethexec.ExecutionNode, error) { apiBackend, ok := backend.(*arbitrum.APIBackend) if !ok { return nil, errors.New("API backend isn't Arbitrum") } - arbNode, ok := apiBackend.GetArbitrumNode().(*arbnode.Node) + exec, ok := apiBackend.GetArbitrumNode().(*gethexec.ExecutionNode) if !ok { return nil, errors.New("failed to get Arbitrum Node from backend") } - return arbNode, nil + return exec, nil } func blockchainFromNodeInterfaceBackend(backend BackendAPI) (*core.BlockChain, error) { diff --git a/go-ethereum b/go-ethereum index 657dcf6626..22399a74e2 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 657dcf66263e940e86f9e89325c5100899d5ab58 +Subproject commit 22399a74e2b413e99a4f0d06c65862ced0d021c7 diff --git a/go.mod b/go.mod index c75a46f198..027ef8286d 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,6 @@ require ( github.com/OffchainLabs/bold v0.0.0-00010101000000-000000000000 github.com/Shopify/toxiproxy v2.1.4+incompatible github.com/alicebob/miniredis/v2 v2.21.0 - github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 github.com/andybalholm/brotli v1.0.4 github.com/aws/aws-sdk-go-v2 v1.16.4 github.com/aws/aws-sdk-go-v2/config v1.15.5 @@ -39,6 +38,7 @@ require ( github.com/libp2p/go-libp2p v0.27.8 github.com/multiformats/go-multiaddr v0.12.1 github.com/multiformats/go-multihash v0.2.3 + github.com/pkg/errors v0.9.1 github.com/r3labs/diff/v3 v3.0.1 github.com/rivo/tview v0.0.0-20230814110005-ccc2c8119703 github.com/spf13/pflag v1.0.5 @@ -238,7 +238,6 @@ require ( github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/openzipkin/zipkin-go v0.4.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect - github.com/pkg/errors v0.9.1 // indirect github.com/polydawn/refmt v0.89.0 // indirect github.com/prometheus/client_golang v1.14.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect diff --git a/staker/l1_validator.go b/staker/l1_validator.go index 4e7aa22cbe..56389ae80e 100644 --- a/staker/l1_validator.go +++ b/staker/l1_validator.go @@ -339,10 +339,14 @@ func (v *L1Validator) generateNodeAction( batchNum = localBatchCount - 1 validatedCount = messageCount } else { - batchNum, err = FindBatchContainingMessageIndex(v.inboxTracker, validatedCount-1, localBatchCount) + var found bool + batchNum, found, err = v.inboxTracker.FindInboxBatchContainingMessage(validatedCount - 1) if err != nil { return nil, false, err } + if !found { + 
return nil, false, errors.New("batch not found on L1") + } } execResult, err := v.txStreamer.ResultAtCount(validatedCount) if err != nil { diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index 6315dd981c..93bccd5fc1 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -59,6 +59,7 @@ type InboxTrackerInterface interface { GetBatchMessageCount(seqNum uint64) (arbutil.MessageIndex, error) GetBatchAcc(seqNum uint64) (common.Hash, error) GetBatchCount() (uint64, error) + FindInboxBatchContainingMessage(pos arbutil.MessageIndex) (uint64, bool, error) } type TransactionStreamerInterface interface { @@ -111,39 +112,6 @@ func GlobalStatePositionsAtCount( return startPos, GlobalStatePosition{batch, posInBatch + 1}, nil } -func FindBatchContainingMessageIndex( - tracker InboxTrackerInterface, pos arbutil.MessageIndex, high uint64, -) (uint64, error) { - var low uint64 - // Iteration preconditions: - // - high >= low - // - msgCount(low - 1) <= pos implies low <= target - // - msgCount(high) > pos implies high >= target - // Therefore, if low == high, then low == high == target - for high > low { - // Due to integer rounding, mid >= low && mid < high - mid := (low + high) / 2 - count, err := tracker.GetBatchMessageCount(mid) - if err != nil { - return 0, err - } - if count < pos { - // Must narrow as mid >= low, therefore mid + 1 > low, therefore newLow > oldLow - // Keeps low precondition as msgCount(mid) < pos - low = mid + 1 - } else if count == pos { - return mid + 1, nil - } else if count == pos+1 || mid == low { // implied: count > pos - return mid, nil - } else { // implied: count > pos + 1 - // Must narrow as mid < high, therefore newHigh < lowHigh - // Keeps high precondition as msgCount(mid) > pos - high = mid - } - } - return low, nil -} - type ValidationEntryStage uint32 const ( @@ -365,13 +333,12 @@ func (v *StatelessBlockValidator) GlobalStatePositionsAtCount(count arbutil.Mess if count == 1 { return GlobalStatePosition{}, GlobalStatePosition{1, 0}, nil } - batchCount, err := v.inboxTracker.GetBatchCount() + batch, found, err := v.inboxTracker.FindInboxBatchContainingMessage(count - 1) if err != nil { return GlobalStatePosition{}, GlobalStatePosition{}, err } - batch, err := FindBatchContainingMessageIndex(v.inboxTracker, count-1, batchCount) - if err != nil { - return GlobalStatePosition{}, GlobalStatePosition{}, err + if !found { + return GlobalStatePosition{}, GlobalStatePosition{}, errors.New("batch not found on L1 yet") } return GlobalStatePositionsAtCount(v.inboxTracker, count, batch) } diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 0dda408aaa..cd65cd2edc 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -35,6 +35,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/ethereum/go-ethereum/arbitrum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" @@ -56,7 +57,7 @@ import ( "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos" "github.com/offchainlabs/nitro/arbutil" - _ "github.com/offchainlabs/nitro/nodeInterface" + _ "github.com/offchainlabs/nitro/execution/nodeInterface" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/solgen/go/mocksgen" 
"github.com/offchainlabs/nitro/solgen/go/precompilesgen" @@ -183,6 +184,13 @@ func (b *NodeBuilder) DefaultConfig(t *testing.T, withL1 bool) *NodeBuilder { } func (b *NodeBuilder) Build(t *testing.T) func() { + if b.execConfig.RPC.MaxRecreateStateDepth == arbitrum.UninitializedMaxRecreateStateDepth { + if b.execConfig.Caching.Archive { + b.execConfig.RPC.MaxRecreateStateDepth = arbitrum.DefaultArchiveNodeMaxRecreateStateDepth + } else { + b.execConfig.RPC.MaxRecreateStateDepth = arbitrum.DefaultNonArchiveNodeMaxRecreateStateDepth + } + } if b.withL1 { l1, l2 := NewTestClient(b.ctx), NewTestClient(b.ctx) b.L2Info, l2.ConsensusNode, l2.Client, l2.Stack, b.L1Info, l1.L1Backend, l1.Client, l1.Stack = @@ -229,6 +237,13 @@ func (b *NodeBuilder) Build2ndNode(t *testing.T, params *SecondNodeParams) (*Tes if params.execConfig == nil { params.execConfig = b.execConfig } + if params.execConfig.RPC.MaxRecreateStateDepth == arbitrum.UninitializedMaxRecreateStateDepth { + if params.execConfig.Caching.Archive { + params.execConfig.RPC.MaxRecreateStateDepth = arbitrum.DefaultArchiveNodeMaxRecreateStateDepth + } else { + params.execConfig.RPC.MaxRecreateStateDepth = arbitrum.DefaultNonArchiveNodeMaxRecreateStateDepth + } + } l2 := NewTestClient(b.ctx) l2.Client, l2.ConsensusNode = @@ -1016,6 +1031,10 @@ func setupConfigWithDAS( dasSignerKey, _, err := das.GenerateAndStoreKeys(dbPath) Require(t, err) + dbConfig := das.DefaultLocalDBStorageConfig + dbConfig.Enable = enableDbStorage + dbConfig.DataDir = dbPath + dasConfig := &das.DataAvailabilityConfig{ Enable: enableDas, Key: das.KeyConfig{ @@ -1025,10 +1044,7 @@ func setupConfigWithDAS( Enable: enableFileStorage, DataDir: dbPath, }, - LocalDBStorage: das.LocalDBStorageConfig{ - Enable: enableDbStorage, - DataDir: dbPath, - }, + LocalDBStorage: dbConfig, RequestTimeout: 5 * time.Second, ParentChainNodeURL: "none", SequencerInboxAddress: "none", diff --git a/system_tests/das_test.go b/system_tests/das_test.go index 8edd91e1ec..602c6da5e6 100644 --- a/system_tests/das_test.go +++ b/system_tests/das_test.go @@ -253,19 +253,20 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { pubkey, _, err := das.GenerateAndStoreKeys(keyDir) Require(t, err) + dbConfig := das.DefaultLocalDBStorageConfig + dbConfig.Enable = true + dbConfig.DataDir = dbDataDir + serverConfig := das.DataAvailabilityConfig{ Enable: true, - LocalCache: das.TestBigCacheConfig, + LocalCache: das.TestCacheConfig, LocalFileStorage: das.LocalFileStorageConfig{ Enable: true, DataDir: fileDataDir, }, - LocalDBStorage: das.LocalDBStorageConfig{ - Enable: true, - DataDir: dbDataDir, - }, + LocalDBStorage: dbConfig, Key: das.KeyConfig{ KeyDir: keyDir, diff --git a/system_tests/fees_test.go b/system_tests/fees_test.go index 3ff3bfc43f..01ace5c7e2 100644 --- a/system_tests/fees_test.go +++ b/system_tests/fees_test.go @@ -119,8 +119,6 @@ func TestSequencerFeePaid(t *testing.T) { } func testSequencerPriceAdjustsFrom(t *testing.T, initialEstimate uint64) { - t.Parallel() - _ = os.Mkdir("test-data", 0766) path := filepath.Join("test-data", fmt.Sprintf("testSequencerPriceAdjustsFrom%v.csv", initialEstimate)) diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index 29b1252de4..03b6d690f1 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -1,10 +1,6 @@ // Copyright 2021-2022, Offchain Labs, Inc. 
// For license information, see https://github.com/nitro/blob/master/LICENSE -// race detection makes things slow and miss timeouts -//go:build !race -// +build !race - package arbtest import ( @@ -29,14 +25,17 @@ import ( "github.com/offchainlabs/nitro/arbcompress" "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos" + "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/solgen/go/challengegen" "github.com/offchainlabs/nitro/solgen/go/mocksgen" "github.com/offchainlabs/nitro/solgen/go/ospgen" "github.com/offchainlabs/nitro/solgen/go/yulgen" "github.com/offchainlabs/nitro/staker" + "github.com/offchainlabs/nitro/util/signature" "github.com/offchainlabs/nitro/validator" "github.com/offchainlabs/nitro/validator/server_common" "github.com/offchainlabs/nitro/validator/valnode" @@ -165,7 +164,7 @@ func makeBatch(t *testing.T, l2Node *arbnode.Node, l2Info *BlockchainTestInfo, b seqNum := new(big.Int).Lsh(common.Big1, 256) seqNum.Sub(seqNum, common.Big1) - tx, err := seqInbox.AddSequencerL2BatchFromOrigin0(sequencer, seqNum, message, big.NewInt(1), common.Address{}, big.NewInt(0), big.NewInt(0)) + tx, err := seqInbox.AddSequencerL2BatchFromOrigin8f111f3c(sequencer, seqNum, message, big.NewInt(1), common.Address{}, big.NewInt(0), big.NewInt(0)) Require(t, err) receipt, err := EnsureTxSucceeded(ctx, backend, tx) Require(t, err) @@ -239,6 +238,16 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha return bridgeAddr, seqInbox, seqInboxAddr } +func createL2Nodes(t *testing.T, ctx context.Context, conf *arbnode.Config, chainConfig *params.ChainConfig, l1Client arbutil.L1Interface, l2info *BlockchainTestInfo, rollupAddresses *chaininfo.RollupAddresses, initMsg *arbostypes.ParsedInitMessage, txOpts *bind.TransactOpts, signer signature.DataSignerFunc, fatalErrChan chan error) (*arbnode.Node, *gethexec.ExecutionNode) { + _, stack, l2ChainDb, l2ArbDb, l2Blockchain := createL2BlockChainWithStackConfig(t, l2info, "", chainConfig, initMsg, nil, nil) + execNode, err := gethexec.CreateExecutionNode(ctx, stack, l2ChainDb, l2Blockchain, l1Client, gethexec.ConfigDefaultTest) + Require(t, err) + consensusNode, err := arbnode.CreateNode(ctx, stack, execNode, l2ArbDb, NewFetcherFromConfig(conf), chainConfig, l1Client, rollupAddresses, txOpts, txOpts, signer, fatalErrChan, big.NewInt(1337), nil) + Require(t, err) + + return consensusNode, execNode +} + func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, challengeMsgIdx int64) { glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) glogger.Verbosity(log.LvlInfo) @@ -281,25 +290,18 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall asserterBridgeAddr, asserterSeqInbox, asserterSeqInboxAddr := setupSequencerInboxStub(ctx, t, l1Info, l1Backend, chainConfig) challengerBridgeAddr, challengerSeqInbox, challengerSeqInboxAddr := setupSequencerInboxStub(ctx, t, l1Info, l1Backend, chainConfig) - asserterL2Info, asserterL2Stack, asserterL2ChainDb, asserterL2ArbDb, asserterL2Blockchain := createL2BlockChainWithStackConfig(t, nil, "", chainConfig, initMessage, nil, nil) asserterRollupAddresses.Bridge = asserterBridgeAddr asserterRollupAddresses.SequencerInbox = asserterSeqInboxAddr - asserterExec, err := 
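The new createL2Nodes helper in full_challenge_impl_test.go pairs gethexec.CreateExecutionNode with arbnode.CreateNode so RunChallengeTest no longer wires each stack by hand. A hedged usage sketch, assumed to sit in the same arbtest package as the helpers in this patch; startL2Pair is a hypothetical wrapper, everything else comes from the test code shown here:

```go
package arbtest

import (
	"context"
	"testing"

	"github.com/ethereum/go-ethereum/params"

	"github.com/offchainlabs/nitro/arbnode"
	"github.com/offchainlabs/nitro/arbos/arbostypes"
	"github.com/offchainlabs/nitro/arbutil"
	"github.com/offchainlabs/nitro/cmd/chaininfo"
	"github.com/offchainlabs/nitro/execution/gethexec"
)

// startL2Pair builds and starts one consensus/execution pair the way
// RunChallengeTest now does for both the asserter and the challenger.
func startL2Pair(t *testing.T, ctx context.Context, conf *arbnode.Config, chainConfig *params.ChainConfig,
	l1Backend arbutil.L1Interface, addrs *chaininfo.RollupAddresses, initMsg *arbostypes.ParsedInitMessage,
	fatalErrChan chan error) (*arbnode.Node, *gethexec.ExecutionNode) {
	l2Info := NewArbTestInfo(t, chainConfig.ChainID)
	consensus, exec := createL2Nodes(t, ctx, conf, chainConfig, l1Backend, l2Info, addrs, initMsg, nil, nil, fatalErrChan)
	Require(t, consensus.Start(ctx))
	return consensus, exec
}
```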
gethexec.CreateExecutionNode(ctx, asserterL2Stack, asserterL2ChainDb, asserterL2Blockchain, l1Backend, gethexec.ConfigDefaultTest) - Require(t, err) - parentChainID := big.NewInt(1337) - asserterL2, err := arbnode.CreateNode(ctx, asserterL2Stack, asserterExec, asserterL2ArbDb, NewFetcherFromConfig(conf), chainConfig, l1Backend, asserterRollupAddresses, nil, nil, nil, fatalErrChan, parentChainID, nil) - Require(t, err) - err = asserterL2.Start(ctx) + asserterL2Info := NewArbTestInfo(t, chainConfig.ChainID) + asserterL2, asserterExec := createL2Nodes(t, ctx, conf, chainConfig, l1Backend, asserterL2Info, asserterRollupAddresses, initMessage, nil, nil, fatalErrChan) + err := asserterL2.Start(ctx) Require(t, err) - challengerL2Info, challengerL2Stack, challengerL2ChainDb, challengerL2ArbDb, challengerL2Blockchain := createL2BlockChainWithStackConfig(t, nil, "", chainConfig, initMessage, nil, nil) challengerRollupAddresses := *asserterRollupAddresses challengerRollupAddresses.Bridge = challengerBridgeAddr challengerRollupAddresses.SequencerInbox = challengerSeqInboxAddr - challengerExec, err := gethexec.CreateExecutionNode(ctx, challengerL2Stack, challengerL2ChainDb, challengerL2Blockchain, l1Backend, gethexec.ConfigDefaultTest) - Require(t, err) - challengerL2, err := arbnode.CreateNode(ctx, challengerL2Stack, challengerExec, challengerL2ArbDb, NewFetcherFromConfig(conf), chainConfig, l1Backend, &challengerRollupAddresses, nil, nil, nil, fatalErrChan, parentChainID, nil) - Require(t, err) + challengerL2Info := NewArbTestInfo(t, chainConfig.ChainID) + challengerL2, challengerExec := createL2Nodes(t, ctx, conf, chainConfig, l1Backend, challengerL2Info, &challengerRollupAddresses, initMessage, nil, nil, fatalErrChan) err = challengerL2.Start(ctx) Require(t, err) @@ -387,7 +389,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall confirmLatestBlock(ctx, t, l1Info, l1Backend) - asserterValidator, err := staker.NewStatelessBlockValidator(asserterL2.InboxReader, asserterL2.InboxTracker, asserterL2.TxStreamer, asserterExec.Recorder, asserterL2ArbDb, nil, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) + asserterValidator, err := staker.NewStatelessBlockValidator(asserterL2.InboxReader, asserterL2.InboxTracker, asserterL2.TxStreamer, asserterExec.Recorder, asserterL2.ArbDB, nil, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) if err != nil { Fatal(t, err) } @@ -404,7 +406,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall if err != nil { Fatal(t, err) } - challengerValidator, err := staker.NewStatelessBlockValidator(challengerL2.InboxReader, challengerL2.InboxTracker, challengerL2.TxStreamer, challengerExec.Recorder, challengerL2ArbDb, nil, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) + challengerValidator, err := staker.NewStatelessBlockValidator(challengerL2.InboxReader, challengerL2.InboxTracker, challengerL2.TxStreamer, challengerExec.Recorder, challengerL2.ArbDB, nil, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) if err != nil { Fatal(t, err) } @@ -492,17 +494,3 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall Fatal(t, "challenge timed out without winner") } - -func TestMockChallengeManagerAsserterIncorrect(t *testing.T) { - t.Parallel() - for i := int64(1); i <= makeBatch_MsgsPerBatch*3; i++ { - RunChallengeTest(t, false, true, i) - } -} - -func TestMockChallengeManagerAsserterCorrect(t *testing.T) { - t.Parallel() - for i := int64(1); i <= 
makeBatch_MsgsPerBatch*3; i++ { - RunChallengeTest(t, true, true, i) - } -} diff --git a/system_tests/full_challenge_mock_test.go b/system_tests/full_challenge_mock_test.go new file mode 100644 index 0000000000..d32c2b40ab --- /dev/null +++ b/system_tests/full_challenge_mock_test.go @@ -0,0 +1,21 @@ +// race detection makes things slow and miss timeouts +//go:build !race +// +build !race + +package arbtest + +import "testing" + +func TestMockChallengeManagerAsserterIncorrect(t *testing.T) { + t.Parallel() + for i := int64(1); i <= makeBatch_MsgsPerBatch*3; i++ { + RunChallengeTest(t, false, true, i) + } +} + +func TestMockChallengeManagerAsserterCorrect(t *testing.T) { + t.Parallel() + for i := int64(1); i <= makeBatch_MsgsPerBatch*3; i++ { + RunChallengeTest(t, true, true, i) + } +} diff --git a/system_tests/meaningless_reorg_test.go b/system_tests/meaningless_reorg_test.go index 11b68b558b..27ed8572c8 100644 --- a/system_tests/meaningless_reorg_test.go +++ b/system_tests/meaningless_reorg_test.go @@ -27,7 +27,7 @@ func TestMeaninglessBatchReorg(t *testing.T) { Require(t, err) seqOpts := builder.L1Info.GetDefaultTransactOpts("Sequencer", ctx) - tx, err := seqInbox.AddSequencerL2BatchFromOrigin0(&seqOpts, big.NewInt(1), nil, big.NewInt(1), common.Address{}, common.Big0, common.Big0) + tx, err := seqInbox.AddSequencerL2BatchFromOrigin8f111f3c(&seqOpts, big.NewInt(1), nil, big.NewInt(1), common.Address{}, common.Big0, common.Big0) Require(t, err) batchReceipt, err := builder.L1.EnsureTxSucceeded(tx) Require(t, err) @@ -69,7 +69,7 @@ func TestMeaninglessBatchReorg(t *testing.T) { // Produce a new l1Block so that the batch ends up in a different l1Block than before builder.L1.TransferBalance(t, "User", "User", common.Big1, builder.L1Info) - tx, err = seqInbox.AddSequencerL2BatchFromOrigin0(&seqOpts, big.NewInt(1), nil, big.NewInt(1), common.Address{}, common.Big0, common.Big0) + tx, err = seqInbox.AddSequencerL2BatchFromOrigin8f111f3c(&seqOpts, big.NewInt(1), nil, big.NewInt(1), common.Address{}, common.Big0, common.Big0) Require(t, err) newBatchReceipt, err := builder.L1.EnsureTxSucceeded(tx) Require(t, err) diff --git a/system_tests/nodeinterface_test.go b/system_tests/nodeinterface_test.go index 3424a58e9e..b692af6e30 100644 --- a/system_tests/nodeinterface_test.go +++ b/system_tests/nodeinterface_test.go @@ -1,6 +1,10 @@ // Copyright 2021-2022, Offchain Labs, Inc. 
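The mock challenge tests move out of full_challenge_impl_test.go into a new file guarded by a !race build constraint, so `go test -race` (which the CI now runs with a longer timeout) never compiles them in instead of timing out on them. The minimal shape of such a file, with illustrative contents that are not part of the patch:

```go
// race detection makes things slow and miss timeouts
//go:build !race
// +build !race

package arbtest

import "testing"

// Any test placed in a file with the constraint above is excluded from
// `go test -race` runs; this placeholder only illustrates the pattern.
func TestOnlyWithoutRaceDetector(t *testing.T) {
	t.Parallel()
}
```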
// For license information, see https://github.com/nitro/blob/master/LICENSE +// race detection makes things slow and miss timeouts +//go:build !race +// +build !race + package arbtest import ( @@ -11,10 +15,82 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" + "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/solgen/go/node_interfacegen" ) +func TestFindBatch(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + l1Info := NewL1TestInfo(t) + initialBalance := new(big.Int).Lsh(big.NewInt(1), 200) + l1Info.GenerateGenesisAccount("deployer", initialBalance) + l1Info.GenerateGenesisAccount("asserter", initialBalance) + l1Info.GenerateGenesisAccount("challenger", initialBalance) + l1Info.GenerateGenesisAccount("sequencer", initialBalance) + + l1Info, l1Backend, _, _ := createTestL1BlockChain(t, l1Info) + conf := arbnode.ConfigDefaultL1Test() + conf.BlockValidator.Enable = false + conf.BatchPoster.Enable = false + + chainConfig := params.ArbitrumDevTestChainConfig() + fatalErrChan := make(chan error, 10) + rollupAddresses, initMsg := DeployOnTestL1(t, ctx, l1Info, l1Backend, chainConfig) + + bridgeAddr, seqInbox, seqInboxAddr := setupSequencerInboxStub(ctx, t, l1Info, l1Backend, chainConfig) + + callOpts := bind.CallOpts{Context: ctx} + + rollupAddresses.Bridge = bridgeAddr + rollupAddresses.SequencerInbox = seqInboxAddr + l2Info := NewArbTestInfo(t, chainConfig.ChainID) + consensus, _ := createL2Nodes(t, ctx, conf, chainConfig, l1Backend, l2Info, rollupAddresses, initMsg, nil, nil, fatalErrChan) + err := consensus.Start(ctx) + Require(t, err) + + l2Client := ClientForStack(t, consensus.Stack) + nodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, l2Client) + Require(t, err) + sequencerTxOpts := l1Info.GetDefaultTransactOpts("sequencer", ctx) + + l2Info.GenerateAccount("Destination") + makeBatch(t, consensus, l2Info, l1Backend, &sequencerTxOpts, seqInbox, seqInboxAddr, -1) + makeBatch(t, consensus, l2Info, l1Backend, &sequencerTxOpts, seqInbox, seqInboxAddr, -1) + makeBatch(t, consensus, l2Info, l1Backend, &sequencerTxOpts, seqInbox, seqInboxAddr, -1) + + for blockNum := uint64(0); blockNum < uint64(makeBatch_MsgsPerBatch)*3; blockNum++ { + gotBatchNum, err := nodeInterface.FindBatchContainingBlock(&callOpts, blockNum) + Require(t, err) + expBatchNum := uint64(0) + if blockNum > 0 { + expBatchNum = 1 + (blockNum-1)/uint64(makeBatch_MsgsPerBatch) + } + if expBatchNum != gotBatchNum { + Fatal(t, "wrong result from findBatchContainingBlock. blocknum ", blockNum, " expected ", expBatchNum, " got ", gotBatchNum) + } + batchL1Block, err := consensus.InboxTracker.GetBatchParentChainBlock(gotBatchNum) + Require(t, err) + blockHeader, err := l2Client.HeaderByNumber(ctx, new(big.Int).SetUint64(blockNum)) + Require(t, err) + blockHash := blockHeader.Hash() + + minCurrentL1Block, err := l1Backend.BlockNumber(ctx) + Require(t, err) + gotConfirmations, err := nodeInterface.GetL1Confirmations(&callOpts, blockHash) + Require(t, err) + maxCurrentL1Block, err := l1Backend.BlockNumber(ctx) + Require(t, err) + + if gotConfirmations > (maxCurrentL1Block-batchL1Block) || gotConfirmations < (minCurrentL1Block-batchL1Block) { + Fatal(t, "wrong number of confirmations. 
got ", gotConfirmations) + } + } +} + func TestL2BlockRangeForL1(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) diff --git a/system_tests/pendingblock_test.go b/system_tests/pendingblock_test.go new file mode 100644 index 0000000000..dc21bca525 --- /dev/null +++ b/system_tests/pendingblock_test.go @@ -0,0 +1,53 @@ +// Copyright 2021-2022, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package arbtest + +import ( + "context" + "testing" + "time" + + "github.com/offchainlabs/nitro/solgen/go/mocksgen" +) + +func TestPendingBlockTimeAndNumberAdvance(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() + + auth := builder.L2Info.GetDefaultTransactOpts("Faucet", ctx) + + _, _, testTimeAndNr, err := mocksgen.DeployPendingBlkTimeAndNrAdvanceCheck(&auth, builder.L2.Client) + Require(t, err) + + time.Sleep(1 * time.Second) + + _, err = testTimeAndNr.IsAdvancing(&auth) + Require(t, err) +} + +func TestPendingBlockArbBlockHashReturnsLatest(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() + + auth := builder.L2Info.GetDefaultTransactOpts("Faucet", ctx) + + _, _, pendingBlk, err := mocksgen.DeployPendingBlkTimeAndNrAdvanceCheck(&auth, builder.L2.Client) + Require(t, err) + + header, err := builder.L2.Client.HeaderByNumber(ctx, nil) + Require(t, err) + + _, err = pendingBlk.CheckArbBlockHashReturnsLatest(&auth, header.Hash()) + Require(t, err) +} diff --git a/system_tests/recreatestate_rpc_test.go b/system_tests/recreatestate_rpc_test.go index 1973587ecb..777ed17961 100644 --- a/system_tests/recreatestate_rpc_test.go +++ b/system_tests/recreatestate_rpc_test.go @@ -2,31 +2,30 @@ package arbtest import ( "context" + "encoding/binary" "errors" + "fmt" "math/big" "strings" + "sync" "testing" + "time" "github.com/ethereum/go-ethereum/arbitrum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/trie" "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/util" ) -func prepareNodeWithHistory(t *testing.T, ctx context.Context, execConfig *gethexec.Config, txCount uint64) (node *arbnode.Node, executionNode *gethexec.ExecutionNode, l2client *ethclient.Client, cancel func()) { - t.Helper() - builder := NewNodeBuilder(ctx).DefaultConfig(t, true) - builder.execConfig = execConfig - cleanup := builder.Build(t) - builder.L2Info.GenerateAccount("User2") +func makeSomeTransfers(t *testing.T, ctx context.Context, builder *NodeBuilder, txCount uint64) { var txs []*types.Transaction for i := uint64(0); i < txCount; i++ { tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil) @@ -38,8 +37,16 @@ func prepareNodeWithHistory(t *testing.T, ctx context.Context, execConfig *gethe _, err := builder.L2.EnsureTxSucceeded(tx) Require(t, err) } +} - return builder.L2.ConsensusNode, builder.L2.ExecNode, 
builder.L2.Client, cleanup +func prepareNodeWithHistory(t *testing.T, ctx context.Context, execConfig *gethexec.Config, txCount uint64) (*NodeBuilder, func()) { + t.Helper() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.execConfig = execConfig + cleanup := builder.Build(t) + builder.L2Info.GenerateAccount("User2") + makeSomeTransfers(t, ctx, builder, txCount) + return builder, cleanup } func fillHeaderCache(t *testing.T, bc *core.BlockChain, from, to uint64) { @@ -89,17 +96,19 @@ func removeStatesFromDb(t *testing.T, bc *core.BlockChain, db ethdb.Database, fr func TestRecreateStateForRPCNoDepthLimit(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeConfig := gethexec.ConfigDefaultTest() - nodeConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth - nodeConfig.Sequencer.MaxBlockSpeed = 0 - nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 - nodeConfig.Caching.Archive = true + execConfig := gethexec.ConfigDefaultTest() + execConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth + execConfig.Sequencer.MaxBlockSpeed = 0 + execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + execConfig.Caching.Archive = true + execConfig.Caching.SnapshotCache = 0 // disable snapshots // disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there - nodeConfig.Caching.TrieCleanCache = 0 - nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 - nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 - _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) + execConfig.Caching.TrieCleanCache = 0 + execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + execConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, 32) defer cancelNode() + execNode, l2client := builder.L2.ExecNode, builder.L2.Client bc := execNode.Backend.ArbInterface().BlockChain() db := execNode.Backend.ChainDb() @@ -123,17 +132,18 @@ func TestRecreateStateForRPCBigEnoughDepthLimit(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() depthGasLimit := int64(256 * util.NormalizeL2GasForL1GasInitial(800_000, params.GWei)) - nodeConfig := gethexec.ConfigDefaultTest() - nodeConfig.RPC.MaxRecreateStateDepth = depthGasLimit - nodeConfig.Sequencer.MaxBlockSpeed = 0 - nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 - nodeConfig.Caching.Archive = true + execConfig := gethexec.ConfigDefaultTest() + execConfig.RPC.MaxRecreateStateDepth = depthGasLimit + execConfig.Sequencer.MaxBlockSpeed = 0 + execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + execConfig.Caching.Archive = true // disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there - nodeConfig.Caching.TrieCleanCache = 0 - nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 - nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 - _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) + execConfig.Caching.TrieCleanCache = 0 + execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + execConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, 32) defer cancelNode() + execNode, l2client := builder.L2.ExecNode, builder.L2.Client bc := execNode.Backend.ArbInterface().BlockChain() db := execNode.Backend.ChainDb() @@ -157,17 +167,18 @@ func 
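prepareNodeWithHistory now returns the whole NodeBuilder plus its cleanup function, and transfer generation is split into makeSomeTransfers so tests can append more history later. A sketch of the new caller shape, assumed to live alongside these tests in the arbtest package; testWithPreparedHistory is hypothetical:

```go
package arbtest

import (
	"context"
	"testing"

	"github.com/offchainlabs/nitro/execution/gethexec"
)

func testWithPreparedHistory(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	execConfig := gethexec.ConfigDefaultTest()
	execConfig.Caching.Archive = true

	builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, 32)
	defer cancelNode()

	// The execution node and RPC client now come off the builder rather than
	// being returned directly.
	execNode, l2client := builder.L2.ExecNode, builder.L2.Client
	bc := execNode.Backend.ArbInterface().BlockChain()
	_, _ = l2client, bc

	// More history can be added later without rebuilding the node.
	makeSomeTransfers(t, ctx, builder, 8)
}
```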
TestRecreateStateForRPCBigEnoughDepthLimit(t *testing.T) { func TestRecreateStateForRPCDepthLimitExceeded(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeConfig := gethexec.ConfigDefaultTest() - nodeConfig.RPC.MaxRecreateStateDepth = int64(200) - nodeConfig.Sequencer.MaxBlockSpeed = 0 - nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 - nodeConfig.Caching.Archive = true + execConfig := gethexec.ConfigDefaultTest() + execConfig.RPC.MaxRecreateStateDepth = int64(200) + execConfig.Sequencer.MaxBlockSpeed = 0 + execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + execConfig.Caching.Archive = true // disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there - nodeConfig.Caching.TrieCleanCache = 0 - nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 - nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 - _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) + execConfig.Caching.TrieCleanCache = 0 + execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + execConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, 32) defer cancelNode() + execNode, l2client := builder.L2.ExecNode, builder.L2.Client bc := execNode.Backend.ArbInterface().BlockChain() db := execNode.Backend.ChainDb() @@ -191,17 +202,18 @@ func TestRecreateStateForRPCMissingBlockParent(t *testing.T) { var headerCacheLimit uint64 = 512 ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeConfig := gethexec.ConfigDefaultTest() - nodeConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth - nodeConfig.Sequencer.MaxBlockSpeed = 0 - nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 - nodeConfig.Caching.Archive = true + execConfig := gethexec.ConfigDefaultTest() + execConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth + execConfig.Sequencer.MaxBlockSpeed = 0 + execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + execConfig.Caching.Archive = true // disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there - nodeConfig.Caching.TrieCleanCache = 0 - nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 - nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 - _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, headerCacheLimit+5) + execConfig.Caching.TrieCleanCache = 0 + execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + execConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, headerCacheLimit+5) defer cancelNode() + execNode, l2client := builder.L2.ExecNode, builder.L2.Client bc := execNode.Backend.ArbInterface().BlockChain() db := execNode.Backend.ChainDb() @@ -236,16 +248,17 @@ func TestRecreateStateForRPCBeyondGenesis(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeConfig := gethexec.ConfigDefaultTest() - nodeConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth - nodeConfig.Sequencer.MaxBlockSpeed = 0 - nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 - nodeConfig.Caching.Archive = true + execConfig := gethexec.ConfigDefaultTest() + execConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth + execConfig.Sequencer.MaxBlockSpeed = 0 + execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + 
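These recreate-state RPC tests all repeat the same gethexec.Config tweaks. A hypothetical consolidation, only to spell out what the knobs do; the field names are the ones used in the patch, but archiveRecreateTestConfig itself is not part of it:

```go
package arbtest

import "github.com/offchainlabs/nitro/execution/gethexec"

// archiveRecreateTestConfig collects the settings the recreate-state tests
// share: archive mode with snapshots and the trie clean cache disabled, so
// states deleted from ChainDb are really gone, and no state-saving skips.
func archiveRecreateTestConfig(maxRecreateStateDepth int64) *gethexec.Config {
	cfg := gethexec.ConfigDefaultTest()
	cfg.RPC.MaxRecreateStateDepth = maxRecreateStateDepth
	cfg.Sequencer.MaxBlockSpeed = 0
	cfg.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110
	cfg.Caching.Archive = true
	cfg.Caching.SnapshotCache = 0  // disable snapshots
	cfg.Caching.TrieCleanCache = 0 // states removed from ChainDb must not linger here
	cfg.Caching.MaxNumberOfBlocksToSkipStateSaving = 0
	cfg.Caching.MaxAmountOfGasToSkipStateSaving = 0
	return cfg
}
```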
execConfig.Caching.Archive = true // disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there - nodeConfig.Caching.TrieCleanCache = 0 - nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 - nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 - _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) + execConfig.Caching.TrieCleanCache = 0 + execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + execConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, 32) + execNode, l2client := builder.L2.ExecNode, builder.L2.Client defer cancelNode() bc := execNode.Backend.ArbInterface().BlockChain() db := execNode.Backend.ChainDb() @@ -271,17 +284,18 @@ func TestRecreateStateForRPCBlockNotFoundWhileRecreating(t *testing.T) { var blockCacheLimit uint64 = 256 ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeConfig := gethexec.ConfigDefaultTest() - nodeConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth - nodeConfig.Sequencer.MaxBlockSpeed = 0 - nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 - nodeConfig.Caching.Archive = true + execConfig := gethexec.ConfigDefaultTest() + execConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth + execConfig.Sequencer.MaxBlockSpeed = 0 + execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + execConfig.Caching.Archive = true // disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there - nodeConfig.Caching.TrieCleanCache = 0 + execConfig.Caching.TrieCleanCache = 0 - nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 - nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 - _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, blockCacheLimit+4) + execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + execConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, blockCacheLimit+4) + execNode, l2client := builder.L2.ExecNode, builder.L2.Client defer cancelNode() bc := execNode.Backend.ArbInterface().BlockChain() db := execNode.Backend.ChainDb() @@ -306,7 +320,7 @@ func TestRecreateStateForRPCBlockNotFoundWhileRecreating(t *testing.T) { hash := rawdb.ReadCanonicalHash(db, lastBlock) Fatal(t, "Didn't fail to get balance at block:", lastBlock, " with hash:", hash, ", lastBlock:", lastBlock) } - if !strings.Contains(err.Error(), "block not found while recreating") { + if !strings.Contains(err.Error(), fmt.Sprintf("block #%d not found", blockBodyToRemove)) { Fatal(t, "Failed with unexpected error: \"", err, "\", at block:", lastBlock, "lastBlock:", lastBlock) } } @@ -358,9 +372,13 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig Fatal(t, "internal test error - tx got included in unexpected block number, have:", have, "want:", want) } } + bc := execNode.Backend.ArbInterface().BlockChain() genesis := uint64(0) - lastBlock, err := client.BlockNumber(ctx) - Require(t, err) + currentHeader := bc.CurrentBlock() + if currentHeader == nil { + Fatal(t, "missing current block") + } + lastBlock := currentHeader.Number.Uint64() if want := genesis + uint64(txCount); lastBlock < want { Fatal(t, "internal test error - not enough blocks produced during preparation, want:", want, "have:", lastBlock) } @@ -381,7 +399,7 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig 
Require(t, node.Start(ctx)) client = ClientForStack(t, stack) defer node.StopAndWait() - bc := execNode.Backend.ArbInterface().BlockChain() + bc = execNode.Backend.ArbInterface().BlockChain() gas := skipGas blocks := skipBlocks for i := genesis + 1; i <= genesis+uint64(txCount); i++ { @@ -391,8 +409,8 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig continue } gas += block.GasUsed() - blocks++ _, err := bc.StateAt(block.Root()) + blocks++ if (skipBlocks == 0 && skipGas == 0) || (skipBlocks != 0 && blocks > skipBlocks) || (skipGas != 0 && gas > skipGas) { if err != nil { t.Log("blocks:", blocks, "skipBlocks:", skipBlocks, "gas:", gas, "skipGas:", skipGas) @@ -401,13 +419,17 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig gas = 0 blocks = 0 } else { + if int(i) >= int(lastBlock)-int(cacheConfig.BlockCount) { + // skipping nonexistence check - the state might have been saved on node shutdown + continue + } if err == nil { t.Log("blocks:", blocks, "skipBlocks:", skipBlocks, "gas:", gas, "skipGas:", skipGas) Fatal(t, "state shouldn't be available, root:", block.Root(), "blockNumber:", i, "blockHash", block.Hash()) } expectedErr := &trie.MissingNodeError{} if !errors.As(err, &expectedErr) { - Fatal(t, "getting state failed with unexpected error, root:", block.Root(), "blockNumber:", i, "blockHash", block.Hash()) + Fatal(t, "getting state failed with unexpected error, root:", block.Root(), "blockNumber:", i, "blockHash:", block.Hash(), "err:", err) } } } @@ -429,7 +451,10 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig func TestSkippingSavingStateAndRecreatingAfterRestart(t *testing.T) { cacheConfig := gethexec.DefaultCachingConfig cacheConfig.Archive = true - //// test defaults + cacheConfig.SnapshotCache = 0 // disable snapshots + cacheConfig.BlockAge = 0 // use only Caching.BlockCount to keep only last N blocks in dirties cache, no matter how new they are + + // test defaults testSkippingSavingStateAndRecreatingAfterRestart(t, &cacheConfig, 512) cacheConfig.MaxNumberOfBlocksToSkipStateSaving = 127 @@ -444,8 +469,10 @@ func TestSkippingSavingStateAndRecreatingAfterRestart(t *testing.T) { cacheConfig.MaxAmountOfGasToSkipStateSaving = 15 * 1000 * 1000 testSkippingSavingStateAndRecreatingAfterRestart(t, &cacheConfig, 512) - // one test block ~ 925000 gas - testBlockGas := uint64(925000) + // lower number of blocks in triegc below 100 blocks, to be able to check for nonexistence in testSkippingSavingStateAndRecreatingAfterRestart (it doesn't check last BlockCount blocks as some of them may be persisted on node shutdown) + cacheConfig.BlockCount = 16 + + testBlockGas := uint64(925000) // one test block ~ 925000 gas skipBlockValues := []uint64{0, 1, 2, 3, 5, 21, 51, 100, 101} var skipGasValues []uint64 for _, i := range skipBlockValues { @@ -459,3 +486,206 @@ func TestSkippingSavingStateAndRecreatingAfterRestart(t *testing.T) { } } } + +func TestGettingStateForRPCFullNode(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + execConfig := gethexec.ConfigDefaultTest() + execConfig.Caching.SnapshotCache = 0 // disable snapshots + execConfig.Caching.BlockAge = 0 // use only Caching.BlockCount to keep only last N blocks in dirties cache, no matter how new they are + execConfig.Sequencer.MaxBlockSpeed = 0 + execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, 16) + execNode, _ := 
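The per-block check in testSkippingSavingStateAndRecreatingAfterRestart compares state availability against the skip thresholds, and now also skips the nonexistence check for the newest cacheConfig.BlockCount blocks, whose states may have been flushed on shutdown. The condition itself, restated as a small predicate (hypothetical helper, same logic as the test):

```go
// stateShouldBePersisted mirrors the condition used in the test: with skipping
// disabled every block's state must be on disk; otherwise state is only
// expected once the accumulated blocks or gas since the last save exceed the
// configured threshold.
func stateShouldBePersisted(skipBlocks, skipGas, blocksSinceSave, gasSinceSave uint64) bool {
	if skipBlocks == 0 && skipGas == 0 {
		return true
	}
	return (skipBlocks != 0 && blocksSinceSave > skipBlocks) ||
		(skipGas != 0 && gasSinceSave > skipGas)
}
```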
builder.L2.ExecNode, builder.L2.Client + defer cancelNode() + bc := execNode.Backend.ArbInterface().BlockChain() + api := execNode.Backend.APIBackend() + + header := bc.CurrentBlock() + if header == nil { + Fatal(t, "failed to get current block header") + } + state, _, err := api.StateAndHeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64())) + Require(t, err) + addr := builder.L2Info.GetAddress("User2") + exists := state.Exist(addr) + err = state.Error() + Require(t, err) + if !exists { + Fatal(t, "User2 address does not exist in the state") + } + // Get the state again to avoid caching + state, _, err = api.StateAndHeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64())) + Require(t, err) + + blockCountRequiredToFlushDirties := builder.execConfig.Caching.BlockCount + makeSomeTransfers(t, ctx, builder, blockCountRequiredToFlushDirties) + + exists = state.Exist(addr) + err = state.Error() + Require(t, err) + if !exists { + Fatal(t, "User2 address does not exist in the state") + } +} + +func TestGettingStateForRPCHybridArchiveNode(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + execConfig := gethexec.ConfigDefaultTest() + execConfig.Caching.Archive = true + execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 128 + execConfig.Caching.BlockCount = 128 + execConfig.Caching.SnapshotCache = 0 // disable snapshots + execConfig.Caching.BlockAge = 0 // use only Caching.BlockCount to keep only last N blocks in dirties cache, no matter how new they are + execConfig.Sequencer.MaxBlockSpeed = 0 + execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, 16) + execNode, _ := builder.L2.ExecNode, builder.L2.Client + defer cancelNode() + bc := execNode.Backend.ArbInterface().BlockChain() + api := execNode.Backend.APIBackend() + + header := bc.CurrentBlock() + if header == nil { + Fatal(t, "failed to get current block header") + } + state, _, err := api.StateAndHeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64())) + Require(t, err) + addr := builder.L2Info.GetAddress("User2") + exists := state.Exist(addr) + err = state.Error() + Require(t, err) + if !exists { + Fatal(t, "User2 address does not exist in the state") + } + // Get the state again to avoid caching + state, _, err = api.StateAndHeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64())) + Require(t, err) + + blockCountRequiredToFlushDirties := builder.execConfig.Caching.BlockCount + makeSomeTransfers(t, ctx, builder, blockCountRequiredToFlushDirties) + + exists = state.Exist(addr) + err = state.Error() + Require(t, err) + if !exists { + Fatal(t, "User2 address does not exist in the state") + } +} + +func TestStateAndHeaderForRecentBlock(t *testing.T) { + threads := 32 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.execConfig.Caching.Archive = true + builder.execConfig.RPC.MaxRecreateStateDepth = 0 + cleanup := builder.Build(t) + defer cleanup() + builder.L2Info.GenerateAccount("User2") + + errors := make(chan error, threads+1) + senderDone := make(chan struct{}) + go func() { + defer close(senderDone) + for ctx.Err() == nil { + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, new(big.Int).Lsh(big.NewInt(1), 128), nil) + err := builder.L2.Client.SendTransaction(ctx, tx) + if ctx.Err() != nil { + return + } + if err != nil { + errors <- err + return + } + _, err = 
builder.L2.EnsureTxSucceeded(tx) + if ctx.Err() != nil { + return + } + if err != nil { + errors <- err + return + } + time.Sleep(10 * time.Millisecond) + } + }() + api := builder.L2.ExecNode.Backend.APIBackend() + db := builder.L2.ExecNode.Backend.ChainDb() + i := 1 + var mtx sync.RWMutex + var wgCallers sync.WaitGroup + for j := 0; j < threads && ctx.Err() == nil; j++ { + wgCallers.Add(1) + go func() { + defer wgCallers.Done() + mtx.RLock() + blockNumber := i + mtx.RUnlock() + for blockNumber < 300 && ctx.Err() == nil { + prefix := make([]byte, 8) + binary.BigEndian.PutUint64(prefix, uint64(blockNumber)) + prefix = append([]byte("b"), prefix...) + it := db.NewIterator(prefix, nil) + defer it.Release() + if it.Next() { + key := it.Key() + if len(key) != len(prefix)+common.HashLength { + Fatal(t, "Wrong key length, have:", len(key), "want:", len(prefix)+common.HashLength) + } + blockHash := common.BytesToHash(key[len(prefix):]) + start := time.Now() + for ctx.Err() == nil { + _, _, err := api.StateAndHeaderByNumberOrHash(ctx, rpc.BlockNumberOrHash{BlockHash: &blockHash}) + if err == nil { + mtx.Lock() + if blockNumber == i { + i++ + } + mtx.Unlock() + break + } + if ctx.Err() != nil { + return + } + if !strings.Contains(err.Error(), "ahead of current block") { + errors <- err + return + } + if time.Since(start) > 5*time.Second { + errors <- fmt.Errorf("timeout - failed to get state for more then 5 seconds, block: %d, err: %w", blockNumber, err) + return + } + } + } + it.Release() + mtx.RLock() + blockNumber = i + mtx.RUnlock() + } + }() + } + callersDone := make(chan struct{}) + go func() { + wgCallers.Wait() + close(callersDone) + }() + + select { + case <-callersDone: + cancel() + case <-senderDone: + cancel() + case err := <-errors: + t.Error(err) + cancel() + } + <-callersDone + <-senderDone + close(errors) + for err := range errors { + if err != nil { + t.Error(err) + } + } +} diff --git a/system_tests/retryable_test.go b/system_tests/retryable_test.go index be0ecc590f..b0691db173 100644 --- a/system_tests/retryable_test.go +++ b/system_tests/retryable_test.go @@ -563,7 +563,7 @@ func TestDepositETH(t *testing.T) { txOpts := builder.L1Info.GetDefaultTransactOpts("Faucet", ctx) txOpts.Value = big.NewInt(13) - l1tx, err := delayedInbox.DepositEth0(&txOpts) + l1tx, err := delayedInbox.DepositEth439370b1(&txOpts) if err != nil { t.Fatalf("DepositEth0() unexected error: %v", err) } diff --git a/system_tests/rpc_test.go b/system_tests/rpc_test.go index 357cb8e4c1..511a608e67 100644 --- a/system_tests/rpc_test.go +++ b/system_tests/rpc_test.go @@ -7,10 +7,8 @@ import ( "context" "path/filepath" "testing" - "time" "github.com/ethereum/go-ethereum/ethclient" - "github.com/offchainlabs/nitro/solgen/go/mocksgen" ) func TestIpcRpc(t *testing.T) { @@ -27,23 +25,3 @@ func TestIpcRpc(t *testing.T) { _, err := ethclient.Dial(ipcPath) Require(t, err) } - -func TestPendingBlockTimeAndNumberAdvance(t *testing.T) { - t.Parallel() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - builder := NewNodeBuilder(ctx).DefaultConfig(t, true) - cleanup := builder.Build(t) - defer cleanup() - - auth := builder.L2Info.GetDefaultTransactOpts("Faucet", ctx) - - _, _, testTimeAndNr, err := mocksgen.DeployPendingBlkTimeAndNrAdvanceCheck(&auth, builder.L2.Client) - Require(t, err) - - time.Sleep(1 * time.Second) - - _, err = testTimeAndNr.IsAdvancing(&auth) - Require(t, err) -} diff --git a/system_tests/seqinbox_test.go b/system_tests/seqinbox_test.go index e00bda8e84..81dd2ad0d7 100644 --- 
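TestStateAndHeaderForRecentBlock discovers freshly written blocks by scanning the chain database directly, relying on go-ethereum's rawdb body-key layout: "b" + 8-byte big-endian block number + block hash. A self-contained sketch of that lookup; the helper name is made up:

```go
package sketch

import (
	"encoding/binary"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
)

// blockHashFromBodyKey scans for a block body stored under the given number
// and recovers the block hash from the key suffix, the same trick the test
// uses to call StateAndHeaderByNumberOrHash by hash for very recent blocks.
func blockHashFromBodyKey(db ethdb.Database, blockNumber uint64) (common.Hash, bool) {
	prefix := make([]byte, 8)
	binary.BigEndian.PutUint64(prefix, blockNumber)
	prefix = append([]byte("b"), prefix...)
	it := db.NewIterator(prefix, nil)
	defer it.Release()
	if !it.Next() {
		return common.Hash{}, false
	}
	key := it.Key()
	if len(key) != len(prefix)+common.HashLength {
		return common.Hash{}, false
	}
	return common.BytesToHash(key[len(prefix):]), true
}
```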
a/system_tests/seqinbox_test.go +++ b/system_tests/seqinbox_test.go @@ -355,7 +355,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { if i%5 == 0 { tx, err = seqInbox.AddSequencerL2Batch(&seqOpts, big.NewInt(int64(len(blockStates))), batchData, big.NewInt(1), gasRefunderAddr, big.NewInt(0), big.NewInt(0)) } else { - tx, err = seqInbox.AddSequencerL2BatchFromOrigin0(&seqOpts, big.NewInt(int64(len(blockStates))), batchData, big.NewInt(1), gasRefunderAddr, common.Big0, common.Big0) + tx, err = seqInbox.AddSequencerL2BatchFromOrigin8f111f3c(&seqOpts, big.NewInt(int64(len(blockStates))), batchData, big.NewInt(1), gasRefunderAddr, common.Big0, common.Big0) } Require(t, err) txRes, err := builder.L1.EnsureTxSucceeded(tx) diff --git a/util/headerreader/blob_client.go b/util/headerreader/blob_client.go index 8989a321c7..664dbb5e30 100644 --- a/util/headerreader/blob_client.go +++ b/util/headerreader/blob_client.go @@ -17,6 +17,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto/kzg4844" + "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/blobs" "github.com/offchainlabs/nitro/util/jsonapi" @@ -26,10 +27,11 @@ import ( ) type BlobClient struct { - ec arbutil.L1Interface - beaconUrl *url.URL - httpClient *http.Client - authorization string + ec arbutil.L1Interface + beaconUrl *url.URL + secondaryBeaconUrl *url.URL + httpClient *http.Client + authorization string // Filled in in Initialize() genesisTime uint64 @@ -40,19 +42,22 @@ type BlobClient struct { } type BlobClientConfig struct { - BeaconUrl string `koanf:"beacon-url"` - BlobDirectory string `koanf:"blob-directory"` - Authorization string `koanf:"authorization"` + BeaconUrl string `koanf:"beacon-url"` + SecondaryBeaconUrl string `koanf:"secondary-beacon-url"` + BlobDirectory string `koanf:"blob-directory"` + Authorization string `koanf:"authorization"` } var DefaultBlobClientConfig = BlobClientConfig{ - BeaconUrl: "", - BlobDirectory: "", - Authorization: "", + BeaconUrl: "", + SecondaryBeaconUrl: "", + BlobDirectory: "", + Authorization: "", } func BlobClientAddOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".beacon-url", DefaultBlobClientConfig.BeaconUrl, "Beacon Chain RPC URL to use for fetching blobs (normally on port 3500)") + f.String(prefix+".secondary-beacon-url", DefaultBlobClientConfig.SecondaryBeaconUrl, "Backup beacon Chain RPC URL to use for fetching blobs (normally on port 3500) when unable to fetch from primary") f.String(prefix+".blob-directory", DefaultBlobClientConfig.BlobDirectory, "Full path of the directory to save fetched blobs") f.String(prefix+".authorization", DefaultBlobClientConfig.Authorization, "Value to send with the HTTP Authorization: header for Beacon REST requests, must include both scheme and scheme parameters") } @@ -62,6 +67,12 @@ func NewBlobClient(config BlobClientConfig, ec arbutil.L1Interface) (*BlobClient if err != nil { return nil, fmt.Errorf("failed to parse beacon chain URL: %w", err) } + var secondaryBeaconUrl *url.URL + if config.SecondaryBeaconUrl != "" { + if secondaryBeaconUrl, err = url.Parse(config.SecondaryBeaconUrl); err != nil { + return nil, fmt.Errorf("failed to parse secondary beacon chain URL: %w", err) + } + } if config.BlobDirectory != "" { if _, err = os.Stat(config.BlobDirectory); err != nil { if os.IsNotExist(err) { @@ -74,11 +85,12 @@ func NewBlobClient(config BlobClientConfig, ec arbutil.L1Interface) 
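Several call sites in this patch swap numeric-suffix bindings (AddSequencerL2BatchFromOrigin0, DepositEth0) for hex-suffixed ones (AddSequencerL2BatchFromOrigin8f111f3c, DepositEth439370b1). The suffix appears to be the overload's 4-byte selector, which the regenerated solgen bindings use to disambiguate overloaded Solidity functions. A quick, standalone way to check that reading (not part of the patch):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	for _, sig := range []string{
		"depositEth()",
		"addSequencerL2BatchFromOrigin(uint256,bytes,uint256,address,uint256,uint256)",
	} {
		// If the new suffixes really are selectors, this should print 439370b1
		// and 8f111f3c for the two overloads touched in this patch.
		fmt.Printf("%s -> %x\n", sig, crypto.Keccak256([]byte(sig))[:4])
	}
}
```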
(*BlobClient } } return &BlobClient{ - ec: ec, - beaconUrl: beaconUrl, - authorization: config.Authorization, - httpClient: &http.Client{}, - blobDirectory: config.BlobDirectory, + ec: ec, + beaconUrl: beaconUrl, + secondaryBeaconUrl: secondaryBeaconUrl, + authorization: config.Authorization, + httpClient: &http.Client{}, + blobDirectory: config.BlobDirectory, }, nil } @@ -91,22 +103,43 @@ func beaconRequest[T interface{}](b *BlobClient, ctx context.Context, beaconPath var empty T - // not really a deep copy, but copies the Path part we care about - url := *b.beaconUrl - url.Path = path.Join(url.Path, beaconPath) - - req, err := http.NewRequestWithContext(ctx, "GET", url.String(), http.NoBody) - if err != nil { - return empty, err - } - - if b.authorization != "" { - req.Header.Set("Authorization", b.authorization) + fetchData := func(url url.URL) (*http.Response, error) { + url.Path = path.Join(url.Path, beaconPath) + req, err := http.NewRequestWithContext(ctx, "GET", url.String(), http.NoBody) + if err != nil { + return nil, err + } + if b.authorization != "" { + req.Header.Set("Authorization", b.authorization) + } + resp, err := b.httpClient.Do(req) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + bodyStr := string(body) + log.Debug("beacon request returned response with non 200 OK status", "status", resp.Status, "body", bodyStr) + if len(bodyStr) > 100 { + return nil, fmt.Errorf("response returned with status %s, want 200 OK. body: %s ", resp.Status, bodyStr[len(bodyStr)-trailingCharsOfResponse:]) + } else { + return nil, fmt.Errorf("response returned with status %s, want 200 OK. body: %s", resp.Status, bodyStr) + } + } + return resp, nil } - resp, err := b.httpClient.Do(req) - if err != nil { - return empty, err + var resp *http.Response + var err error + if resp, err = fetchData(*b.beaconUrl); err != nil { + if b.secondaryBeaconUrl != nil { + log.Info("error fetching blob data from primary beacon URL, switching to secondary beacon URL", "err", err) + if resp, err = fetchData(*b.secondaryBeaconUrl); err != nil { + return empty, fmt.Errorf("error fetching blob data from secondary beacon URL: %w", err) + } + } else { + return empty, err + } } defer resp.Body.Close() @@ -133,7 +166,11 @@ func (b *BlobClient) GetBlobs(ctx context.Context, blockHash common.Hash, versio return nil, errors.New("BlobClient hasn't been initialized") } slot := (header.Time - b.genesisTime) / b.secondsPerSlot - return b.blobSidecars(ctx, slot, versionedHashes) + blobs, err := b.blobSidecars(ctx, slot, versionedHashes) + if err != nil { + return nil, fmt.Errorf("error fetching blobs in %d l1 block: %w", header.Number, err) + } + return blobs, nil } type blobResponseItem struct { @@ -147,6 +184,8 @@ type blobResponseItem struct { KzgProof hexutil.Bytes `json:"kzg_proof"` } +const trailingCharsOfResponse = 25 + func (b *BlobClient) blobSidecars(ctx context.Context, slot uint64, versionedHashes []common.Hash) ([]kzg4844.Blob, error) { rawData, err := beaconRequest[json.RawMessage](b, ctx, fmt.Sprintf("/eth/v1/beacon/blob_sidecars/%d", slot)) if err != nil { @@ -154,7 +193,13 @@ func (b *BlobClient) blobSidecars(ctx context.Context, slot uint64, versionedHas } var response []blobResponseItem if err := json.Unmarshal(rawData, &response); err != nil { - return nil, fmt.Errorf("error unmarshalling raw data into array of blobResponseItem in blobSidecars: %w", err) + rawDataStr := string(rawData) + log.Debug("response from beacon URL cannot be 
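beaconRequest now wraps the HTTP call in a fetchData closure and retries once against the optional secondary beacon URL when the primary attempt fails. Reduced to a standalone sketch of that shape; the names here are illustrative, not the BlobClient API:

```go
package sketch

import (
	"context"
	"fmt"
	"net/http"
	"net/url"

	"github.com/ethereum/go-ethereum/log"
)

// fetchWithFallback tries the primary endpoint first and only falls back to
// the secondary endpoint, if one is configured, after the primary fails.
func fetchWithFallback(ctx context.Context, primary *url.URL, secondary *url.URL,
	fetch func(ctx context.Context, u url.URL) (*http.Response, error)) (*http.Response, error) {
	resp, err := fetch(ctx, *primary)
	if err == nil {
		return resp, nil
	}
	if secondary == nil {
		return nil, err
	}
	log.Info("error fetching from primary URL, switching to secondary URL", "err", err)
	resp, err2 := fetch(ctx, *secondary)
	if err2 != nil {
		return nil, fmt.Errorf("secondary fetch also failed: %w (primary error: %v)", err2, err)
	}
	return resp, nil
}
```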
unmarshalled into array of blobResponseItem in blobSidecars", "slot", slot, "responseLength", len(rawDataStr), "response", rawDataStr) + if len(rawDataStr) > 100 { + return nil, fmt.Errorf("error unmarshalling response from beacon URL into array of blobResponseItem in blobSidecars: %w. Trailing %d characters of the response: %s", err, trailingCharsOfResponse, rawDataStr[len(rawDataStr)-trailingCharsOfResponse:]) + } else { + return nil, fmt.Errorf("error unmarshalling response from beacon URL into array of blobResponseItem in blobSidecars: %w. Response: %s", err, rawDataStr) + } } if len(response) < len(versionedHashes) { diff --git a/util/rpcclient/rpcclient.go b/util/rpcclient/rpcclient.go index 275acdb283..02b41cf15d 100644 --- a/util/rpcclient/rpcclient.go +++ b/util/rpcclient/rpcclient.go @@ -127,6 +127,25 @@ func (m limitedArgumentsMarshal) String() string { return res } +var blobTxUnderpricedRegexp = regexp.MustCompile(`replacement transaction underpriced: new tx gas fee cap (\d*) <= (\d*) queued`) + +// IsAlreadyKnownError returns true if the error appears to be an "already known" error. +// This check is based on the error's string form and is not precise. +func IsAlreadyKnownError(err error) bool { + s := err.Error() + if strings.Contains(s, "already known") { + return true + } + // go-ethereum returns "replacement transaction underpriced" instead of "already known" for blob txs. + // This is fixed in https://github.com/ethereum/go-ethereum/pull/29210 + // TODO: Once a new geth release is out with this fix, we can remove this check. + matches := blobTxUnderpricedRegexp.FindSubmatch([]byte(s)) + if len(matches) == 3 { + return string(matches[1]) == string(matches[2]) + } + return false +} + func (c *RpcClient) CallContext(ctx_in context.Context, result interface{}, method string, args ...interface{}) error { if c.client == nil { return errors.New("not connected") @@ -159,7 +178,7 @@ func (c *RpcClient) CallContext(ctx_in context.Context, result interface{}, meth cancelCtx() logger := log.Trace limit := int(c.config().ArgLogLimit) - if err != nil && err.Error() != "already known" { + if err != nil && !IsAlreadyKnownError(err) { logger = log.Info } logEntry := []interface{}{ diff --git a/util/rpcclient/rpcclient_test.go b/util/rpcclient/rpcclient_test.go index b885770f60..8613671d37 100644 --- a/util/rpcclient/rpcclient_test.go +++ b/util/rpcclient/rpcclient_test.go @@ -182,6 +182,25 @@ func TestRpcClientRetry(t *testing.T) { } } +func TestIsAlreadyKnownError(t *testing.T) { + for _, testCase := range []struct { + input string + expected bool + }{ + {"already known", true}, + {"insufficient balance", false}, + {"foo already known\nbar", true}, + {"replacement transaction underpriced: new tx gas fee cap 3824396284 \u003c= 3824396284 queued", true}, + {"replacement transaction underpriced: new tx gas fee cap 1234 \u003c= 5678 queued", false}, + {"foo replacement transaction underpriced: new tx gas fee cap 3824396284 \u003c= 3824396284 queued bar", true}, + } { + got := IsAlreadyKnownError(errors.New(testCase.input)) + if got != testCase.expected { + t.Errorf("IsAlreadyKnownError(%q) = %v expected %v", testCase.input, got, testCase.expected) + } + } +} + func Require(t *testing.T, err error, printables ...interface{}) { t.Helper() testhelpers.RequireImpl(t, err, printables...)
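The new rpcclient.IsAlreadyKnownError is used in CallContext to keep "already known" resubmissions, including the blob-tx "replacement transaction underpriced" variant it recognizes, at trace-level logging instead of info. A hedged sketch of the same check at a transaction-submission call site; sendTxTolerantly is a hypothetical wrapper, not part of the patch:

```go
package sketch

import (
	"context"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethclient"

	"github.com/offchainlabs/nitro/util/rpcclient"
)

// sendTxTolerantly treats an "already known" response as success: the pool
// already holds this exact transaction, so resubmitting it is not an error.
func sendTxTolerantly(ctx context.Context, client *ethclient.Client, tx *types.Transaction) error {
	err := client.SendTransaction(ctx, tx)
	if err == nil || rpcclient.IsAlreadyKnownError(err) {
		return nil
	}
	return err
}
```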