diff --git a/.github/workflows/release-ci.yml b/.github/workflows/release-ci.yml index 036bf46538..5282510e87 100644 --- a/.github/workflows/release-ci.yml +++ b/.github/workflows/release-ci.yml @@ -25,3 +25,6 @@ jobs: path: /tmp/.buildx-cache key: ${{ runner.os }}-buildx-${{ hashFiles('Dockerfile') }} restore-keys: ${{ runner.os }}-buildx- + + - name: Startup Nitro testnode + run: ./scripts/startup-testnode.bash diff --git a/Dockerfile b/Dockerfile index de054843cc..947d6b5a47 100644 --- a/Dockerfile +++ b/Dockerfile @@ -164,6 +164,7 @@ RUN ./download-machine.sh consensus-v10.2 0x0754e09320c381566cc0449904c377a52bd3 RUN ./download-machine.sh consensus-v10.3 0xf559b6d4fa869472dabce70fe1c15221bdda837533dfd891916836975b434dec RUN ./download-machine.sh consensus-v11 0xf4389b835497a910d7ba3ebfb77aa93da985634f3c052de1290360635be40c4a RUN ./download-machine.sh consensus-v11.1 0x68e4fe5023f792d4ef584796c84d710303a5e12ea02d6e37e2b5e9c4332507c4 +RUN ./download-machine.sh consensus-v20 0x8b104a2e80ac6165dc58b9048de12f301d70b02a0ab51396c22b4b4b802a16a4 FROM golang:1.20-bookworm as node-builder WORKDIR /workspace diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index e3af0b2afb..14d5affa08 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -41,6 +41,7 @@ import ( "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/das" + "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/util" "github.com/offchainlabs/nitro/util/arbmath" @@ -73,22 +74,24 @@ type batchPosterPosition struct { type BatchPoster struct { stopwaiter.StopWaiter - l1Reader *headerreader.HeaderReader - inbox *InboxTracker - streamer *TransactionStreamer - config BatchPosterConfigFetcher - seqInbox *bridgegen.SequencerInbox - bridge *bridgegen.Bridge - syncMonitor *SyncMonitor - seqInboxABI *abi.ABI - seqInboxAddr common.Address - bridgeAddr common.Address - gasRefunderAddr common.Address - building *buildingBatch - daWriter das.DataAvailabilityServiceWriter - dataPoster *dataposter.DataPoster - redisLock *redislock.Simple - messagesPerBatch *arbmath.MovingAverage[uint64] + l1Reader *headerreader.HeaderReader + inbox *InboxTracker + streamer *TransactionStreamer + arbOSVersionGetter execution.FullExecutionClient + config BatchPosterConfigFetcher + seqInbox *bridgegen.SequencerInbox + bridge *bridgegen.Bridge + syncMonitor *SyncMonitor + seqInboxABI *abi.ABI + seqInboxAddr common.Address + bridgeAddr common.Address + gasRefunderAddr common.Address + building *buildingBatch + daWriter das.DataAvailabilityServiceWriter + dataPoster *dataposter.DataPoster + redisLock *redislock.Simple + messagesPerBatch *arbmath.MovingAverage[uint64] + non4844BatchCount int // Count of consecutive non-4844 batches posted // This is an atomic variable that should only be accessed atomically. // An estimate of the number of batches we want to post but haven't yet. // This doesn't include batches which we don't want to post yet due to the L1 bounds. 
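The BatchPoster struct changes above add arbOSVersionGetter (an execution.FullExecutionClient) and non4844BatchCount; the only method the batch poster calls on the new dependency is ArbOSVersionForMessageNumber, in a later hunk of maybePostSequencerBatch. A minimal sketch of how that dependency could be narrowed or stubbed for tests; the interface and stub names are hypothetical, and the (uint64, error) return type is an assumption inferred from the `arbOSVersion >= 20` comparison later in this diff:

```go
package arbnode

import "github.com/offchainlabs/nitro/arbutil"

// Illustrative only: the single method of execution.FullExecutionClient that the
// batch poster exercises through the new arbOSVersionGetter field. The narrowed
// interface and the fixed-version stub are hypothetical test helpers.
type arbOSVersionSource interface {
	ArbOSVersionForMessageNumber(msgNum arbutil.MessageIndex) (uint64, error)
}

// fixedArbOSVersion always reports the same ArbOS version, e.g. for unit tests.
type fixedArbOSVersion uint64

func (v fixedArbOSVersion) ArbOSVersionForMessageNumber(arbutil.MessageIndex) (uint64, error) {
	return uint64(v), nil
}
```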
@@ -136,7 +139,7 @@ type BatchPosterConfig struct { RedisLock redislock.SimpleCfg `koanf:"redis-lock" reload:"hot"` ExtraBatchGas uint64 `koanf:"extra-batch-gas" reload:"hot"` Post4844Blobs bool `koanf:"post-4844-blobs" reload:"hot"` - ForcePost4844Blobs bool `koanf:"force-post-4844-blobs" reload:"hot"` + IgnoreBlobPrice bool `koanf:"ignore-blob-price" reload:"hot"` ParentChainWallet genericconf.WalletConfig `koanf:"parent-chain-wallet"` L1BlockBound string `koanf:"l1-block-bound" reload:"hot"` L1BlockBoundBypass time.Duration `koanf:"l1-block-bound-bypass" reload:"hot"` @@ -186,7 +189,7 @@ func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".gas-refunder-address", DefaultBatchPosterConfig.GasRefunderAddress, "The gas refunder contract address (optional)") f.Uint64(prefix+".extra-batch-gas", DefaultBatchPosterConfig.ExtraBatchGas, "use this much more gas than estimation says is necessary to post batches") f.Bool(prefix+".post-4844-blobs", DefaultBatchPosterConfig.Post4844Blobs, "if the parent chain supports 4844 blobs and they're well priced, post EIP-4844 blobs") - f.Bool(prefix+".force-post-4844-blobs", DefaultBatchPosterConfig.ForcePost4844Blobs, "if the parent chain supports 4844 blobs and post-4844-blobs is true, post 4844 blobs even if it's not price efficient") + f.Bool(prefix+".ignore-blob-price", DefaultBatchPosterConfig.IgnoreBlobPrice, "if the parent chain supports 4844 blobs and ignore-blob-price is true, post 4844 blobs even if it's not price efficient") f.String(prefix+".redis-url", DefaultBatchPosterConfig.RedisUrl, "if non-empty, the Redis URL to store queued transactions in") f.String(prefix+".l1-block-bound", DefaultBatchPosterConfig.L1BlockBound, "only post messages to batches when they're within the max future block/timestamp as of this L1 block tag (\"safe\", \"finalized\", \"latest\", or \"ignore\" to ignore this check)") f.Duration(prefix+".l1-block-bound-bypass", DefaultBatchPosterConfig.L1BlockBoundBypass, "post batches even if not within the layer 1 future bounds if we're within this margin of the max delay") @@ -202,7 +205,7 @@ var DefaultBatchPosterConfig = BatchPosterConfig{ // This default is overridden for L3 chains in applyChainParameters in cmd/nitro/nitro.go MaxSize: 100000, // TODO: is 1000 bytes an appropriate margin for error vs blob space efficiency? 
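For reference, the Max4844BatchSize default changed in the next hunk evaluates to the same number under both spellings; a small stand-alone computation, assuming blobs.BlobEncodableData equals 254 * params.BlobTxFieldElementsPerBlob / 8 (the quantity the old expression wrote out inline) and the Cancun values of the geth params constants:

```go
package main

import "fmt"

func main() {
	const blobTxFieldElementsPerBlob = 4096 // params.BlobTxFieldElementsPerBlob
	const maxBlobGasPerBlock = 786432       // params.MaxBlobGasPerBlock (6 blobs per block)
	const blobTxBlobGasPerBlob = 131072     // params.BlobTxBlobGasPerBlob
	// 254 usable bits per 32-byte field element, packed into bytes.
	const blobEncodableData = 254 * blobTxFieldElementsPerBlob / 8 // 130048 bytes per blob

	blobsPerBlock := maxBlobGasPerBlock / blobTxBlobGasPerBlob // 6
	max4844BatchSize := blobEncodableData*blobsPerBlock - 1000 // 1000-byte margin, per the TODO above
	fmt.Println(blobsPerBlock, max4844BatchSize)               // 6 779288
}
```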
- Max4844BatchSize: (254 * params.BlobTxFieldElementsPerBlob / 8 * (params.MaxBlobGasPerBlock / params.BlobTxBlobGasPerBlob)) - 1000, + Max4844BatchSize: blobs.BlobEncodableData*(params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob) - 1000, PollInterval: time.Second * 10, ErrorDelay: time.Second * 10, MaxDelay: time.Hour, @@ -212,7 +215,7 @@ var DefaultBatchPosterConfig = BatchPosterConfig{ GasRefunderAddress: "", ExtraBatchGas: 50_000, Post4844Blobs: false, - ForcePost4844Blobs: false, + IgnoreBlobPrice: false, DataPoster: dataposter.DefaultDataPosterConfig, ParentChainWallet: DefaultBatchPosterL1WalletConfig, L1BlockBound: "", @@ -242,7 +245,7 @@ var TestBatchPosterConfig = BatchPosterConfig{ GasRefunderAddress: "", ExtraBatchGas: 10_000, Post4844Blobs: true, - ForcePost4844Blobs: false, + IgnoreBlobPrice: false, DataPoster: dataposter.TestDataPosterConfig, ParentChainWallet: DefaultBatchPosterL1WalletConfig, L1BlockBound: "", @@ -255,6 +258,7 @@ type BatchPosterOpts struct { L1Reader *headerreader.HeaderReader Inbox *InboxTracker Streamer *TransactionStreamer + VersionGetter execution.FullExecutionClient SyncMonitor *SyncMonitor Config BatchPosterConfigFetcher DeployInfo *chaininfo.RollupAddresses @@ -293,19 +297,20 @@ func NewBatchPoster(ctx context.Context, opts *BatchPosterOpts) (*BatchPoster, e return nil, err } b := &BatchPoster{ - l1Reader: opts.L1Reader, - inbox: opts.Inbox, - streamer: opts.Streamer, - syncMonitor: opts.SyncMonitor, - config: opts.Config, - bridge: bridge, - seqInbox: seqInbox, - seqInboxABI: seqInboxABI, - seqInboxAddr: opts.DeployInfo.SequencerInbox, - gasRefunderAddr: opts.Config().gasRefunder, - bridgeAddr: opts.DeployInfo.Bridge, - daWriter: opts.DAWriter, - redisLock: redisLock, + l1Reader: opts.L1Reader, + inbox: opts.Inbox, + streamer: opts.Streamer, + arbOSVersionGetter: opts.VersionGetter, + syncMonitor: opts.SyncMonitor, + config: opts.Config, + bridge: bridge, + seqInbox: seqInbox, + seqInboxABI: seqInboxABI, + seqInboxAddr: opts.DeployInfo.SequencerInbox, + gasRefunderAddr: opts.Config().gasRefunder, + bridgeAddr: opts.DeployInfo.Bridge, + daWriter: opts.DAWriter, + redisLock: redisLock, } b.messagesPerBatch, err = arbmath.NewMovingAverage[uint64](20) if err != nil { @@ -947,7 +952,6 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) if dbBatchCount > batchPosition.NextSeqNum { return false, fmt.Errorf("attempting to post batch %v, but the local inbox tracker database already has %v batches", batchPosition.NextSeqNum, dbBatchCount) } - if b.building == nil || b.building.startMsgCount != batchPosition.MessageCount { latestHeader, err := b.l1Reader.LastHeader(ctx) if err != nil { @@ -956,17 +960,34 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) var use4844 bool config := b.config() if config.Post4844Blobs && latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil { - if config.ForcePost4844Blobs { - use4844 = true - } else { - blobFeePerByte := eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) - blobFeePerByte.Mul(blobFeePerByte, blobTxBlobGasPerBlob) - blobFeePerByte.Div(blobFeePerByte, usableBytesInBlob) - - calldataFeePerByte := arbmath.BigMulByUint(latestHeader.BaseFee, 16) - use4844 = arbmath.BigLessThan(blobFeePerByte, calldataFeePerByte) + arbOSVersion, err := b.arbOSVersionGetter.ArbOSVersionForMessageNumber(arbutil.MessageIndex(arbmath.SaturatingUSub(uint64(batchPosition.MessageCount), 1))) + if err != nil { 
+ return false, err + } + if arbOSVersion >= 20 { + if config.IgnoreBlobPrice { + use4844 = true + } else { + backlog := atomic.LoadUint64(&b.backlog) + // Logic to prevent switching from non-4844 batches to 4844 batches too often, + // so that blocks can be filled efficiently. The geth txpool rejects txs for + // accounts that already have the other type of txs in the pool with + // "address already reserved". This logic makes sure that, if there is a backlog, + // enough non-4844 batches have been posted to fill a block before switching. + if backlog == 0 || + b.non4844BatchCount == 0 || + b.non4844BatchCount > 16 { + blobFeePerByte := eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) + blobFeePerByte.Mul(blobFeePerByte, blobTxBlobGasPerBlob) + blobFeePerByte.Div(blobFeePerByte, usableBytesInBlob) + + calldataFeePerByte := arbmath.BigMulByUint(latestHeader.BaseFee, 16) + use4844 = arbmath.BigLessThan(blobFeePerByte, calldataFeePerByte) + } + } } } + b.building = &buildingBatch{ segments: newBatchSegments(batchPosition.DelayedMessageCount, b.config(), b.GetBacklogEstimate(), use4844), msgCount: batchPosition.MessageCount, @@ -1198,9 +1219,15 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) "totalSegments", len(b.building.segments.rawSegments), "numBlobs", len(kzgBlobs), ) + recentlyHitL1Bounds := time.Since(b.lastHitL1Bounds) < config.PollInterval*3 postedMessages := b.building.msgCount - batchPosition.MessageCount b.messagesPerBatch.Update(uint64(postedMessages)) + if b.building.use4844 { + b.non4844BatchCount = 0 + } else { + b.non4844BatchCount++ + } unpostedMessages := msgCount - b.building.msgCount messagesPerBatch := b.messagesPerBatch.Average() if messagesPerBatch == 0 { @@ -1340,3 +1367,56 @@ func (b *BatchPoster) StopAndWait() { b.dataPoster.StopAndWait() b.redisLock.StopAndWait() } + +type BoolRing struct { + buffer []bool + bufferPosition int +} + +func NewBoolRing(size int) *BoolRing { + return &BoolRing{ + buffer: make([]bool, 0, size), + } +} + +func (b *BoolRing) Update(value bool) { + period := cap(b.buffer) + if period == 0 { + return + } + if len(b.buffer) < period { + b.buffer = append(b.buffer, value) + } else { + b.buffer[b.bufferPosition] = value + } + b.bufferPosition = (b.bufferPosition + 1) % period +} + +func (b *BoolRing) Empty() bool { + return len(b.buffer) == 0 +} + +// Peek returns the most recently inserted value. +// Assumes not empty; check Empty() first +func (b *BoolRing) Peek() bool { + lastPosition := b.bufferPosition - 1 + if lastPosition < 0 { + // This is the case where we have wrapped around, since Peek() shouldn't + // be called without checking Empty(), so we can just use capacity. + lastPosition = cap(b.buffer) - 1 + } + return b.buffer[lastPosition] +} + +// All returns true if the BoolRing is full and all values equal value.
+func (b *BoolRing) All(value bool) bool { + if len(b.buffer) < cap(b.buffer) { + return false + } + for _, v := range b.buffer { + if v != value { + return false + } + } + return true +} diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 1415f78140..495e9eb0e9 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -58,17 +58,18 @@ import ( // DataPoster must be RLP serializable and deserializable type DataPoster struct { stopwaiter.StopWaiter - headerReader *headerreader.HeaderReader - client arbutil.L1Interface - auth *bind.TransactOpts - signer signerFn - config ConfigFetcher - usingNoOpStorage bool - replacementTimes []time.Duration - metadataRetriever func(ctx context.Context, blockNum *big.Int) ([]byte, error) - extraBacklog func() uint64 - parentChainID *big.Int - parentChainID256 *uint256.Int + headerReader *headerreader.HeaderReader + client arbutil.L1Interface + auth *bind.TransactOpts + signer signerFn + config ConfigFetcher + usingNoOpStorage bool + replacementTimes []time.Duration + blobTxReplacementTimes []time.Duration + metadataRetriever func(ctx context.Context, blockNum *big.Int) ([]byte, error) + extraBacklog func() uint64 + parentChainID *big.Int + parentChainID256 *uint256.Int // These fields are protected by the mutex. // TODO: factor out these fields into separate structure, since now one @@ -129,6 +130,10 @@ func NewDataPoster(ctx context.Context, opts *DataPosterOpts) (*DataPoster, erro if err != nil { return nil, err } + blobTxReplacementTimes, err := parseReplacementTimes(cfg.BlobTxReplacementTimes) + if err != nil { + return nil, err + } useNoOpStorage := cfg.UseNoOpStorage if opts.HeaderReader.IsParentChainArbitrum() && !cfg.UseNoOpStorage { useNoOpStorage = true @@ -172,15 +177,16 @@ func NewDataPoster(ctx context.Context, opts *DataPosterOpts) (*DataPoster, erro signer: func(_ context.Context, addr common.Address, tx *types.Transaction) (*types.Transaction, error) { return opts.Auth.Signer(addr, tx) }, - config: opts.Config, - usingNoOpStorage: useNoOpStorage, - replacementTimes: replacementTimes, - metadataRetriever: opts.MetadataRetriever, - queue: queue, - errorCount: make(map[uint64]int), - maxFeeCapExpression: expression, - extraBacklog: opts.ExtraBacklog, - parentChainID: opts.ParentChainID, + config: opts.Config, + usingNoOpStorage: useNoOpStorage, + replacementTimes: replacementTimes, + blobTxReplacementTimes: blobTxReplacementTimes, + metadataRetriever: opts.MetadataRetriever, + queue: queue, + errorCount: make(map[uint64]int), + maxFeeCapExpression: expression, + extraBacklog: opts.ExtraBacklog, + parentChainID: opts.ParentChainID, } var overflow bool dp.parentChainID256, overflow = uint256.FromBig(opts.ParentChainID) @@ -322,14 +328,15 @@ func (p *DataPoster) MaxMempoolTransactions() uint64 { if p.usingNoOpStorage { return 1 } - return p.config().MaxMempoolTransactions + config := p.config() + return arbmath.MinInt(config.MaxMempoolTransactions, config.MaxMempoolWeight) } var ErrExceedsMaxMempoolSize = errors.New("posting this transaction will exceed max mempool size") // Does basic check whether posting transaction with specified nonce would // result in exceeding maximum queue length or maximum transactions in mempool. 
-func (p *DataPoster) canPostWithNonce(ctx context.Context, nextNonce uint64) error { +func (p *DataPoster) canPostWithNonce(ctx context.Context, nextNonce uint64, thisWeight uint64) error { cfg := p.config() // If the queue has reached configured max size, don't post a transaction. if cfg.MaxQueuedTransactions > 0 { @@ -352,6 +359,43 @@ func (p *DataPoster) canPostWithNonce(ctx context.Context, nextNonce uint64) err return fmt.Errorf("%w: transaction nonce: %d, unconfirmed nonce: %d, max mempool size: %d", ErrExceedsMaxMempoolSize, nextNonce, unconfirmedNonce, cfg.MaxMempoolTransactions) } } + // Check that posting a new transaction won't exceed maximum pending + // weight in mempool. + if cfg.MaxMempoolWeight > 0 { + unconfirmedNonce, err := p.client.NonceAt(ctx, p.Sender(), nil) + if err != nil { + return fmt.Errorf("getting nonce of a dataposter sender: %w", err) + } + if unconfirmedNonce > nextNonce { + return fmt.Errorf("latest on-chain nonce %v is greater than next nonce %v", unconfirmedNonce, nextNonce) + } + + var confirmedWeight uint64 + if unconfirmedNonce > 0 { + confirmedMeta, err := p.queue.Get(ctx, unconfirmedNonce-1) + if err != nil { + return err + } + if confirmedMeta != nil { + confirmedWeight = confirmedMeta.CumulativeWeight() + } + } + previousTxMeta, err := p.queue.FetchLast(ctx) + if err != nil { + return err + } + var previousTxCumulativeWeight uint64 + if previousTxMeta != nil { + previousTxCumulativeWeight = previousTxMeta.CumulativeWeight() + } + previousTxCumulativeWeight = arbmath.MaxInt(previousTxCumulativeWeight, confirmedWeight) + newCumulativeWeight := previousTxCumulativeWeight + thisWeight + + weightDiff := arbmath.MinInt(newCumulativeWeight-confirmedWeight, (nextNonce-unconfirmedNonce)*params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob) + if weightDiff > cfg.MaxMempoolWeight { + return fmt.Errorf("%w: transaction nonce: %d, transaction cumulative weight: %d, unconfirmed nonce: %d, confirmed weight: %d, new mempool weight: %d, max mempool weight: %d", ErrExceedsMaxMempoolSize, nextNonce, newCumulativeWeight, unconfirmedNonce, confirmedWeight, weightDiff, cfg.MaxMempoolWeight) + } + } return nil } @@ -360,41 +404,41 @@ func (p *DataPoster) waitForL1Finality() bool { } // Requires the caller hold the mutex. -// Returns the next nonce, its metadata if stored, a bool indicating if the metadata is present, and an error. +// Returns the next nonce, its metadata if stored, a bool indicating if the metadata is present, the cumulative weight, and an error if present. // Unlike GetNextNonceAndMeta, this does not call the metadataRetriever if the metadata is not stored in the queue. -func (p *DataPoster) getNextNonceAndMaybeMeta(ctx context.Context) (uint64, []byte, bool, error) { +func (p *DataPoster) getNextNonceAndMaybeMeta(ctx context.Context, thisWeight uint64) (uint64, []byte, bool, uint64, error) { // Ensure latest finalized block state is available.
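The new weight check in canPostWithNonce above counts a blob transaction as weighing its blob count and everything else as weight 1, and bounds pending weight rather than just pending transaction count; the second operand of the min() caps the estimate at 6 blobs per outstanding nonce (params.MaxBlobGasPerBlock / params.BlobTxBlobGasPerBlob). A stand-alone numeric walk-through with hypothetical values:

```go
package main

import "fmt"

func main() {
	const maxBlobGasPerBlock = 786432 // params.MaxBlobGasPerBlock
	const blobGasPerBlob = 131072     // params.BlobTxBlobGasPerBlob
	const maxMempoolWeight = 18       // cfg.MaxMempoolWeight (new default)

	// Hypothetical state: 3 unconfirmed txs already queued ahead of this one.
	var (
		unconfirmedNonce uint64 = 100 // next nonce expected on chain
		nextNonce        uint64 = 103
		confirmedWeight  uint64 = 250 // cumulative weight as of nonce 99
		previousCumW     uint64 = 262 // cumulative weight of the last queued tx
		thisWeight       uint64 = 6   // this batch carries 6 blobs
	)

	newCumulativeWeight := previousCumW + thisWeight
	// Bound the estimate at 6 blobs per outstanding nonce.
	perNonceCap := (nextNonce - unconfirmedNonce) * maxBlobGasPerBlock / blobGasPerBlob
	weightDiff := newCumulativeWeight - confirmedWeight
	if perNonceCap < weightDiff {
		weightDiff = perNonceCap
	}
	fmt.Println(weightDiff, weightDiff > maxMempoolWeight) // 18 false: still admitted
}
```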
blockNum, err := p.client.BlockNumber(ctx) if err != nil { - return 0, nil, false, err + return 0, nil, false, 0, err } lastQueueItem, err := p.queue.FetchLast(ctx) if err != nil { - return 0, nil, false, fmt.Errorf("fetching last element from queue: %w", err) + return 0, nil, false, 0, fmt.Errorf("fetching last element from queue: %w", err) } if lastQueueItem != nil { nextNonce := lastQueueItem.FullTx.Nonce() + 1 - if err := p.canPostWithNonce(ctx, nextNonce); err != nil { - return 0, nil, false, err + if err := p.canPostWithNonce(ctx, nextNonce, thisWeight); err != nil { + return 0, nil, false, 0, err } - return nextNonce, lastQueueItem.Meta, true, nil + return nextNonce, lastQueueItem.Meta, true, lastQueueItem.CumulativeWeight(), nil } if err := p.updateNonce(ctx); err != nil { if !p.queue.IsPersistent() && p.waitForL1Finality() { - return 0, nil, false, fmt.Errorf("error getting latest finalized nonce (and queue is not persistent): %w", err) + return 0, nil, false, 0, fmt.Errorf("error getting latest finalized nonce (and queue is not persistent): %w", err) } // Fall back to using a recent block to get the nonce. This is safe because there's nothing in the queue. nonceQueryBlock := arbmath.UintToBig(arbmath.SaturatingUSub(blockNum, 1)) log.Warn("failed to update nonce with queue empty; falling back to using a recent block", "recentBlock", nonceQueryBlock, "err", err) nonce, err := p.client.NonceAt(ctx, p.Sender(), nonceQueryBlock) if err != nil { - return 0, nil, false, fmt.Errorf("failed to get nonce at block %v: %w", nonceQueryBlock, err) + return 0, nil, false, 0, fmt.Errorf("failed to get nonce at block %v: %w", nonceQueryBlock, err) } p.lastBlock = nonceQueryBlock p.nonce = nonce } - return p.nonce, nil, false, nil + return p.nonce, nil, false, p.nonce, nil } // GetNextNonceAndMeta retrieves generates next nonce, validates that a @@ -403,7 +447,7 @@ func (p *DataPoster) getNextNonceAndMaybeMeta(ctx context.Context) (uint64, []by func (p *DataPoster) GetNextNonceAndMeta(ctx context.Context) (uint64, []byte, error) { p.mutex.Lock() defer p.mutex.Unlock() - nonce, meta, hasMeta, err := p.getNextNonceAndMaybeMeta(ctx) + nonce, meta, hasMeta, _, err := p.getNextNonceAndMaybeMeta(ctx, 1) if err != nil { return 0, nil, err } @@ -413,7 +457,8 @@ func (p *DataPoster) GetNextNonceAndMeta(ctx context.Context) (uint64, []byte, e return nonce, meta, err } -const minRbfIncrease = arbmath.OneInBips * 11 / 10 +const minNonBlobRbfIncrease = arbmath.OneInBips * 11 / 10 +const minBlobRbfIncrease = arbmath.OneInBips * 2 // evalMaxFeeCapExpr uses MaxFeeCapFormula from config to calculate the expression's result by plugging in appropriate parameter values // backlogOfBatches should already include extraBacklog @@ -452,7 +497,7 @@ func (p *DataPoster) evalMaxFeeCapExpr(backlogOfBatches uint64, elapsed time.Dur var big4 = big.NewInt(4) // The dataPosterBacklog argument should *not* include extraBacklog (it's added in in this function) -func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit uint64, numBlobs int, lastFeeCap *big.Int, lastTipCap *big.Int, dataCreatedAt time.Time, dataPosterBacklog uint64) (*big.Int, *big.Int, *big.Int, error) { +func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit uint64, numBlobs uint64, lastTx *types.Transaction, dataCreatedAt time.Time, dataPosterBacklog uint64) (*big.Int, *big.Int, *big.Int, error) { config := p.config() dataPosterBacklog += p.extraBacklog() latestHeader, err := p.headerReader.LastHeader(ctx) @@ -462,10 
+507,9 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u if latestHeader.BaseFee == nil { return nil, nil, nil, fmt.Errorf("latest parent chain block %v missing BaseFee (either the parent chain does not have EIP-1559 or the parent chain node is not synced)", latestHeader.Number) } - newBlobFeeCap := big.NewInt(0) + currentBlobFee := big.NewInt(0) if latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil { - newBlobFeeCap = eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) - newBlobFeeCap.Mul(newBlobFeeCap, common.Big2) + currentBlobFee = eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) } else if numBlobs > 0 { return nil, nil, nil, fmt.Errorf( "latest parent chain block %v missing ExcessBlobGas or BlobGasUsed but blobs were specified in data poster transaction "+ @@ -478,106 +522,163 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u if err != nil { return nil, nil, nil, fmt.Errorf("failed to get latest nonce %v blocks ago (block %v): %w", config.NonceRbfSoftConfs, softConfBlock, err) } - newFeeCap := new(big.Int).Mul(latestHeader.BaseFee, common.Big2) - newFeeCap = arbmath.BigMax(newFeeCap, arbmath.FloatToBig(config.MinFeeCapGwei*params.GWei)) - newTipCap, err := p.client.SuggestGasTipCap(ctx) + suggestedTip, err := p.client.SuggestGasTipCap(ctx) if err != nil { return nil, nil, nil, err } - newTipCap = arbmath.BigMax(newTipCap, arbmath.FloatToBig(config.MinTipCapGwei*params.GWei)) - newTipCap = arbmath.BigMin(newTipCap, arbmath.FloatToBig(config.MaxTipCapGwei*params.GWei)) - - hugeTipIncrease := false - if lastTipCap != nil { - newTipCap = arbmath.BigMax(newTipCap, arbmath.BigMulByBips(lastTipCap, minRbfIncrease)) - // hugeTipIncrease is true if the new tip cap is at least 10x the last tip cap - hugeTipIncrease = lastTipCap.Sign() == 0 || arbmath.BigDiv(newTipCap, lastTipCap).Cmp(big.NewInt(10)) >= 0 + minTipCapGwei, maxTipCapGwei, minRbfIncrease := config.MinTipCapGwei, config.MaxTipCapGwei, minNonBlobRbfIncrease + if numBlobs > 0 { + minTipCapGwei, maxTipCapGwei, minRbfIncrease = config.MinBlobTxTipCapGwei, config.MaxBlobTxTipCapGwei, minBlobRbfIncrease } + newTipCap := suggestedTip + newTipCap = arbmath.BigMax(newTipCap, arbmath.FloatToBig(minTipCapGwei*params.GWei)) + newTipCap = arbmath.BigMin(newTipCap, arbmath.FloatToBig(maxTipCapGwei*params.GWei)) - newFeeCap.Add(newFeeCap, newTipCap) - if lastFeeCap != nil && hugeTipIncrease { - log.Warn("data poster recommending huge tip increase", "lastTipCap", lastTipCap, "newTipCap", newTipCap) - // If we're trying to drastically increase the tip, make sure we increase the fee cap by minRbfIncrease. - newFeeCap = arbmath.BigMax(newFeeCap, arbmath.BigMulByBips(lastFeeCap, minRbfIncrease)) - } - - // TODO: if we're significantly increasing the blob fee cap, we also need to increase the fee cap my minRbfIncrease - // TODO: look more into geth's blob mempool and make sure this behavior conforms (I think minRbfIncrease might be higher there) - + // Compute the max fee with normalized gas so that blob txs aren't priced differently. + // Later, split the total cost bid into blob and non-blob fee caps. 
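The normalization described in the comment just above (and implemented a few lines below) prices each blob as though its usable bytes were nonzero calldata at 16 gas per byte, so a single spend target covers both transaction types. A worked example, assuming blobs.BlobEncodableData is 130048 bytes per blob:

```go
package main

import "fmt"

func main() {
	const blobEncodableData = 130048 // assumed value of blobs.BlobEncodableData (usable bytes per blob)
	const txDataNonZeroGas = 16      // params.TxDataNonZeroGasEIP2028

	gasLimit := uint64(2_000_000) // execution gas for the batch-posting tx
	numBlobs := uint64(6)

	// Each blob is counted as if its payload were nonzero calldata.
	normalizedGas := gasLimit + numBlobs*blobEncodableData*txDataNonZeroGas
	fmt.Println(normalizedGas) // 14484608

	// targetMaxCost (wei) = maxNormalizedFeeCap * normalizedGas; with a 30 gwei cap:
	targetMaxCost := normalizedGas * 30_000_000_000
	fmt.Println(targetMaxCost) // 434538240000000000 wei, roughly 0.43 ETH
}
```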
elapsed := time.Since(dataCreatedAt) - maxFeeCap, err := p.evalMaxFeeCapExpr(dataPosterBacklog, elapsed) + maxNormalizedFeeCap, err := p.evalMaxFeeCapExpr(dataPosterBacklog, elapsed) if err != nil { return nil, nil, nil, err } - if arbmath.BigGreaterThan(newFeeCap, maxFeeCap) { - log.Warn( - "reducing proposed fee cap to current maximum", - "proposedFeeCap", newFeeCap, - "maxFeeCap", maxFeeCap, - "elapsed", elapsed, - ) - newFeeCap = maxFeeCap - } + normalizedGas := gasLimit + numBlobs*blobs.BlobEncodableData*params.TxDataNonZeroGasEIP2028 + targetMaxCost := arbmath.BigMulByUint(maxNormalizedFeeCap, normalizedGas) - // TODO: also have an expression limiting the max blob fee cap + maxMempoolWeight := arbmath.MinInt(config.MaxMempoolWeight, config.MaxMempoolTransactions) latestBalance := p.balance balanceForTx := new(big.Int).Set(latestBalance) + weight := arbmath.MaxInt(1, numBlobs) + weightRemaining := weight + if config.AllocateMempoolBalance && !p.usingNoOpStorage { - // We split the transactions into three groups: - // - The first transaction gets 1/2 of the balance. - // - The first half of transactions get 1/3 of the balance split among them. - // - The remaining transactions get the remaining 1/6 of the balance split among them. + // We split the transaction weight into three groups: + // - The first weight point gets 1/2 of the balance. + // - The first half of the weight gets 1/3 of the balance split among them. + // - The remaining weight get the remaining 1/6 of the balance split among them. // This helps ensure batch posting is reliable under a variety of fee conditions. // With noop storage, we don't try to replace-by-fee, so we don't need to worry about this. - balanceForTx.Div(balanceForTx, common.Big2) - if nonce != softConfNonce && config.MaxMempoolTransactions > 1 { + balancePerWeight := new(big.Int).Div(balanceForTx, common.Big2) + balanceForTx = big.NewInt(0) + if nonce == softConfNonce || maxMempoolWeight == 1 { + balanceForTx.Add(balanceForTx, balancePerWeight) + weightRemaining -= 1 + } + if weightRemaining > 0 { // Compared to dividing the remaining transactions by balance equally, // the first half of transactions should get a 4/3 weight, // and the remaining half should get a 2/3 weight. // This makes sure the average weight is 1, and the first half of transactions // have twice the weight of the second half of transactions. // The +1 and -1 here are to account for the first transaction being handled separately. - if nonce > softConfNonce && nonce < softConfNonce+1+(config.MaxMempoolTransactions-1)/2 { - balanceForTx.Mul(balanceForTx, big4) + if nonce > softConfNonce && nonce < softConfNonce+1+(maxMempoolWeight-1)/2 { + balancePerWeight.Mul(balancePerWeight, big4) } else { - balanceForTx.Mul(balanceForTx, common.Big2) + balancePerWeight.Mul(balancePerWeight, common.Big2) } - balanceForTx.Div(balanceForTx, common.Big3) + balancePerWeight.Div(balancePerWeight, common.Big3) // After weighting, split the balance between each of the transactions // other than the first tx which already got half. 
// balanceForTx /= config.MaxMempoolTransactions-1 - balanceForTx.Div(balanceForTx, arbmath.UintToBig(config.MaxMempoolTransactions-1)) + balancePerWeight.Div(balancePerWeight, arbmath.UintToBig(maxMempoolWeight-1)) + balanceForTx.Add(balanceForTx, arbmath.BigMulByUint(balancePerWeight, weight)) } } - // TODO: take into account blob costs - balanceFeeCap := arbmath.BigDivByUint(balanceForTx, gasLimit) - if arbmath.BigGreaterThan(newFeeCap, balanceFeeCap) { + + if arbmath.BigGreaterThan(targetMaxCost, balanceForTx) { log.Warn( "lack of L1 balance prevents posting transaction with desired fee cap", "balance", latestBalance, - "maxTransactions", config.MaxMempoolTransactions, + "weight", weight, + "maxMempoolWeight", maxMempoolWeight, "balanceForTransaction", balanceForTx, "gasLimit", gasLimit, - "desiredFeeCap", newFeeCap, - "balanceFeeCap", balanceFeeCap, + "targetMaxCost", targetMaxCost, "nonce", nonce, "softConfNonce", softConfNonce, ) - newFeeCap = balanceFeeCap - } - - if arbmath.BigGreaterThan(newTipCap, newFeeCap) { - log.Warn( - "reducing new tip cap to new fee cap", + targetMaxCost = balanceForTx + } + + if lastTx != nil { + // Replace by fee rules require that the tip cap is increased + newTipCap = arbmath.BigMax(newTipCap, arbmath.BigMulByBips(lastTx.GasTipCap(), minRbfIncrease)) + } + + // Divide the targetMaxCost into blob and non-blob costs. + currentNonBlobFee := arbmath.BigAdd(latestHeader.BaseFee, newTipCap) + blobGasUsed := params.BlobTxBlobGasPerBlob * numBlobs + currentBlobCost := arbmath.BigMulByUint(currentBlobFee, blobGasUsed) + currentNonBlobCost := arbmath.BigMulByUint(currentNonBlobFee, gasLimit) + newBlobFeeCap := arbmath.BigMul(targetMaxCost, currentBlobFee) + newBlobFeeCap.Div(newBlobFeeCap, arbmath.BigAdd(currentBlobCost, currentNonBlobCost)) + if lastTx != nil && lastTx.BlobGasFeeCap() != nil { + newBlobFeeCap = arbmath.BigMax(newBlobFeeCap, arbmath.BigMulByBips(lastTx.BlobGasFeeCap(), minRbfIncrease)) + } + targetBlobCost := arbmath.BigMulByUint(newBlobFeeCap, blobGasUsed) + targetNonBlobCost := arbmath.BigSub(targetMaxCost, targetBlobCost) + newBaseFeeCap := arbmath.BigDivByUint(targetNonBlobCost, gasLimit) + if lastTx != nil && numBlobs > 0 && arbmath.BigDivToBips(newBaseFeeCap, lastTx.GasFeeCap()) < minRbfIncrease { + // Increase the non-blob fee cap to the minimum rbf increase + newBaseFeeCap = arbmath.BigMulByBips(lastTx.GasFeeCap(), minRbfIncrease) + newNonBlobCost := arbmath.BigMulByUint(newBaseFeeCap, gasLimit) + // Increasing the non-blob fee cap requires lowering the blob fee cap to compensate + baseFeeCostIncrease := arbmath.BigSub(newNonBlobCost, targetNonBlobCost) + newBlobCost := arbmath.BigSub(targetBlobCost, baseFeeCostIncrease) + newBlobFeeCap = arbmath.BigDivByUint(newBlobCost, blobGasUsed) + } + + if arbmath.BigGreaterThan(newTipCap, newBaseFeeCap) { + log.Info( + "reducing new tip cap to new basefee cap", "proposedTipCap", newTipCap, - "newFeeCap", newFeeCap, + "newBasefeeCap", newBaseFeeCap, ) - newTipCap = new(big.Int).Set(newFeeCap) + newTipCap = new(big.Int).Set(newBaseFeeCap) + } + + logFields := []any{ + "targetMaxCost", targetMaxCost, + "elapsed", elapsed, + "dataPosterBacklog", dataPosterBacklog, + "nonce", nonce, + "isReplacing", lastTx != nil, + "balanceForTx", balanceForTx, + "currentBaseFee", latestHeader.BaseFee, + "newBasefeeCap", newBaseFeeCap, + "suggestedTip", suggestedTip, + "newTipCap", newTipCap, + "currentBlobFee", currentBlobFee, + "newBlobFeeCap", newBlobFeeCap, + } + + log.Debug("calculated data poster fee and tip 
caps", logFields...) + + if newBaseFeeCap.Sign() < 0 || newTipCap.Sign() < 0 || newBlobFeeCap.Sign() < 0 { + msg := "can't meet data poster fee cap obligations with current target max cost" + log.Info(msg, logFields...) + if lastTx != nil { + // wait until we have a higher target max cost to replace by fee + return lastTx.GasFeeCap(), lastTx.GasTipCap(), lastTx.BlobGasFeeCap(), nil + } else { + return nil, nil, nil, errors.New(msg) + } + } + + if lastTx != nil && (arbmath.BigLessThan(newBaseFeeCap, currentNonBlobFee) || (numBlobs > 0 && arbmath.BigLessThan(newBlobFeeCap, currentBlobFee))) { + // Make sure our replace by fee can meet the current parent chain fee demands. + // Without this check, we'd blindly increase each fee component by the min rbf amount each time, + // without looking at which component(s) actually need increased. + // E.g. instead of 2x basefee and 2x blobfee, we might actually want to 4x basefee and 2x blobfee. + // This check lets us hold off on the rbf until we are actually meet the current fee requirements, + // which lets us move in a particular direction (biasing towards either basefee or blobfee). + log.Info("can't meet current parent chain fees with current target max cost", logFields...) + // wait until we have a higher target max cost to replace by fee + return lastTx.GasFeeCap(), lastTx.GasTipCap(), lastTx.BlobGasFeeCap(), nil } - return newFeeCap, newTipCap, newBlobFeeCap, nil + return newBaseFeeCap, newTipCap, newBlobFeeCap, nil } func (p *DataPoster) PostSimpleTransaction(ctx context.Context, nonce uint64, to common.Address, calldata []byte, gasLimit uint64, value *big.Int) (*types.Transaction, error) { @@ -588,7 +689,11 @@ func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Tim p.mutex.Lock() defer p.mutex.Unlock() - expectedNonce, _, _, err := p.getNextNonceAndMaybeMeta(ctx) + var weight uint64 = 1 + if len(kzgBlobs) > 0 { + weight = uint64(len(kzgBlobs)) + } + expectedNonce, _, _, lastCumulativeWeight, err := p.getNextNonceAndMaybeMeta(ctx, weight) if err != nil { return nil, err } @@ -601,14 +706,16 @@ func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Tim return nil, fmt.Errorf("failed to update data poster balance: %w", err) } - feeCap, tipCap, blobFeeCap, err := p.feeAndTipCaps(ctx, nonce, gasLimit, len(kzgBlobs), nil, nil, dataCreatedAt, 0) + feeCap, tipCap, blobFeeCap, err := p.feeAndTipCaps(ctx, nonce, gasLimit, uint64(len(kzgBlobs)), nil, dataCreatedAt, 0) if err != nil { return nil, err } var deprecatedData types.DynamicFeeTx var inner types.TxData + replacementTimes := p.replacementTimes if len(kzgBlobs) > 0 { + replacementTimes = p.blobTxReplacementTimes value256, overflow := uint256.FromBig(value) if overflow { return nil, fmt.Errorf("blob transaction callvalue %v overflows uint256", value) @@ -662,13 +769,15 @@ func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Tim if err != nil { return nil, fmt.Errorf("signing transaction: %w", err) } + cumulativeWeight := lastCumulativeWeight + weight queuedTx := storage.QueuedTransaction{ - DeprecatedData: deprecatedData, - FullTx: fullTx, - Meta: meta, - Sent: false, - Created: dataCreatedAt, - NextReplacement: time.Now().Add(p.replacementTimes[0]), + DeprecatedData: deprecatedData, + FullTx: fullTx, + Meta: meta, + Sent: false, + Created: dataCreatedAt, + NextReplacement: time.Now().Add(replacementTimes[0]), + StoredCumulativeWeight: &cumulativeWeight, } return fullTx, p.sendTx(ctx, nil, &queuedTx) } @@ -701,17 +810,44 @@ 
func (p *DataPoster) saveTx(ctx context.Context, prevTx, newTx *storage.QueuedTr } func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransaction, newTx *storage.QueuedTransaction) error { + latestHeader, err := p.client.HeaderByNumber(ctx, nil) + if err != nil { + return err + } + var currentBlobFee *big.Int + if latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil { + currentBlobFee = eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) + } + + if arbmath.BigLessThan(newTx.FullTx.GasFeeCap(), latestHeader.BaseFee) { + log.Info( + "submitting transaction with GasFeeCap less than latest basefee", + "txBasefeeCap", newTx.FullTx.GasFeeCap(), + "latestBasefee", latestHeader.BaseFee, + "elapsed", time.Since(newTx.Created), + ) + } + + if newTx.FullTx.BlobGasFeeCap() != nil && currentBlobFee != nil && arbmath.BigLessThan(newTx.FullTx.BlobGasFeeCap(), currentBlobFee) { + log.Info( + "submitting transaction with BlobGasFeeCap less than latest blobfee", + "txBlobGasFeeCap", newTx.FullTx.BlobGasFeeCap(), + "latestBlobFee", currentBlobFee, + "elapsed", time.Since(newTx.Created), + ) + } + if err := p.saveTx(ctx, prevTx, newTx); err != nil { return err } if err := p.client.SendTransaction(ctx, newTx.FullTx); err != nil { if !strings.Contains(err.Error(), "already known") && !strings.Contains(err.Error(), "nonce too low") { - log.Warn("DataPoster failed to send transaction", "err", err, "nonce", newTx.FullTx.Nonce(), "feeCap", newTx.FullTx.GasFeeCap(), "tipCap", newTx.FullTx.GasTipCap(), "gas", newTx.FullTx.Gas()) + log.Warn("DataPoster failed to send transaction", "err", err, "nonce", newTx.FullTx.Nonce(), "feeCap", newTx.FullTx.GasFeeCap(), "tipCap", newTx.FullTx.GasTipCap(), "blobFeeCap", newTx.FullTx.BlobGasFeeCap(), "gas", newTx.FullTx.Gas()) return err } log.Info("DataPoster transaction already known", "err", err, "nonce", newTx.FullTx.Nonce(), "hash", newTx.FullTx.Hash()) } else { - log.Info("DataPoster sent transaction", "nonce", newTx.FullTx.Nonce(), "hash", newTx.FullTx.Hash(), "feeCap", newTx.FullTx.GasFeeCap(), "tipCap", newTx.FullTx.GasTipCap(), "gas", newTx.FullTx.Gas()) + log.Info("DataPoster sent transaction", "nonce", newTx.FullTx.Nonce(), "hash", newTx.FullTx.Hash(), "feeCap", newTx.FullTx.GasFeeCap(), "tipCap", newTx.FullTx.GasTipCap(), "blobFeeCap", newTx.FullTx.BlobGasFeeCap(), "gas", newTx.FullTx.Gas()) } newerTx := *newTx newerTx.Sent = true @@ -754,16 +890,20 @@ func updateGasCaps(tx *types.Transaction, newFeeCap, newTipCap, newBlobFeeCap *b } // The mutex must be held by the caller. 
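The replaceTx changes that follow rely on the two replace-by-fee floors defined earlier in this file: minNonBlobRbfIncrease (a 10% bump, 11000 bips) and minBlobRbfIncrease (a 2x bump, 20000 bips), the latter in line with geth's stricter blob-pool replacement rules. A tiny sketch of the basis-point comparison replaceTx now performs on both the fee cap and the blob fee cap; divToBips here approximates arbmath.BigDivToBips and oneInBips mirrors arbmath.OneInBips:

```go
package main

import (
	"fmt"
	"math/big"
)

// oneInBips mirrors arbmath.OneInBips (10000 bips = 100%).
const oneInBips = 10000

// divToBips returns num/denom expressed in basis points, roughly what
// arbmath.BigDivToBips does.
func divToBips(num, denom *big.Int) uint64 {
	r := new(big.Int).Mul(num, big.NewInt(oneInBips))
	r.Div(r, denom)
	return r.Uint64()
}

func main() {
	const minNonBlobRbfIncrease = oneInBips * 11 / 10 // 11000 bips = +10%
	const minBlobRbfIncrease = oneInBips * 2          // 20000 bips = 2x

	lastFeeCap := big.NewInt(50_000_000_000)
	newFeeCap := big.NewInt(56_000_000_000) // +12%

	fmt.Println(divToBips(newFeeCap, lastFeeCap) >= minNonBlobRbfIncrease) // true: enough for a non-blob tx
	fmt.Println(divToBips(newFeeCap, lastFeeCap) >= minBlobRbfIncrease)    // false: a blob tx would wait
}
```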
-func (p *DataPoster) replaceTx(ctx context.Context, prevTx *storage.QueuedTransaction, backlogOfBatches uint64) error { - newFeeCap, newTipCap, newBlobFeeCap, err := p.feeAndTipCaps(ctx, prevTx.FullTx.Nonce(), prevTx.FullTx.Gas(), len(prevTx.FullTx.BlobHashes()), prevTx.FullTx.GasFeeCap(), prevTx.FullTx.GasTipCap(), prevTx.Created, backlogOfBatches) +func (p *DataPoster) replaceTx(ctx context.Context, prevTx *storage.QueuedTransaction, backlogWeight uint64) error { + newFeeCap, newTipCap, newBlobFeeCap, err := p.feeAndTipCaps(ctx, prevTx.FullTx.Nonce(), prevTx.FullTx.Gas(), uint64(len(prevTx.FullTx.BlobHashes())), prevTx.FullTx, prevTx.Created, backlogWeight) if err != nil { return err } - minNewFeeCap := arbmath.BigMulByBips(prevTx.FullTx.GasFeeCap(), minRbfIncrease) + minRbfIncrease := minNonBlobRbfIncrease + if len(prevTx.FullTx.BlobHashes()) > 0 { + minRbfIncrease = minBlobRbfIncrease + } + newTx := *prevTx - // TODO: also look at the blob fee cap - if newFeeCap.Cmp(minNewFeeCap) < 0 { + if arbmath.BigDivToBips(newFeeCap, prevTx.FullTx.GasFeeCap()) < minRbfIncrease || + (prevTx.FullTx.BlobGasFeeCap() != nil && arbmath.BigDivToBips(newBlobFeeCap, prevTx.FullTx.BlobGasFeeCap()) < minRbfIncrease) { log.Debug( "no need to replace by fee transaction", "nonce", prevTx.FullTx.Nonce(), @@ -771,13 +911,20 @@ func (p *DataPoster) replaceTx(ctx context.Context, prevTx *storage.QueuedTransa "recommendedFeeCap", newFeeCap, "lastTipCap", prevTx.FullTx.GasTipCap(), "recommendedTipCap", newTipCap, + "lastBlobFeeCap", prevTx.FullTx.BlobGasFeeCap(), + "recommendedBlobFeeCap", newBlobFeeCap, ) newTx.NextReplacement = time.Now().Add(time.Minute) return p.sendTx(ctx, prevTx, &newTx) } + replacementTimes := p.replacementTimes + if len(prevTx.FullTx.BlobHashes()) > 0 { + replacementTimes = p.blobTxReplacementTimes + } + elapsed := time.Since(prevTx.Created) - for _, replacement := range p.replacementTimes { + for _, replacement := range replacementTimes { if elapsed >= replacement { continue } @@ -877,7 +1024,7 @@ func (p *DataPoster) maybeLogError(err error, tx *storage.QueuedTransaction, msg } else { delete(p.errorCount, nonce) } - logLevel(msg, "err", err, "nonce", nonce, "feeCap", tx.FullTx.GasFeeCap(), "tipCap", tx.FullTx.GasTipCap(), "gas", tx.FullTx.Gas()) + logLevel(msg, "err", err, "nonce", nonce, "feeCap", tx.FullTx.GasFeeCap(), "tipCap", tx.FullTx.GasTipCap(), "blobFeeCap", tx.FullTx.BlobGasFeeCap(), "gas", tx.FullTx.Gas()) } const minWait = time.Second * 10 @@ -899,7 +1046,7 @@ func (p *DataPoster) Start(ctxIn context.Context) { log.Warn("failed to update tx poster nonce", "err", err) } now := time.Now() - nextCheck := now.Add(p.replacementTimes[0]) + nextCheck := now.Add(arbmath.MinInt(p.replacementTimes[0], p.blobTxReplacementTimes[0])) maxTxsToRbf := p.config().MaxMempoolTransactions if maxTxsToRbf == 0 { maxTxsToRbf = 512 @@ -917,12 +1064,23 @@ func (p *DataPoster) Start(ctxIn context.Context) { log.Error("Failed to fetch tx queue contents", "err", err) return minWait } - for index, tx := range queueContents { - backlogOfBatches := len(queueContents) - index - 1 + latestQueued, err := p.queue.FetchLast(ctx) + if err != nil { + log.Error("Failed to fetch latest queued tx", "err", err) + return minWait + } + var latestCumulativeWeight, latestNonce uint64 + if latestQueued != nil { + latestCumulativeWeight = latestQueued.CumulativeWeight() + latestNonce = latestQueued.FullTx.Nonce() + } + for _, tx := range queueContents { replacing := false if now.After(tx.NextReplacement) { replacing = true
- err := p.replaceTx(ctx, tx, uint64(backlogOfBatches)) + nonceBacklog := arbmath.SaturatingUSub(latestNonce, tx.FullTx.Nonce()) + weightBacklog := arbmath.SaturatingUSub(latestCumulativeWeight, tx.CumulativeWeight()) + err := p.replaceTx(ctx, tx, arbmath.MaxInt(nonceBacklog, weightBacklog)) p.maybeLogError(err, tx, "failed to replace-by-fee transaction") } if nextCheck.After(tx.NextReplacement) { @@ -957,7 +1115,9 @@ func (p *DataPoster) Start(ctxIn context.Context) { type QueueStorage interface { // Returns at most maxResults items starting from specified index. FetchContents(ctx context.Context, startingIndex uint64, maxResults uint64) ([]*storage.QueuedTransaction, error) - // Returns item with the biggest index. + // Returns the item at index, or nil if not found. + Get(ctx context.Context, index uint64) (*storage.QueuedTransaction, error) + // Returns item with the biggest index, or nil if the queue is empty. FetchLast(ctx context.Context) (*storage.QueuedTransaction, error) // Prunes items up to (excluding) specified index. Prune(ctx context.Context, until uint64) error @@ -970,18 +1130,21 @@ type QueueStorage interface { } type DataPosterConfig struct { - RedisSigner signature.SimpleHmacConfig `koanf:"redis-signer"` - ReplacementTimes string `koanf:"replacement-times"` + RedisSigner signature.SimpleHmacConfig `koanf:"redis-signer"` + ReplacementTimes string `koanf:"replacement-times"` + BlobTxReplacementTimes string `koanf:"blob-tx-replacement-times"` // This is forcibly disabled if the parent chain is an Arbitrum chain, // so you should probably use DataPoster's waitForL1Finality method instead of reading this field directly. WaitForL1Finality bool `koanf:"wait-for-l1-finality" reload:"hot"` MaxMempoolTransactions uint64 `koanf:"max-mempool-transactions" reload:"hot"` + MaxMempoolWeight uint64 `koanf:"max-mempool-weight" reload:"hot"` MaxQueuedTransactions int `koanf:"max-queued-transactions" reload:"hot"` TargetPriceGwei float64 `koanf:"target-price-gwei" reload:"hot"` UrgencyGwei float64 `koanf:"urgency-gwei" reload:"hot"` - MinFeeCapGwei float64 `koanf:"min-fee-cap-gwei" reload:"hot"` MinTipCapGwei float64 `koanf:"min-tip-cap-gwei" reload:"hot"` + MinBlobTxTipCapGwei float64 `koanf:"min-blob-tx-tip-cap-gwei" reload:"hot"` MaxTipCapGwei float64 `koanf:"max-tip-cap-gwei" reload:"hot"` + MaxBlobTxTipCapGwei float64 `koanf:"max-blob-tx-tip-cap-gwei" reload:"hot"` NonceRbfSoftConfs uint64 `koanf:"nonce-rbf-soft-confs" reload:"hot"` AllocateMempoolBalance bool `koanf:"allocate-mempool-balance" reload:"hot"` UseDBStorage bool `koanf:"use-db-storage"` @@ -1025,14 +1188,17 @@ type ConfigFetcher func() *DataPosterConfig func DataPosterConfigAddOptions(prefix string, f *pflag.FlagSet, defaultDataPosterConfig DataPosterConfig) { f.String(prefix+".replacement-times", defaultDataPosterConfig.ReplacementTimes, "comma-separated list of durations since first posting to attempt a replace-by-fee") + f.String(prefix+".blob-tx-replacement-times", defaultDataPosterConfig.BlobTxReplacementTimes, "comma-separated list of durations since first posting a blob transaction to attempt a replace-by-fee") f.Bool(prefix+".wait-for-l1-finality", defaultDataPosterConfig.WaitForL1Finality, "only treat a transaction as confirmed after L1 finality has been achieved (recommended)") f.Uint64(prefix+".max-mempool-transactions", defaultDataPosterConfig.MaxMempoolTransactions, "the maximum number of transactions to have queued in the mempool at once (0 = unlimited)") + f.Uint64(prefix+".max-mempool-weight", 
defaultDataPosterConfig.MaxMempoolWeight, "the maximum number of weight (weight = min(1, tx.blobs)) to have queued in the mempool at once (0 = unlimited)") f.Int(prefix+".max-queued-transactions", defaultDataPosterConfig.MaxQueuedTransactions, "the maximum number of unconfirmed transactions to track at once (0 = unlimited)") f.Float64(prefix+".target-price-gwei", defaultDataPosterConfig.TargetPriceGwei, "the target price to use for maximum fee cap calculation") f.Float64(prefix+".urgency-gwei", defaultDataPosterConfig.UrgencyGwei, "the urgency to use for maximum fee cap calculation") - f.Float64(prefix+".min-fee-cap-gwei", defaultDataPosterConfig.MinFeeCapGwei, "the minimum fee cap to post transactions at") f.Float64(prefix+".min-tip-cap-gwei", defaultDataPosterConfig.MinTipCapGwei, "the minimum tip cap to post transactions at") + f.Float64(prefix+".min-blob-tx-tip-cap-gwei", defaultDataPosterConfig.MinBlobTxTipCapGwei, "the minimum tip cap to post EIP-4844 blob carrying transactions at") f.Float64(prefix+".max-tip-cap-gwei", defaultDataPosterConfig.MaxTipCapGwei, "the maximum tip cap to post transactions at") + f.Float64(prefix+".max-blob-tx-tip-cap-gwei", defaultDataPosterConfig.MaxBlobTxTipCapGwei, "the maximum tip cap to post EIP-4844 blob carrying transactions at") f.Uint64(prefix+".nonce-rbf-soft-confs", defaultDataPosterConfig.NonceRbfSoftConfs, "the maximum probable reorg depth, used to determine when a transaction will no longer likely need replaced-by-fee") f.Bool(prefix+".allocate-mempool-balance", defaultDataPosterConfig.AllocateMempoolBalance, "if true, don't put transactions in the mempool that spend a total greater than the batch poster's balance") f.Bool(prefix+".use-db-storage", defaultDataPosterConfig.UseDBStorage, "uses database storage when enabled") @@ -1064,12 +1230,16 @@ func addExternalSignerOptions(prefix string, f *pflag.FlagSet) { var DefaultDataPosterConfig = DataPosterConfig{ ReplacementTimes: "5m,10m,20m,30m,1h,2h,4h,6h,8h,12h,16h,18h,20h,22h", + BlobTxReplacementTimes: "5m,10m,30m,1h,4h,8h,16h,22h", WaitForL1Finality: true, TargetPriceGwei: 60., UrgencyGwei: 2., - MaxMempoolTransactions: 20, + MaxMempoolTransactions: 18, + MaxMempoolWeight: 18, MinTipCapGwei: 0.05, + MinBlobTxTipCapGwei: 1, // default geth minimum, and relays aren't likely to accept lower values given propagation time MaxTipCapGwei: 5, + MaxBlobTxTipCapGwei: 1, // lower than normal because 4844 rbf is a minimum of a 2x NonceRbfSoftConfs: 1, AllocateMempoolBalance: true, UseDBStorage: true, @@ -1084,19 +1254,25 @@ var DefaultDataPosterConfig = DataPosterConfig{ var DefaultDataPosterConfigForValidator = func() DataPosterConfig { config := DefaultDataPosterConfig - config.MaxMempoolTransactions = 1 // the validator cannot queue transactions + // the validator cannot queue transactions + config.MaxMempoolTransactions = 1 + config.MaxMempoolWeight = 1 return config }() var TestDataPosterConfig = DataPosterConfig{ ReplacementTimes: "1s,2s,5s,10s,20s,30s,1m,5m", + BlobTxReplacementTimes: "1s,10s,30s,5m", RedisSigner: signature.TestSimpleHmacConfig, WaitForL1Finality: false, TargetPriceGwei: 60., UrgencyGwei: 2., - MaxMempoolTransactions: 20, + MaxMempoolTransactions: 18, + MaxMempoolWeight: 18, MinTipCapGwei: 0.05, + MinBlobTxTipCapGwei: 1, MaxTipCapGwei: 5, + MaxBlobTxTipCapGwei: 1, NonceRbfSoftConfs: 1, AllocateMempoolBalance: true, UseDBStorage: false, @@ -1110,6 +1286,8 @@ var TestDataPosterConfig = DataPosterConfig{ var TestDataPosterConfigForValidator = func() DataPosterConfig { config := 
TestDataPosterConfig - config.MaxMempoolTransactions = 1 // the validator cannot queue transactions + // the validator cannot queue transactions + config.MaxMempoolTransactions = 1 + config.MaxMempoolWeight = 1 return config }() diff --git a/arbnode/dataposter/dbstorage/storage.go b/arbnode/dataposter/dbstorage/storage.go index 473bfa2c3b..2cfda5d779 100644 --- a/arbnode/dataposter/dbstorage/storage.go +++ b/arbnode/dataposter/dbstorage/storage.go @@ -58,6 +58,18 @@ func (s *Storage) FetchContents(_ context.Context, startingIndex uint64, maxResu return res, it.Error() } +func (s *Storage) Get(_ context.Context, index uint64) (*storage.QueuedTransaction, error) { + key := idxToKey(index) + value, err := s.db.Get(key) + if err != nil { + if errors.Is(err, leveldb.ErrNotFound) { + return nil, nil + } + return nil, err + } + return s.encDec().Decode(value) +} + func (s *Storage) lastItemIdx(context.Context) ([]byte, error) { return s.db.Get(lastItemIdxKey) } diff --git a/arbnode/dataposter/noop/storage.go b/arbnode/dataposter/noop/storage.go index b3947bcaa0..c90e36b067 100644 --- a/arbnode/dataposter/noop/storage.go +++ b/arbnode/dataposter/noop/storage.go @@ -16,6 +16,10 @@ func (s *Storage) FetchContents(_ context.Context, _, _ uint64) ([]*storage.Queu return nil, nil } +func (s *Storage) Get(_ context.Context, _ uint64) (*storage.QueuedTransaction, error) { + return nil, nil +} + func (s *Storage) FetchLast(ctx context.Context) (*storage.QueuedTransaction, error) { return nil, nil } diff --git a/arbnode/dataposter/redis/redisstorage.go b/arbnode/dataposter/redis/redisstorage.go index f2393611b2..8b6dcf65ac 100644 --- a/arbnode/dataposter/redis/redisstorage.go +++ b/arbnode/dataposter/redis/redisstorage.go @@ -78,6 +78,20 @@ func (s *Storage) FetchContents(ctx context.Context, startingIndex uint64, maxRe return items, nil } +func (s *Storage) Get(ctx context.Context, index uint64) (*storage.QueuedTransaction, error) { + contents, err := s.FetchContents(ctx, index, 1) + if err != nil { + return nil, err + } + if len(contents) == 0 { + return nil, nil + } else if len(contents) == 1 { + return contents[0], nil + } else { + return nil, fmt.Errorf("expected only one return value for Get but got %v", len(contents)) + } +} + func (s *Storage) FetchLast(ctx context.Context) (*storage.QueuedTransaction, error) { query := redis.ZRangeArgs{ Key: s.key, diff --git a/arbnode/dataposter/slice/slicestorage.go b/arbnode/dataposter/slice/slicestorage.go index dbd7a3ea5e..69de7564a3 100644 --- a/arbnode/dataposter/slice/slicestorage.go +++ b/arbnode/dataposter/slice/slicestorage.go @@ -45,6 +45,13 @@ func (s *Storage) FetchContents(_ context.Context, startingIndex uint64, maxResu return res, nil } +func (s *Storage) Get(_ context.Context, index uint64) (*storage.QueuedTransaction, error) { + if index >= s.firstNonce+uint64(len(s.queue)) || index < s.firstNonce { + return nil, nil + } + return s.encDec().Decode(s.queue[index-s.firstNonce]) +} + func (s *Storage) FetchLast(context.Context) (*storage.QueuedTransaction, error) { if len(s.queue) == 0 { return nil, nil diff --git a/arbnode/dataposter/storage/storage.go b/arbnode/dataposter/storage/storage.go index 9586b9c9a9..8e5a7e1798 100644 --- a/arbnode/dataposter/storage/storage.go +++ b/arbnode/dataposter/storage/storage.go @@ -26,31 +26,42 @@ var ( ) type QueuedTransaction struct { - FullTx *types.Transaction - DeprecatedData types.DynamicFeeTx // FullTx should be used instead - Meta []byte - Sent bool - Created time.Time // may be earlier than the tx was 
given to the tx poster - NextReplacement time.Time + FullTx *types.Transaction + DeprecatedData types.DynamicFeeTx // FullTx should be used instead + Meta []byte + Sent bool + Created time.Time // may be earlier than the tx was given to the tx poster + NextReplacement time.Time + StoredCumulativeWeight *uint64 +} + +// CumulativeWeight returns a rough estimate of the total number of batches submitted at this point, not guaranteed to be exact +func (t *QueuedTransaction) CumulativeWeight() uint64 { + if t.StoredCumulativeWeight != nil { + return *t.StoredCumulativeWeight + } + return t.FullTx.Nonce() } type queuedTransactionForEncoding struct { - FullTx *types.Transaction - Data types.DynamicFeeTx - Meta []byte - Sent bool - Created RlpTime - NextReplacement RlpTime + FullTx *types.Transaction + Data types.DynamicFeeTx + Meta []byte + Sent bool + Created RlpTime + NextReplacement RlpTime + StoredCumulativeWeight *uint64 `rlp:"optional"` } func (qt *QueuedTransaction) EncodeRLP(w io.Writer) error { return rlp.Encode(w, queuedTransactionForEncoding{ - FullTx: qt.FullTx, - Data: qt.DeprecatedData, - Meta: qt.Meta, - Sent: qt.Sent, - Created: (RlpTime)(qt.Created), - NextReplacement: (RlpTime)(qt.NextReplacement), + FullTx: qt.FullTx, + Data: qt.DeprecatedData, + Meta: qt.Meta, + Sent: qt.Sent, + Created: (RlpTime)(qt.Created), + NextReplacement: (RlpTime)(qt.NextReplacement), + StoredCumulativeWeight: qt.StoredCumulativeWeight, }) } @@ -65,6 +76,7 @@ func (qt *QueuedTransaction) DecodeRLP(s *rlp.Stream) error { qt.Sent = qtEnc.Sent qt.Created = time.Time(qtEnc.Created) qt.NextReplacement = time.Time(qtEnc.NextReplacement) + qt.StoredCumulativeWeight = qtEnc.StoredCumulativeWeight return nil } diff --git a/arbnode/delayed.go b/arbnode/delayed.go index 2a1745c540..c166aa2b90 100644 --- a/arbnode/delayed.go +++ b/arbnode/delayed.go @@ -221,10 +221,10 @@ func (b *DelayedBridge) logsToDeliveredMessages(ctx context.Context, logs []type msgKey := common.BigToHash(parsedLog.MessageIndex) data, ok := messageData[msgKey] if !ok { - return nil, errors.New("message not found") + return nil, fmt.Errorf("message %v data not found", parsedLog.MessageIndex) } if crypto.Keccak256Hash(data) != parsedLog.MessageDataHash { - return nil, errors.New("found message data with mismatched hash") + return nil, fmt.Errorf("found message %v data with mismatched hash", parsedLog.MessageIndex) } requestId := common.BigToHash(parsedLog.MessageIndex) diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index eaf863bffc..f98f93a3eb 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -374,11 +374,11 @@ func (t *InboxTracker) AddDelayedMessages(messages []*DelayedInboxMessage, hardR } if seqNum != pos { - return errors.New("unexpected delayed sequence number") + return fmt.Errorf("unexpected delayed sequence number %v, expected %v", seqNum, pos) } if nextAcc != message.BeforeInboxAcc { - return errors.New("previous delayed accumulator mismatch") + return fmt.Errorf("previous delayed accumulator mismatch for message %v", seqNum) } nextAcc = message.AfterInboxAcc() @@ -606,8 +606,14 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L ctx: ctx, client: client, } - - multiplexer := arbstate.NewInboxMultiplexer(backend, prevbatchmeta.DelayedMessageCount, t.das, t.blobReader, arbstate.KeysetValidate) + var daProviders []arbstate.DataAvailabilityProvider + if t.das != nil { + daProviders = append(daProviders, arbstate.NewDAProviderDAS(t.das)) + } + if t.blobReader != nil { 
+ daProviders = append(daProviders, arbstate.NewDAProviderBlobReader(t.blobReader)) + } + multiplexer := arbstate.NewInboxMultiplexer(backend, prevbatchmeta.DelayedMessageCount, daProviders, arbstate.KeysetValidate) batchMessageCounts := make(map[uint64]arbutil.MessageIndex) currentpos := prevbatchmeta.MessageCount + 1 for { diff --git a/arbnode/node.go b/arbnode/node.go index 31c13133d8..c19e02dddc 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -43,6 +43,7 @@ import ( "github.com/offchainlabs/nitro/util/contracts" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/redisutil" + "github.com/offchainlabs/nitro/util/rpcclient" "github.com/offchainlabs/nitro/util/signature" "github.com/offchainlabs/nitro/wsbroadcastserver" ) @@ -66,10 +67,10 @@ func GenerateRollupConfig(prod bool, wasmModuleRoot common.Hash, rollupOwner com // TODO could the ChainConfig be just []byte? ChainConfig: string(serializedChainConfig), SequencerInboxMaxTimeVariation: rollupgen.ISequencerInboxMaxTimeVariation{ - DelayBlocks: 60 * 60 * 24 / 15, - FutureBlocks: 12, - DelaySeconds: 60 * 60 * 24, - FutureSeconds: 60 * 60, + DelayBlocks: big.NewInt(60 * 60 * 24 / 15), + FutureBlocks: big.NewInt(12), + DelaySeconds: big.NewInt(60 * 60 * 24), + FutureSeconds: big.NewInt(60 * 60), }, } } @@ -86,7 +87,6 @@ type Config struct { Staker staker.L1ValidatorConfig `koanf:"staker" reload:"hot"` SeqCoordinator SeqCoordinatorConfig `koanf:"seq-coordinator"` DataAvailability das.DataAvailabilityConfig `koanf:"data-availability"` - BlobClient BlobClientConfig `koanf:"blob-client"` SyncMonitor SyncMonitorConfig `koanf:"sync-monitor"` Dangerous DangerousConfig `koanf:"dangerous"` TransactionStreamer TransactionStreamerConfig `koanf:"transaction-streamer" reload:"hot"` @@ -151,7 +151,6 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet, feedInputEnable bool, feed staker.L1ValidatorConfigAddOptions(prefix+".staker", f) SeqCoordinatorConfigAddOptions(prefix+".seq-coordinator", f) das.DataAvailabilityConfigAddNodeOptions(prefix+".data-availability", f) - BlobClientAddOptions(prefix+".blob-client", f) SyncMonitorConfigAddOptions(prefix+".sync-monitor", f) DangerousConfigAddOptions(prefix+".dangerous", f) TransactionStreamerConfigAddOptions(prefix+".transaction-streamer", f) @@ -190,6 +189,7 @@ func ConfigDefaultL1Test() *Config { func ConfigDefaultL1NonSequencerTest() *Config { config := ConfigDefault + config.Dangerous = TestDangerousConfig config.ParentChainReader = headerreader.TestConfig config.InboxReader = TestInboxReaderConfig config.DelayedSequencer.Enable = false @@ -198,13 +198,14 @@ func ConfigDefaultL1NonSequencerTest() *Config { config.BlockValidator = staker.TestBlockValidatorConfig config.Staker = staker.TestL1ValidatorConfig config.Staker.Enable = false - config.BlockValidator.ValidationServer.URL = "" + config.BlockValidator.ValidationServerConfigs = []rpcclient.ClientConfig{{URL: ""}} return &config } func ConfigDefaultL2Test() *Config { config := ConfigDefault + config.Dangerous = TestDangerousConfig config.ParentChainReader.Enable = false config.SeqCoordinator = TestSeqCoordinatorConfig config.Feed.Input.Verify.Dangerous.AcceptMissing = true @@ -213,7 +214,7 @@ func ConfigDefaultL2Test() *Config { config.SeqCoordinator.Signer.ECDSA.Dangerous.AcceptMissing = true config.Staker = staker.TestL1ValidatorConfig config.Staker.Enable = false - config.BlockValidator.ValidationServer.URL = "" + config.BlockValidator.ValidationServerConfigs = []rpcclient.ClientConfig{{URL: ""}} 
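Stepping back to the GenerateRollupConfig hunk earlier in this file: the max-time-variation defaults are numerically unchanged and are only wrapped in big.NewInt, presumably to match regenerated ISequencerInboxMaxTimeVariation bindings. Spelled out:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Same values as the previous uint64 literals, now as *big.Int.
	delayBlocks := big.NewInt(60 * 60 * 24 / 15) // 5760 blocks, roughly 24h of 15s L1 blocks
	futureBlocks := big.NewInt(12)
	delaySeconds := big.NewInt(60 * 60 * 24) // 86400 s (24h)
	futureSeconds := big.NewInt(60 * 60)     // 3600 s (1h)
	fmt.Println(delayBlocks, futureBlocks, delaySeconds, futureSeconds)
}
```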
config.TransactionStreamer = DefaultTransactionStreamerConfig return &config @@ -222,16 +223,25 @@ func ConfigDefaultL2Test() *Config { type DangerousConfig struct { NoL1Listener bool `koanf:"no-l1-listener"` NoSequencerCoordinator bool `koanf:"no-sequencer-coordinator"` + DisableBlobReader bool `koanf:"disable-blob-reader"` } var DefaultDangerousConfig = DangerousConfig{ NoL1Listener: false, NoSequencerCoordinator: false, + DisableBlobReader: false, +} + +var TestDangerousConfig = DangerousConfig{ + NoL1Listener: false, + NoSequencerCoordinator: false, + DisableBlobReader: true, } func DangerousConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".no-l1-listener", DefaultDangerousConfig.NoL1Listener, "DANGEROUS! disables listening to L1. To be used in test nodes only") f.Bool(prefix+".no-sequencer-coordinator", DefaultDangerousConfig.NoSequencerCoordinator, "DANGEROUS! allows sequencing without sequencer-coordinator") + f.Bool(prefix+".disable-blob-reader", DefaultDangerousConfig.DisableBlobReader, "DANGEROUS! disables the EIP-4844 blob reader, which is necessary to read batches") } type Node struct { @@ -241,6 +251,7 @@ type Node struct { L1Reader *headerreader.HeaderReader TxStreamer *TransactionStreamer DeployInfo *chaininfo.RollupAddresses + BlobReader arbstate.BlobReader InboxReader *InboxReader InboxTracker *InboxTracker DelayedSequencer *DelayedSequencer @@ -359,6 +370,7 @@ func createNodeImpl( dataSigner signature.DataSignerFunc, fatalErrChan chan error, parentChainID *big.Int, + blobReader arbstate.BlobReader, ) (*Node, error) { config := configFetcher.Get() @@ -462,6 +474,7 @@ func createNodeImpl( L1Reader: nil, TxStreamer: txStreamer, DeployInfo: nil, + BlobReader: blobReader, InboxReader: nil, InboxTracker: nil, DelayedSequencer: nil, @@ -522,14 +535,6 @@ func createNodeImpl( return nil, errors.New("a data availability service is required for this chain, but it was not configured") } - var blobReader arbstate.BlobReader - if config.BlobClient.BeaconChainUrl != "" { - blobReader, err = NewBlobClient(config.BlobClient, l1client) - if err != nil { - return nil, err - } - } - inboxTracker, err := NewInboxTracker(arbDb, txStreamer, daReader, blobReader) if err != nil { return nil, err @@ -541,7 +546,7 @@ func createNodeImpl( txStreamer.SetInboxReaders(inboxReader, delayedBridge) var statelessBlockValidator *staker.StatelessBlockValidator - if config.BlockValidator.ValidationServer.URL != "" { + if config.BlockValidator.ValidationServerConfigs[0].URL != "" { statelessBlockValidator, err = staker.NewStatelessBlockValidator( inboxReader, inboxTracker, @@ -661,6 +666,7 @@ func createNodeImpl( L1Reader: l1Reader, Inbox: inboxTracker, Streamer: txStreamer, + VersionGetter: exec, SyncMonitor: syncMonitor, Config: func() *BatchPosterConfig { return &configFetcher.Get().BatchPoster }, DeployInfo: deployInfo, @@ -686,6 +692,7 @@ func createNodeImpl( L1Reader: l1Reader, TxStreamer: txStreamer, DeployInfo: deployInfo, + BlobReader: blobReader, InboxReader: inboxReader, InboxTracker: inboxTracker, DelayedSequencer: delayedSequencer, @@ -725,8 +732,9 @@ func CreateNode( dataSigner signature.DataSignerFunc, fatalErrChan chan error, parentChainID *big.Int, + blobReader arbstate.BlobReader, ) (*Node, error) { - currentNode, err := createNodeImpl(ctx, stack, exec, arbDb, configFetcher, l2Config, l1client, deployInfo, txOptsValidator, txOptsBatchPoster, dataSigner, fatalErrChan, parentChainID) + currentNode, err := createNodeImpl(ctx, stack, exec, arbDb, configFetcher, l2Config, 
l1client, deployInfo, txOptsValidator, txOptsBatchPoster, dataSigner, fatalErrChan, parentChainID, blobReader) if err != nil { return nil, err } @@ -775,6 +783,12 @@ func (n *Node) Start(ctx context.Context) error { if err != nil { return fmt.Errorf("error starting exec client: %w", err) } + if n.BlobReader != nil { + err = n.BlobReader.Initialize(ctx) + if err != nil { + return fmt.Errorf("error initializing blob reader: %w", err) + } + } if n.InboxTracker != nil { err = n.InboxTracker.Initialize() if err != nil { @@ -815,12 +829,6 @@ func (n *Node) Start(ctx context.Context) error { if n.SeqCoordinator != nil { n.SeqCoordinator.Start(ctx) } else { - if n.DelayedSequencer != nil { - err := n.DelayedSequencer.ForceSequenceDelayed(ctx) - if err != nil { - return fmt.Errorf("error initially sequencing delayed instructions: %w", err) - } - } n.Execution.Activate() } if n.MaintenanceRunner != nil { diff --git a/arbnode/sequencer_inbox.go b/arbnode/sequencer_inbox.go index b743bf0ef9..edda4e5512 100644 --- a/arbnode/sequencer_inbox.go +++ b/arbnode/sequencer_inbox.go @@ -45,7 +45,7 @@ func init() { } batchDeliveredID = sequencerBridgeABI.Events["SequencerBatchDelivered"].ID sequencerBatchDataABI = sequencerBridgeABI.Events[sequencerBatchDataEvent] - addSequencerL2BatchFromOriginCallABI = sequencerBridgeABI.Methods["addSequencerL2BatchFromOrigin"] + addSequencerL2BatchFromOriginCallABI = sequencerBridgeABI.Methods["addSequencerL2BatchFromOrigin0"] } type SequencerInbox struct { diff --git a/arbos/arbosState/arbosstate.go b/arbos/arbosState/arbosstate.go index 7fdb61aba2..9e3b90532e 100644 --- a/arbos/arbosState/arbosstate.go +++ b/arbos/arbosState/arbosstate.go @@ -318,18 +318,10 @@ func (state *ArbosState) UpgradeArbosVersion( } // ArbOS versions 12 through 19 are left to Orbit chains for custom upgrades. case 20: - if !chainConfig.DebugMode() { - // This upgrade isn't finalized so we only want to support it for testing - return fmt.Errorf( - "the chain is upgrading to unsupported ArbOS version %v, %w", - nextArbosVersion, - ErrFatalNodeOutOfDate, - ) - } // Update Brotli compression level for fast compression from 0 to 1 ensure(state.SetBrotliCompressionLevel(1)) default: - if nextArbosVersion >= 12 && state.arbosVersion < 20 { + if nextArbosVersion >= 12 && nextArbosVersion <= 19 { // ArbOS versions 12 through 19 are left to Orbit chains for custom upgrades. } else { return fmt.Errorf( diff --git a/arbstate/das_reader.go b/arbstate/das_reader.go index 46d01b7bb1..f131a53608 100644 --- a/arbstate/das_reader.go +++ b/arbstate/das_reader.go @@ -46,10 +46,18 @@ const BlobHashesHeaderFlag byte = L1AuthenticatedMessageHeaderFlag | 0x10 // 0x5 // BrotliMessageHeaderByte indicates that the message is brotli-compressed. 
const BrotliMessageHeaderByte byte = 0 +// KnownHeaderBits is all header bits with known meaning to this nitro version +const KnownHeaderBits byte = DASMessageHeaderFlag | TreeDASMessageHeaderFlag | L1AuthenticatedMessageHeaderFlag | ZeroheavyMessageHeaderFlag | BlobHashesHeaderFlag | BrotliMessageHeaderByte + +// hasBits returns true if `checking` has all `bits` func hasBits(checking byte, bits byte) bool { return (checking & bits) == bits } +func IsL1AuthenticatedMessageHeaderByte(header byte) bool { + return hasBits(header, L1AuthenticatedMessageHeaderFlag) +} + func IsDASMessageHeaderByte(header byte) bool { return hasBits(header, DASMessageHeaderFlag) } @@ -70,6 +78,11 @@ func IsBrotliMessageHeaderByte(b uint8) bool { return b == BrotliMessageHeaderByte } +// IsKnownHeaderByte returns true if the supplied header byte has only known bits +func IsKnownHeaderByte(b uint8) bool { + return b&^KnownHeaderBits == 0 +} + type DataAvailabilityCertificate struct { KeysetHash [32]byte DataHash [32]byte diff --git a/arbstate/inbox.go b/arbstate/inbox.go index fcb1c1ebcb..3105ee92b1 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -19,6 +19,7 @@ import ( "github.com/ethereum/go-ethereum/rlp" "github.com/offchainlabs/nitro/arbcompress" + "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/l1pricing" "github.com/offchainlabs/nitro/arbutil" @@ -45,6 +46,7 @@ type BlobReader interface { batchBlockHash common.Hash, versionedHashes []common.Hash, ) ([]kzg4844.Blob, error) + Initialize(ctx context.Context) error } type sequencerMessage struct { @@ -61,7 +63,12 @@ const maxZeroheavyDecompressedLen = 101*MaxDecompressedLen/100 + 64 const MaxSegmentsPerSequencerMessage = 100 * 1024 const MinLifetimeSecondsForDataAvailabilityCert = 7 * 24 * 60 * 60 // one week -func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash common.Hash, data []byte, dasReader DataAvailabilityReader, blobReader BlobReader, keysetValidationMode KeysetValidationMode) (*sequencerMessage, error) { +var ( + ErrNoBlobReader = errors.New("blob batch payload was encountered but no BlobReader was configured") + ErrInvalidBlobDataFormat = errors.New("blob batch data is not a list of hashes as expected") +) + +func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash common.Hash, data []byte, daProviders []DataAvailabilityProvider, keysetValidationMode KeysetValidationMode) (*sequencerMessage, error) { if len(data) < 40 { return nil, errors.New("sequencer message missing L1 header") } @@ -75,47 +82,47 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash } payload := data[40:] + // Stage 0: Check if our node is out of date and we don't understand this batch type + // If the parent chain sequencer inbox smart contract authenticated this batch, + // an unknown header byte must mean that this node is out of date, + // because the smart contract understands the header byte and this node doesn't. + if len(payload) > 0 && IsL1AuthenticatedMessageHeaderByte(payload[0]) && !IsKnownHeaderByte(payload[0]) { + return nil, fmt.Errorf("%w: batch has unsupported authenticated header byte 0x%02x", arbosState.ErrFatalNodeOutOfDate, payload[0]) + } + // Stage 1: Extract the payload from any data availability header. // It's important that multiple DAS strategies can't both be invoked in the same batch, // as these headers are validated by the sequencer inbox and not other DASs. 
- if len(payload) > 0 && IsDASMessageHeaderByte(payload[0]) { - if dasReader == nil { - log.Error("No DAS Reader configured, but sequencer message found with DAS header") - } else { - var err error - payload, err = RecoverPayloadFromDasBatch(ctx, batchNum, data, dasReader, nil, keysetValidationMode) - if err != nil { - return nil, err - } - if payload == nil { - return parsedMsg, nil + // We try to extract payload from the first occurring valid DA provider in the daProviders list + if len(payload) > 0 { + foundDA := false + var err error + for _, provider := range daProviders { + if provider != nil && provider.IsValidHeaderByte(payload[0]) { + payload, err = provider.RecoverPayloadFromBatch(ctx, batchNum, batchBlockHash, data, nil, keysetValidationMode) + if err != nil { + return nil, err + } + if payload == nil { + return parsedMsg, nil + } + foundDA = true + break } } - } else if len(payload) > 0 && IsBlobHashesHeaderByte(payload[0]) { - blobHashes := payload[1:] - if len(blobHashes)%len(common.Hash{}) != 0 { - return nil, fmt.Errorf("blob batch data is not a list of hashes as expected") - } - versionedHashes := make([]common.Hash, len(blobHashes)/len(common.Hash{})) - for i := 0; i*32 < len(blobHashes); i += 1 { - copy(versionedHashes[i][:], blobHashes[i*32:(i+1)*32]) - } - - if blobReader == nil { - return nil, errors.New("blob batch payload was encountered but no BlobReader was configured") - } - kzgBlobs, err := blobReader.GetBlobs(ctx, batchBlockHash, versionedHashes) - if err != nil { - return nil, fmt.Errorf("failed to get blobs: %w", err) - } - payload, err = blobs.DecodeBlobs(kzgBlobs) - if err != nil { - log.Warn("Failed to decode blobs", "batchBlockHash", batchBlockHash, "versionedHashes", versionedHashes, "err", err) - return parsedMsg, nil + if !foundDA { + if IsDASMessageHeaderByte(payload[0]) { + log.Error("No DAS Reader configured, but sequencer message found with DAS header") + } else if IsBlobHashesHeaderByte(payload[0]) { + return nil, ErrNoBlobReader + } } } + // At this point, `payload` has not been validated by the sequencer inbox at all. + // It's not safe to trust any part of the payload from this point onwards. + // Stage 2: If enabled, decode the zero heavy payload (saves gas based on calldata charging). if len(payload) > 0 && IsZeroheavyEncodedHeaderByte(payload[0]) { pl, err := io.ReadAll(io.LimitReader(zeroheavy.NewZeroheavyDecoder(bytes.NewReader(payload[1:])), int64(maxZeroheavyDecompressedLen))) @@ -271,6 +278,92 @@ func RecoverPayloadFromDasBatch( return payload, nil } +type DataAvailabilityProvider interface { + // IsValidHeaderByte returns true if the given headerByte has bits corresponding to the DA provider + IsValidHeaderByte(headerByte byte) bool + + // RecoverPayloadFromBatch fetches the underlying payload from the DA provider given the batch header information + RecoverPayloadFromBatch( + ctx context.Context, + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + preimages map[arbutil.PreimageType]map[common.Hash][]byte, + keysetValidationMode KeysetValidationMode, + ) ([]byte, error) +} + +// NewDAProviderDAS is generally meant to be only used by nitro.
+// DA Providers should implement methods in the DataAvailabilityProvider interface independently +func NewDAProviderDAS(das DataAvailabilityReader) *dAProviderForDAS { + return &dAProviderForDAS{ + das: das, + } +} + +type dAProviderForDAS struct { + das DataAvailabilityReader +} + +func (d *dAProviderForDAS) IsValidHeaderByte(headerByte byte) bool { + return IsDASMessageHeaderByte(headerByte) +} + +func (d *dAProviderForDAS) RecoverPayloadFromBatch( + ctx context.Context, + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + preimages map[arbutil.PreimageType]map[common.Hash][]byte, + keysetValidationMode KeysetValidationMode, +) ([]byte, error) { + return RecoverPayloadFromDasBatch(ctx, batchNum, sequencerMsg, d.das, preimages, keysetValidationMode) +} + +// NewDAProviderBlobReader is generally meant to be only used by nitro. +// DA Providers should implement methods in the DataAvailabilityProvider interface independently +func NewDAProviderBlobReader(blobReader BlobReader) *dAProviderForBlobReader { + return &dAProviderForBlobReader{ + blobReader: blobReader, + } +} + +type dAProviderForBlobReader struct { + blobReader BlobReader +} + +func (b *dAProviderForBlobReader) IsValidHeaderByte(headerByte byte) bool { + return IsBlobHashesHeaderByte(headerByte) +} + +func (b *dAProviderForBlobReader) RecoverPayloadFromBatch( + ctx context.Context, + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + preimages map[arbutil.PreimageType]map[common.Hash][]byte, + keysetValidationMode KeysetValidationMode, +) ([]byte, error) { + blobHashes := sequencerMsg[41:] + if len(blobHashes)%len(common.Hash{}) != 0 { + return nil, ErrInvalidBlobDataFormat + } + versionedHashes := make([]common.Hash, len(blobHashes)/len(common.Hash{})) + for i := 0; i*32 < len(blobHashes); i += 1 { + copy(versionedHashes[i][:], blobHashes[i*32:(i+1)*32]) + } + kzgBlobs, err := b.blobReader.GetBlobs(ctx, batchBlockHash, versionedHashes) + if err != nil { + return nil, fmt.Errorf("failed to get blobs: %w", err) + } + payload, err := blobs.DecodeBlobs(kzgBlobs) + if err != nil { + log.Warn("Failed to decode blobs", "batchBlockHash", batchBlockHash, "versionedHashes", versionedHashes, "err", err) + return nil, nil + } + return payload, nil +} + type KeysetValidationMode uint8 const KeysetValidate KeysetValidationMode = 0 @@ -280,8 +373,7 @@ const KeysetDontValidate KeysetValidationMode = 2 type inboxMultiplexer struct { backend InboxBackend delayedMessagesRead uint64 - dasReader DataAvailabilityReader - blobReader BlobReader + daProviders []DataAvailabilityProvider cachedSequencerMessage *sequencerMessage cachedSequencerMessageNum uint64 cachedSegmentNum uint64 @@ -291,12 +383,11 @@ type inboxMultiplexer struct { keysetValidationMode KeysetValidationMode } -func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, dasReader DataAvailabilityReader, blobReader BlobReader, keysetValidationMode KeysetValidationMode) arbostypes.InboxMultiplexer { +func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, daProviders []DataAvailabilityProvider, keysetValidationMode KeysetValidationMode) arbostypes.InboxMultiplexer { return &inboxMultiplexer{ backend: backend, delayedMessagesRead: delayedMessagesRead, - dasReader: dasReader, - blobReader: blobReader, + daProviders: daProviders, keysetValidationMode: keysetValidationMode, } } @@ -318,7 +409,7 @@ func (r *inboxMultiplexer) Pop(ctx context.Context) (*arbostypes.MessageWithMeta } r.cachedSequencerMessageNum = 
r.backend.GetSequencerInboxPosition() var err error - r.cachedSequencerMessage, err = parseSequencerMessage(ctx, r.cachedSequencerMessageNum, batchBlockHash, bytes, r.dasReader, r.blobReader, r.keysetValidationMode) + r.cachedSequencerMessage, err = parseSequencerMessage(ctx, r.cachedSequencerMessageNum, batchBlockHash, bytes, r.daProviders, r.keysetValidationMode) if err != nil { return nil, err } diff --git a/arbstate/inbox_fuzz_test.go b/arbstate/inbox_fuzz_test.go index dcf43fd0da..b34c02534b 100644 --- a/arbstate/inbox_fuzz_test.go +++ b/arbstate/inbox_fuzz_test.go @@ -67,7 +67,7 @@ func FuzzInboxMultiplexer(f *testing.F) { delayedMessage: delayedMsg, positionWithinMessage: 0, } - multiplexer := NewInboxMultiplexer(backend, 0, nil, nil, KeysetValidate) + multiplexer := NewInboxMultiplexer(backend, 0, nil, KeysetValidate) _, err := multiplexer.Pop(context.TODO()) if err != nil { panic(err) diff --git a/blocks_reexecutor/blocks_reexecutor.go b/blocks_reexecutor/blocks_reexecutor.go new file mode 100644 index 0000000000..42bd1428dc --- /dev/null +++ b/blocks_reexecutor/blocks_reexecutor.go @@ -0,0 +1,174 @@ +package blocksreexecutor + +import ( + "context" + "errors" + "fmt" + "math/rand" + "runtime" + "strings" + + "github.com/ethereum/go-ethereum/arbitrum" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/offchainlabs/nitro/util/arbmath" + "github.com/offchainlabs/nitro/util/stopwaiter" + flag "github.com/spf13/pflag" +) + +type Config struct { + Enable bool `koanf:"enable"` + Mode string `koanf:"mode"` + StartBlock uint64 `koanf:"start-block"` + EndBlock uint64 `koanf:"end-block"` + Room int `koanf:"room"` + BlocksPerThread uint64 `koanf:"blocks-per-thread"` +} + +func (c *Config) Validate() error { + c.Mode = strings.ToLower(c.Mode) + if c.Enable && c.Mode != "random" && c.Mode != "full" { + return errors.New("invalid mode for blocks re-execution") + } + if c.EndBlock < c.StartBlock { + return errors.New("invalid block range for blocks re-execution") + } + if c.Room == 0 { + return errors.New("room for blocks re-execution cannot be zero") + } + return nil +} + +var DefaultConfig = Config{ + Enable: false, + Mode: "random", + Room: runtime.NumCPU(), + BlocksPerThread: 10000, +} + +var TestConfig = Config{ + Enable: true, + Mode: "full", + Room: runtime.NumCPU(), + BlocksPerThread: 10, +} + +func ConfigAddOptions(prefix string, f *flag.FlagSet) { + f.Bool(prefix+".enable", DefaultConfig.Enable, "enables re-execution of a range of blocks against historic state") + f.String(prefix+".mode", DefaultConfig.Mode, "mode to run the blocks-reexecutor on. Valid modes full and random. full - execute all the blocks in the given range. random - execute a random sample range of blocks with in a given range") + f.Uint64(prefix+".start-block", DefaultConfig.StartBlock, "first block number of the block range for re-execution") + f.Uint64(prefix+".end-block", DefaultConfig.EndBlock, "last block number of the block range for re-execution") + f.Int(prefix+".room", DefaultConfig.Room, "number of threads to parallelize blocks re-execution") + f.Uint64(prefix+".blocks-per-thread", DefaultConfig.BlocksPerThread, "minimum number of blocks to execute per thread. 
When mode is random this acts as the size of random block range sample") +} + +type BlocksReExecutor struct { + stopwaiter.StopWaiter + config *Config + blockchain *core.BlockChain + stateFor func(header *types.Header) (*state.StateDB, error) + done chan struct{} + fatalErrChan chan error + startBlock uint64 + currentBlock uint64 +} + +func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *BlocksReExecutor { + start := c.StartBlock + end := c.EndBlock + chainStart := blockchain.Config().ArbitrumChainParams.GenesisBlockNum + chainEnd := blockchain.CurrentBlock().Number.Uint64() + if start == 0 && end == 0 { + start = chainStart + end = chainEnd + } + if start < chainStart { + log.Warn("state reexecutor's start block number is lower than genesis, resetting to genesis") + start = chainStart + } + if end > chainEnd { + log.Warn("state reexecutor's end block number is greater than latest, resetting to latest") + end = chainEnd + } + if c.Mode == "random" && end != start { + if c.BlocksPerThread > end-start { + c.BlocksPerThread = end - start + } + start += uint64(rand.Intn(int(end - start - c.BlocksPerThread + 1))) + end = start + c.BlocksPerThread + } + // inclusive of block reexecution [start, end] + if start > 0 { + start-- + } + return &BlocksReExecutor{ + config: c, + blockchain: blockchain, + currentBlock: end, + startBlock: start, + done: make(chan struct{}, c.Room), + fatalErrChan: fatalErrChan, + stateFor: func(header *types.Header) (*state.StateDB, error) { return blockchain.StateAt(header.Root) }, + } +} + +// LaunchBlocksReExecution launches the thread to apply blocks of range [currentBlock-s.config.BlocksPerThread, currentBlock] to the last available valid state +func (s *BlocksReExecutor) LaunchBlocksReExecution(ctx context.Context, currentBlock uint64) uint64 { + start := arbmath.SaturatingUSub(currentBlock, s.config.BlocksPerThread) + if start < s.startBlock { + start = s.startBlock + } + startState, startHeader, err := arbitrum.FindLastAvailableState(ctx, s.blockchain, s.stateFor, s.blockchain.GetHeaderByNumber(start), nil, -1) + if err != nil { + s.fatalErrChan <- fmt.Errorf("blocksReExecutor failed to get last available state while searching for state at %d, err: %w", start, err) + return s.startBlock + } + start = startHeader.Number.Uint64() + s.LaunchThread(func(ctx context.Context) { + _, err := arbitrum.AdvanceStateUpToBlock(ctx, s.blockchain, startState, s.blockchain.GetHeaderByNumber(currentBlock), startHeader, nil) + if err != nil { + s.fatalErrChan <- fmt.Errorf("blocksReExecutor errored advancing state from block %d to block %d, err: %w", start, currentBlock, err) + } else { + log.Info("Successfully reexecuted blocks against historic state", "stateAt", start, "startBlock", start+1, "endBlock", currentBlock) + } + s.done <- struct{}{} + }) + return start +} + +func (s *BlocksReExecutor) Impl(ctx context.Context) { + var threadsLaunched uint64 + end := s.currentBlock + for i := 0; i < s.config.Room && s.currentBlock > s.startBlock; i++ { + threadsLaunched++ + s.currentBlock = s.LaunchBlocksReExecution(ctx, s.currentBlock) + } + for { + select { + case <-s.done: + if s.currentBlock > s.startBlock { + s.currentBlock = s.LaunchBlocksReExecution(ctx, s.currentBlock) + } else { + threadsLaunched-- + } + + case <-ctx.Done(): + return + } + if threadsLaunched == 0 { + break + } + } + log.Info("BlocksReExecutor successfully completed re-execution of blocks against historic state", "stateAt", s.startBlock, "startBlock", s.startBlock+1, "endBlock", end) +} + 
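[Editor's note -- illustrative aside, not part of the patch.] The subtlest part of New above is the "random" mode: BlocksPerThread is first shrunk so it fits inside the configured [start, end] window, and a random sub-window of that size is then slid within the range (New afterwards steps start back by one so the state before the window's first block can be looked up). A minimal standalone sketch of that window selection follows; the function name and sample numbers are illustrative, and the clamping to genesis/head is assumed to have already happened.

package main

import (
	"fmt"
	"math/rand"
)

// pickRandomWindow mirrors the "random" branch of blocksreexecutor.New: shrink
// blocksPerThread so it fits inside [start, end], then slide a random window of
// that size within the range.
func pickRandomWindow(start, end, blocksPerThread uint64) (uint64, uint64) {
	if blocksPerThread > end-start {
		blocksPerThread = end - start
	}
	// rand.Intn(n) returns a value in [0, n), so the window never leaves [start, end].
	start += uint64(rand.Intn(int(end - start - blocksPerThread + 1)))
	return start, start + blocksPerThread
}

func main() {
	// Example: the re-executable range is blocks 100..10100 with 1000 blocks per thread.
	s, e := pickRandomWindow(100, 10100, 1000)
	fmt.Printf("picked re-execution window [%d, %d]\n", s, e)
}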
+func (s *BlocksReExecutor) Start(ctx context.Context) { + s.StopWaiter.Start(ctx, s) + s.LaunchThread(s.Impl) +} + +func (s *BlocksReExecutor) StopAndWait() { + s.StopWaiter.StopAndWait() +} diff --git a/cmd/conf/chain.go b/cmd/conf/chain.go index e9ec2af0c1..531945b4d6 100644 --- a/cmd/conf/chain.go +++ b/cmd/conf/chain.go @@ -7,14 +7,16 @@ import ( "time" "github.com/offchainlabs/nitro/cmd/genericconf" + "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/rpcclient" flag "github.com/spf13/pflag" ) -type L1Config struct { - ID uint64 `koanf:"id"` - Connection rpcclient.ClientConfig `koanf:"connection" reload:"hot"` - Wallet genericconf.WalletConfig `koanf:"wallet"` +type ParentChainConfig struct { + ID uint64 `koanf:"id"` + Connection rpcclient.ClientConfig `koanf:"connection" reload:"hot"` + Wallet genericconf.WalletConfig `koanf:"wallet"` + BlobClient headerreader.BlobClientConfig `koanf:"blob-client"` } var L1ConnectionConfigDefault = rpcclient.ClientConfig{ @@ -25,10 +27,11 @@ var L1ConnectionConfigDefault = rpcclient.ClientConfig{ ArgLogLimit: 2048, } -var L1ConfigDefault = L1Config{ +var L1ConfigDefault = ParentChainConfig{ ID: 0, Connection: L1ConnectionConfigDefault, Wallet: DefaultL1WalletConfig, + BlobClient: headerreader.DefaultBlobClientConfig, } var DefaultL1WalletConfig = genericconf.WalletConfig{ @@ -43,13 +46,14 @@ func L1ConfigAddOptions(prefix string, f *flag.FlagSet) { f.Uint64(prefix+".id", L1ConfigDefault.ID, "if set other than 0, will be used to validate database and L1 connection") rpcclient.RPCClientAddOptions(prefix+".connection", f, &L1ConfigDefault.Connection) genericconf.WalletConfigAddOptions(prefix+".wallet", f, L1ConfigDefault.Wallet.Pathname) + headerreader.BlobClientAddOptions(prefix+".blob-client", f) } -func (c *L1Config) ResolveDirectoryNames(chain string) { +func (c *ParentChainConfig) ResolveDirectoryNames(chain string) { c.Wallet.ResolveDirectoryNames(chain) } -func (c *L1Config) Validate() error { +func (c *ParentChainConfig) Validate() error { return c.Connection.Validate() } diff --git a/cmd/deploy/deploy.go b/cmd/deploy/deploy.go index afbcddec62..1c8b858106 100644 --- a/cmd/deploy/deploy.go +++ b/cmd/deploy/deploy.go @@ -10,6 +10,7 @@ import ( "fmt" "math/big" "os" + "strings" "time" "github.com/offchainlabs/nitro/cmd/chaininfo" @@ -41,6 +42,8 @@ func main() { deployAccount := flag.String("l1DeployAccount", "", "l1 seq account to use (default is first account in keystore)") ownerAddressString := flag.String("ownerAddress", "", "the rollup owner's address") sequencerAddressString := flag.String("sequencerAddress", "", "the sequencer's address") + batchPostersString := flag.String("batchPosters", "", "the comma separated array of addresses of batch posters. Defaults to sequencer address") + batchPosterManagerAddressString := flag.String("batchPosterManger", "", "the batch poster manger's address. 
Defaults to owner address") nativeTokenAddressString := flag.String("nativeTokenAddress", "0x0000000000000000000000000000000000000000", "address of the ERC20 token which is used as native L2 currency") maxDataSizeUint := flag.Uint64("maxDataSize", 117964, "maximum data size of a batch or a cross-chain message (default = 90% of Geth's 128KB tx size limit)") loserEscrowAddressString := flag.String("loserEscrowAddress", "", "the address which half of challenge loser's funds accumulate at") @@ -56,6 +59,7 @@ func main() { authorizevalidators := flag.Uint64("authorizevalidators", 0, "Number of validators to preemptively authorize") txTimeout := flag.Duration("txtimeout", 10*time.Minute, "Timeout when waiting for a transaction to be included in a block") prod := flag.Bool("prod", false, "Whether to configure the rollup for production or testing") + isUsingFeeToken := flag.Bool("isUsingFeeToken", false, "true if the chain uses custom fee token") flag.Parse() l1ChainId := new(big.Int).SetUint64(*l1ChainIdUint) maxDataSize := new(big.Int).SetUint64(*maxDataSizeUint) @@ -92,15 +96,47 @@ func main() { if !common.IsHexAddress(*sequencerAddressString) && len(*sequencerAddressString) > 0 { panic("specified sequencer address is invalid") } + sequencerAddress := common.HexToAddress(*sequencerAddressString) + if !common.IsHexAddress(*ownerAddressString) { panic("please specify a valid rollup owner address") } + ownerAddress := common.HexToAddress(*ownerAddressString) + if *prod && !common.IsHexAddress(*loserEscrowAddressString) { panic("please specify a valid loser escrow address") } - sequencerAddress := common.HexToAddress(*sequencerAddressString) - ownerAddress := common.HexToAddress(*ownerAddressString) + var batchPosters []common.Address + if len(*batchPostersString) > 0 { + batchPostersArr := strings.Split(*batchPostersString, ",") + for _, address := range batchPostersArr { + if !common.IsHexAddress(address) { + log.Error("invalid address in batch posters array", "address", address) + continue + } + batchPosters = append(batchPosters, common.HexToAddress(address)) + } + if len(batchPosters) != len(batchPostersArr) { + panic("found at least one invalid address in batch posters array") + } + } + if len(batchPosters) == 0 { + log.Info("batch posters array was empty, defaulting to sequencer address") + batchPosters = append(batchPosters, sequencerAddress) + } + + var batchPosterManagerAddress common.Address + if common.IsHexAddress(*batchPosterManagerAddressString) { + batchPosterManagerAddress = common.HexToAddress(*batchPosterManagerAddressString) + } else { + if len(*batchPosterManagerAddressString) > 0 { + panic("please specify a valid batch poster manager address") + } + log.Info("batch poster manager address was empty, defaulting to owner address") + batchPosterManagerAddress = ownerAddress + } + loserEscrowAddress := common.HexToAddress(*loserEscrowAddressString) if sequencerAddress != (common.Address{}) && ownerAddress != l1TransactionOpts.From { panic("cannot specify sequencer address if owner is not deployer") @@ -146,11 +182,13 @@ func main() { ctx, l1Reader, l1TransactionOpts, - sequencerAddress, + batchPosters, + batchPosterManagerAddress, *authorizevalidators, arbnode.GenerateRollupConfig(*prod, moduleRoot, ownerAddress, &chainConfig, chainConfigJson, loserEscrowAddress), nativeToken, maxDataSize, + *isUsingFeeToken, ) if err != nil { flag.Usage() diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index f956b4674e..c32d2e6c80 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ 
-42,7 +42,9 @@ import ( "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbnode/resourcemanager" + "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/arbutil" + blocksreexecutor "github.com/offchainlabs/nitro/blocks_reexecutor" "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/cmd/conf" "github.com/offchainlabs/nitro/cmd/genericconf" @@ -328,6 +330,8 @@ func mainImpl() int { var rollupAddrs chaininfo.RollupAddresses var l1Client *ethclient.Client + var l1Reader *headerreader.HeaderReader + var blobReader arbstate.BlobReader if nodeConfig.Node.ParentChainReader.Enable { confFetcher := func() *rpcclient.ClientConfig { return &liveNodeConfig.Get().ParentChain.Connection } rpcClient := rpcclient.NewRpcClient(confFetcher, nil) @@ -350,6 +354,22 @@ func mainImpl() int { if err != nil { log.Crit("error getting rollup addresses", "err", err) } + arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1Client) + l1Reader, err = headerreader.New(ctx, l1Client, func() *headerreader.Config { return &liveNodeConfig.Get().Node.ParentChainReader }, arbSys) + if err != nil { + log.Crit("failed to get L1 headerreader", "err", err) + } + if !l1Reader.IsParentChainArbitrum() && !nodeConfig.Node.Dangerous.DisableBlobReader { + if nodeConfig.ParentChain.BlobClient.BeaconUrl == "" { + flag.Usage() + log.Crit("a beacon chain RPC URL is required to read batches, but it was not configured (CLI argument: --parent-chain.blob-client.beacon-url [URL])") + } + blobClient, err := headerreader.NewBlobClient(nodeConfig.ParentChain.BlobClient, l1Client) + if err != nil { + log.Crit("failed to initialize blob client", "err", err) + } + blobReader = blobClient + } } if nodeConfig.Node.Staker.OnlyCreateWalletContract { @@ -357,12 +377,10 @@ func mainImpl() int { flag.Usage() log.Crit("--node.validator.only-create-wallet-contract requires --node.validator.use-smart-contract-wallet") } - arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1Client) - l1Reader, err := headerreader.New(ctx, l1Client, func() *headerreader.Config { return &liveNodeConfig.Get().Node.ParentChainReader }, arbSys) - if err != nil { - log.Crit("failed to get L1 headerreader", "error", err) + if l1Reader == nil { + flag.Usage() + log.Crit("--node.validator.only-create-wallet-contract conflicts with --node.dangerous.no-l1-listener") } - // Just create validator smart wallet if needed then exit deployInfo, err := chaininfo.GetRollupAddressesConfig(nodeConfig.Chain.ID, nodeConfig.Chain.Name, combinedL2ChainInfoFile, nodeConfig.Chain.InfoJson) if err != nil { @@ -387,7 +405,7 @@ func mainImpl() int { } var sameProcessValidationNodeEnabled bool - if nodeConfig.Node.BlockValidator.Enable && (nodeConfig.Node.BlockValidator.ValidationServer.URL == "self" || nodeConfig.Node.BlockValidator.ValidationServer.URL == "self-auth") { + if nodeConfig.Node.BlockValidator.Enable && (nodeConfig.Node.BlockValidator.ValidationServerConfigs[0].URL == "self" || nodeConfig.Node.BlockValidator.ValidationServerConfigs[0].URL == "self-auth") { sameProcessValidationNodeEnabled = true valnode.EnsureValidationExposedViaAuthRPC(&stackConf) } @@ -535,6 +553,7 @@ func mainImpl() int { dataSigner, fatalErrChan, big.NewInt(int64(nodeConfig.ParentChain.ID)), + blobReader, ) if err != nil { log.Error("failed to create node", "err", err) @@ -622,6 +641,11 @@ func mainImpl() int { // remove previous deferFuncs, StopAndWait closes database and blockchain. 
deferFuncs = []func(){func() { currentNode.StopAndWait() }} } + if nodeConfig.BlocksReExecutor.Enable && l2BlockChain != nil { + blocksReExecutor := blocksreexecutor.New(&nodeConfig.BlocksReExecutor, l2BlockChain, fatalErrChan) + blocksReExecutor.Start(ctx) + deferFuncs = append(deferFuncs, func() { blocksReExecutor.StopAndWait() }) + } sigint := make(chan os.Signal, 1) signal.Notify(sigint, os.Interrupt, syscall.SIGTERM) @@ -657,53 +681,55 @@ func mainImpl() int { } type NodeConfig struct { - Conf genericconf.ConfConfig `koanf:"conf" reload:"hot"` - Node arbnode.Config `koanf:"node" reload:"hot"` - Execution gethexec.Config `koanf:"execution" reload:"hot"` - Validation valnode.Config `koanf:"validation" reload:"hot"` - ParentChain conf.L1Config `koanf:"parent-chain" reload:"hot"` - Chain conf.L2Config `koanf:"chain"` - LogLevel int `koanf:"log-level" reload:"hot"` - LogType string `koanf:"log-type" reload:"hot"` - FileLogging genericconf.FileLoggingConfig `koanf:"file-logging" reload:"hot"` - Persistent conf.PersistentConfig `koanf:"persistent"` - HTTP genericconf.HTTPConfig `koanf:"http"` - WS genericconf.WSConfig `koanf:"ws"` - IPC genericconf.IPCConfig `koanf:"ipc"` - Auth genericconf.AuthRPCConfig `koanf:"auth"` - GraphQL genericconf.GraphQLConfig `koanf:"graphql"` - P2P genericconf.P2PConfig `koanf:"p2p"` - Metrics bool `koanf:"metrics"` - MetricsServer genericconf.MetricsServerConfig `koanf:"metrics-server"` - PProf bool `koanf:"pprof"` - PprofCfg genericconf.PProf `koanf:"pprof-cfg"` - Init conf.InitConfig `koanf:"init"` - Rpc genericconf.RpcConfig `koanf:"rpc"` + Conf genericconf.ConfConfig `koanf:"conf" reload:"hot"` + Node arbnode.Config `koanf:"node" reload:"hot"` + Execution gethexec.Config `koanf:"execution" reload:"hot"` + Validation valnode.Config `koanf:"validation" reload:"hot"` + ParentChain conf.ParentChainConfig `koanf:"parent-chain" reload:"hot"` + Chain conf.L2Config `koanf:"chain"` + LogLevel int `koanf:"log-level" reload:"hot"` + LogType string `koanf:"log-type" reload:"hot"` + FileLogging genericconf.FileLoggingConfig `koanf:"file-logging" reload:"hot"` + Persistent conf.PersistentConfig `koanf:"persistent"` + HTTP genericconf.HTTPConfig `koanf:"http"` + WS genericconf.WSConfig `koanf:"ws"` + IPC genericconf.IPCConfig `koanf:"ipc"` + Auth genericconf.AuthRPCConfig `koanf:"auth"` + GraphQL genericconf.GraphQLConfig `koanf:"graphql"` + P2P genericconf.P2PConfig `koanf:"p2p"` + Metrics bool `koanf:"metrics"` + MetricsServer genericconf.MetricsServerConfig `koanf:"metrics-server"` + PProf bool `koanf:"pprof"` + PprofCfg genericconf.PProf `koanf:"pprof-cfg"` + Init conf.InitConfig `koanf:"init"` + Rpc genericconf.RpcConfig `koanf:"rpc"` + BlocksReExecutor blocksreexecutor.Config `koanf:"blocks-reexecutor"` } var NodeConfigDefault = NodeConfig{ - Conf: genericconf.ConfConfigDefault, - Node: arbnode.ConfigDefault, - Execution: gethexec.ConfigDefault, - Validation: valnode.DefaultValidationConfig, - ParentChain: conf.L1ConfigDefault, - Chain: conf.L2ConfigDefault, - LogLevel: int(log.LvlInfo), - LogType: "plaintext", - FileLogging: genericconf.DefaultFileLoggingConfig, - Persistent: conf.PersistentConfigDefault, - HTTP: genericconf.HTTPConfigDefault, - WS: genericconf.WSConfigDefault, - IPC: genericconf.IPCConfigDefault, - Auth: genericconf.AuthRPCConfigDefault, - GraphQL: genericconf.GraphQLConfigDefault, - P2P: genericconf.P2PConfigDefault, - Metrics: false, - MetricsServer: genericconf.MetricsServerConfigDefault, - Init: conf.InitConfigDefault, - Rpc: 
genericconf.DefaultRpcConfig, - PProf: false, - PprofCfg: genericconf.PProfDefault, + Conf: genericconf.ConfConfigDefault, + Node: arbnode.ConfigDefault, + Execution: gethexec.ConfigDefault, + Validation: valnode.DefaultValidationConfig, + ParentChain: conf.L1ConfigDefault, + Chain: conf.L2ConfigDefault, + LogLevel: int(log.LvlInfo), + LogType: "plaintext", + FileLogging: genericconf.DefaultFileLoggingConfig, + Persistent: conf.PersistentConfigDefault, + HTTP: genericconf.HTTPConfigDefault, + WS: genericconf.WSConfigDefault, + IPC: genericconf.IPCConfigDefault, + Auth: genericconf.AuthRPCConfigDefault, + GraphQL: genericconf.GraphQLConfigDefault, + P2P: genericconf.P2PConfigDefault, + Metrics: false, + MetricsServer: genericconf.MetricsServerConfigDefault, + Init: conf.InitConfigDefault, + Rpc: genericconf.DefaultRpcConfig, + PProf: false, + PprofCfg: genericconf.PProfDefault, + BlocksReExecutor: blocksreexecutor.DefaultConfig, } func NodeConfigAddOptions(f *flag.FlagSet) { @@ -730,6 +756,7 @@ func NodeConfigAddOptions(f *flag.FlagSet) { conf.InitConfigAddOptions("init", f) genericconf.RpcConfigAddOptions("rpc", f) + blocksreexecutor.ConfigAddOptions("blocks-reexecutor", f) } func (c *NodeConfig) ResolveDirectoryNames() error { @@ -797,6 +824,9 @@ func (c *NodeConfig) Validate() error { if err := c.Execution.Validate(); err != nil { return err } + if err := c.BlocksReExecutor.Validate(); err != nil { + return err + } return c.Persistent.Validate() } diff --git a/cmd/replay/main.go b/cmd/replay/main.go index dd8a0fd1f7..7ab59fc513 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -143,6 +143,10 @@ func (r *BlobPreimageReader) GetBlobs( return blobs, nil } +func (r *BlobPreimageReader) Initialize(ctx context.Context) error { + return nil +} + // To generate: // key, _ := crypto.HexToECDSA("0000000000000000000000000000000000000000000000000000000000000001") // sig, _ := crypto.Sign(make([]byte, 32), key) @@ -206,7 +210,12 @@ func main() { if backend.GetPositionWithinMessage() > 0 { keysetValidationMode = arbstate.KeysetDontValidate } - inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, dasReader, &BlobPreimageReader{}, keysetValidationMode) + var daProviders []arbstate.DataAvailabilityProvider + if dasReader != nil { + daProviders = append(daProviders, arbstate.NewDAProviderDAS(dasReader)) + } + daProviders = append(daProviders, arbstate.NewDAProviderBlobReader(&BlobPreimageReader{})) + inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, daProviders, keysetValidationMode) ctx := context.Background() message, err := inboxMultiplexer.Pop(ctx) if err != nil { diff --git a/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go b/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go index 782ab3801b..e963c0e96c 100644 --- a/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go +++ b/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go @@ -16,6 +16,9 @@ type RedisCoordinator struct { // UpdatePriorities updates the priority list of sequencers func (rc *RedisCoordinator) UpdatePriorities(ctx context.Context, priorities []string) error { + if len(priorities) == 0 { + return rc.Client.Del(ctx, redisutil.PRIORITIES_KEY).Err() + } prioritiesString := strings.Join(priorities, ",") err := rc.Client.Set(ctx, redisutil.PRIORITIES_KEY, prioritiesString, 0).Err() if err != nil { diff --git a/contracts b/contracts index 9a6bfad236..7c46876077 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ 
-Subproject commit 9a6bfad2363322099d399698751551ff044c7a72 +Subproject commit 7c46876077c6353c7ebdf9cd364710d357fa3914 diff --git a/das/dasRpcServer.go b/das/dasRpcServer.go index 9fa39d1959..2f1fc1fd42 100644 --- a/das/dasRpcServer.go +++ b/das/dasRpcServer.go @@ -5,6 +5,7 @@ package das import ( "context" + "errors" "fmt" "net" "net/http" @@ -44,6 +45,9 @@ func StartDASRPCServer(ctx context.Context, addr string, portNum uint64, rpcServ } func StartDASRPCServerOnListener(ctx context.Context, listener net.Listener, rpcServerTimeouts genericconf.HTTPServerTimeoutConfig, daReader DataAvailabilityServiceReader, daWriter DataAvailabilityServiceWriter, daHealthChecker DataAvailabilityServiceHealthChecker) (*http.Server, error) { + if daWriter == nil { + return nil, errors.New("No writer backend was configured for DAS RPC server. Has the BLS signing key been set up (--data-availability.key.key-dir or --data-availability.key.priv-key options)?") + } rpcServer := rpc.NewServer() err := rpcServer.RegisterName("das", &DASRPCServer{ daReader: daReader, diff --git a/das/syncing_fallback_storage.go b/das/syncing_fallback_storage.go index 91f2e522a7..c79cd80400 100644 --- a/das/syncing_fallback_storage.go +++ b/das/syncing_fallback_storage.go @@ -53,7 +53,7 @@ func init() { } BatchDeliveredID = sequencerInboxABI.Events[sequencerBatchDeliveredEvent].ID sequencerBatchDataABI = sequencerInboxABI.Events[sequencerBatchDataEvent] - addSequencerL2BatchFromOriginCallABI = sequencerInboxABI.Methods["addSequencerL2BatchFromOrigin"] + addSequencerL2BatchFromOriginCallABI = sequencerInboxABI.Methods["addSequencerL2BatchFromOrigin0"] } type SyncToStorageConfig struct { diff --git a/deploy/deploy.go b/deploy/deploy.go index 59760e2c21..5e7755cae3 100644 --- a/deploy/deploy.go +++ b/deploy/deploy.go @@ -31,7 +31,7 @@ func andTxSucceeded(ctx context.Context, l1Reader *headerreader.HeaderReader, tx return nil } -func deployBridgeCreator(ctx context.Context, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts, maxDataSize *big.Int) (common.Address, error) { +func deployBridgeCreator(ctx context.Context, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts, maxDataSize *big.Int, isUsingFeeToken bool) (common.Address, error) { client := l1Reader.Client() /// deploy eth based templates @@ -46,7 +46,7 @@ func deployBridgeCreator(ctx context.Context, l1Reader *headerreader.HeaderReade if err != nil { return common.Address{}, fmt.Errorf("blob basefee reader deploy error: %w", err) } - seqInboxTemplate, tx, _, err := bridgegen.DeploySequencerInbox(auth, client, maxDataSize, reader4844) + seqInboxTemplate, tx, _, err := bridgegen.DeploySequencerInbox(auth, client, maxDataSize, reader4844, isUsingFeeToken) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { return common.Address{}, fmt.Errorf("sequencer inbox deploy error: %w", err) @@ -161,8 +161,8 @@ func deployChallengeFactory(ctx context.Context, l1Reader *headerreader.HeaderRe return ospEntryAddr, challengeManagerAddr, nil } -func deployRollupCreator(ctx context.Context, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts, maxDataSize *big.Int) (*rollupgen.RollupCreator, common.Address, common.Address, common.Address, error) { - bridgeCreator, err := deployBridgeCreator(ctx, l1Reader, auth, maxDataSize) +func deployRollupCreator(ctx context.Context, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts, maxDataSize *big.Int, isUsingFeeToken bool) (*rollupgen.RollupCreator, common.Address, common.Address, common.Address, error) { + 
bridgeCreator, err := deployBridgeCreator(ctx, l1Reader, auth, maxDataSize, isUsingFeeToken) if err != nil { return nil, common.Address{}, common.Address{}, common.Address{}, fmt.Errorf("bridge creator deploy error: %w", err) } @@ -234,12 +234,12 @@ func deployRollupCreator(ctx context.Context, l1Reader *headerreader.HeaderReade return rollupCreator, rollupCreatorAddress, validatorUtils, validatorWalletCreator, nil } -func DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReader, deployAuth *bind.TransactOpts, batchPoster common.Address, authorizeValidators uint64, config rollupgen.Config, nativeToken common.Address, maxDataSize *big.Int) (*chaininfo.RollupAddresses, error) { +func DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReader, deployAuth *bind.TransactOpts, batchPosters []common.Address, batchPosterManager common.Address, authorizeValidators uint64, config rollupgen.Config, nativeToken common.Address, maxDataSize *big.Int, isUsingFeeToken bool) (*chaininfo.RollupAddresses, error) { if config.WasmModuleRoot == (common.Hash{}) { return nil, errors.New("no machine specified") } - rollupCreator, _, validatorUtils, validatorWalletCreator, err := deployRollupCreator(ctx, parentChainReader, deployAuth, maxDataSize) + rollupCreator, _, validatorUtils, validatorWalletCreator, err := deployRollupCreator(ctx, parentChainReader, deployAuth, maxDataSize, isUsingFeeToken) if err != nil { return nil, fmt.Errorf("error deploying rollup creator: %w", err) } @@ -251,12 +251,13 @@ func DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReade deployParams := rollupgen.RollupCreatorRollupDeploymentParams{ Config: config, - BatchPoster: batchPoster, Validators: validatorAddrs, MaxDataSize: maxDataSize, NativeToken: nativeToken, DeployFactoriesToL2: false, MaxFeePerGasForRetryables: big.NewInt(0), // needed when utility factories are deployed + BatchPosters: batchPosters, + BatchPosterManager: batchPosterManager, } tx, err := rollupCreator.CreateRollup( diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index 003159589a..a662de3621 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -101,7 +101,7 @@ func (s *ExecutionEngine) Reorg(count arbutil.MessageIndex, newMessages []arbost resequencing := false defer func() { // if we are resequencing old messages - don't release the lock - // lock will be relesed by thread listening to resequenceChan + // lock will be released by thread listening to resequenceChan if !resequencing { s.createBlocksMutex.Unlock() } @@ -601,6 +601,15 @@ func (s *ExecutionEngine) digestMessageWithBlockMutex(num arbutil.MessageIndex, return nil } +func (s *ExecutionEngine) ArbOSVersionForMessageNumber(messageNum arbutil.MessageIndex) (uint64, error) { + block := s.bc.GetBlockByNumber(s.MessageIndexToBlockNumber(messageNum)) + if block == nil { + return 0, fmt.Errorf("couldn't find block for message number %d", messageNum) + } + extra := types.DeserializeHeaderExtraInformation(block.Header()) + return extra.ArbOSFormatVersion, nil +} + func (s *ExecutionEngine) Start(ctx_in context.Context) { s.StopWaiter.Start(ctx_in, s) s.LaunchThread(func(ctx context.Context) { diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go index 1ad73febe7..80c2939af6 100644 --- a/execution/gethexec/node.go +++ b/execution/gethexec/node.go @@ -50,6 +50,7 @@ type Config struct { RPC arbitrum.Config `koanf:"rpc"` TxLookupLimit uint64 
`koanf:"tx-lookup-limit"` Dangerous DangerousConfig `koanf:"dangerous"` + EnablePrefetchBlock bool `koanf:"enable-prefetch-block"` forwardingTarget string } @@ -84,6 +85,7 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet) { CachingConfigAddOptions(prefix+".caching", f) f.Uint64(prefix+".tx-lookup-limit", ConfigDefault.TxLookupLimit, "retain the ability to lookup transactions by hash for the past N blocks (0 = all blocks)") DangerousConfigAddOptions(prefix+".dangerous", f) + f.Bool(prefix+".enable-prefetch-block", ConfigDefault.EnablePrefetchBlock, "enable prefetching of blocks") } var ConfigDefault = Config{ @@ -98,6 +100,7 @@ var ConfigDefault = Config{ Caching: DefaultCachingConfig, Dangerous: DefaultDangerousConfig, Forwarder: DefaultNodeForwarderConfig, + EnablePrefetchBlock: true, } func ConfigDefaultNonSequencerTest() *Config { @@ -149,6 +152,9 @@ func CreateExecutionNode( ) (*ExecutionNode, error) { config := configFetcher() execEngine, err := NewExecutionEngine(l2BlockChain) + if config.EnablePrefetchBlock { + execEngine.EnablePrefetchBlock() + } if err != nil { return nil, err } @@ -332,6 +338,9 @@ func (n *ExecutionNode) SequenceDelayedMessage(message *arbostypes.L1IncomingMes func (n *ExecutionNode) ResultAtPos(pos arbutil.MessageIndex) (*execution.MessageResult, error) { return n.ExecEngine.ResultAtPos(pos) } +func (n *ExecutionNode) ArbOSVersionForMessageNumber(messageNum arbutil.MessageIndex) (uint64, error) { + return n.ExecEngine.ArbOSVersionForMessageNumber(messageNum) +} func (n *ExecutionNode) RecordBlockCreation( ctx context.Context, diff --git a/execution/gethexec/sequencer.go b/execution/gethexec/sequencer.go index 9bc6f4378d..5db38cbb4d 100644 --- a/execution/gethexec/sequencer.go +++ b/execution/gethexec/sequencer.go @@ -66,7 +66,6 @@ type SequencerConfig struct { MaxTxDataSize int `koanf:"max-tx-data-size" reload:"hot"` NonceFailureCacheSize int `koanf:"nonce-failure-cache-size" reload:"hot"` NonceFailureCacheExpiry time.Duration `koanf:"nonce-failure-cache-expiry" reload:"hot"` - EnablePrefetchBlock bool `koanf:"enable-prefetch-block"` } func (c *SequencerConfig) Validate() error { @@ -98,7 +97,6 @@ var DefaultSequencerConfig = SequencerConfig{ MaxTxDataSize: 95000, NonceFailureCacheSize: 1024, NonceFailureCacheExpiry: time.Second, - EnablePrefetchBlock: false, } var TestSequencerConfig = SequencerConfig{ @@ -114,7 +112,6 @@ var TestSequencerConfig = SequencerConfig{ MaxTxDataSize: 95000, NonceFailureCacheSize: 1024, NonceFailureCacheExpiry: time.Second, - EnablePrefetchBlock: false, } func SequencerConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -130,7 +127,6 @@ func SequencerConfigAddOptions(prefix string, f *flag.FlagSet) { f.Int(prefix+".max-tx-data-size", DefaultSequencerConfig.MaxTxDataSize, "maximum transaction size the sequencer will accept") f.Int(prefix+".nonce-failure-cache-size", DefaultSequencerConfig.NonceFailureCacheSize, "number of transactions with too high of a nonce to keep in memory while waiting for their predecessor") f.Duration(prefix+".nonce-failure-cache-expiry", DefaultSequencerConfig.NonceFailureCacheExpiry, "maximum amount of time to wait for a predecessor before rejecting a tx with nonce too high") - f.Bool(prefix+".enable-prefetch-block", DefaultSequencerConfig.EnablePrefetchBlock, "enable prefetching of blocks") } type txQueueItem struct { @@ -328,9 +324,6 @@ func NewSequencer(execEngine *ExecutionEngine, l1Reader *headerreader.HeaderRead } s.Pause() execEngine.EnableReorgSequencing() - if config.EnablePrefetchBlock { - 
execEngine.EnablePrefetchBlock() - } return s, nil } diff --git a/execution/interface.go b/execution/interface.go index 6761011a77..2cbbf550ad 100644 --- a/execution/interface.go +++ b/execution/interface.go @@ -69,6 +69,8 @@ type FullExecutionClient interface { // TODO: only used to get safe/finalized block numbers MessageIndexToBlockNumber(messageNum arbutil.MessageIndex) uint64 + + ArbOSVersionForMessageNumber(messageNum arbutil.MessageIndex) (uint64, error) } // not implemented in execution, used as input diff --git a/go-ethereum b/go-ethereum index 36cc857932..657dcf6626 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 36cc85793228ad142923402b969fd489c02db2a5 +Subproject commit 657dcf66263e940e86f9e89325c5100899d5ab58 diff --git a/nitro-testnode b/nitro-testnode index aee6ceff9c..3922df9caf 160000 --- a/nitro-testnode +++ b/nitro-testnode @@ -1 +1 @@ -Subproject commit aee6ceff9c9d3fb2749da55a7d7842f23d1bfc8e +Subproject commit 3922df9caf7a65dd4168b8158c1244c5fe88780e diff --git a/precompiles/precompile.go b/precompiles/precompile.go index 9b9a31de37..0627ef4c7b 100644 --- a/precompiles/precompile.go +++ b/precompiles/precompile.go @@ -4,10 +4,12 @@ package precompiles import ( + "bytes" "errors" "fmt" "math/big" "reflect" + "sort" "strconv" "strings" "unicode" @@ -783,6 +785,9 @@ func (p *Precompile) Get4ByteMethodSignatures() [][4]byte { for sig := range p.methods { ret = append(ret, sig) } + sort.Slice(ret, func(i, j int) bool { + return bytes.Compare(ret[i][:], ret[j][:]) < 0 + }) return ret } diff --git a/scripts/startup-testnode.bash b/scripts/startup-testnode.bash new file mode 100755 index 0000000000..701e7ff59a --- /dev/null +++ b/scripts/startup-testnode.bash @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +# The script starts up the test node (with timeout 1 minute), to make sure the +# nitro-testnode script isn't out of sync with nitro's flags. +# This is used in CI, basically as a smoke test. + +timeout 60 ./nitro-testnode/test-node.bash --init --dev || exit_status=$? + +if [ -n "$exit_status" ] && [ $exit_status -ne 0 ] && [ $exit_status -ne 124 ]; then + echo "Startup failed." + exit $exit_status +fi + +echo "Startup succeeded."
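[Editor's note -- illustrative aside, not part of the patch.] For context on the precompiles/precompile.go hunk above: Get4ByteMethodSignatures collects the keys of a Go map, and map iteration order is deliberately randomized by the runtime, so without the added sort.Slice the selectors could come back in a different order on every call. A small self-contained demo of the same pattern follows; the sample selectors and method names are stand-in data only.

package main

import (
	"bytes"
	"fmt"
	"sort"
)

func main() {
	// A map keyed by 4-byte selectors, similar in shape to Precompile.methods.
	methods := map[[4]byte]string{
		{0xa9, 0x05, 0x9c, 0xbb}: "transfer",
		{0x09, 0x5e, 0xa7, 0xb3}: "approve",
		{0x70, 0xa0, 0x82, 0x31}: "balanceOf",
	}

	// Collecting the keys alone yields a nondeterministic order.
	ret := make([][4]byte, 0, len(methods))
	for sig := range methods {
		ret = append(ret, sig)
	}

	// Sorting by byte comparison makes the result stable across runs and processes.
	sort.Slice(ret, func(i, j int) bool {
		return bytes.Compare(ret[i][:], ret[j][:]) < 0
	})
	for _, sig := range ret {
		fmt.Printf("0x%x %s\n", sig[:], methods[sig])
	}
}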
diff --git a/staker/block_validator.go b/staker/block_validator.go index 03d216654a..56cd5307d8 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -5,6 +5,7 @@ package staker import ( "context" + "encoding/json" "errors" "fmt" "runtime" @@ -82,16 +83,18 @@ type BlockValidator struct { } type BlockValidatorConfig struct { - Enable bool `koanf:"enable"` - ValidationServer rpcclient.ClientConfig `koanf:"validation-server" reload:"hot"` - ValidationPoll time.Duration `koanf:"validation-poll" reload:"hot"` - PrerecordedBlocks uint64 `koanf:"prerecorded-blocks" reload:"hot"` - ForwardBlocks uint64 `koanf:"forward-blocks" reload:"hot"` - CurrentModuleRoot string `koanf:"current-module-root"` // TODO(magic) requires reinitialization on hot reload - PendingUpgradeModuleRoot string `koanf:"pending-upgrade-module-root"` // TODO(magic) requires StatelessBlockValidator recreation on hot reload - FailureIsFatal bool `koanf:"failure-is-fatal" reload:"hot"` - Dangerous BlockValidatorDangerousConfig `koanf:"dangerous"` - MemoryFreeLimit string `koanf:"memory-free-limit" reload:"hot"` + Enable bool `koanf:"enable"` + ValidationServer rpcclient.ClientConfig `koanf:"validation-server" reload:"hot"` + ValidationServerConfigs []rpcclient.ClientConfig `koanf:"validation-server-configs" reload:"hot"` + ValidationPoll time.Duration `koanf:"validation-poll" reload:"hot"` + PrerecordedBlocks uint64 `koanf:"prerecorded-blocks" reload:"hot"` + ForwardBlocks uint64 `koanf:"forward-blocks" reload:"hot"` + CurrentModuleRoot string `koanf:"current-module-root"` // TODO(magic) requires reinitialization on hot reload + PendingUpgradeModuleRoot string `koanf:"pending-upgrade-module-root"` // TODO(magic) requires StatelessBlockValidator recreation on hot reload + FailureIsFatal bool `koanf:"failure-is-fatal" reload:"hot"` + Dangerous BlockValidatorDangerousConfig `koanf:"dangerous"` + MemoryFreeLimit string `koanf:"memory-free-limit" reload:"hot"` + ValidationServerConfigsList string `koanf:"validation-server-configs-list" reload:"hot"` memoryFreeLimit int } @@ -106,7 +109,26 @@ func (c *BlockValidatorConfig) Validate() error { } c.memoryFreeLimit = limit } - return c.ValidationServer.Validate() + if c.ValidationServerConfigs == nil { + if c.ValidationServerConfigsList == "default" { + c.ValidationServerConfigs = []rpcclient.ClientConfig{c.ValidationServer} + } else { + var validationServersConfigs []rpcclient.ClientConfig + if err := json.Unmarshal([]byte(c.ValidationServerConfigsList), &validationServersConfigs); err != nil { + return fmt.Errorf("failed to parse block-validator validation-server-configs-list string: %w", err) + } + c.ValidationServerConfigs = validationServersConfigs + } + } + if len(c.ValidationServerConfigs) == 0 { + return fmt.Errorf("block-validator validation-server-configs is empty, need at least one validation server config") + } + for _, serverConfig := range c.ValidationServerConfigs { + if err := serverConfig.Validate(); err != nil { + return fmt.Errorf("failed to validate one of the block-validator validation-server-configs. 
url: %s, err: %w", serverConfig.URL, err) + } + } + return nil } type BlockValidatorDangerousConfig struct { @@ -118,6 +140,7 @@ type BlockValidatorConfigFetcher func() *BlockValidatorConfig func BlockValidatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultBlockValidatorConfig.Enable, "enable block-by-block validation") rpcclient.RPCClientAddOptions(prefix+".validation-server", f, &DefaultBlockValidatorConfig.ValidationServer) + f.String(prefix+".validation-server-configs-list", DefaultBlockValidatorConfig.ValidationServerConfigsList, "array of validation rpc configs given as a json string. time duration should be supplied in number indicating nanoseconds") f.Duration(prefix+".validation-poll", DefaultBlockValidatorConfig.ValidationPoll, "poll time to check validations") f.Uint64(prefix+".forward-blocks", DefaultBlockValidatorConfig.ForwardBlocks, "prepare entries for up to that many blocks ahead of validation (small footprint)") f.Uint64(prefix+".prerecorded-blocks", DefaultBlockValidatorConfig.PrerecordedBlocks, "record that many blocks ahead of validation (larger footprint)") @@ -133,21 +156,23 @@ func BlockValidatorDangerousConfigAddOptions(prefix string, f *flag.FlagSet) { } var DefaultBlockValidatorConfig = BlockValidatorConfig{ - Enable: false, - ValidationServer: rpcclient.DefaultClientConfig, - ValidationPoll: time.Second, - ForwardBlocks: 1024, - PrerecordedBlocks: uint64(2 * runtime.NumCPU()), - CurrentModuleRoot: "current", - PendingUpgradeModuleRoot: "latest", - FailureIsFatal: true, - Dangerous: DefaultBlockValidatorDangerousConfig, - MemoryFreeLimit: "default", + Enable: false, + ValidationServerConfigsList: "default", + ValidationServer: rpcclient.DefaultClientConfig, + ValidationPoll: time.Second, + ForwardBlocks: 1024, + PrerecordedBlocks: uint64(2 * runtime.NumCPU()), + CurrentModuleRoot: "current", + PendingUpgradeModuleRoot: "latest", + FailureIsFatal: true, + Dangerous: DefaultBlockValidatorDangerousConfig, + MemoryFreeLimit: "default", } var TestBlockValidatorConfig = BlockValidatorConfig{ Enable: false, ValidationServer: rpcclient.TestClientConfig, + ValidationServerConfigs: []rpcclient.ClientConfig{rpcclient.TestClientConfig}, ValidationPoll: 100 * time.Millisecond, ForwardBlocks: 128, PrerecordedBlocks: uint64(2 * runtime.NumCPU()), @@ -552,15 +577,21 @@ func (v *BlockValidator) iterativeValidationEntryCreator(ctx context.Context, ig return v.config().ValidationPoll } +func (v *BlockValidator) isMemoryLimitExceeded() bool { + if v.MemoryFreeLimitChecker == nil { + return false + } + exceeded, err := v.MemoryFreeLimitChecker.IsLimitExceeded() + if err != nil { + log.Error("error checking if free-memory limit exceeded using MemoryFreeLimitChecker", "err", err) + } + return exceeded +} + func (v *BlockValidator) sendNextRecordRequests(ctx context.Context) (bool, error) { - if v.MemoryFreeLimitChecker != nil { - exceeded, err := v.MemoryFreeLimitChecker.IsLimitExceeded() - if err != nil { - log.Error("error checking if free-memory limit exceeded using MemoryFreeLimitChecker", "err", err) - } - if exceeded { - return false, nil - } + if v.isMemoryLimitExceeded() { + log.Warn("sendNextRecordRequests: aborting due to running low on memory") + return false, nil } v.reorgMutex.RLock() pos := v.recordSent() @@ -591,14 +622,9 @@ func (v *BlockValidator) sendNextRecordRequests(ctx context.Context) (bool, erro return true, nil } for pos <= recordUntil { - if v.MemoryFreeLimitChecker != nil { - exceeded, err := 
v.MemoryFreeLimitChecker.IsLimitExceeded() - if err != nil { - log.Error("error checking if free-memory limit exceeded using MemoryFreeLimitChecker", "err", err) - } - if exceeded { - return false, nil - } + if v.isMemoryLimitExceeded() { + log.Warn("sendNextRecordRequests: aborting due to running low on memory") + return false, nil } validationStatus, found := v.validations.Load(pos) if !found { @@ -667,14 +693,12 @@ func (v *BlockValidator) advanceValidations(ctx context.Context) (*arbutil.Messa defer v.reorgMutex.RUnlock() wasmRoots := v.GetModuleRootsToValidate() - room := 100 // even if there is more room then that it's fine - for _, spawner := range v.validationSpawners { + rooms := make([]int, len(v.validationSpawners)) + currentSpawnerIndex := 0 + for i, spawner := range v.validationSpawners { here := spawner.Room() / len(wasmRoots) - if here <= 0 { - room = 0 - } - if here < room { - room = here + if here > 0 { + rooms[i] = here } } pos := v.validated() - 1 // to reverse the first +1 in the loop @@ -745,18 +769,19 @@ validationsLoop: log.Trace("result validated", "count", v.validated(), "blockHash", v.lastValidGS.BlockHash) continue } - if room == 0 { + for currentSpawnerIndex < len(rooms) { + if rooms[currentSpawnerIndex] > 0 { + break + } + currentSpawnerIndex++ + } + if currentSpawnerIndex == len(rooms) { log.Trace("advanceValidations: no more room", "pos", pos) return nil, nil } - if v.MemoryFreeLimitChecker != nil { - exceeded, err := v.MemoryFreeLimitChecker.IsLimitExceeded() - if err != nil { - log.Error("error checking if free-memory limit exceeded using MemoryFreeLimitChecker", "err", err) - } - if exceeded { - return nil, nil - } + if v.isMemoryLimitExceeded() { + log.Warn("advanceValidations: aborting due to running low on memory") + return nil, nil } if currentStatus == Prepared { input, err := validationStatus.Entry.ToInput() @@ -772,11 +797,9 @@ validationsLoop: defer validatorPendingValidationsGauge.Dec(1) var runs []validator.ValidationRun for _, moduleRoot := range wasmRoots { - for i, spawner := range v.validationSpawners { - run := spawner.Launch(input, moduleRoot) - log.Trace("advanceValidations: launched", "pos", validationStatus.Entry.Pos, "moduleRoot", moduleRoot, "spawner", i) - runs = append(runs, run) - } + run := v.validationSpawners[currentSpawnerIndex].Launch(input, moduleRoot) + log.Trace("advanceValidations: launched", "pos", validationStatus.Entry.Pos, "moduleRoot", moduleRoot, "spawner", currentSpawnerIndex) + runs = append(runs, run) } validationCtx, cancel := context.WithCancel(ctx) validationStatus.Runs = runs @@ -798,7 +821,10 @@ validationsLoop: } nonBlockingTrigger(v.progressValidationsChan) }) - room-- + rooms[currentSpawnerIndex]-- + if rooms[currentSpawnerIndex] == 0 { + currentSpawnerIndex++ + } } } } diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index 13b16e42cd..fcd1f247c2 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -226,14 +226,18 @@ func NewStatelessBlockValidator( config func() *BlockValidatorConfig, stack *node.Node, ) (*StatelessBlockValidator, error) { - valConfFetcher := func() *rpcclient.ClientConfig { return &config().ValidationServer } - valClient := server_api.NewValidationClient(valConfFetcher, stack) + validationSpawners := make([]validator.ValidationSpawner, len(config().ValidationServerConfigs)) + for i, serverConfig := range config().ValidationServerConfigs { + valConfFetcher := func() *rpcclient.ClientConfig { return &serverConfig 
} + validationSpawners[i] = server_api.NewValidationClient(valConfFetcher, stack) + } + valConfFetcher := func() *rpcclient.ClientConfig { return &config().ValidationServerConfigs[0] } execClient := server_api.NewExecutionClient(valConfFetcher, stack) validator := &StatelessBlockValidator{ config: config(), execSpawner: execClient, recorder: recorder, - validationSpawners: []validator.ValidationSpawner{valClient}, + validationSpawners: validationSpawners, inboxReader: inboxReader, inboxTracker: inbox, streamer: streamer, @@ -306,7 +310,10 @@ func (v *StatelessBlockValidator) ValidationEntryRecord(ctx context.Context, e * e.Preimages[arbutil.EthVersionedHashPreimageType] = make(map[common.Hash][]byte) } for i, blob := range blobs { - e.Preimages[arbutil.EthVersionedHashPreimageType][versionedHashes[i]] = blob[:] + // Prevent aliasing `blob` when slicing it, as for range loops overwrite the same variable + // Won't be necessary after Go 1.22 with https://go.dev/blog/loopvar-preview + b := blob + e.Preimages[arbutil.EthVersionedHashPreimageType][versionedHashes[i]] = b[:] } } if arbstate.IsDASMessageHeaderByte(batch.Data[40]) { diff --git a/system_tests/batch_poster_test.go b/system_tests/batch_poster_test.go index cacbe3cee4..68dea4167f 100644 --- a/system_tests/batch_poster_test.go +++ b/system_tests/batch_poster_test.go @@ -166,6 +166,7 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { L1Reader: builder.L2.ConsensusNode.L1Reader, Inbox: builder.L2.ConsensusNode.InboxTracker, Streamer: builder.L2.ConsensusNode.TxStreamer, + VersionGetter: builder.L2.ExecNode, SyncMonitor: builder.L2.ConsensusNode.SyncMonitor, Config: func() *arbnode.BatchPosterConfig { return &batchPosterConfig }, DeployInfo: builder.L2.ConsensusNode.DeployInfo, diff --git a/system_tests/blocks_reexecutor_test.go b/system_tests/blocks_reexecutor_test.go new file mode 100644 index 0000000000..c2941ddcc4 --- /dev/null +++ b/system_tests/blocks_reexecutor_test.go @@ -0,0 +1,87 @@ +package arbtest + +import ( + "context" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/params" + "github.com/offchainlabs/nitro/arbnode" + blocksreexecutor "github.com/offchainlabs/nitro/blocks_reexecutor" + "github.com/offchainlabs/nitro/execution/gethexec" +) + +func TestBlocksReExecutorModes(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + execConfig := gethexec.ConfigDefaultTest() + Require(t, execConfig.Validate()) + l2info, stack, chainDb, arbDb, blockchain := createL2BlockChain(t, nil, t.TempDir(), params.ArbitrumDevTestChainConfig(), &execConfig.Caching) + + execConfigFetcher := func() *gethexec.Config { return execConfig } + execNode, err := gethexec.CreateExecutionNode(ctx, stack, chainDb, blockchain, nil, execConfigFetcher) + Require(t, err) + + parentChainID := big.NewInt(1234) + feedErrChan := make(chan error, 10) + node, err := arbnode.CreateNode(ctx, stack, execNode, arbDb, NewFetcherFromConfig(arbnode.ConfigDefaultL2Test()), blockchain.Config(), nil, nil, nil, nil, nil, feedErrChan, parentChainID, nil) + Require(t, err) + err = node.TxStreamer.AddFakeInitMessage() + Require(t, err) + Require(t, node.Start(ctx)) + client := ClientForStack(t, stack) + + l2info.GenerateAccount("User2") + for i := 0; i < 100; i++ { + tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, common.Big1, nil) + err := client.SendTransaction(ctx, tx) + Require(t, err) + receipt, err := EnsureTxSucceeded(ctx, client, tx) + Require(t, err) + 
if have, want := receipt.BlockNumber.Uint64(), uint64(i)+1; have != want { + Fatal(t, "internal test error - tx got included in unexpected block number, have:", have, "want:", want) + } + } + + success := make(chan struct{}) + + // Reexecute blocks at mode full + go func() { + executorFull := blocksreexecutor.New(&blocksreexecutor.TestConfig, blockchain, feedErrChan) + executorFull.StopWaiter.Start(ctx, executorFull) + executorFull.Impl(ctx) + executorFull.StopAndWait() + success <- struct{}{} + }() + select { + case err := <-feedErrChan: + t.Errorf("error occurred: %v", err) + if node != nil { + node.StopAndWait() + } + t.FailNow() + case <-success: + } + + // Reexecute blocks at mode random + go func() { + c := &blocksreexecutor.TestConfig + c.Mode = "random" + executorRandom := blocksreexecutor.New(c, blockchain, feedErrChan) + executorRandom.StopWaiter.Start(ctx, executorRandom) + executorRandom.Impl(ctx) + executorRandom.StopAndWait() + success <- struct{}{} + }() + select { + case err := <-feedErrChan: + t.Errorf("error occurred: %v", err) + if node != nil { + node.StopAndWait() + } + t.FailNow() + case <-success: + } +} diff --git a/system_tests/common_test.go b/system_tests/common_test.go index be782c72fd..0dda408aaa 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -541,15 +541,15 @@ func StaticFetcherFrom[T any](t *testing.T, config *T) func() *T { } func configByValidationNode(t *testing.T, clientConfig *arbnode.Config, valStack *node.Node) { - clientConfig.BlockValidator.ValidationServer.URL = valStack.WSEndpoint() - clientConfig.BlockValidator.ValidationServer.JWTSecret = "" + clientConfig.BlockValidator.ValidationServerConfigs[0].URL = valStack.WSEndpoint() + clientConfig.BlockValidator.ValidationServerConfigs[0].JWTSecret = "" } func AddDefaultValNode(t *testing.T, ctx context.Context, nodeConfig *arbnode.Config, useJit bool) { if !nodeConfig.ValidatorRequired() { return } - if nodeConfig.BlockValidator.ValidationServer.URL != "" { + if nodeConfig.BlockValidator.ValidationServerConfigs[0].URL != "" { return } conf := valnode.TestValidationConfig @@ -666,11 +666,13 @@ func DeployOnTestL1( ctx, l1Reader, &l1TransactionOpts, - l1info.GetAddress("Sequencer"), + []common.Address{l1info.GetAddress("Sequencer")}, + l1info.GetAddress("RollupOwner"), 0, arbnode.GenerateRollupConfig(false, locator.LatestWasmModuleRoot(), l1info.GetAddress("RollupOwner"), chainConfig, serializedChainConfig, common.Address{}), nativeToken, maxDataSize, + false, ) Require(t, err) l1info.SetContract("Bridge", addresses.Bridge) @@ -789,7 +791,7 @@ func createTestNodeWithL1( Require(t, err) currentNode, err = arbnode.CreateNode( ctx, l2stack, execNode, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client, - addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, dataSigner, fatalErrChan, big.NewInt(1337), + addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, dataSigner, fatalErrChan, big.NewInt(1337), nil, ) Require(t, err) @@ -825,7 +827,7 @@ func createTestNode( execNode, err := gethexec.CreateExecutionNode(ctx, stack, chainDb, blockchain, nil, execConfigFetcher) Require(t, err) - currentNode, err := arbnode.CreateNode(ctx, stack, execNode, arbDb, NewFetcherFromConfig(nodeConfig), blockchain.Config(), nil, nil, nil, nil, nil, feedErrChan, big.NewInt(1337)) + currentNode, err := arbnode.CreateNode(ctx, stack, execNode, arbDb, NewFetcherFromConfig(nodeConfig), blockchain.Config(), nil, nil, nil, nil, nil, feedErrChan, big.NewInt(1337), nil) Require(t, err) // Give 
the node an init message @@ -925,11 +927,12 @@ func Create2ndNodeWithConfig( AddDefaultValNode(t, ctx, nodeConfig, true) Require(t, execConfig.Validate()) + Require(t, nodeConfig.Validate()) configFetcher := func() *gethexec.Config { return execConfig } currentExec, err := gethexec.CreateExecutionNode(ctx, l2stack, l2chainDb, l2blockchain, l1client, configFetcher) Require(t, err) - currentNode, err := arbnode.CreateNode(ctx, l2stack, currentExec, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client, first.DeployInfo, &txOpts, &txOpts, dataSigner, feedErrChan, big.NewInt(1337)) + currentNode, err := arbnode.CreateNode(ctx, l2stack, currentExec, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client, first.DeployInfo, &txOpts, &txOpts, dataSigner, feedErrChan, big.NewInt(1337), nil) Require(t, err) err = currentNode.Start(ctx) diff --git a/system_tests/das_test.go b/system_tests/das_test.go index 96de52e197..8edd91e1ec 100644 --- a/system_tests/das_test.go +++ b/system_tests/das_test.go @@ -141,7 +141,7 @@ func TestDASRekey(t *testing.T) { l1NodeConfigA.DataAvailability.ParentChainNodeURL = "none" execA, err := gethexec.CreateExecutionNode(ctx, l2stackA, l2chainDb, l2blockchain, l1client, gethexec.ConfigDefaultTest) Require(t, err) - nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, nil, feedErrChan, parentChainID) + nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, nil, feedErrChan, parentChainID, nil) Require(t, err) Require(t, nodeA.Start(ctx)) l2clientA := ClientForStack(t, l2stackA) @@ -188,7 +188,7 @@ func TestDASRekey(t *testing.T) { Require(t, err) l1NodeConfigA.DataAvailability.RPCAggregator = aggConfigForBackend(t, backendConfigB) - nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, nil, feedErrChan, parentChainID) + nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, nil, feedErrChan, parentChainID, nil) Require(t, err) Require(t, nodeA.Start(ctx)) l2clientA := ClientForStack(t, l2stackA) @@ -321,7 +321,7 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { sequencerTxOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) sequencerTxOptsPtr := &sequencerTxOpts - nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, dataSigner, feedErrChan, big.NewInt(1337)) + nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, dataSigner, feedErrChan, big.NewInt(1337), nil) Require(t, err) Require(t, nodeA.Start(ctx)) l2clientA := ClientForStack(t, l2stackA) diff --git a/system_tests/debugapi_test.go b/system_tests/debugapi_test.go index 4568e2809a..52a6bb25c4 100644 --- a/system_tests/debugapi_test.go +++ b/system_tests/debugapi_test.go @@ -2,12 +2,17 @@ package arbtest import ( "context" + "github.com/ethereum/go-ethereum/eth/tracers" "testing" + "encoding/json" 
+ "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/rpc" + "github.com/offchainlabs/nitro/solgen/go/precompilesgen" ) func TestDebugAPI(t *testing.T) { @@ -35,4 +40,19 @@ func TestDebugAPI(t *testing.T) { err = l2rpc.CallContext(ctx, &dumpIt, "debug_accountRange", rpc.PendingBlockNumber, hexutil.Bytes{}, 10, true, true, false) Require(t, err) + arbSys, err := precompilesgen.NewArbSys(types.ArbSysAddress, builder.L2.Client) + Require(t, err) + auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) + tx, err := arbSys.WithdrawEth(&auth, common.Address{}) + Require(t, err) + receipt, err := builder.L2.EnsureTxSucceeded(tx) + Require(t, err) + if len(receipt.Logs) != 1 { + Fatal(t, "Unexpected number of logs", len(receipt.Logs)) + } + + var result json.RawMessage + flatCallTracer := "flatCallTracer" + err = l2rpc.CallContext(ctx, &result, "debug_traceTransaction", tx.Hash(), &tracers.TraceConfig{Tracer: &flatCallTracer}) + Require(t, err) } diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index b8f891e3e7..29b1252de4 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -184,6 +184,7 @@ func makeBatch(t *testing.T, l2Node *arbnode.Node, l2Info *BlockchainTestInfo, b } func confirmLatestBlock(ctx context.Context, t *testing.T, l1Info *BlockchainTestInfo, backend arbutil.L1Interface) { + t.Helper() // With SimulatedBeacon running in on-demand block production mode, the // finalized block is considered to be be the nearest multiple of 32 less // than or equal to the block number. 
@@ -205,10 +206,10 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha _, err = EnsureTxSucceeded(ctx, l1Client, tx) Require(t, err) timeBounds := mocksgen.ISequencerInboxMaxTimeVariation{ - DelayBlocks: 10000, - FutureBlocks: 10000, - DelaySeconds: 10000, - FutureSeconds: 10000, + DelayBlocks: big.NewInt(10000), + FutureBlocks: big.NewInt(10000), + DelaySeconds: big.NewInt(10000), + FutureSeconds: big.NewInt(10000), } seqInboxAddr, tx, seqInbox, err := mocksgen.DeploySequencerInboxStub( &txOpts, @@ -218,6 +219,7 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha timeBounds, big.NewInt(117964), reader4844, + false, ) Require(t, err) _, err = EnsureTxSucceeded(ctx, l1Client, tx) @@ -285,7 +287,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall asserterExec, err := gethexec.CreateExecutionNode(ctx, asserterL2Stack, asserterL2ChainDb, asserterL2Blockchain, l1Backend, gethexec.ConfigDefaultTest) Require(t, err) parentChainID := big.NewInt(1337) - asserterL2, err := arbnode.CreateNode(ctx, asserterL2Stack, asserterExec, asserterL2ArbDb, NewFetcherFromConfig(conf), chainConfig, l1Backend, asserterRollupAddresses, nil, nil, nil, fatalErrChan, parentChainID) + asserterL2, err := arbnode.CreateNode(ctx, asserterL2Stack, asserterExec, asserterL2ArbDb, NewFetcherFromConfig(conf), chainConfig, l1Backend, asserterRollupAddresses, nil, nil, nil, fatalErrChan, parentChainID, nil) Require(t, err) err = asserterL2.Start(ctx) Require(t, err) @@ -296,7 +298,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall challengerRollupAddresses.SequencerInbox = challengerSeqInboxAddr challengerExec, err := gethexec.CreateExecutionNode(ctx, challengerL2Stack, challengerL2ChainDb, challengerL2Blockchain, l1Backend, gethexec.ConfigDefaultTest) Require(t, err) - challengerL2, err := arbnode.CreateNode(ctx, challengerL2Stack, challengerExec, challengerL2ArbDb, NewFetcherFromConfig(conf), chainConfig, l1Backend, &challengerRollupAddresses, nil, nil, nil, fatalErrChan, parentChainID) + challengerL2, err := arbnode.CreateNode(ctx, challengerL2Stack, challengerExec, challengerL2ArbDb, NewFetcherFromConfig(conf), chainConfig, l1Backend, &challengerRollupAddresses, nil, nil, nil, fatalErrChan, parentChainID, nil) Require(t, err) err = challengerL2.Start(ctx) Require(t, err) diff --git a/system_tests/meaningless_reorg_test.go b/system_tests/meaningless_reorg_test.go index e1715dc635..11b68b558b 100644 --- a/system_tests/meaningless_reorg_test.go +++ b/system_tests/meaningless_reorg_test.go @@ -27,7 +27,7 @@ func TestMeaninglessBatchReorg(t *testing.T) { Require(t, err) seqOpts := builder.L1Info.GetDefaultTransactOpts("Sequencer", ctx) - tx, err := seqInbox.AddSequencerL2BatchFromOrigin(&seqOpts, big.NewInt(1), nil, big.NewInt(1), common.Address{}) + tx, err := seqInbox.AddSequencerL2BatchFromOrigin0(&seqOpts, big.NewInt(1), nil, big.NewInt(1), common.Address{}, common.Big0, common.Big0) Require(t, err) batchReceipt, err := builder.L1.EnsureTxSucceeded(tx) Require(t, err) @@ -69,7 +69,7 @@ func TestMeaninglessBatchReorg(t *testing.T) { // Produce a new l1Block so that the batch ends up in a different l1Block than before builder.L1.TransferBalance(t, "User", "User", common.Big1, builder.L1Info) - tx, err = seqInbox.AddSequencerL2BatchFromOrigin(&seqOpts, big.NewInt(1), nil, big.NewInt(1), common.Address{}) + tx, err = seqInbox.AddSequencerL2BatchFromOrigin0(&seqOpts, big.NewInt(1), nil, big.NewInt(1), 
common.Address{}, common.Big0, common.Big0) Require(t, err) newBatchReceipt, err := builder.L1.EnsureTxSucceeded(tx) Require(t, err) diff --git a/system_tests/recreatestate_rpc_test.go b/system_tests/recreatestate_rpc_test.go index f5bdca0970..1973587ecb 100644 --- a/system_tests/recreatestate_rpc_test.go +++ b/system_tests/recreatestate_rpc_test.go @@ -335,7 +335,7 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig Require(t, err) parentChainID := big.NewInt(1337) - node, err := arbnode.CreateNode(ctx1, stack, execNode, arbDb, NewFetcherFromConfig(arbnode.ConfigDefaultL2Test()), blockchain.Config(), nil, nil, nil, nil, nil, feedErrChan, parentChainID) + node, err := arbnode.CreateNode(ctx1, stack, execNode, arbDb, NewFetcherFromConfig(arbnode.ConfigDefaultL2Test()), blockchain.Config(), nil, nil, nil, nil, nil, feedErrChan, parentChainID, nil) Require(t, err) err = node.TxStreamer.AddFakeInitMessage() Require(t, err) @@ -376,7 +376,7 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig execNode, err = gethexec.CreateExecutionNode(ctx1, stack, chainDb, blockchain, nil, execConfigFetcher) Require(t, err) - node, err = arbnode.CreateNode(ctx, stack, execNode, arbDb, NewFetcherFromConfig(arbnode.ConfigDefaultL2Test()), blockchain.Config(), nil, node.DeployInfo, nil, nil, nil, feedErrChan, parentChainID) + node, err = arbnode.CreateNode(ctx, stack, execNode, arbDb, NewFetcherFromConfig(arbnode.ConfigDefaultL2Test()), blockchain.Config(), nil, node.DeployInfo, nil, nil, nil, feedErrChan, parentChainID, nil) Require(t, err) Require(t, node.Start(ctx)) client = ClientForStack(t, stack) diff --git a/system_tests/retryable_test.go b/system_tests/retryable_test.go index 4e7bd2c7d8..be0ecc590f 100644 --- a/system_tests/retryable_test.go +++ b/system_tests/retryable_test.go @@ -157,7 +157,10 @@ func TestSubmitRetryableImmediateSuccess(t *testing.T) { ) Require(t, err, "failed to estimate retryable submission") estimate := tx.Gas() - colors.PrintBlue("estimate: ", estimate) + expectedEstimate := params.TxGas + params.TxDataNonZeroGasEIP2028*4 + if estimate != expectedEstimate { + t.Errorf("estimated retryable ticket at %v gas but expected %v", estimate, expectedEstimate) + } // submit & auto redeem the retryable using the gas estimate usertxoptsL1 := builder.L1Info.GetDefaultTransactOpts("Faucet", ctx) @@ -336,6 +339,12 @@ func TestSubmitRetryableFailThenRetry(t *testing.T) { receipt, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) + redemptionL2Gas := receipt.GasUsed - receipt.GasUsedForL1 + var maxRedemptionL2Gas uint64 = 1_000_000 + if redemptionL2Gas > maxRedemptionL2Gas { + t.Errorf("manual retryable redemption used %v gas, more than expected max %v gas", redemptionL2Gas, maxRedemptionL2Gas) + } + retryTxId := receipt.Logs[0].Topics[2] // check the receipt for the retry diff --git a/system_tests/ipc_test.go b/system_tests/rpc_test.go similarity index 50% rename from system_tests/ipc_test.go rename to system_tests/rpc_test.go index 511a608e67..357cb8e4c1 100644 --- a/system_tests/ipc_test.go +++ b/system_tests/rpc_test.go @@ -7,8 +7,10 @@ import ( "context" "path/filepath" "testing" + "time" "github.com/ethereum/go-ethereum/ethclient" + "github.com/offchainlabs/nitro/solgen/go/mocksgen" ) func TestIpcRpc(t *testing.T) { @@ -25,3 +27,23 @@ func TestIpcRpc(t *testing.T) { _, err := ethclient.Dial(ipcPath) Require(t, err) } + +func TestPendingBlockTimeAndNumberAdvance(t *testing.T) { + t.Parallel() + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() + + auth := builder.L2Info.GetDefaultTransactOpts("Faucet", ctx) + + _, _, testTimeAndNr, err := mocksgen.DeployPendingBlkTimeAndNrAdvanceCheck(&auth, builder.L2.Client) + Require(t, err) + + time.Sleep(1 * time.Second) + + _, err = testTimeAndNr.IsAdvancing(&auth) + Require(t, err) +} diff --git a/system_tests/seqinbox_test.go b/system_tests/seqinbox_test.go index c4dd17ef53..e00bda8e84 100644 --- a/system_tests/seqinbox_test.go +++ b/system_tests/seqinbox_test.go @@ -355,7 +355,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { if i%5 == 0 { tx, err = seqInbox.AddSequencerL2Batch(&seqOpts, big.NewInt(int64(len(blockStates))), batchData, big.NewInt(1), gasRefunderAddr, big.NewInt(0), big.NewInt(0)) } else { - tx, err = seqInbox.AddSequencerL2BatchFromOrigin(&seqOpts, big.NewInt(int64(len(blockStates))), batchData, big.NewInt(1), gasRefunderAddr) + tx, err = seqInbox.AddSequencerL2BatchFromOrigin0(&seqOpts, big.NewInt(int64(len(blockStates))), batchData, big.NewInt(1), gasRefunderAddr, common.Big0, common.Big0) } Require(t, err) txRes, err := builder.L1.EnsureTxSucceeded(tx) diff --git a/system_tests/state_fuzz_test.go b/system_tests/state_fuzz_test.go index 28bcbec9b4..2c11435485 100644 --- a/system_tests/state_fuzz_test.go +++ b/system_tests/state_fuzz_test.go @@ -41,7 +41,7 @@ func BuildBlock( if lastBlockHeader != nil { delayedMessagesRead = lastBlockHeader.Nonce.Uint64() } - inboxMultiplexer := arbstate.NewInboxMultiplexer(inbox, delayedMessagesRead, nil, nil, arbstate.KeysetValidate) + inboxMultiplexer := arbstate.NewInboxMultiplexer(inbox, delayedMessagesRead, nil, arbstate.KeysetValidate) ctx := context.Background() message, err := inboxMultiplexer.Pop(ctx) @@ -121,6 +121,9 @@ func (c noopChainContext) GetHeader(common.Hash, uint64) *types.Header { func FuzzStateTransition(f *testing.F) { f.Fuzz(func(t *testing.T, compressSeqMsg bool, seqMsg []byte, delayedMsg []byte) { + if len(seqMsg) > 0 && arbstate.IsL1AuthenticatedMessageHeaderByte(seqMsg[0]) { + return + } chainDb := rawdb.NewMemoryDatabase() chainConfig := params.ArbitrumRollupGoerliTestnetChainConfig() serializedChainConfig, err := json.Marshal(chainConfig) diff --git a/system_tests/unsupported_txtypes_test.go b/system_tests/unsupported_txtypes_test.go new file mode 100644 index 0000000000..4c3c8661c8 --- /dev/null +++ b/system_tests/unsupported_txtypes_test.go @@ -0,0 +1,133 @@ +// Copyright 2021-2022, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE + +// race detection makes things slow and miss timeouts +//go:build !race +// +build !race + +package arbtest + +import ( + "context" + "errors" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" + "github.com/holiman/uint256" +) + +func TestBlobAndInternalTxsReject(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() + + builder.L2Info.GenerateAccount("User") + builder.L2Info.GenerateAccount("User2") + l2ChainID := builder.L2Info.Signer.ChainID() + + privKey := GetTestKeyForAccountName(t, "User") + txDataBlob := &types.BlobTx{ + ChainID: &uint256.Int{l2ChainID.Uint64()}, + Nonce: 0, + GasFeeCap: &uint256.Int{params.GWei}, + Gas: 500000, + To: builder.L2Info.GetAddress("User2"), + Value: &uint256.Int{0}, + } + blobTx, err := types.SignNewTx(privKey, types.NewCancunSigner(l2ChainID), txDataBlob) + Require(t, err) + err = builder.L2.Client.SendTransaction(ctx, blobTx) + if err == nil && !errors.Is(err, types.ErrTxTypeNotSupported) { + t.Fatalf("did not receive expected error when submitting blob transaction. Want: %v, Got: %v", types.ErrTxTypeNotSupported, err) + } + + txDataInternal := &types.ArbitrumInternalTx{ChainId: l2ChainID} + internalTx := types.NewTx(txDataInternal) + err = builder.L2.Client.SendTransaction(ctx, internalTx) + if err == nil && !errors.Is(err, types.ErrTxTypeNotSupported) { + t.Fatalf("did not receive expected error when submitting arbitrum internal transaction. Want: %v, Got: %v", types.ErrTxTypeNotSupported, err) + } +} +func TestBlobAndInternalTxsAsDelayedMsgReject(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() + + builder.L2Info.GenerateAccount("User2") + + l1Txs := make([]*types.Transaction, 0, 4) + txAcceptStatus := make(map[common.Hash]bool, 4) + l2ChainID := builder.L2Info.Signer.ChainID() + + privKey := GetTestKeyForAccountName(t, "Owner") + txDataBlob := &types.BlobTx{ + ChainID: &uint256.Int{l2ChainID.Uint64()}, + Nonce: 0, + GasFeeCap: &uint256.Int{params.GWei}, + Gas: 500000, + To: builder.L2Info.GetAddress("User2"), + Value: &uint256.Int{0}, + } + delayedBlobTx, err := types.SignNewTx(privKey, types.NewCancunSigner(l2ChainID), txDataBlob) + Require(t, err) + txAcceptStatus[delayedBlobTx.Hash()] = false + l1TxBlob := WrapL2ForDelayed(t, delayedBlobTx, builder.L1Info, "User", 100000) + l1Txs = append(l1Txs, l1TxBlob) + + txDataInternal := &types.ArbitrumInternalTx{ChainId: l2ChainID} + delayedInternalTx := types.NewTx(txDataInternal) + txAcceptStatus[delayedInternalTx.Hash()] = false + l1TxInternal := WrapL2ForDelayed(t, delayedInternalTx, builder.L1Info, "User", 100000) + l1Txs = append(l1Txs, l1TxInternal) + + delayedTx1 := builder.L2Info.PrepareTx("Owner", "User2", 50001, big.NewInt(10000), nil) + txAcceptStatus[delayedTx1.Hash()] = false + l1tx := WrapL2ForDelayed(t, delayedTx1, builder.L1Info, "User", 100000) + l1Txs = append(l1Txs, l1tx) + + delayedTx2 := builder.L2Info.PrepareTx("Owner", "User2", 50001, big.NewInt(10000), nil) + txAcceptStatus[delayedTx2.Hash()] = false + l1tx = WrapL2ForDelayed(t, delayedTx2, builder.L1Info, "User", 100000) + l1Txs = append(l1Txs, l1tx) + + 
errs := builder.L1.L1Backend.TxPool().Add(l1Txs, true, false) + for _, err := range errs { + Require(t, err) + } + + confirmLatestBlock(ctx, t, builder.L1Info, builder.L1.Client) + for _, tx := range l1Txs { + _, err = builder.L1.EnsureTxSucceeded(tx) + Require(t, err) + } + + blocknum, err := builder.L2.Client.BlockNumber(ctx) + Require(t, err) + for i := int64(0); i <= int64(blocknum); i++ { + block, err := builder.L2.Client.BlockByNumber(ctx, big.NewInt(i)) + Require(t, err) + for _, tx := range block.Transactions() { + if _, ok := txAcceptStatus[tx.Hash()]; ok { + txAcceptStatus[tx.Hash()] = true + } + } + } + if !txAcceptStatus[delayedTx1.Hash()] || !txAcceptStatus[delayedTx2.Hash()] { + t.Fatalf("transaction of valid transaction type wasn't accepted as a delayed message") + } + if txAcceptStatus[delayedBlobTx.Hash()] { + t.Fatalf("blob transaction was successfully accepted as a delayed message") + } + if txAcceptStatus[delayedInternalTx.Hash()] { + t.Fatalf("arbitrum internal transaction was successfully accepted as a delayed message") + } +} diff --git a/util/arbmath/bips.go b/util/arbmath/bips.go index 1e788df064..83c7a61ec2 100644 --- a/util/arbmath/bips.go +++ b/util/arbmath/bips.go @@ -36,3 +36,10 @@ func UintMulByBips(value uint64, bips Bips) uint64 { func SaturatingCastToBips(value uint64) Bips { return Bips(SaturatingCast(value)) } + +// BigDivToBips returns dividend/divisor as bips, saturating if out of bounds +func BigDivToBips(dividend, divisor *big.Int) Bips { + value := BigMulByInt(dividend, int64(OneInBips)) + value.Div(value, divisor) + return Bips(BigToUintSaturating(value)) +} diff --git a/util/blobs/blobs.go b/util/blobs/blobs.go index 2852f2b29f..405c776bad 100644 --- a/util/blobs/blobs.go +++ b/util/blobs/blobs.go @@ -29,6 +29,9 @@ func fillBlobBytes(blob []byte, data []byte) []byte { // The number of bits in a BLS scalar that aren't part of a whole byte. const spareBlobBits = 6 // = math.floor(math.log2(BLS_MODULUS)) % 8 +// The number of bytes encodable in a blob with the current encoding scheme. +const BlobEncodableData = 254 * params.BlobTxFieldElementsPerBlob / 8 + func fillBlobBits(blob []byte, data []byte) ([]byte, error) { var acc uint16 accBits := 0 diff --git a/arbnode/blob_reader.go b/util/headerreader/blob_client.go similarity index 58% rename from arbnode/blob_reader.go rename to util/headerreader/blob_client.go index 1424285832..8989a321c7 100644 --- a/arbnode/blob_reader.go +++ b/util/headerreader/blob_client.go @@ -1,15 +1,17 @@ -// Copyright 2023, Offchain Labs, Inc. +// Copyright 2023-2024, Offchain Labs, Inc. // For license information, see https://github.com/nitro/blob/master/LICENSE -package arbnode +package headerreader import ( "context" "encoding/json" + "errors" "fmt" "io" "net/http" "net/url" + "os" "path" "github.com/ethereum/go-ethereum/common" @@ -24,36 +26,59 @@ import ( ) type BlobClient struct { - ec arbutil.L1Interface - beaconUrl *url.URL - httpClient *http.Client + ec arbutil.L1Interface + beaconUrl *url.URL + httpClient *http.Client + authorization string - // The genesis time time and seconds per slot won't change so only request them once. 
- cachedGenesisTime uint64 - cachedSecondsPerSlot uint64 + // Filled in in Initialize() + genesisTime uint64 + secondsPerSlot uint64 + + // Directory to save the fetched blobs + blobDirectory string } type BlobClientConfig struct { - BeaconChainUrl string `koanf:"beacon-chain-url"` + BeaconUrl string `koanf:"beacon-url"` + BlobDirectory string `koanf:"blob-directory"` + Authorization string `koanf:"authorization"` } var DefaultBlobClientConfig = BlobClientConfig{ - BeaconChainUrl: "", + BeaconUrl: "", + BlobDirectory: "", + Authorization: "", } func BlobClientAddOptions(prefix string, f *pflag.FlagSet) { - f.String(prefix+".beacon-chain-url", DefaultBlobClientConfig.BeaconChainUrl, "Beacon Chain url to use for fetching blobs") + f.String(prefix+".beacon-url", DefaultBlobClientConfig.BeaconUrl, "Beacon Chain RPC URL to use for fetching blobs (normally on port 3500)") + f.String(prefix+".blob-directory", DefaultBlobClientConfig.BlobDirectory, "Full path of the directory to save fetched blobs") + f.String(prefix+".authorization", DefaultBlobClientConfig.Authorization, "Value to send with the HTTP Authorization: header for Beacon REST requests, must include both scheme and scheme parameters") } func NewBlobClient(config BlobClientConfig, ec arbutil.L1Interface) (*BlobClient, error) { - beaconUrl, err := url.Parse(config.BeaconChainUrl) + beaconUrl, err := url.Parse(config.BeaconUrl) if err != nil { return nil, fmt.Errorf("failed to parse beacon chain URL: %w", err) } + if config.BlobDirectory != "" { + if _, err = os.Stat(config.BlobDirectory); err != nil { + if os.IsNotExist(err) { + if err = os.MkdirAll(config.BlobDirectory, os.ModePerm); err != nil { + return nil, fmt.Errorf("error creating blob directory: %w", err) + } + } else { + return nil, fmt.Errorf("invalid blob directory path: %w", err) + } + } + } return &BlobClient{ - ec: ec, - beaconUrl: beaconUrl, - httpClient: &http.Client{}, + ec: ec, + beaconUrl: beaconUrl, + authorization: config.Authorization, + httpClient: &http.Client{}, + blobDirectory: config.BlobDirectory, }, nil } @@ -75,6 +100,10 @@ func beaconRequest[T interface{}](b *BlobClient, ctx context.Context, beaconPath return empty, err } + if b.authorization != "" { + req.Header.Set("Authorization", b.authorization) + } + resp, err := b.httpClient.Do(req) if err != nil { return empty, err @@ -100,15 +129,10 @@ func (b *BlobClient) GetBlobs(ctx context.Context, blockHash common.Hash, versio if err != nil { return nil, err } - genesisTime, err := b.genesisTime(ctx) - if err != nil { - return nil, err - } - secondsPerSlot, err := b.secondsPerSlot(ctx) - if err != nil { - return nil, err + if b.secondsPerSlot == 0 { + return nil, errors.New("BlobClient hasn't been initialized") } - slot := (header.Time - genesisTime) / secondsPerSlot + slot := (header.Time - b.genesisTime) / b.secondsPerSlot return b.blobSidecars(ctx, slot, versionedHashes) } @@ -124,10 +148,14 @@ type blobResponseItem struct { } func (b *BlobClient) blobSidecars(ctx context.Context, slot uint64, versionedHashes []common.Hash) ([]kzg4844.Blob, error) { - response, err := beaconRequest[[]blobResponseItem](b, ctx, fmt.Sprintf("/eth/v1/beacon/blob_sidecars/%d", slot)) + rawData, err := beaconRequest[json.RawMessage](b, ctx, fmt.Sprintf("/eth/v1/beacon/blob_sidecars/%d", slot)) if err != nil { return nil, fmt.Errorf("error calling beacon client in blobSidecars: %w", err) } + var response []blobResponseItem + if err := json.Unmarshal(rawData, &response); err != nil { + return nil, fmt.Errorf("error unmarshalling 
raw data into array of blobResponseItem in blobSidecars: %w", err) + } if len(response) < len(versionedHashes) { return nil, fmt.Errorf("expected at least %d blobs for slot %d but only got %d", len(versionedHashes), slot, len(response)) @@ -178,39 +206,57 @@ func (b *BlobClient) blobSidecars(ctx context.Context, slot uint64, versionedHas } } - return output, nil -} + if b.blobDirectory != "" { + if err := saveBlobDataToDisk(rawData, slot, b.blobDirectory); err != nil { + return nil, err + } + } -type genesisResponse struct { - GenesisTime jsonapi.Uint64String `json:"genesis_time"` - // don't currently care about other fields, add if needed + return output, nil } -func (b *BlobClient) genesisTime(ctx context.Context) (uint64, error) { - if b.cachedGenesisTime > 0 { - return b.cachedGenesisTime, nil +func saveBlobDataToDisk(rawData json.RawMessage, slot uint64, blobDirectory string) error { + filePath := path.Join(blobDirectory, fmt.Sprint(slot)) + file, err := os.Create(filePath) + if err != nil { + return fmt.Errorf("could not create file to store fetched blobs") } - gr, err := beaconRequest[genesisResponse](b, ctx, "/eth/v1/beacon/genesis") + full := fullResult[json.RawMessage]{Data: rawData} + fullbytes, err := json.Marshal(full) if err != nil { - return 0, fmt.Errorf("error calling beacon client in genesisTime: %w", err) + return fmt.Errorf("unable to marshal data into bytes while attempting to store fetched blobs") } - b.cachedGenesisTime = uint64(gr.GenesisTime) - return b.cachedGenesisTime, nil + if _, err := file.Write(fullbytes); err != nil { + return fmt.Errorf("failed to write blob data to disk") + } + file.Close() + return nil +} + +type genesisResponse struct { + GenesisTime jsonapi.Uint64String `json:"genesis_time"` + // don't currently care about other fields, add if needed } type getSpecResponse struct { SecondsPerSlot jsonapi.Uint64String `json:"SECONDS_PER_SLOT"` } -func (b *BlobClient) secondsPerSlot(ctx context.Context) (uint64, error) { - if b.cachedSecondsPerSlot > 0 { - return b.cachedSecondsPerSlot, nil +func (b *BlobClient) Initialize(ctx context.Context) error { + genesis, err := beaconRequest[genesisResponse](b, ctx, "/eth/v1/beacon/genesis") + if err != nil { + return fmt.Errorf("error calling beacon client to get genesisTime: %w", err) } - gr, err := beaconRequest[getSpecResponse](b, ctx, "/eth/v1/config/spec") + b.genesisTime = uint64(genesis.GenesisTime) + + spec, err := beaconRequest[getSpecResponse](b, ctx, "/eth/v1/config/spec") if err != nil { - return 0, fmt.Errorf("error calling beacon client in secondsPerSlot: %w", err) + return fmt.Errorf("error calling beacon client to get secondsPerSlot: %w", err) + } + if spec.SecondsPerSlot == 0 { + return errors.New("got SECONDS_PER_SLOT of zero from beacon client") } - b.cachedSecondsPerSlot = uint64(gr.SecondsPerSlot) - return b.cachedSecondsPerSlot, nil + b.secondsPerSlot = uint64(spec.SecondsPerSlot) + return nil } diff --git a/util/headerreader/blob_client_test.go b/util/headerreader/blob_client_test.go new file mode 100644 index 0000000000..9735899daa --- /dev/null +++ b/util/headerreader/blob_client_test.go @@ -0,0 +1,69 @@ +// Copyright 2024, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE + +package headerreader + +import ( + "encoding/json" + "io" + "os" + "path" + "reflect" + "testing" + + "github.com/offchainlabs/nitro/util/testhelpers" + "github.com/r3labs/diff/v3" +) + +func TestSaveBlobsToDisk(t *testing.T) { + response := []blobResponseItem{{ + BlockRoot: "a", + Index: 0, + Slot: 5, + BlockParentRoot: "a0", + ProposerIndex: 9, + Blob: []byte{1}, + KzgCommitment: []byte{1}, + KzgProof: []byte{1}, + }, { + BlockRoot: "a", + Index: 1, + Slot: 5, + BlockParentRoot: "a0", + ProposerIndex: 10, + Blob: []byte{2}, + KzgCommitment: []byte{2}, + KzgProof: []byte{2}, + }} + testDir := t.TempDir() + rawData, err := json.Marshal(response) + Require(t, err) + err = saveBlobDataToDisk(rawData, 5, testDir) + Require(t, err) + + filePath := path.Join(testDir, "5") + file, err := os.Open(filePath) + Require(t, err) + defer file.Close() + + data, err := io.ReadAll(file) + Require(t, err) + var full fullResult[[]blobResponseItem] + err = json.Unmarshal(data, &full) + Require(t, err) + if !reflect.DeepEqual(full.Data, response) { + changelog, err := diff.Diff(full.Data, response) + Require(t, err) + Fail(t, "blob data saved to disk does not match actual blob data", changelog) + } +} + +func Require(t *testing.T, err error, printables ...interface{}) { + t.Helper() + testhelpers.RequireImpl(t, err, printables...) +} + +func Fail(t *testing.T, printables ...interface{}) { + t.Helper() + testhelpers.FailImpl(t, printables...) +} diff --git a/util/redisutil/redis_coordinator.go b/util/redisutil/redis_coordinator.go index 6af141c668..59e3b0e0f9 100644 --- a/util/redisutil/redis_coordinator.go +++ b/util/redisutil/redis_coordinator.go @@ -79,10 +79,10 @@ func (c *RedisCoordinator) CurrentChosenSequencer(ctx context.Context) (string, // GetPriorities returns the priority list of sequencers func (rc *RedisCoordinator) GetPriorities(ctx context.Context) ([]string, error) { prioritiesString, err := rc.Client.Get(ctx, PRIORITIES_KEY).Result() + if errors.Is(err, redis.Nil) { + return []string{}, nil + } if err != nil { - if errors.Is(err, redis.Nil) { - err = errors.New("sequencer priorities unset") - } return []string{}, err } prioritiesList := strings.Split(prioritiesString, ",") diff --git a/util/rpcclient/rpcclient.go b/util/rpcclient/rpcclient.go index dbc145d490..275acdb283 100644 --- a/util/rpcclient/rpcclient.go +++ b/util/rpcclient/rpcclient.go @@ -21,14 +21,14 @@ import ( ) type ClientConfig struct { - URL string `koanf:"url"` - JWTSecret string `koanf:"jwtsecret"` - Timeout time.Duration `koanf:"timeout" reload:"hot"` - Retries uint `koanf:"retries" reload:"hot"` - ConnectionWait time.Duration `koanf:"connection-wait"` - ArgLogLimit uint `koanf:"arg-log-limit" reload:"hot"` - RetryErrors string `koanf:"retry-errors" reload:"hot"` - RetryDelay time.Duration `koanf:"retry-delay"` + URL string `json:"url,omitempty" koanf:"url"` + JWTSecret string `json:"jwtsecret,omitempty" koanf:"jwtsecret"` + Timeout time.Duration `json:"timeout,omitempty" koanf:"timeout" reload:"hot"` + Retries uint `json:"retries,omitempty" koanf:"retries" reload:"hot"` + ConnectionWait time.Duration `json:"connection-wait,omitempty" koanf:"connection-wait"` + ArgLogLimit uint `json:"arg-log-limit,omitempty" koanf:"arg-log-limit" reload:"hot"` + RetryErrors string `json:"retry-errors,omitempty" koanf:"retry-errors" reload:"hot"` + RetryDelay time.Duration `json:"retry-delay,omitempty" koanf:"retry-delay"` retryErrors *regexp.Regexp }
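For reference, a minimal sketch of how the new validation-server-configs-list value is consumed, following the json.Unmarshal call added to BlockValidatorConfig.Validate() above. The URLs are placeholders, the clientConfig struct is a local stand-in that mirrors the json tags now carried by rpcclient.ClientConfig, and the timeout is supplied as a plain nanosecond count, as the new flag help describes.

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// clientConfig mirrors the json tags added to rpcclient.ClientConfig above;
// it is only a stand-in for this sketch, not the real type.
type clientConfig struct {
	URL       string        `json:"url,omitempty"`
	JWTSecret string        `json:"jwtsecret,omitempty"`
	Timeout   time.Duration `json:"timeout,omitempty"`
}

func main() {
	// Two hypothetical validation servers; the second overrides the timeout
	// with 5s expressed in nanoseconds.
	list := `[{"url": "ws://validation-a:8549"}, {"url": "ws://validation-b:8549", "timeout": 5000000000}]`
	var configs []clientConfig
	if err := json.Unmarshal([]byte(list), &configs); err != nil {
		panic(err)
	}
	fmt.Println(len(configs), configs[1].Timeout) // 2 5s
}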