diff --git a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml index 4eac174d29..9124577d19 100644 --- a/.github/workflows/arbitrator-ci.yml +++ b/.github/workflows/arbitrator-ci.yml @@ -1,4 +1,5 @@ -name: CI +name: Arbitrator CI +run-name: Arbitrator CI triggered from @${{ github.actor }} of ${{ github.head_ref }} on: workflow_dispatch: @@ -37,7 +38,7 @@ jobs: - name: Install go uses: actions/setup-go@v2 with: - go-version: 1.19.x + go-version: 1.20.x - name: Setup nodejs uses: actions/setup-node@v2 diff --git a/.github/workflows/arbitrator-skip-ci.yml b/.github/workflows/arbitrator-skip-ci.yml index 87978ca2f8..6dfd962ee6 100644 --- a/.github/workflows/arbitrator-skip-ci.yml +++ b/.github/workflows/arbitrator-skip-ci.yml @@ -1,4 +1,5 @@ -name: CI +name: Arbitrator skip CI +run-name: Arbitrator skip CI triggered from @${{ github.actor }} of ${{ github.head_ref }} on: pull_request: diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0b341d532e..2dc03679cf 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,4 +1,5 @@ -name: CI +name: Go tests CI +run-name: Go tests CI triggered from @${{ github.actor }} of ${{ github.head_ref }} on: workflow_dispatch: @@ -44,7 +45,7 @@ jobs: - name: Install go uses: actions/setup-go@v2 with: - go-version: 1.19.x + go-version: 1.20.x - name: Install wasm-ld run: | diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 3e2c1214a3..78c3918717 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -68,7 +68,7 @@ jobs: - name: Install go uses: actions/setup-go@v2 with: - go-version: 1.19.x + go-version: 1.20.x - name: Install rust stable uses: actions-rs/toolchain@v1 diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index fb4d7466a4..cf5fdd5ca9 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -1,4 +1,5 @@ -name: CI +name: Docker build CI +run-name: Docker build CI triggered from @${{ github.actor }} of ${{ github.head_ref }} on: workflow_dispatch: diff --git a/.golangci.yml b/.golangci.yml index d9b6581393..e794cdb844 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -22,7 +22,6 @@ linters: - gosec # check for security concerns - nilerr # ensure errors aren't mishandled - staticcheck # check for suspicious constructs - - structcheck # check that struct fields are used - unused # check for unused constructs linters-settings: diff --git a/Dockerfile b/Dockerfile index 4043313893..c6fc2b2623 100644 --- a/Dockerfile +++ b/Dockerfile @@ -53,7 +53,7 @@ COPY --from=wasm-libs-builder /workspace/ / FROM wasm-base as wasm-bin-builder # pinned go version -RUN curl -L https://golang.org/dl/go1.19.linux-`dpkg --print-architecture`.tar.gz | tar -C /usr/local -xzf - +RUN curl -L https://golang.org/dl/go1.20.linux-`dpkg --print-architecture`.tar.gz | tar -C /usr/local -xzf - COPY ./Makefile ./go.mod ./go.sum ./ COPY ./arbcompress ./arbcompress COPY ./arbos ./arbos @@ -161,7 +161,7 @@ COPY ./testnode-scripts/download-machine.sh . 
RUN ./download-machine.sh consensus-v10 0x6b94a7fc388fd8ef3def759297828dc311761e88d8179c7ee8d3887dc554f3c3 RUN ./download-machine.sh consensus-v10.1 0xda4e3ad5e7feacb817c21c8d0220da7650fe9051ece68a3f0b1c5d38bbb27b21 -FROM golang:1.19-bullseye as node-builder +FROM golang:1.20-bullseye as node-builder WORKDIR /workspace ARG version="" ARG datetime="" @@ -215,7 +215,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ chown -R user:user /home/user && \ chmod -R 555 /home/user/target/machines && \ apt-get clean && \ - rm -rf /var/lib/apt/lists/* /usr/share/doc/* && \ + rm -rf /var/lib/apt/lists/* /usr/share/doc/* /var/cache/ldconfig/aux-cache /usr/lib/python3.9/__pycache__/ /usr/lib/python3.9/*/__pycache__/ /var/log/* && \ nitro --version USER user @@ -234,7 +234,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ node-ws vim-tiny python3 \ dnsutils && \ apt-get clean && \ - rm -rf /var/lib/apt/lists/* /usr/share/doc/* && \ + rm -rf /var/lib/apt/lists/* /usr/share/doc/* /var/cache/ldconfig/aux-cache /usr/lib/python3.9/__pycache__/ /usr/lib/python3.9/*/__pycache__/ /var/log/* && \ nitro --version USER user @@ -258,7 +258,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ adduser user sudo && \ echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers && \ apt-get clean && \ - rm -rf /var/lib/apt/lists/* /usr/share/doc/* && \ + rm -rf /var/lib/apt/lists/* /usr/share/doc/* /var/cache/ldconfig/aux-cache /usr/lib/python3.9/__pycache__/ /usr/lib/python3.9/*/__pycache__/ /var/log/* && \ nitro --version USER user diff --git a/arbitrator/Cargo.lock b/arbitrator/Cargo.lock index 939b329d21..41522a82bb 100644 --- a/arbitrator/Cargo.lock +++ b/arbitrator/Cargo.lock @@ -2,12 +2,6 @@ # It is not intended for manual editing. version = 3 -[[package]] -name = "Inflector" -version = "0.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" - [[package]] name = "addr2line" version = "0.17.0" @@ -175,7 +169,7 @@ checksum = "13e576ebe98e605500b3c8041bb888e966653577172df6dd97398714eb30b9bf" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.76", ] [[package]] @@ -361,7 +355,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn", + "syn 1.0.76", ] [[package]] @@ -372,7 +366,7 @@ checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core", "quote", - "syn", + "syn 1.0.76", ] [[package]] @@ -407,7 +401,7 @@ checksum = "c134c37760b27a871ba422106eedbb8247da973a09e82558bf26d619c882b159" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.76", ] [[package]] @@ -428,7 +422,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn", + "syn 1.0.76", ] [[package]] @@ -518,6 +512,12 @@ dependencies = [ "unicode-segmentation", ] +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + [[package]] name = "hermit-abi" version = "0.1.19" @@ -578,7 +578,7 @@ checksum = "3c7090af3d300424caa81976b8c97bca41cd70e861272c072e188ae082fb49f9" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.76", ] [[package]] @@ -884,25 +884,26 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "ouroboros" -version = "0.15.5" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dfbb50b356159620db6ac971c6d5c9ab788c9cc38a6f49619fca2a27acb062ca" +checksum = "e6a6d0919a92ba28d8109a103e0de08f89706be0eeaad1130fd1a34030dee84a" dependencies = [ "aliasable", "ouroboros_macro", + "static_assertions", ] [[package]] name = "ouroboros_macro" -version = "0.15.5" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a0d9d1a6191c4f391f87219d1ea42b23f09ee84d64763cd05ee6ea88d9f384d" +checksum = "46bc2307dc3420554ae349230dac4969c66d7c2feead3a8cab05ea0c604daca6" dependencies = [ - "Inflector", + "heck 0.4.1", "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 2.0.18", ] [[package]] @@ -978,7 +979,7 @@ dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote", - "syn", + "syn 1.0.76", "version_check", ] @@ -995,11 +996,11 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.29" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f5105d4fdaab20335ca9565e106a5d9b82b6219b5ba735731124ac6711d23d" +checksum = "dec2b086b7a862cf4de201096214fa870344cf922b2b30c167badb3af3195406" dependencies = [ - "unicode-xid", + "unicode-ident", ] [[package]] @@ -1048,14 +1049,14 @@ checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.76", ] [[package]] name = "quote" -version = "1.0.9" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" +checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" dependencies = [ "proc-macro2", ] @@ -1191,7 +1192,7 @@ checksum = "6eaedadc88b53e36dd32d940ed21ae4d850d5916f2581526921f553a72ac34c4" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.76", ] [[package]] @@ -1285,7 +1286,7 @@ checksum = "ecc0db5cb2556c0e558887d9bbdcf6ac4471e83ff66cf696e5419024d1606276" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.76", ] [[package]] @@ -1319,7 +1320,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn", + "syn 1.0.76", ] [[package]] @@ -1390,11 +1391,11 @@ version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ - "heck", + "heck 0.3.3", "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.76", ] [[package]] @@ -1408,6 +1409,17 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "syn" +version = "2.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32d41677bcbe24c20c52e7c70b0d8db04134c5d1066bf98662e2871ad200ea3e" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + [[package]] name = "target-lexicon" version = "0.12.4" @@ -1440,7 +1452,7 @@ checksum = "c251e90f708e16c49a16f4917dc2131e75222b72edfa9cb7f7c58ae56aae0c09" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.76", ] [[package]] @@ -1463,7 +1475,7 @@ checksum = "11c75893af559bc8e10716548bdef5cb2b983f8e637db9d0e15126b61b484ee2" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.76", ] [[package]] @@ -1487,6 +1499,12 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" +[[package]] +name = "unicode-ident" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0" + [[package]] name = "unicode-segmentation" version = "1.8.0" @@ -1544,7 +1562,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn", + "syn 1.0.76", "wasm-bindgen-shared", ] @@ -1568,7 +1586,7 @@ checksum = "c5020cfa87c7cecefef118055d44e3c1fc122c7ec25701d528ee458a0b45f38f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.76", ] [[package]] @@ -1589,7 +1607,7 @@ checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.76", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -1711,7 +1729,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.76", ] [[package]] diff --git a/arbitrator/jit/Cargo.toml b/arbitrator/jit/Cargo.toml index 23cc3becfe..75b3e3a74c 100644 --- a/arbitrator/jit/Cargo.toml +++ b/arbitrator/jit/Cargo.toml @@ -17,7 +17,7 @@ hex = "0.4.3" structopt = "0.3.26" sha3 = "0.9.1" libc = "0.2.132" -ouroboros = "0.15.5" +ouroboros = "0.16.0" [features] llvm = ["dep:wasmer-compiler-llvm"] diff --git a/arbitrator/jit/src/gostack.rs b/arbitrator/jit/src/gostack.rs index 45000ec034..80fccf179c 100644 --- a/arbitrator/jit/src/gostack.rs +++ b/arbitrator/jit/src/gostack.rs @@ -1,6 +1,8 @@ // Copyright 2022, Offchain Labs, Inc. // For license information, see https://github.com/nitro/blob/master/LICENSE +#![allow(clippy::useless_transmute)] + use crate::{ machine::{WasmEnv, WasmEnvMut}, syscall::JsValue, diff --git a/arbitrator/jit/src/test.rs b/arbitrator/jit/src/test.rs index e3b276f3f2..517c8596c0 100644 --- a/arbitrator/jit/src/test.rs +++ b/arbitrator/jit/src/test.rs @@ -12,7 +12,7 @@ fn test_crate() -> eyre::Result<()> { let source = std::fs::read("programs/pure/main.wat")?; let mut store = Store::default(); - let module = Module::new(&store, &source)?; + let module = Module::new(&store, source)?; let imports = imports! 
{}; let instance = Instance::new(&mut store, &module, &imports)?; diff --git a/arbnode/api.go b/arbnode/api.go index 8d8516dda0..057c03bf31 100644 --- a/arbnode/api.go +++ b/arbnode/api.go @@ -2,6 +2,7 @@ package arbnode import ( "context" + "errors" "fmt" "time" @@ -11,7 +12,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/staker" - "github.com/pkg/errors" ) type BlockValidatorAPI struct { diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 9a531df479..3e5e6a738f 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -7,12 +7,12 @@ import ( "bytes" "context" "encoding/hex" + "errors" "fmt" "math/big" "time" "github.com/andybalholm/brotli" - "github.com/pkg/errors" flag "github.com/spf13/pflag" "github.com/ethereum/go-ethereum" @@ -248,9 +248,10 @@ type batchSegments struct { } type buildingBatch struct { - segments *batchSegments - startMsgCount arbutil.MessageIndex - msgCount arbutil.MessageIndex + segments *batchSegments + startMsgCount arbutil.MessageIndex + msgCount arbutil.MessageIndex + haveUsefulMessage bool } func newBatchSegments(firstDelayed uint64, config *BatchPosterConfig, backlog uint64) *batchSegments { @@ -558,6 +559,14 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) return false, err } + dbBatchCount, err := b.inbox.GetBatchCount() + if err != nil { + return false, err + } + if dbBatchCount > batchPosition.NextSeqNum { + return false, fmt.Errorf("attempting to post batch %v, but the local inbox tracker database already has %v batches", batchPosition.NextSeqNum, dbBatchCount) + } + if b.building == nil || b.building.startMsgCount != batchPosition.MessageCount { b.building = &buildingBatch{ segments: newBatchSegments(batchPosition.DelayedMessageCount, b.config(), b.backlog), @@ -577,11 +586,10 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) if err != nil { return false, err } - nextMessageTime := time.Unix(int64(firstMsg.Message.Header.Timestamp), 0) + firstMsgTime := time.Unix(int64(firstMsg.Message.Header.Timestamp), 0) config := b.config() - forcePostBatch := time.Since(nextMessageTime) >= config.MaxBatchPostDelay - haveUsefulMessage := false + forcePostBatch := time.Since(firstMsgTime) >= config.MaxBatchPostDelay for b.building.msgCount < msgCount { msg, err := b.streamer.GetMessage(b.building.msgCount) @@ -600,16 +608,16 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) if !config.WaitForMaxBatchPostDelay { forcePostBatch = true } - haveUsefulMessage = true + b.building.haveUsefulMessage = true break } if msg.Message.Header.Kind != arbostypes.L1MessageType_BatchPostingReport { - haveUsefulMessage = true + b.building.haveUsefulMessage = true } b.building.msgCount++ } - if !forcePostBatch || !haveUsefulMessage { + if !forcePostBatch || !b.building.haveUsefulMessage { // the batch isn't full yet and we've posted a batch recently // don't post anything for now return false, nil @@ -651,7 +659,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) DelayedMessageCount: b.building.segments.delayedMsg, NextSeqNum: batchPosition.NextSeqNum + 1, } - err = b.dataPoster.PostTransaction(ctx, nextMessageTime, nonce, newMeta, b.seqInboxAddr, data, gasLimit) + err = b.dataPoster.PostTransaction(ctx, firstMsgTime, nonce, newMeta, b.seqInboxAddr, data, gasLimit) if err != nil { return false, err } diff --git a/arbnode/dataposter/data_poster.go 
b/arbnode/dataposter/data_poster.go index b187612f40..ff0dcfebcf 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -222,6 +222,9 @@ func (p *DataPoster[Meta]) getFeeAndTipCaps(ctx context.Context, gasLimit uint64 if err != nil { return nil, nil, err } + if latestHeader.BaseFee == nil { + return nil, nil, fmt.Errorf("latest parent chain block %v missing BaseFee (either the parent chain does not have EIP-1559 or the parent chain node is not synced)", latestHeader.Number) + } newFeeCap := new(big.Int).Mul(latestHeader.BaseFee, big.NewInt(2)) newFeeCap = arbmath.BigMax(newFeeCap, arbmath.FloatToBig(config.MinFeeCapGwei*params.GWei)) @@ -408,7 +411,7 @@ func (p *DataPoster[Meta]) updateNonce(ctx context.Context) error { if p.lastBlock != nil && arbmath.BigEquals(p.lastBlock, header.Number) { return nil } - nonce, err := p.client.NonceAt(ctx, p.auth.From, p.lastBlock) + nonce, err := p.client.NonceAt(ctx, p.auth.From, header.Number) if err != nil { if p.lastBlock != nil { log.Warn("failed to get current nonce", "lastBlock", p.lastBlock, "newBlock", header.Number, "err", err) @@ -417,13 +420,16 @@ func (p *DataPoster[Meta]) updateNonce(ctx context.Context) error { return err } if nonce > p.nonce { - log.Info("data poster transactions confirmed", "previousNonce", p.nonce, "newNonce", nonce, "l1Block", p.lastBlock) + log.Info("data poster transactions confirmed", "previousNonce", p.nonce, "newNonce", nonce, "previousL1Block", p.lastBlock, "newL1Block", header.Number) if len(p.errorCount) > 0 { for x := p.nonce; x < nonce; x++ { delete(p.errorCount, x) } } - err := p.queue.Prune(ctx, nonce) + // We don't prune the most recent transaction in order to ensure that the data poster + // always has a reference point in its queue of the latest transaction nonce and metadata. + // nonce > 0 is implied by nonce > p.nonce, so this won't underflow. 
+ err := p.queue.Prune(ctx, nonce-1) if err != nil { return err } @@ -473,7 +479,7 @@ func (p *DataPoster[Meta]) Start(ctxIn context.Context) { p.mutex.Lock() defer p.mutex.Unlock() if !p.redisLock.AttemptLock(ctx) { - return p.replacementTimes[0] + return minWait } err := p.updateBalance(ctx) if err != nil { diff --git a/arbnode/dataposter/slice_storage.go b/arbnode/dataposter/slice_storage.go index 9ae0fbda6b..4364523d99 100644 --- a/arbnode/dataposter/slice_storage.go +++ b/arbnode/dataposter/slice_storage.go @@ -32,11 +32,10 @@ func (s *SliceStorage[Item]) GetContents(ctx context.Context, startingIndex uint } func (s *SliceStorage[Item]) GetLast(ctx context.Context) (*Item, error) { - if len(s.queue) > 0 { - return s.queue[len(s.queue)-1], nil - } else { + if len(s.queue) == 0 { return nil, nil } + return s.queue[len(s.queue)-1], nil } func (s *SliceStorage[Item]) Prune(ctx context.Context, keepStartingAt uint64) error { diff --git a/arbnode/delayed.go b/arbnode/delayed.go index d0d58b0d44..2995cc3d2f 100644 --- a/arbnode/delayed.go +++ b/arbnode/delayed.go @@ -6,11 +6,10 @@ package arbnode import ( "bytes" "context" + "errors" "math/big" "sort" - "github.com/pkg/errors" - "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -88,7 +87,7 @@ func (b *DelayedBridge) GetMessageCount(ctx context.Context, blockNumber *big.In } bigRes, err := b.con.DelayedMessageCount(opts) if err != nil { - return 0, errors.WithStack(err) + return 0, err } if !bigRes.IsUint64() { return 0, errors.New("DelayedBridge MessageCount doesn't make sense!") @@ -134,7 +133,7 @@ func (b *DelayedBridge) LookupMessagesInRange(ctx context.Context, from, to *big } logs, err := b.client.FilterLogs(ctx, query) if err != nil { - return nil, errors.WithStack(err) + return nil, err } return b.logsToDeliveredMessages(ctx, logs, batchFetcher) } @@ -171,7 +170,7 @@ func (b *DelayedBridge) logsToDeliveredMessages(ctx context.Context, logs []type } parsedLog, err := b.con.ParseMessageDelivered(ethLog) if err != nil { - return nil, errors.WithStack(err) + return nil, err } messageKey := common.BigToHash(parsedLog.MessageIndex) parsedLogs = append(parsedLogs, parsedLog) @@ -181,7 +180,7 @@ func (b *DelayedBridge) logsToDeliveredMessages(ctx context.Context, logs []type messageData := make(map[common.Hash][]byte) if err := b.fillMessageData(ctx, inboxAddresses, messageIds, messageData, minBlockNum, maxBlockNum); err != nil { - return nil, errors.WithStack(err) + return nil, err } messages := make([]*DelayedInboxMessage, 0, len(logs)) @@ -249,7 +248,7 @@ func (b *DelayedBridge) fillMessageData( } logs, err := b.client.FilterLogs(ctx, query) if err != nil { - return errors.WithStack(err) + return err } for _, ethLog := range logs { msgNum, msg, err := b.parseMessage(ctx, ethLog) @@ -267,20 +266,21 @@ func (b *DelayedBridge) parseMessage(ctx context.Context, ethLog types.Log) (*bi var err error con, err = bridgegen.NewIDelayedMessageProvider(ethLog.Address, b.client) if err != nil { - return nil, nil, errors.WithStack(err) + return nil, nil, err } b.messageProviders[ethLog.Address] = con } - if ethLog.Topics[0] == inboxMessageDeliveredID { + switch { + case ethLog.Topics[0] == inboxMessageDeliveredID: parsedLog, err := con.ParseInboxMessageDelivered(ethLog) if err != nil { - return nil, nil, errors.WithStack(err) + return nil, nil, err } return parsedLog.MessageNum, parsedLog.Data, nil - } else if ethLog.Topics[0] == inboxMessageFromOriginID { + case 
ethLog.Topics[0] == inboxMessageFromOriginID: parsedLog, err := con.ParseInboxMessageDeliveredFromOrigin(ethLog) if err != nil { - return nil, nil, errors.WithStack(err) + return nil, nil, err } data, err := arbutil.GetLogEmitterTxData(ctx, b.client, ethLog) if err != nil { @@ -289,10 +289,10 @@ func (b *DelayedBridge) parseMessage(ctx context.Context, ethLog types.Log) (*bi args := make(map[string]interface{}) err = l2MessageFromOriginCallABI.Inputs.UnpackIntoMap(args, data[4:]) if err != nil { - return nil, nil, errors.WithStack(err) + return nil, nil, err } return parsedLog.MessageNum, args["messageData"].([]byte), nil - } else { + default: return nil, nil, errors.New("unexpected log type") } } diff --git a/arbnode/execution/api.go b/arbnode/execution/api.go index 8f3c37b9b3..5245f74f34 100644 --- a/arbnode/execution/api.go +++ b/arbnode/execution/api.go @@ -6,6 +6,7 @@ package execution import ( "context" "encoding/json" + "errors" "fmt" "math/big" "sync" @@ -20,7 +21,6 @@ import ( "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/retryables" "github.com/offchainlabs/nitro/util/arbmath" - "github.com/pkg/errors" ) type ArbAPI struct { @@ -280,7 +280,7 @@ type ArbTraceForwarderAPI struct { fallbackClientUrl string fallbackClientTimeout time.Duration - initialized int32 + initialized atomic.Bool mutex sync.Mutex fallbackClient types.FallbackClient } @@ -293,12 +293,12 @@ func NewArbTraceForwarderAPI(fallbackClientUrl string, fallbackClientTimeout tim } func (api *ArbTraceForwarderAPI) getFallbackClient() (types.FallbackClient, error) { - if atomic.LoadInt32(&api.initialized) == 1 { + if api.initialized.Load() { return api.fallbackClient, nil } api.mutex.Lock() defer api.mutex.Unlock() - if atomic.LoadInt32(&api.initialized) == 1 { + if api.initialized.Load() { return api.fallbackClient, nil } fallbackClient, err := arbitrum.CreateFallbackClient(api.fallbackClientUrl, api.fallbackClientTimeout) @@ -306,7 +306,7 @@ func (api *ArbTraceForwarderAPI) getFallbackClient() (types.FallbackClient, erro return nil, err } api.fallbackClient = fallbackClient - atomic.StoreInt32(&api.initialized, 1) + api.initialized.Store(true) return api.fallbackClient, nil } diff --git a/arbnode/execution/blockchain.go b/arbnode/execution/blockchain.go index 2ed0221b04..a4de72588a 100644 --- a/arbnode/execution/blockchain.go +++ b/arbnode/execution/blockchain.go @@ -20,6 +20,7 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos" "github.com/offchainlabs/nitro/arbos/arbosState" + "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/gethhook" "github.com/offchainlabs/nitro/statetransfer" ) @@ -82,7 +83,7 @@ func DefaultCacheConfigFor(stack *node.Node, cachingConfig *CachingConfig) *core } } -func WriteOrTestGenblock(chainDb ethdb.Database, initData statetransfer.InitDataReader, chainConfig *params.ChainConfig, serializedChainConfig []byte, accountsPerSync uint) error { +func WriteOrTestGenblock(chainDb ethdb.Database, initData statetransfer.InitDataReader, chainConfig *params.ChainConfig, initMessage *arbostypes.ParsedInitMessage, accountsPerSync uint) error { EmptyHash := common.Hash{} prevHash := EmptyHash prevDifficulty := big.NewInt(0) @@ -103,7 +104,7 @@ func WriteOrTestGenblock(chainDb ethdb.Database, initData statetransfer.InitData } timestamp = prevHeader.Time } - stateRoot, err := arbosState.InitializeArbosInDatabase(chainDb, initData, chainConfig, serializedChainConfig, timestamp, accountsPerSync) + 
stateRoot, err := arbosState.InitializeArbosInDatabase(chainDb, initData, chainConfig, initMessage, timestamp, accountsPerSync) if err != nil { return err } @@ -170,8 +171,8 @@ func GetBlockChain(chainDb ethdb.Database, cacheConfig *core.CacheConfig, chainC return core.NewBlockChain(chainDb, cacheConfig, chainConfig, nil, nil, engine, vmConfig, shouldPreserveFalse, &txLookupLimit) } -func WriteOrTestBlockChain(chainDb ethdb.Database, cacheConfig *core.CacheConfig, initData statetransfer.InitDataReader, chainConfig *params.ChainConfig, serializedChainConfig []byte, txLookupLimit uint64, accountsPerSync uint) (*core.BlockChain, error) { - err := WriteOrTestGenblock(chainDb, initData, chainConfig, serializedChainConfig, accountsPerSync) +func WriteOrTestBlockChain(chainDb ethdb.Database, cacheConfig *core.CacheConfig, initData statetransfer.InitDataReader, chainConfig *params.ChainConfig, initMessage *arbostypes.ParsedInitMessage, txLookupLimit uint64, accountsPerSync uint) (*core.BlockChain, error) { + err := WriteOrTestGenblock(chainDb, initData, chainConfig, initMessage, accountsPerSync) if err != nil { return nil, err } diff --git a/arbnode/execution/executionengine.go b/arbnode/execution/executionengine.go index 2dc625c1fc..88b42cbe4c 100644 --- a/arbnode/execution/executionengine.go +++ b/arbnode/execution/executionengine.go @@ -3,6 +3,7 @@ package execution import ( "context" "encoding/binary" + "errors" "fmt" "sync" "testing" @@ -21,7 +22,6 @@ import ( "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/util/sharedmetrics" "github.com/offchainlabs/nitro/util/stopwaiter" - "github.com/pkg/errors" ) type TransactionStreamerInterface interface { @@ -136,7 +136,7 @@ func (s *ExecutionEngine) getCurrentHeader() (*types.Header, error) { if currentBlock == nil { return nil, errors.New("failed to get current block") } - return currentBlock.Header(), nil + return currentBlock, nil } func (s *ExecutionEngine) HeadMessageNumber() (arbutil.MessageIndex, error) { @@ -443,9 +443,14 @@ func (s *ExecutionEngine) MessageCountToBlockNumber(messageNum arbutil.MessageIn // must hold createBlockMutex func (s *ExecutionEngine) createBlockFromNextMessage(msg *arbostypes.MessageWithMetadata) (*types.Block, *state.StateDB, types.Receipts, error) { - currentBlock := s.bc.CurrentBlock() + currentHeader := s.bc.CurrentBlock() + if currentHeader == nil { + return nil, nil, nil, errors.New("failed to get current block header") + } + + currentBlock := s.bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()) if currentBlock == nil { - return nil, nil, nil, errors.New("failed to get current block") + return nil, nil, nil, errors.New("can't find block for current header") } err := s.bc.RecoverState(currentBlock) @@ -453,8 +458,6 @@ func (s *ExecutionEngine) createBlockFromNextMessage(msg *arbostypes.MessageWith return nil, nil, nil, fmt.Errorf("failed to recover block %v state: %w", currentBlock.Number(), err) } - currentHeader := currentBlock.Header() - statedb, err := s.bc.StateAt(currentHeader.Root) if err != nil { return nil, nil, nil, err diff --git a/arbnode/execution/forwarder.go b/arbnode/execution/forwarder.go index de49b8763b..5d6938be22 100644 --- a/arbnode/execution/forwarder.go +++ b/arbnode/execution/forwarder.go @@ -5,6 +5,8 @@ package execution import ( "context" + "errors" + "fmt" "net" "net/http" "sync" @@ -13,7 +15,6 @@ import ( "github.com/offchainlabs/nitro/util/redisutil" "github.com/offchainlabs/nitro/util/stopwaiter" - "github.com/pkg/errors" flag 
"github.com/spf13/pflag" "github.com/ethereum/go-ethereum/arbitrum" @@ -78,7 +79,7 @@ func AddOptionsForForwarderConfigImpl(prefix string, defaultConfig *ForwarderCon } type TxForwarder struct { - enabled int32 + enabled atomic.Bool target string timeout time.Duration transport *http.Transport @@ -129,7 +130,7 @@ func (f *TxForwarder) ctxWithTimeout(inctx context.Context) (context.Context, co } func (f *TxForwarder) PublishTransaction(inctx context.Context, tx *types.Transaction, options *arbitrum_types.ConditionalOptions) error { - if atomic.LoadInt32(&f.enabled) == 0 { + if !f.enabled.Load() { return ErrNoSequencer } ctx, cancelFunc := f.ctxWithTimeout(inctx) @@ -144,7 +145,7 @@ const cacheUpstreamHealth = 2 * time.Second const maxHealthTimeout = 10 * time.Second func (f *TxForwarder) CheckHealth(inctx context.Context) error { - if atomic.LoadInt32(&f.enabled) == 0 { + if !f.enabled.Load() { return ErrNoSequencer } f.healthMutex.Lock() @@ -166,7 +167,7 @@ func (f *TxForwarder) Initialize(inctx context.Context) error { if f.target == "" { f.rpcClient = nil f.ethClient = nil - f.enabled = 0 + f.enabled.Store(false) return nil } ctx, cancelFunc := f.ctxWithTimeout(inctx) @@ -177,13 +178,13 @@ func (f *TxForwarder) Initialize(inctx context.Context) error { } f.rpcClient = rpcClient f.ethClient = ethclient.NewClient(rpcClient) - f.enabled = 1 + f.enabled.Store(true) return nil } // Disable is not thread-safe vs. Initialize func (f *TxForwarder) Disable() { - atomic.StoreInt32(&f.enabled, 0) + f.enabled.Store(false) } func (f *TxForwarder) Start(ctx context.Context) error { @@ -268,7 +269,7 @@ func (f *RedisTxForwarder) Initialize(ctx context.Context) error { var err error f.redisCoordinator, err = redisutil.NewRedisCoordinator(f.config.RedisUrl) if err != nil { - return errors.Wrap(err, "unable to create redis coordinator") + return fmt.Errorf("unable to create redis coordinator: %w", err) } f.update(ctx) return nil @@ -369,7 +370,7 @@ func (f *RedisTxForwarder) Start(ctx context.Context) error { return err } if err := f.CallIterativelySafe(f.update); err != nil { - return errors.Wrap(err, "failed to start forwarder update thread") + return fmt.Errorf("failed to start forwarder update thread: %w", err) } return nil } diff --git a/arbnode/execution/sequencer.go b/arbnode/execution/sequencer.go index ce8308bfe0..6266f4197f 100644 --- a/arbnode/execution/sequencer.go +++ b/arbnode/execution/sequencer.go @@ -5,6 +5,7 @@ package execution import ( "context" + "errors" "fmt" "math" "math/big" @@ -14,6 +15,7 @@ import ( "sync/atomic" "time" + "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/containers" "github.com/offchainlabs/nitro/util/headerreader" @@ -34,7 +36,6 @@ import ( "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/l1pricing" "github.com/offchainlabs/nitro/util/stopwaiter" - "github.com/pkg/errors" ) var ( @@ -184,9 +185,8 @@ func (c *nonceCache) matches(header *types.Header) bool { // The header is updated as the block is built, // so instead of checking its hash, we do a pointer comparison. 
return c.dirty == header - } else { - return c.block == header.ParentHash } + return c.block == header.ParentHash } func (c *nonceCache) Reset(block common.Hash) { @@ -633,7 +633,7 @@ func (s *Sequencer) expireNonceFailures() *time.Timer { // There's no guarantee that returned tx nonces will be correct func (s *Sequencer) precheckNonces(queueItems []txQueueItem) []txQueueItem { bc := s.execEngine.bc - latestHeader := bc.CurrentBlock().Header() + latestHeader := bc.CurrentBlock() latestState, err := bc.StateAt(latestHeader.Root) if err != nil { log.Error("failed to get current state to pre-check nonces", "err", err) @@ -927,12 +927,14 @@ func (s *Sequencer) createBlock(ctx context.Context) (returnValue bool) { return madeBlock } -func (s *Sequencer) updateLatestL1Block(header *types.Header) { +func (s *Sequencer) updateLatestParentChainBlock(header *types.Header) { s.L1BlockAndTimeMutex.Lock() defer s.L1BlockAndTimeMutex.Unlock() - if s.l1BlockNumber < header.Number.Uint64() { - s.l1BlockNumber = header.Number.Uint64() + + l1BlockNumber := arbutil.ParentHeaderToL1BlockNumber(header) + if header.Time > s.l1Timestamp || (header.Time == s.l1Timestamp && l1BlockNumber > s.l1BlockNumber) { s.l1Timestamp = header.Time + s.l1BlockNumber = l1BlockNumber } } @@ -945,7 +947,7 @@ func (s *Sequencer) Initialize(ctx context.Context) error { if err != nil { return err } - s.updateLatestL1Block(header) + s.updateLatestParentChainBlock(header) return nil } @@ -967,7 +969,7 @@ func (s *Sequencer) Start(ctxIn context.Context) error { if !ok { return } - s.updateLatestL1Block(header) + s.updateLatestParentChainBlock(header) case <-ctx.Done(): return } @@ -982,10 +984,9 @@ func (s *Sequencer) Start(ctxIn context.Context) error { if madeBlock { // Note: this may return a negative duration, but timers are fine with that (they treat negative durations as 0). return time.Until(nextBlock) - } else { - // If we didn't make a block, try again immediately. - return 0 } + // If we didn't make a block, try again immediately. 
+ return 0 }) return nil diff --git a/arbnode/execution/tx_pre_checker.go b/arbnode/execution/tx_pre_checker.go index d118a9273b..01cef6d7a4 100644 --- a/arbnode/execution/tx_pre_checker.go +++ b/arbnode/execution/tx_pre_checker.go @@ -18,7 +18,6 @@ import ( "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/l1pricing" "github.com/offchainlabs/nitro/util/arbmath" - "github.com/pkg/errors" flag "github.com/spf13/pflag" ) @@ -79,35 +78,34 @@ type NonceError struct { func (e NonceError) Error() string { if e.txNonce < e.stateNonce { return fmt.Sprintf("%v: address %v, tx: %d state: %d", core.ErrNonceTooLow, e.sender, e.txNonce, e.stateNonce) - } else if e.txNonce > e.stateNonce { + } + if e.txNonce > e.stateNonce { return fmt.Sprintf("%v: address %v, tx: %d state: %d", core.ErrNonceTooHigh, e.sender, e.txNonce, e.stateNonce) - } else { - // This should be unreachable - return fmt.Sprintf("invalid nonce error for address %v nonce %v", e.sender, e.txNonce) } + // This should be unreachable + return fmt.Sprintf("invalid nonce error for address %v nonce %v", e.sender, e.txNonce) } func (e NonceError) Unwrap() error { if e.txNonce < e.stateNonce { return core.ErrNonceTooLow - } else if e.txNonce > e.stateNonce { + } + if e.txNonce > e.stateNonce { return core.ErrNonceTooHigh - } else { - // This should be unreachable - return nil } + // This should be unreachable + return nil } func MakeNonceError(sender common.Address, txNonce uint64, stateNonce uint64) error { - if txNonce != stateNonce { - return NonceError{ - sender: sender, - txNonce: txNonce, - stateNonce: stateNonce, - } - } else { + if txNonce == stateNonce { return nil } + return NonceError{ + sender: sender, + txNonce: txNonce, + stateNonce: stateNonce, + } } func PreCheckTx(bc *core.BlockChain, chainConfig *params.ChainConfig, header *types.Header, statedb *state.StateDB, arbos *arbosState.ArbosState, tx *types.Transaction, options *arbitrum_types.ConditionalOptions, config *TxPreCheckerConfig) error { @@ -175,7 +173,7 @@ func PreCheckTx(bc *core.BlockChain, chainConfig *params.ChainConfig, header *ty if oldHeader != header { secondOldStatedb, err := bc.StateAt(oldHeader.Root) if err != nil { - return errors.Wrap(err, "failed to get old state") + return fmt.Errorf("failed to get old state: %w", err) } oldExtraInfo := types.DeserializeHeaderExtraInformation(oldHeader) if err := options.Check(oldExtraInfo.L1BlockNumber, oldHeader.Time, secondOldStatedb); err != nil { @@ -199,7 +197,7 @@ func PreCheckTx(bc *core.BlockChain, chainConfig *params.ChainConfig, header *ty func (c *TxPreChecker) PublishTransaction(ctx context.Context, tx *types.Transaction, options *arbitrum_types.ConditionalOptions) error { block := c.bc.CurrentBlock() - statedb, err := c.bc.StateAt(block.Root()) + statedb, err := c.bc.StateAt(block.Root) if err != nil { return err } @@ -207,7 +205,7 @@ func (c *TxPreChecker) PublishTransaction(ctx context.Context, tx *types.Transac if err != nil { return err } - err = PreCheckTx(c.bc, c.bc.Config(), block.Header(), statedb, arbos, tx, options, c.config()) + err = PreCheckTx(c.bc, c.bc.Config(), block, statedb, arbos, tx, options, c.config()) if err != nil { return err } diff --git a/arbnode/inbox_reader.go b/arbnode/inbox_reader.go index 65e54b2946..c83e8e4af4 100644 --- a/arbnode/inbox_reader.go +++ b/arbnode/inbox_reader.go @@ -140,17 +140,17 @@ func (r *InboxReader) Start(ctxIn context.Context) error { if err != nil { return err } - initChainId, initChainConfig, _, err := 
message.ParseInitMessage() + initMessage, err := message.ParseInitMessage() if err != nil { return err } chainConfig := r.tracker.txStreamer.chainConfig configChainId := chainConfig.ChainID - if initChainId.Cmp(configChainId) != 0 { - return fmt.Errorf("expected L2 chain ID %v but read L2 chain ID %v from init message in L1 inbox", configChainId, initChainId) + if initMessage.ChainId.Cmp(configChainId) != 0 { + return fmt.Errorf("expected L2 chain ID %v but read L2 chain ID %v from init message in L1 inbox", configChainId, initMessage.ChainId) } - if initChainConfig != nil { - if err := initChainConfig.CheckCompatible(chainConfig, chainConfig.ArbitrumChainParams.GenesisBlockNum, 0); err != nil { + if initMessage.ChainConfig != nil { + if err := initMessage.ChainConfig.CheckCompatible(chainConfig, chainConfig.ArbitrumChainParams.GenesisBlockNum, 0); err != nil { return fmt.Errorf("incompatible chain config read from init message in L1 inbox: %w", err) } } @@ -398,9 +398,8 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { idx := int(batchNum - sequencerBatches[0].SequenceNumber) if idx < len(sequencerBatches) { return sequencerBatches[idx].Serialize(ctx, r.l1Reader.Client()) - } else { - log.Warn("missing mentioned batch in L1 message lookup", "batch", batchNum) } + log.Warn("missing mentioned batch in L1 message lookup", "batch", batchNum) } return r.GetSequencerMessageBytes(ctx, batchNum) }) diff --git a/arbnode/inbox_test.go b/arbnode/inbox_test.go index 0cede3bcdd..e68cee49ff 100644 --- a/arbnode/inbox_test.go +++ b/arbnode/inbox_test.go @@ -6,7 +6,6 @@ package arbnode import ( "context" "encoding/binary" - "encoding/json" "math/big" "math/rand" "testing" @@ -18,13 +17,14 @@ import ( "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/statetransfer" - nitroutil "github.com/offchainlabs/nitro/util" + "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/testhelpers" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos" @@ -32,10 +32,6 @@ import ( func NewTransactionStreamerForTest(t *testing.T, ownerAddress common.Address) (*execution.ExecutionEngine, *TransactionStreamer, ethdb.Database, *core.BlockChain) { chainConfig := params.ArbitrumDevTestChainConfig() - serializedChainConfig, err := json.Marshal(chainConfig) - if err != nil { - Fail(t, err) - } initData := statetransfer.ArbosInitializationInfo{ Accounts: []statetransfer.AccountInitializationInfo{ @@ -50,7 +46,7 @@ func NewTransactionStreamerForTest(t *testing.T, ownerAddress common.Address) (* arbDb := rawdb.NewMemoryDatabase() initReader := statetransfer.NewMemoryInitDataReader(&initData) - bc, err := execution.WriteOrTestBlockChain(chainDb, nil, initReader, chainConfig, serializedChainConfig, ConfigDefaultL2Test().TxLookupLimit, 0) + bc, err := execution.WriteOrTestBlockChain(chainDb, nil, initReader, chainConfig, arbostypes.TestInitMessage, ConfigDefaultL2Test().TxLookupLimit, 0) if err != nil { Fail(t, err) @@ -101,7 +97,7 @@ func TestTransactionStreamer(t *testing.T) { var blockStates []blockTestState blockStates = append(blockStates, blockTestState{ balances: map[common.Address]*big.Int{ - ownerAddress: new(big.Int).Mul(maxExpectedGasCost, 
big.NewInt(int64(nitroutil.NormalizeL2GasForL1GasInitial(1_000_000, params.GWei)))), + ownerAddress: new(big.Int).SetUint64(params.Ether), }, accounts: []common.Address{ownerAddress}, numMessages: 1, @@ -169,6 +165,7 @@ func TestTransactionStreamer(t *testing.T) { Require(t, inbox.AddMessages(state.numMessages, false, messages)) state.numMessages += arbutil.MessageIndex(len(messages)) + prevBlockNumber := state.blockNumber state.blockNumber += uint64(len(messages)) for i := 0; ; i++ { blockNumber := bc.CurrentHeader().Number.Uint64() @@ -181,6 +178,23 @@ func TestTransactionStreamer(t *testing.T) { } time.Sleep(10 * time.Millisecond) } + for blockNum := prevBlockNumber + 1; blockNum <= state.blockNumber; blockNum++ { + block := bc.GetBlockByNumber(blockNum) + txs := block.Transactions() + receipts := bc.GetReceiptsByHash(block.Hash()) + if len(txs) != len(receipts) { + Fail(t, "got", len(txs), "transactions but", len(receipts), "receipts in block", blockNum) + } + for i, receipt := range receipts { + sender, err := types.Sender(types.LatestSigner(bc.Config()), txs[i]) + Require(t, err) + balance, ok := state.balances[sender] + if !ok { + continue + } + balance.Sub(balance, arbmath.BigMulByUint(block.BaseFee(), receipt.GasUsed)) + } + } blockStates = append(blockStates, state) } @@ -202,7 +216,7 @@ func TestTransactionStreamer(t *testing.T) { Fail(t, "error getting block state", err) } haveBalance := state.GetBalance(acct) - if balance.Cmp(haveBalance) < 0 { + if balance.Cmp(haveBalance) != 0 { t.Error("unexpected balance for account", acct, "; expected", balance, "got", haveBalance) } } diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index 3845850620..3f8ba0d346 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -7,6 +7,7 @@ import ( "bytes" "context" "encoding/binary" + "errors" "fmt" "sync" "time" @@ -21,7 +22,6 @@ import ( "github.com/offchainlabs/nitro/broadcaster" "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/util/containers" - "github.com/pkg/errors" ) type InboxTracker struct { @@ -97,6 +97,23 @@ func (t *InboxTracker) Initialize() error { var AccumulatorNotFoundErr = errors.New("accumulator not found") +func (t *InboxTracker) deleteBatchMetadataStartingAt(dbBatch ethdb.Batch, startIndex uint64) error { + t.batchMetaMutex.Lock() + defer t.batchMetaMutex.Unlock() + iter := t.db.NewIterator(sequencerBatchMetaPrefix, uint64ToKey(startIndex)) + defer iter.Release() + for iter.Next() { + curKey := iter.Key() + err := dbBatch.Delete(curKey) + if err != nil { + return err + } + curIndex := binary.BigEndian.Uint64(bytes.TrimPrefix(curKey, sequencerBatchMetaPrefix)) + t.batchMeta.Remove(curIndex) + } + return iter.Error() +} + func (t *InboxTracker) GetDelayedAcc(seqNum uint64) (common.Hash, error) { key := dbKey(rlpDelayedMessagePrefix, seqNum) hasKey, err := t.db.Has(key) @@ -334,9 +351,8 @@ func (t *InboxTracker) AddDelayedMessages(messages []*DelayedInboxMessage, hardR if err != nil { if errors.Is(err, AccumulatorNotFoundErr) { return errors.New("missing previous delayed message") - } else { - return err } + return err } } @@ -385,12 +401,6 @@ func (t *InboxTracker) AddDelayedMessages(messages []*DelayedInboxMessage, hardR return t.setDelayedCountReorgAndWriteBatch(batch, pos, true) } -func (t *InboxTracker) clearBatchMetaCache() { - t.batchMetaMutex.Lock() - defer t.batchMetaMutex.Unlock() - t.batchMeta.Clear() -} - // All-in-one delayed message count adjuster. Can go forwards or backwards. // Requires the mutex is held. 
Sets the delayed count and performs any sequencer batch reorg necessary. // Also deletes any future delayed messages. @@ -422,15 +432,13 @@ func (t *InboxTracker) setDelayedCountReorgAndWriteBatch(batch ethdb.Batch, newD var reorgSeqBatchesToCount *uint64 for seqBatchIter.Next() { var batchSeqNum uint64 - err := rlp.DecodeBytes(seqBatchIter.Value(), &batchSeqNum) - if err != nil { + if err := rlp.DecodeBytes(seqBatchIter.Value(), &batchSeqNum); err != nil { return err } if !canReorgBatches { return fmt.Errorf("reorging of sequencer batch number %v via delayed messages reorg to count %v disabled in this instance", batchSeqNum, newDelayedCount) } - err = batch.Delete(seqBatchIter.Key()) - if err != nil { + if err := batch.Delete(seqBatchIter.Key()); err != nil { return err } if reorgSeqBatchesToCount == nil { @@ -440,47 +448,42 @@ func (t *InboxTracker) setDelayedCountReorgAndWriteBatch(batch ethdb.Batch, newD reorgSeqBatchesToCount = &batchSeqNum } } - err = seqBatchIter.Error() - if err != nil { + if err := seqBatchIter.Error(); err != nil { return err } // Release the iterator early. // It's fine to call Release multiple times, // which we'll do because of the defer. seqBatchIter.Release() - if reorgSeqBatchesToCount != nil { - // Clear the batchMeta cache after writing the reorg to disk - defer t.clearBatchMetaCache() + if reorgSeqBatchesToCount == nil { + return batch.Write() + } - count := *reorgSeqBatchesToCount - if t.validator != nil { - t.validator.ReorgToBatchCount(count) - } - countData, err := rlp.EncodeToBytes(count) - if err != nil { - return err - } - err = batch.Put(sequencerBatchCountKey, countData) - if err != nil { - return err - } - log.Warn("InboxTracker delayed message reorg is causing a sequencer batch reorg", "sequencerBatchCount", count, "delayedCount", newDelayedCount) - err = deleteStartingAt(t.db, batch, sequencerBatchMetaPrefix, uint64ToKey(count)) + count := *reorgSeqBatchesToCount + if t.validator != nil { + t.validator.ReorgToBatchCount(count) + } + countData, err = rlp.EncodeToBytes(count) + if err != nil { + return err + } + if err := batch.Put(sequencerBatchCountKey, countData); err != nil { + return err + } + log.Warn("InboxTracker delayed message reorg is causing a sequencer batch reorg", "sequencerBatchCount", count, "delayedCount", newDelayedCount) + + if err := t.deleteBatchMetadataStartingAt(batch, count); err != nil { + return err + } + var prevMesssageCount arbutil.MessageIndex + if count > 0 { + prevMesssageCount, err = t.GetBatchMessageCount(count - 1) if err != nil { return err } - var prevMesssageCount arbutil.MessageIndex - if count > 0 { - prevMesssageCount, err = t.GetBatchMessageCount(count - 1) - if err != nil { - return err - } - } - // Writes batch - return t.txStreamer.ReorgToAndEndBatch(batch, prevMesssageCount) - } else { - return batch.Write() } + // Writes batch + return t.txStreamer.ReorgToAndEndBatch(batch, prevMesssageCount) } type multiplexerBackend struct { @@ -569,9 +572,11 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L if errors.Is(err, AccumulatorNotFoundErr) { // We somehow missed a referenced delayed message; go back and look for it return delayedMessagesMismatch - } else if err != nil { + } + if err != nil { return err - } else if haveDelayedAcc != batch.AfterDelayedAcc { + } + if haveDelayedAcc != batch.AfterDelayedAcc { // We somehow missed a delayed message reorg; go back and look for it return delayedMessagesMismatch } @@ -642,7 +647,7 @@ func (t *InboxTracker) 
AddSequencerBatches(ctx context.Context, client arbutil.L lastBatchMeta = meta } - err = deleteStartingAt(t.db, dbBatch, sequencerBatchMetaPrefix, uint64ToKey(pos)) + err = t.deleteBatchMetadataStartingAt(dbBatch, pos) if err != nil { return err } @@ -705,7 +710,8 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L prevprevbatchmeta, err := t.GetBatchMetadata(pos - 2) if errors.Is(err, AccumulatorNotFoundErr) { return errors.New("missing previous previous sequencer batch") - } else if err != nil { + } + if err != nil { return err } if prevprevbatchmeta.MessageCount > 0 { @@ -727,7 +733,8 @@ func (t *InboxTracker) ReorgDelayedTo(count uint64, canReorgBatches bool) error } if currentCount == count { return nil - } else if currentCount < count { + } + if currentCount < count { return errors.New("attempted to reorg to future delayed count") } @@ -744,7 +751,8 @@ func (t *InboxTracker) ReorgBatchesTo(count uint64) error { prevBatchMeta, err = t.GetBatchMetadata(count - 1) if errors.Is(err, AccumulatorNotFoundErr) { return errors.New("attempted to reorg to future batch count") - } else if err != nil { + } + if err != nil { return err } } @@ -753,16 +761,13 @@ func (t *InboxTracker) ReorgBatchesTo(count uint64) error { t.validator.ReorgToBatchCount(count) } - // Clear the batchMeta cache after writing the reorg to disk - defer t.clearBatchMetaCache() - dbBatch := t.db.NewBatch() err := deleteStartingAt(t.db, dbBatch, delayedSequencedPrefix, uint64ToKey(prevBatchMeta.DelayedMessageCount+1)) if err != nil { return err } - err = deleteStartingAt(t.db, dbBatch, sequencerBatchMetaPrefix, uint64ToKey(count)) + err = t.deleteBatchMetadataStartingAt(dbBatch, count) if err != nil { return err } diff --git a/arbnode/inbox_tracker_test.go b/arbnode/inbox_tracker_test.go new file mode 100644 index 0000000000..582b334aee --- /dev/null +++ b/arbnode/inbox_tracker_test.go @@ -0,0 +1,58 @@ +package arbnode + +import ( + "testing" + + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/offchainlabs/nitro/util/containers" +) + +func TestDeleteBatchMetadata(t *testing.T) { + testBytes := []byte("bloop") + + tracker := &InboxTracker{ + db: rawdb.NewMemoryDatabase(), + batchMeta: containers.NewLruCache[uint64, BatchMetadata](100), + } + + for i := uint64(0); i < 30; i += 1 { + err := tracker.db.Put(dbKey(sequencerBatchMetaPrefix, i), testBytes) + Require(t, err) + if i%5 != 0 { + tracker.batchMeta.Add(i, BatchMetadata{}) + } + } + + batch := tracker.db.NewBatch() + err := tracker.deleteBatchMetadataStartingAt(batch, 15) + if err != nil { + Fail(t, "deleteBatchMetadataStartingAt returned error: ", err) + } + err = batch.Write() + Require(t, err) + + for i := uint64(0); i < 15; i += 1 { + has, err := tracker.db.Has(dbKey(sequencerBatchMetaPrefix, i)) + Require(t, err) + if !has { + Fail(t, "value removed from db: ", i) + } + if i%5 != 0 { + if !tracker.batchMeta.Contains(i) { + Fail(t, "value removed from cache: ", i) + } + } + } + + for i := uint64(15); i < 30; i += 1 { + has, err := tracker.db.Has(dbKey(sequencerBatchMetaPrefix, i)) + Require(t, err) + if has { + Fail(t, "value not removed from db: ", i) + } + if tracker.batchMeta.Contains(i) { + Fail(t, "value removed from cache: ", i) + } + } + +} diff --git a/arbnode/message_pruner.go b/arbnode/message_pruner.go new file mode 100644 index 0000000000..1ba3886d8d --- /dev/null +++ b/arbnode/message_pruner.go @@ -0,0 +1,131 @@ +// Copyright 2021-2022, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE + +package arbnode + +import ( + "bytes" + "context" + "encoding/binary" + "math/big" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rpc" + + "github.com/offchainlabs/nitro/staker" + "github.com/offchainlabs/nitro/util/stopwaiter" + + flag "github.com/spf13/pflag" +) + +type MessagePruner struct { + stopwaiter.StopWaiter + transactionStreamer *TransactionStreamer + inboxTracker *InboxTracker + staker *staker.Staker + config MessagePrunerConfigFetcher +} + +type MessagePrunerConfig struct { + Enable bool `koanf:"enable"` + MessagePruneInterval time.Duration `koanf:"prune-interval" reload:"hot"` +} + +type MessagePrunerConfigFetcher func() *MessagePrunerConfig + +var DefaultMessagePrunerConfig = MessagePrunerConfig{ + Enable: true, + MessagePruneInterval: time.Minute, +} + +func MessagePrunerConfigAddOptions(prefix string, f *flag.FlagSet) { + f.Bool(prefix+".enable", DefaultMessagePrunerConfig.Enable, "enable message pruning") + f.Duration(prefix+".prune-interval", DefaultMessagePrunerConfig.MessagePruneInterval, "interval for running message pruner") +} + +func NewMessagePruner(transactionStreamer *TransactionStreamer, inboxTracker *InboxTracker, staker *staker.Staker, config MessagePrunerConfigFetcher) *MessagePruner { + return &MessagePruner{ + transactionStreamer: transactionStreamer, + inboxTracker: inboxTracker, + staker: staker, + config: config, + } +} + +func (m *MessagePruner) Start(ctxIn context.Context) { + m.StopWaiter.Start(ctxIn, m) + m.CallIteratively(m.prune) +} + +func (m *MessagePruner) prune(ctx context.Context) time.Duration { + latestConfirmedNode, err := m.staker.Rollup().LatestConfirmed( + &bind.CallOpts{ + Context: ctx, + BlockNumber: big.NewInt(int64(rpc.FinalizedBlockNumber)), + }) + if err != nil { + log.Error("error getting latest confirmed node", "err", err) + return m.config().MessagePruneInterval + } + nodeInfo, err := m.staker.Rollup().LookupNode(ctx, latestConfirmedNode) + if err != nil { + log.Error("error getting latest confirmed node info", "node", latestConfirmedNode, "err", err) + return m.config().MessagePruneInterval + } + endBatchCount := nodeInfo.Assertion.AfterState.GlobalState.Batch + if endBatchCount == 0 { + return m.config().MessagePruneInterval + } + endBatchMetadata, err := m.inboxTracker.GetBatchMetadata(endBatchCount - 1) + if err != nil { + log.Error("error getting last batch metadata", "batch", endBatchCount-1, "err", err) + return m.config().MessagePruneInterval + } + deleteOldMessageFromDB(endBatchCount, endBatchMetadata, m.inboxTracker.db, m.transactionStreamer.db) + return m.config().MessagePruneInterval +} + +func deleteOldMessageFromDB(endBatchCount uint64, endBatchMetadata BatchMetadata, inboxTrackerDb ethdb.Database, transactionStreamerDb ethdb.Database) { + prunedKeysRange, err := deleteFromLastPrunedUptoEndKey(inboxTrackerDb, sequencerBatchMetaPrefix, endBatchCount) + if err != nil { + log.Error("error deleting batch metadata", "err", err) + return + } + if len(prunedKeysRange) > 0 { + log.Info("Pruned batches:", "first pruned key", prunedKeysRange[0], "last pruned key", prunedKeysRange[len(prunedKeysRange)-1]) + } + + prunedKeysRange, err = deleteFromLastPrunedUptoEndKey(transactionStreamerDb, messagePrefix, uint64(endBatchMetadata.MessageCount)) + if err != nil { + log.Error("error deleting last batch messages", 
"err", err) + return + } + if len(prunedKeysRange) > 0 { + log.Info("Pruned last batch messages:", "first pruned key", prunedKeysRange[0], "last pruned key", prunedKeysRange[len(prunedKeysRange)-1]) + } + + prunedKeysRange, err = deleteFromLastPrunedUptoEndKey(inboxTrackerDb, rlpDelayedMessagePrefix, endBatchMetadata.DelayedMessageCount) + if err != nil { + log.Error("error deleting last batch delayed messages", "err", err) + return + } + if len(prunedKeysRange) > 0 { + log.Info("Pruned last batch delayed messages:", "first pruned key", prunedKeysRange[0], "last pruned key", prunedKeysRange[len(prunedKeysRange)-1]) + } +} + +func deleteFromLastPrunedUptoEndKey(db ethdb.Database, prefix []byte, endMinKey uint64) ([][]byte, error) { + startIter := db.NewIterator(prefix, uint64ToKey(1)) + if !startIter.Next() { + return nil, nil + } + startMinKey := binary.BigEndian.Uint64(bytes.TrimPrefix(startIter.Key(), prefix)) + startIter.Release() + if endMinKey > startMinKey { + return deleteFromRange(db, prefix, startMinKey, endMinKey-1) + } + return nil, nil +} diff --git a/arbnode/message_pruner_test.go b/arbnode/message_pruner_test.go new file mode 100644 index 0000000000..16c1d6b71c --- /dev/null +++ b/arbnode/message_pruner_test.go @@ -0,0 +1,109 @@ +// Copyright 2021-2022, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package arbnode + +import ( + "testing" + + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/ethdb" +) + +func TestMessagePrunerWithPruningEligibleMessagePresent(t *testing.T) { + endBatchCount := uint64(2 * 100 * 1024) + endBatchMetadata := BatchMetadata{ + MessageCount: 2 * 100 * 1024, + DelayedMessageCount: 2 * 100 * 1024, + } + inboxTrackerDb, transactionStreamerDb := setupDatabase(t, endBatchCount, endBatchMetadata) + deleteOldMessageFromDB(endBatchCount, endBatchMetadata, inboxTrackerDb, transactionStreamerDb) + + checkDbKeys(t, endBatchCount, inboxTrackerDb, sequencerBatchMetaPrefix) + checkDbKeys(t, uint64(endBatchMetadata.MessageCount), transactionStreamerDb, messagePrefix) + checkDbKeys(t, endBatchMetadata.DelayedMessageCount, inboxTrackerDb, rlpDelayedMessagePrefix) + +} + +func TestMessagePrunerTraverseEachMessageOnlyOnce(t *testing.T) { + endBatchCount := uint64(10) + endBatchMetadata := BatchMetadata{} + inboxTrackerDb, transactionStreamerDb := setupDatabase(t, endBatchCount, endBatchMetadata) + // In first iteration message till endBatchCount are tried to be deleted. + deleteOldMessageFromDB(endBatchCount, endBatchMetadata, inboxTrackerDb, transactionStreamerDb) + // In first iteration all the message till endBatchCount are deleted. + checkDbKeys(t, endBatchCount, inboxTrackerDb, sequencerBatchMetaPrefix) + // After first iteration endBatchCount/2 is reinserted in inbox db + err := inboxTrackerDb.Put(dbKey(sequencerBatchMetaPrefix, endBatchCount/2), []byte{}) + Require(t, err) + // In second iteration message till endBatchCount are again tried to be deleted. + deleteOldMessageFromDB(endBatchCount, endBatchMetadata, inboxTrackerDb, transactionStreamerDb) + // In second iteration all the message till endBatchCount are deleted again. 
+ checkDbKeys(t, endBatchCount, inboxTrackerDb, sequencerBatchMetaPrefix) +} + +func TestMessagePrunerPruneTillLessThanEqualTo(t *testing.T) { + endBatchCount := uint64(10) + endBatchMetadata := BatchMetadata{} + inboxTrackerDb, transactionStreamerDb := setupDatabase(t, 2*endBatchCount, endBatchMetadata) + err := inboxTrackerDb.Delete(dbKey(sequencerBatchMetaPrefix, 9)) + Require(t, err) + deleteOldMessageFromDB(endBatchCount, endBatchMetadata, inboxTrackerDb, transactionStreamerDb) + hasKey, err := inboxTrackerDb.Has(dbKey(sequencerBatchMetaPrefix, 10)) + Require(t, err) + if !hasKey { + Fail(t, "Key", 10, "with prefix", string(sequencerBatchMetaPrefix), "should be present after pruning") + } +} + +func TestMessagePrunerWithNoPruningEligibleMessagePresent(t *testing.T) { + endBatchCount := uint64(2) + endBatchMetadata := BatchMetadata{ + MessageCount: 2, + DelayedMessageCount: 2, + } + inboxTrackerDb, transactionStreamerDb := setupDatabase(t, endBatchCount, endBatchMetadata) + deleteOldMessageFromDB(endBatchCount, endBatchMetadata, inboxTrackerDb, transactionStreamerDb) + + checkDbKeys(t, endBatchCount, inboxTrackerDb, sequencerBatchMetaPrefix) + checkDbKeys(t, uint64(endBatchMetadata.MessageCount), transactionStreamerDb, messagePrefix) + checkDbKeys(t, endBatchMetadata.DelayedMessageCount, inboxTrackerDb, rlpDelayedMessagePrefix) + +} + +func setupDatabase(t *testing.T, endBatchCount uint64, endBatchMetadata BatchMetadata) (ethdb.Database, ethdb.Database) { + inboxTrackerDb := rawdb.NewMemoryDatabase() + for i := uint64(0); i < endBatchCount; i++ { + err := inboxTrackerDb.Put(dbKey(sequencerBatchMetaPrefix, i), []byte{}) + Require(t, err) + } + + transactionStreamerDb := rawdb.NewMemoryDatabase() + for i := uint64(0); i < uint64(endBatchMetadata.MessageCount); i++ { + err := transactionStreamerDb.Put(dbKey(messagePrefix, i), []byte{}) + Require(t, err) + } + + for i := uint64(0); i < endBatchMetadata.DelayedMessageCount; i++ { + err := inboxTrackerDb.Put(dbKey(rlpDelayedMessagePrefix, i), []byte{}) + Require(t, err) + } + + return inboxTrackerDb, transactionStreamerDb +} + +func checkDbKeys(t *testing.T, endCount uint64, db ethdb.Database, prefix []byte) { + for i := uint64(0); i < endCount; i++ { + hasKey, err := db.Has(dbKey(prefix, i)) + Require(t, err) + if i == 0 || i == endCount-1 { + if !hasKey { + Fail(t, "Key", i, "with prefix", string(prefix), "should be present after pruning") + } + } else { + if hasKey { + Fail(t, "Key", i, "with prefix", string(prefix), "should not be present after pruning") + } + } + } +} diff --git a/arbnode/node.go b/arbnode/node.go index b56c898193..ebf1f07a31 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -6,11 +6,11 @@ package arbnode import ( "context" "encoding/binary" + "errors" "fmt" "math/big" "time" - "github.com/pkg/errors" flag "github.com/spf13/pflag" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -409,6 +409,7 @@ type Config struct { InboxReader InboxReaderConfig `koanf:"inbox-reader" reload:"hot"` DelayedSequencer DelayedSequencerConfig `koanf:"delayed-sequencer" reload:"hot"` BatchPoster BatchPosterConfig `koanf:"batch-poster" reload:"hot"` + MessagePruner MessagePrunerConfig `koanf:"message-pruner" reload:"hot"` ForwardingTargetImpl string `koanf:"forwarding-target"` Forwarder execution.ForwarderConfig `koanf:"forwarder"` TxPreChecker execution.TxPreCheckerConfig `koanf:"tx-pre-checker" reload:"hot"` @@ -482,6 +483,7 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet, feedInputEnable bool, feed
InboxReaderConfigAddOptions(prefix+".inbox-reader", f) DelayedSequencerConfigAddOptions(prefix+".delayed-sequencer", f) BatchPosterConfigAddOptions(prefix+".batch-poster", f) + MessagePrunerConfigAddOptions(prefix+".message-pruner", f) f.String(prefix+".forwarding-target", ConfigDefault.ForwardingTargetImpl, "transaction forwarding target URL, or \"null\" to disable forwarding (iff not sequencer)") execution.AddOptionsForNodeForwarderConfig(prefix+".forwarder", f) execution.TxPreCheckerConfigAddOptions(prefix+".tx-pre-checker", f) @@ -508,6 +510,7 @@ var ConfigDefault = Config{ InboxReader: DefaultInboxReaderConfig, DelayedSequencer: DefaultDelayedSequencerConfig, BatchPoster: DefaultBatchPosterConfig, + MessagePruner: DefaultMessagePrunerConfig, ForwardingTargetImpl: "", TxPreChecker: execution.DefaultTxPreCheckerConfig, BlockValidator: staker.DefaultBlockValidatorConfig, @@ -592,6 +595,7 @@ type Node struct { InboxTracker *InboxTracker DelayedSequencer *DelayedSequencer BatchPoster *BatchPoster + MessagePruner *MessagePruner BlockValidator *staker.BlockValidator StatelessBlockValidator *staker.StatelessBlockValidator Staker *staker.Staker @@ -793,6 +797,7 @@ func createNodeImpl( nil, nil, nil, + nil, broadcastServer, broadcastClients, coordinator, @@ -874,9 +879,8 @@ func createNodeImpl( if err != nil { if config.ValidatorRequired() || config.Staker.Enable { return nil, fmt.Errorf("%w: failed to init block validator", err) - } else { - log.Warn("validation not supported", "err", err) } + log.Warn("validation not supported", "err", err) statelessBlockValidator = nil } @@ -954,6 +958,10 @@ func createNodeImpl( return nil, err } } + var messagePruner *MessagePruner + if config.MessagePruner.Enable && !config.Caching.Archive && stakerObj != nil { + messagePruner = NewMessagePruner(txStreamer, inboxTracker, stakerObj, func() *MessagePrunerConfig { return &configFetcher.Get().MessagePruner }) + } // always create DelayedSequencer, it won't do anything if it is disabled delayedSequencer, err = NewDelayedSequencer(l1Reader, inboxReader, exec.ExecEngine, coordinator, func() *DelayedSequencerConfig { return &configFetcher.Get().DelayedSequencer }) if err != nil { @@ -971,6 +979,7 @@ func createNodeImpl( inboxTracker, delayedSequencer, batchPoster, + messagePruner, blockValidator, statelessBlockValidator, stakerObj, @@ -1130,6 +1139,9 @@ func (n *Node) Start(ctx context.Context) error { if n.BatchPoster != nil { n.BatchPoster.Start(ctx) } + if n.MessagePruner != nil { + n.MessagePruner.Start(ctx) + } if n.Staker != nil { err = n.Staker.Initialize(ctx) if err != nil { @@ -1141,9 +1153,8 @@ func (n *Node) Start(ctx context.Context) error { if err != nil { if n.configFetcher.Get().ValidatorRequired() { return fmt.Errorf("error initializing stateless block validator: %w", err) - } else { - log.Info("validation not set up", "err", err) } + log.Info("validation not set up", "err", err) n.StatelessBlockValidator = nil n.BlockValidator = nil } @@ -1210,6 +1221,9 @@ func (n *Node) StopAndWait() { if n.BatchPoster != nil && n.BatchPoster.Started() { n.BatchPoster.StopAndWait() } + if n.MessagePruner != nil && n.MessagePruner.Started() { + n.MessagePruner.StopAndWait() + } if n.BroadcastServer != nil && n.BroadcastServer.Started() { n.BroadcastServer.StopAndWait() } diff --git a/arbnode/seq_coordinator.go b/arbnode/seq_coordinator.go index 23d8e3fd71..ecb38129ac 100644 --- a/arbnode/seq_coordinator.go +++ b/arbnode/seq_coordinator.go @@ -7,6 +7,7 @@ import ( "context" "encoding/binary" "encoding/json" + 
"errors" "fmt" "net/http" "sync" @@ -14,7 +15,6 @@ import ( "time" "github.com/go-redis/redis/v8" - "github.com/pkg/errors" flag "github.com/spf13/pflag" "github.com/ethereum/go-ethereum/log" diff --git a/arbnode/sequencer_inbox.go b/arbnode/sequencer_inbox.go index f93698b4c6..a08e5b5c5a 100644 --- a/arbnode/sequencer_inbox.go +++ b/arbnode/sequencer_inbox.go @@ -6,6 +6,7 @@ package arbnode import ( "context" "encoding/binary" + "errors" "fmt" "math/big" @@ -15,7 +16,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/offchainlabs/nitro/arbutil" - "github.com/pkg/errors" "github.com/offchainlabs/nitro/solgen/go/bridgegen" ) @@ -56,7 +56,7 @@ type SequencerInbox struct { func NewSequencerInbox(client arbutil.L1Interface, addr common.Address, fromBlock int64) (*SequencerInbox, error) { con, err := bridgegen.NewSequencerInbox(addr, client) if err != nil { - return nil, errors.WithStack(err) + return nil, err } return &SequencerInbox{ @@ -77,7 +77,7 @@ func (i *SequencerInbox) GetBatchCount(ctx context.Context, blockNumber *big.Int } count, err := i.con.BatchCount(opts) if err != nil { - return 0, errors.WithStack(err) + return 0, err } if !count.IsUint64() { return 0, errors.New("sequencer inbox returned non-uint64 batch count") @@ -91,7 +91,7 @@ func (i *SequencerInbox) GetAccumulator(ctx context.Context, sequenceNumber uint BlockNumber: blockNumber, } acc, err := i.con.InboxAccs(opts, new(big.Int).SetUint64(sequenceNumber)) - return acc, errors.WithStack(err) + return acc, err } type SequencerInboxBatch struct { @@ -119,7 +119,7 @@ func (m *SequencerInboxBatch) getSequencerData(ctx context.Context, client arbut args := make(map[string]interface{}) err = addSequencerL2BatchFromOriginCallABI.Inputs.UnpackIntoMap(args, data[4:]) if err != nil { - return nil, errors.WithStack(err) + return nil, err } return args["data"].([]byte), nil case batchDataSeparateEvent: @@ -132,7 +132,7 @@ func (m *SequencerInboxBatch) getSequencerData(ctx context.Context, client arbut } logs, err := client.FilterLogs(ctx, query) if err != nil { - return nil, errors.WithStack(err) + return nil, err } if len(logs) == 0 { return nil, errors.New("expected to find sequencer batch data") @@ -143,7 +143,7 @@ func (m *SequencerInboxBatch) getSequencerData(ctx context.Context, client arbut event := new(bridgegen.SequencerInboxSequencerBatchData) err = sequencerBridgeABI.UnpackIntoInterface(event, sequencerBatchDataEvent, logs[0].Data) if err != nil { - return nil, errors.WithStack(err) + return nil, err } return event.Data, nil case batchDataNone: @@ -195,7 +195,7 @@ func (i *SequencerInbox) LookupBatchesInRange(ctx context.Context, from, to *big } logs, err := i.client.FilterLogs(ctx, query) if err != nil { - return nil, errors.WithStack(err) + return nil, err } messages := make([]*SequencerInboxBatch, 0, len(logs)) var lastSeqNum *uint64 @@ -205,7 +205,7 @@ func (i *SequencerInbox) LookupBatchesInRange(ctx context.Context, from, to *big } parsedLog, err := i.con.ParseSequencerBatchDelivered(log) if err != nil { - return nil, errors.WithStack(err) + return nil, err } if !parsedLog.BatchSequenceNumber.IsUint64() { return nil, errors.New("sequencer inbox event has non-uint64 sequence number") diff --git a/arbnode/simple_redis_lock.go b/arbnode/simple_redis_lock.go index 753bb70671..f6f37cc42d 100644 --- a/arbnode/simple_redis_lock.go +++ b/arbnode/simple_redis_lock.go @@ -3,6 +3,7 @@ package arbnode import ( "context" "crypto/rand" + "errors" "math" "math/big" 
"strconv" @@ -12,7 +13,6 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/go-redis/redis/v8" "github.com/offchainlabs/nitro/util/stopwaiter" - "github.com/pkg/errors" flag "github.com/spf13/pflag" ) diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index 2cdb624200..a6a11b0b84 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -198,6 +198,40 @@ func deleteStartingAt(db ethdb.Database, batch ethdb.Batch, prefix []byte, minKe return iter.Error() } +// deleteFromRange deletes key ranging from startMinKey(inclusive) to endMinKey(exclusive) +func deleteFromRange(db ethdb.Database, prefix []byte, startMinKey uint64, endMinKey uint64) ([][]byte, error) { + batch := db.NewBatch() + startIter := db.NewIterator(prefix, uint64ToKey(startMinKey)) + defer startIter.Release() + var prunedKeysRange [][]byte + for startIter.Next() { + if binary.BigEndian.Uint64(bytes.TrimPrefix(startIter.Key(), prefix)) >= endMinKey { + break + } + if len(prunedKeysRange) == 0 || len(prunedKeysRange) == 1 { + prunedKeysRange = append(prunedKeysRange, startIter.Key()) + } else { + prunedKeysRange[1] = startIter.Key() + } + err := batch.Delete(startIter.Key()) + if err != nil { + return nil, err + } + if batch.ValueSize() >= ethdb.IdealBatchSize { + if err := batch.Write(); err != nil { + return nil, err + } + batch.Reset() + } + } + if batch.ValueSize() > 0 { + if err := batch.Write(); err != nil { + return nil, err + } + } + return prunedKeysRange, nil +} + // The insertion mutex must be held. This acquires the reorg mutex. // Note: oldMessages will be empty if reorgHook is nil func (s *TransactionStreamer) reorg(batch ethdb.Batch, count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata) error { @@ -556,45 +590,43 @@ func (s *TransactionStreamer) countDuplicateMessages( if !bytes.Equal(haveMessage, wantMessage) { // Current message does not exactly match message in database var dbMessageParsed arbostypes.MessageWithMetadata - err := rlp.DecodeBytes(haveMessage, &dbMessageParsed) - if err != nil { + + if err := rlp.DecodeBytes(haveMessage, &dbMessageParsed); err != nil { log.Warn("TransactionStreamer: Reorg detected! (failed parsing db message)", "pos", pos, "err", err, ) return curMsg, true, nil, nil - } else { - var duplicateMessage bool - if nextMessage.Message != nil { - if dbMessageParsed.Message.BatchGasCost == nil || nextMessage.Message.BatchGasCost == nil { - // Remove both of the batch gas costs and see if the messages still differ - nextMessageCopy := nextMessage - nextMessageCopy.Message = new(arbostypes.L1IncomingMessage) - *nextMessageCopy.Message = *nextMessage.Message - batchGasCostBkup := dbMessageParsed.Message.BatchGasCost - dbMessageParsed.Message.BatchGasCost = nil - nextMessageCopy.Message.BatchGasCost = nil - if reflect.DeepEqual(dbMessageParsed, nextMessageCopy) { - // Actually this isn't a reorg; only the batch gas costs differed - duplicateMessage = true - // If possible - update the message in the database to add the gas cost cache. 
- if batch != nil && nextMessage.Message.BatchGasCost != nil { - if *batch == nil { - *batch = s.db.NewBatch() - } - err = s.writeMessage(pos, nextMessage, *batch) - if err != nil { - return 0, false, nil, err - } + } + var duplicateMessage bool + if nextMessage.Message != nil { + if dbMessageParsed.Message.BatchGasCost == nil || nextMessage.Message.BatchGasCost == nil { + // Remove both of the batch gas costs and see if the messages still differ + nextMessageCopy := nextMessage + nextMessageCopy.Message = new(arbostypes.L1IncomingMessage) + *nextMessageCopy.Message = *nextMessage.Message + batchGasCostBkup := dbMessageParsed.Message.BatchGasCost + dbMessageParsed.Message.BatchGasCost = nil + nextMessageCopy.Message.BatchGasCost = nil + if reflect.DeepEqual(dbMessageParsed, nextMessageCopy) { + // Actually this isn't a reorg; only the batch gas costs differed + duplicateMessage = true + // If possible - update the message in the database to add the gas cost cache. + if batch != nil && nextMessage.Message.BatchGasCost != nil { + if *batch == nil { + *batch = s.db.NewBatch() + } + if err := s.writeMessage(pos, nextMessage, *batch); err != nil { + return 0, false, nil, err } } - dbMessageParsed.Message.BatchGasCost = batchGasCostBkup } + dbMessageParsed.Message.BatchGasCost = batchGasCostBkup } + } - if !duplicateMessage { - return curMsg, true, &dbMessageParsed, nil - } + if !duplicateMessage { + return curMsg, true, &dbMessageParsed, nil } } diff --git a/arbos/addressTable/addressTable.go b/arbos/addressTable/addressTable.go index d2839615eb..220c2700f4 100644 --- a/arbos/addressTable/addressTable.go +++ b/arbos/addressTable/addressTable.go @@ -20,7 +20,7 @@ type AddressTable struct { } func Initialize(sto *storage.Storage) { - // no initialization needed + // No initialization needed. } func Open(sto *storage.Storage) *AddressTable { @@ -34,24 +34,22 @@ func (atab *AddressTable) Register(addr common.Address) (uint64, error) { if err != nil { return 0, err } - if rev == (common.Hash{}) { - // addr isn't in the table, so add it - newNumItems, err := atab.numItems.Increment() - if err != nil { - return 0, err - } - err = atab.backingStorage.SetByUint64(newNumItems, addrAsHash) - if err != nil { - return 0, err - } - err = atab.byAddress.Set(addrAsHash, util.UintToHash(newNumItems)) - if err != nil { - return 0, err - } - return newNumItems - 1, nil - } else { + + if rev != (common.Hash{}) { return rev.Big().Uint64() - 1, nil } + // Addr isn't in the table, so add it. 
+ newNumItems, err := atab.numItems.Increment() + if err != nil { + return 0, err + } + if err := atab.backingStorage.SetByUint64(newNumItems, addrAsHash); err != nil { + return 0, err + } + if err := atab.byAddress.Set(addrAsHash, util.UintToHash(newNumItems)); err != nil { + return 0, err + } + return newNumItems - 1, nil } func (atab *AddressTable) Lookup(addr common.Address) (uint64, bool, error) { diff --git a/arbos/arbosState/arbosstate.go b/arbos/arbosState/arbosstate.go index 333ad464ac..2bea8f7c54 100644 --- a/arbos/arbosState/arbosstate.go +++ b/arbos/arbosState/arbosstate.go @@ -4,7 +4,6 @@ package arbosState import ( - "encoding/json" "errors" "fmt" "math/big" @@ -19,6 +18,7 @@ import ( "github.com/offchainlabs/nitro/arbos/addressSet" "github.com/offchainlabs/nitro/arbos/addressTable" + "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/blockhash" "github.com/offchainlabs/nitro/arbos/burn" "github.com/offchainlabs/nitro/arbos/l1pricing" @@ -112,11 +112,7 @@ func NewArbosMemoryBackedArbOSState() (*ArbosState, *state.StateDB) { } burner := burn.NewSystemBurner(nil, false) chainConfig := params.ArbitrumDevTestChainConfig() - serializedChainConfig, err := json.Marshal(chainConfig) - if err != nil { - log.Crit("failed to serialize chain config", "error", err) - } - newState, err := InitializeArbosState(statedb, burner, chainConfig, serializedChainConfig) + newState, err := InitializeArbosState(statedb, burner, chainConfig, arbostypes.TestInitMessage) if err != nil { log.Crit("failed to open the ArbOS state", "error", err) } @@ -183,7 +179,7 @@ func getArbitrumOnlyGenesisPrecompiles(chainConfig *params.ChainConfig) []common // start running long-lived chains, every change to the storage format will require defining a new version and // providing upgrade code. 
-func InitializeArbosState(stateDB vm.StateDB, burner burn.Burner, chainConfig *params.ChainConfig, serializedChainConfig []byte) (*ArbosState, error) { +func InitializeArbosState(stateDB vm.StateDB, burner burn.Burner, chainConfig *params.ChainConfig, initMessage *arbostypes.ParsedInitMessage) (*ArbosState, error) { sto := storage.NewGeth(stateDB, burner) arbosVersion, err := sto.GetUint64ByUint64(uint64(versionOffset)) if err != nil { @@ -217,14 +213,14 @@ func InitializeArbosState(stateDB vm.StateDB, burner burn.Burner, chainConfig *p } _ = sto.SetByUint64(uint64(chainIdOffset), common.BigToHash(chainConfig.ChainID)) chainConfigStorage := sto.OpenStorageBackedBytes(chainConfigSubspace) - _ = chainConfigStorage.Set(serializedChainConfig) + _ = chainConfigStorage.Set(initMessage.SerializedChainConfig) _ = sto.SetUint64ByUint64(uint64(genesisBlockNumOffset), chainConfig.ArbitrumChainParams.GenesisBlockNum) initialRewardsRecipient := l1pricing.BatchPosterAddress if desiredArbosVersion >= 2 { initialRewardsRecipient = initialChainOwner } - _ = l1pricing.InitializeL1PricingState(sto.OpenSubStorage(l1PricingSubspace), initialRewardsRecipient) + _ = l1pricing.InitializeL1PricingState(sto.OpenSubStorage(l1PricingSubspace), initialRewardsRecipient, initMessage.InitialL1BaseFee) _ = l2pricing.InitializeL2PricingState(sto.OpenSubStorage(l2PricingSubspace)) _ = retryables.InitializeRetryableState(sto.OpenSubStorage(retryablesSubspace)) addressTable.Initialize(sto.OpenSubStorage(addressTableSubspace)) diff --git a/arbos/arbosState/initialization_test.go b/arbos/arbosState/initialization_test.go index 14ac8afa99..968f533e3e 100644 --- a/arbos/arbosState/initialization_test.go +++ b/arbos/arbosState/initialization_test.go @@ -12,8 +12,8 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" + "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/burn" "github.com/offchainlabs/nitro/statetransfer" "github.com/offchainlabs/nitro/util/testhelpers" @@ -60,11 +60,7 @@ func tryMarshalUnmarshal(input *statetransfer.ArbosInitializationInfo, t *testin initReader := statetransfer.NewMemoryInitDataReader(&initData) chainConfig := params.ArbitrumDevTestChainConfig() - serializedChainConfig, err := json.Marshal(chainConfig) - if err != nil { - log.Crit("failed to serialize chain config", "error", err) - } - stateroot, err := InitializeArbosInDatabase(raw, initReader, chainConfig, serializedChainConfig, 0, 0) + stateroot, err := InitializeArbosInDatabase(raw, initReader, chainConfig, arbostypes.TestInitMessage, 0, 0) Require(t, err) stateDb, err := state.New(stateroot, state.NewDatabase(raw), nil) diff --git a/arbos/arbosState/initialize.go b/arbos/arbosState/initialize.go index d30507ee81..e98ab08485 100644 --- a/arbos/arbosState/initialize.go +++ b/arbos/arbosState/initialize.go @@ -15,6 +15,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" + "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/burn" "github.com/offchainlabs/nitro/arbos/l2pricing" "github.com/offchainlabs/nitro/arbos/retryables" @@ -49,7 +50,7 @@ func MakeGenesisBlock(parentHash common.Hash, blockNumber uint64, timestamp uint return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil)) } -func InitializeArbosInDatabase(db 
ethdb.Database, initData statetransfer.InitDataReader, chainConfig *params.ChainConfig, serializedChainConfig []byte, timestamp uint64, accountsPerSync uint) (common.Hash, error) { +func InitializeArbosInDatabase(db ethdb.Database, initData statetransfer.InitDataReader, chainConfig *params.ChainConfig, initMessage *arbostypes.ParsedInitMessage, timestamp uint64, accountsPerSync uint) (common.Hash, error) { stateDatabase := state.NewDatabase(db) statedb, err := state.New(common.Hash{}, stateDatabase, nil) if err != nil { @@ -73,7 +74,7 @@ func InitializeArbosInDatabase(db ethdb.Database, initData statetransfer.InitDat } burner := burn.NewSystemBurner(nil, false) - arbosState, err := InitializeArbosState(statedb, burner, chainConfig, serializedChainConfig) + arbosState, err := InitializeArbosState(statedb, burner, chainConfig, initMessage) if err != nil { log.Crit("failed to open the ArbOS state", "error", err) } diff --git a/arbos/arbostypes/incomingmessage.go b/arbos/arbostypes/incomingmessage.go index 977299ce4e..58186b7108 100644 --- a/arbos/arbostypes/incomingmessage.go +++ b/arbos/arbostypes/incomingmessage.go @@ -234,29 +234,60 @@ func ParseIncomingL1Message(rd io.Reader, batchFetcher FallibleBatchFetcher) (*L type FallibleBatchFetcher func(batchNum uint64) ([]byte, error) +type ParsedInitMessage struct { + ChainId *big.Int + InitialL1BaseFee *big.Int + + // These may be nil + ChainConfig *params.ChainConfig + SerializedChainConfig []byte +} + +// The initial L1 pricing basefee starts at 50 GWei unless set in the init message +var DefaultInitialL1BaseFee = big.NewInt(50 * params.GWei) + +var TestInitMessage = &ParsedInitMessage{ + ChainId: params.ArbitrumDevTestChainConfig().ChainID, + InitialL1BaseFee: DefaultInitialL1BaseFee, +} + // ParseInitMessage returns the chain id on success -func (msg *L1IncomingMessage) ParseInitMessage() (*big.Int, *params.ChainConfig, []byte, error) { +func (msg *L1IncomingMessage) ParseInitMessage() (*ParsedInitMessage, error) { if msg.Header.Kind != L1MessageType_Initialize { - return nil, nil, nil, fmt.Errorf("invalid init message kind %v", msg.Header.Kind) + return nil, fmt.Errorf("invalid init message kind %v", msg.Header.Kind) } + basefee := new(big.Int).Set(DefaultInitialL1BaseFee) var chainConfig params.ChainConfig var chainId *big.Int if len(msg.L2msg) == 32 { chainId = new(big.Int).SetBytes(msg.L2msg[:32]) - return chainId, nil, nil, nil - } else if len(msg.L2msg) > 32 { + return &ParsedInitMessage{chainId, basefee, nil, nil}, nil + } + if len(msg.L2msg) > 32 { chainId = new(big.Int).SetBytes(msg.L2msg[:32]) version := msg.L2msg[32] - if version == 0 && len(msg.L2msg) > 33 { - serializedChainConfig := msg.L2msg[33:] - err := json.Unmarshal(serializedChainConfig, &chainConfig) + reader := bytes.NewReader(msg.L2msg[33:]) + switch version { + case 1: + var err error + basefee, err = util.Uint256FromReader(reader) + if err != nil { + return nil, err + } + fallthrough + case 0: + serializedChainConfig, err := io.ReadAll(reader) + if err != nil { + return nil, err + } + err = json.Unmarshal(serializedChainConfig, &chainConfig) if err != nil { - return nil, nil, nil, fmt.Errorf("failed to parse init message, err: %w, message data: %v", err, string(msg.L2msg)) + return nil, fmt.Errorf("failed to parse init message, err: %w, message data: %v", err, string(msg.L2msg)) } - return chainId, &chainConfig, serializedChainConfig, nil + return &ParsedInitMessage{chainId, basefee, &chainConfig, serializedChainConfig}, nil } } - return nil, nil, nil, 
fmt.Errorf("invalid init message data %v", string(msg.L2msg)) + return nil, fmt.Errorf("invalid init message data %v", string(msg.L2msg)) } func ParseBatchPostingReportMessageFields(rd io.Reader) (*big.Int, common.Address, common.Hash, uint64, *big.Int, error) { diff --git a/arbos/arbostypes/messagewithmeta.go b/arbos/arbostypes/messagewithmeta.go index 0cbe693911..a3d4f5e3c3 100644 --- a/arbos/arbostypes/messagewithmeta.go +++ b/arbos/arbostypes/messagewithmeta.go @@ -3,12 +3,12 @@ package arbostypes import ( "context" "encoding/binary" + "fmt" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" "github.com/offchainlabs/nitro/arbutil" - "github.com/pkg/errors" ) var uniquifyingPrefix = []byte("Arbitrum Nitro Feed:") @@ -35,7 +35,7 @@ func (m *MessageWithMetadata) Hash(sequenceNumber arbutil.MessageIndex, chainId serializedMessage, err := rlp.EncodeToBytes(m.Message) if err != nil { - return common.Hash{}, errors.Wrapf(err, "unable to serialize message %v", sequenceNumber) + return common.Hash{}, fmt.Errorf("unable to serialize message %v: %w", sequenceNumber, err) } return crypto.Keccak256Hash(uniquifyingPrefix, serializedExtraData, serializedMessage), nil diff --git a/arbos/block_processor.go b/arbos/block_processor.go index c84aa7a4b9..9f208c4404 100644 --- a/arbos/block_processor.go +++ b/arbos/block_processor.go @@ -78,7 +78,7 @@ func createNewHeader(prevHeader *types.Header, l1info *L1Info, state *arbosState copy(extra, prevHeader.Extra) mixDigest = prevHeader.MixDigest } - return &types.Header{ + header := &types.Header{ ParentHash: lastBlockHash, UncleHash: types.EmptyUncleHash, // Post-merge Ethereum will require this to be types.EmptyUncleHash Coinbase: coinbase, @@ -96,6 +96,7 @@ func createNewHeader(prevHeader *types.Header, l1info *L1Info, state *arbosState Nonce: [8]byte{}, // Filled in later; post-merge Ethereum will require this to be zero BaseFee: baseFee, } + return header } type ConditionalOptionsForTx []*arbitrum_types.ConditionalOptions @@ -323,6 +324,18 @@ func ProduceBlockAdvanced( return receipt, result, nil })() + if tx.Type() == types.ArbitrumInternalTxType { + // ArbOS might have upgraded to a new version, so we need to refresh our state + state, err = arbosState.OpenSystemArbosState(statedb, nil, true) + if err != nil { + return nil, nil, err + } + // Update the ArbOS version in the header (if it changed) + extraInfo := types.DeserializeHeaderExtraInformation(header) + extraInfo.ArbOSFormatVersion = state.ArbOSVersion() + extraInfo.UpdateHeaderWithInfo(header) + } + // append the err, even if it is nil hooks.TxErrors = append(hooks.TxErrors, err) @@ -443,10 +456,9 @@ func ProduceBlockAdvanced( // Fail if funds have been minted or debug mode is enabled (i.e. 
this is a test) if balanceDelta.Cmp(expectedBalanceDelta) > 0 || chainConfig.DebugMode() { return nil, nil, fmt.Errorf("unexpected total balance delta %v (expected %v)", balanceDelta, expectedBalanceDelta) - } else { - // This is a real chain and funds were burnt, not minted, so only log an error and don't panic - log.Error("Unexpected total balance delta", "delta", balanceDelta, "expected", expectedBalanceDelta) } + // This is a real chain and funds were burnt, not minted, so only log an error and don't panic + log.Error("Unexpected total balance delta", "delta", balanceDelta, "expected", expectedBalanceDelta) } return block, receipts, nil diff --git a/arbos/l1pricing/l1pricing.go b/arbos/l1pricing/l1pricing.go index 6dc0bd4a46..9772ac028b 100644 --- a/arbos/l1pricing/l1pricing.go +++ b/arbos/l1pricing/l1pricing.go @@ -74,7 +74,6 @@ const ( const ( InitialInertia = 10 InitialPerUnitReward = 10 - InitialPricePerUnitWei = 50 * params.GWei InitialPerBatchGasCostV6 = 100000 ) @@ -82,7 +81,7 @@ const ( var InitialEquilibrationUnitsV0 = arbmath.UintToBig(60 * params.TxDataNonZeroGasEIP2028 * 100000) var InitialEquilibrationUnitsV6 = arbmath.UintToBig(params.TxDataNonZeroGasEIP2028 * 10000000) -func InitializeL1PricingState(sto *storage.Storage, initialRewardsRecipient common.Address) error { +func InitializeL1PricingState(sto *storage.Storage, initialRewardsRecipient common.Address, initialL1BaseFee *big.Int) error { bptStorage := sto.OpenSubStorage(BatchPosterTableKey) if err := InitializeBatchPostersTable(bptStorage); err != nil { return err @@ -109,7 +108,7 @@ func InitializeL1PricingState(sto *storage.Storage, initialRewardsRecipient comm return err } pricePerUnit := sto.OpenStorageBackedBigInt(pricePerUnitOffset) - if err := pricePerUnit.SetByUint(InitialPricePerUnitWei); err != nil { + if err := pricePerUnit.SetSaturatingWithWarning(initialL1BaseFee, "initial L1 base fee (storing in price per unit)"); err != nil { return err } return nil @@ -529,22 +528,22 @@ var randS = crypto.Keccak256Hash([]byte("S")).Big() // The returned tx will be invalid, likely for a number of reasons such as an invalid signature. // It's only used to check how large it is after brotli level 0 compression. -func makeFakeTxForMessage(message core.Message) *types.Transaction { - nonce := message.Nonce() +func makeFakeTxForMessage(message *core.Message) *types.Transaction { + nonce := message.Nonce if nonce == 0 { nonce = randomNonce } - gasTipCap := message.GasTipCap() + gasTipCap := message.GasTipCap if gasTipCap.Sign() == 0 { gasTipCap = randomGasTipCap } - gasFeeCap := message.GasFeeCap() + gasFeeCap := message.GasFeeCap if gasFeeCap.Sign() == 0 { gasFeeCap = randomGasFeeCap } // During gas estimation, we don't want the gas limit variability to change the L1 cost. 
- gas := message.Gas() - if gas == 0 || message.RunMode() == types.MessageGasEstimationMode { + gas := message.GasLimit + if gas == 0 || message.TxRunMode == core.MessageGasEstimationMode { gas = RandomGas } return types.NewTx(&types.DynamicFeeTx{ @@ -552,18 +551,18 @@ func makeFakeTxForMessage(message core.Message) *types.Transaction { GasTipCap: gasTipCap, GasFeeCap: gasFeeCap, Gas: gas, - To: message.To(), - Value: message.Value(), - Data: message.Data(), - AccessList: message.AccessList(), + To: message.To, + Value: message.Value, + Data: message.Data, + AccessList: message.AccessList, V: randV, R: randR, S: randS, }) } -func (ps *L1PricingState) PosterDataCost(message core.Message, poster common.Address) (*big.Int, uint64) { - tx := message.UnderlyingTransaction() +func (ps *L1PricingState) PosterDataCost(message *core.Message, poster common.Address) (*big.Int, uint64) { + tx := message.Tx if tx != nil { return ps.GetPosterInfo(tx, poster) } diff --git a/arbos/l1pricing/l1pricing_test.go b/arbos/l1pricing/l1pricing_test.go index ec0ecc275e..b301c94257 100644 --- a/arbos/l1pricing/l1pricing_test.go +++ b/arbos/l1pricing/l1pricing_test.go @@ -4,18 +4,19 @@ package l1pricing import ( + "math/big" "testing" - am "github.com/offchainlabs/nitro/util/arbmath" - "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/burn" "github.com/offchainlabs/nitro/arbos/storage" ) func TestL1PriceUpdate(t *testing.T) { sto := storage.NewMemoryBacked(burn.NewSystemBurner(nil, false)) - err := InitializeL1PricingState(sto, common.Address{}) + initialPriceEstimate := big.NewInt(123 * params.GWei) + err := InitializeL1PricingState(sto, common.Address{}, initialPriceEstimate) Require(t, err) ps := OpenL1PricingState(sto) @@ -25,7 +26,6 @@ func TestL1PriceUpdate(t *testing.T) { Fail(t) } - initialPriceEstimate := am.UintToBig(InitialPricePerUnitWei) priceEstimate, err := ps.PricePerUnit() Require(t, err) if priceEstimate.Cmp(initialPriceEstimate) != 0 { diff --git a/arbos/merkleAccumulator/merkleAccumulator.go b/arbos/merkleAccumulator/merkleAccumulator.go index 3d117a0ae3..2e060c5840 100644 --- a/arbos/merkleAccumulator/merkleAccumulator.go +++ b/arbos/merkleAccumulator/merkleAccumulator.go @@ -67,17 +67,15 @@ func (acc *MerkleAccumulator) NonPersistentClone() (*MerkleAccumulator, error) { func (acc *MerkleAccumulator) Keccak(data ...[]byte) ([]byte, error) { if acc.backingStorage != nil { return acc.backingStorage.Keccak(data...) - } else { - return crypto.Keccak256(data...), nil } + return crypto.Keccak256(data...), nil } func (acc *MerkleAccumulator) KeccakHash(data ...[]byte) (common.Hash, error) { if acc.backingStorage != nil { return acc.backingStorage.KeccakHash(data...) 
- } else { - return crypto.Keccak256Hash(data...), nil } + return crypto.Keccak256Hash(data...), nil } func (acc *MerkleAccumulator) getPartial(level uint64) (*common.Hash, error) { @@ -87,10 +85,9 @@ func (acc *MerkleAccumulator) getPartial(level uint64) (*common.Hash, error) { acc.partials[level] = &h } return acc.partials[level], nil - } else { - ret, err := acc.backingStorage.GetByUint64(2 + level) - return &ret, err } + ret, err := acc.backingStorage.GetByUint64(2 + level) + return &ret, err } func (acc *MerkleAccumulator) GetPartials() ([]*common.Hash, error) { diff --git a/arbos/storage/storage.go b/arbos/storage/storage.go index 39ba426f76..478ad68f8f 100644 --- a/arbos/storage/storage.go +++ b/arbos/storage/storage.go @@ -461,7 +461,8 @@ func (sbbu *StorageBackedBigUint) Get() (*big.Int, error) { func (sbbu *StorageBackedBigUint) SetChecked(val *big.Int) error { if val.Sign() < 0 { return sbbu.burner.HandleError(fmt.Errorf("underflow in StorageBackedBigUint.Set setting value %v", val)) - } else if val.BitLen() > 256 { + } + if val.BitLen() > 256 { return sbbu.burner.HandleError(fmt.Errorf("overflow in StorageBackedBigUint.Set setting value %v", val)) } return sbbu.StorageSlot.Set(common.BytesToHash(val.Bytes())) @@ -579,9 +580,8 @@ func (sba *StorageBackedAddressOrNil) Get() (*common.Address, error) { func (sba *StorageBackedAddressOrNil) Set(val *common.Address) error { if val == nil { return sba.StorageSlot.Set(NilAddressRepresentation) - } else { - return sba.StorageSlot.Set(common.BytesToHash(val.Bytes())) } + return sba.StorageSlot.Set(common.BytesToHash(val.Bytes())) } type StorageBackedBytes struct { diff --git a/arbos/tx_processor.go b/arbos/tx_processor.go index cba9fa22ab..d0f999d0de 100644 --- a/arbos/tx_processor.go +++ b/arbos/tx_processor.go @@ -35,7 +35,7 @@ const GasEstimationL1PricePadding arbmath.Bips = 11000 // pad estimates by 10% // It tracks state for ArbOS, allowing it infuence in Geth's tx processing. // Public fields are accessible in precompiles. 
type TxProcessor struct { - msg core.Message + msg *core.Message state *arbosState.ArbosState PosterFee *big.Int // set once in GasChargingHook to track L1 calldata costs posterGas uint64 @@ -53,8 +53,8 @@ type TxProcessor struct { cachedL1BlockHashes map[uint64]common.Hash } -func NewTxProcessor(evm *vm.EVM, msg core.Message) *TxProcessor { - tracingInfo := util.NewTracingInfo(evm, msg.From(), arbosAddress, util.TracingBeforeEVM) +func NewTxProcessor(evm *vm.EVM, msg *core.Message) *TxProcessor { + tracingInfo := util.NewTracingInfo(evm, msg.From, arbosAddress, util.TracingBeforeEVM) arbosState := arbosState.OpenSystemArbosStateOrPanic(evm.StateDB, tracingInfo, false) return &TxProcessor{ msg: msg, @@ -90,17 +90,16 @@ func takeFunds(pool *big.Int, take *big.Int) *big.Int { oldPool := new(big.Int).Set(pool) pool.Set(common.Big0) return oldPool - } else { - pool.Sub(pool, take) - return new(big.Int).Set(take) } + pool.Sub(pool, take) + return new(big.Int).Set(take) } func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, returnData []byte) { // This hook is called before gas charging and will end the state transition if endTxNow is set to true // Hence, we must charge for any l2 resources if endTxNow is returned true - underlyingTx := p.msg.UnderlyingTransaction() + underlyingTx := p.msg.Tx if underlyingTx == nil { return false, 0, nil, nil } @@ -116,26 +115,26 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r } evm.IncrementDepth() // fake a call tracer := evm.Config.Tracer - from := p.msg.From() - tracer.CaptureStart(evm, from, *p.msg.To(), false, p.msg.Data(), p.msg.Gas(), p.msg.Value()) + from := p.msg.From + tracer.CaptureStart(evm, from, *p.msg.To, false, p.msg.Data, p.msg.GasLimit, p.msg.Value) - tracingInfo = util.NewTracingInfo(evm, from, *p.msg.To(), util.TracingDuringEVM) + tracingInfo = util.NewTracingInfo(evm, from, *p.msg.To, util.TracingDuringEVM) p.state = arbosState.OpenSystemArbosStateOrPanic(evm.StateDB, tracingInfo, false) return func() { tracer.CaptureEnd(nil, p.state.Burner.Burned(), nil) evm.DecrementDepth() // fake the return to the first faked call - tracingInfo = util.NewTracingInfo(evm, from, *p.msg.To(), util.TracingAfterEVM) + tracingInfo = util.NewTracingInfo(evm, from, *p.msg.To, util.TracingAfterEVM) p.state = arbosState.OpenSystemArbosStateOrPanic(evm.StateDB, tracingInfo, false) } } switch tx := underlyingTx.GetInner().(type) { case *types.ArbitrumDepositTx: - from := p.msg.From() - to := p.msg.To() - value := p.msg.Value() + from := p.msg.From + to := p.msg.To + value := p.msg.Value if to == nil { return true, 0, errors.New("eth deposit has no To address"), nil } @@ -148,7 +147,7 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r return true, 0, nil, nil case *types.ArbitrumInternalTx: defer (startTracer())() - if p.msg.From() != arbosAddress { + if p.msg.From != arbosAddress { return false, 0, errors.New("internal tx not from arbAddress"), nil } err = ApplyInternalTxUpdate(tx, p.state, evm) @@ -247,11 +246,11 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r balance := statedb.GetBalance(tx.From) basefee := evm.Context.BaseFee - usergas := p.msg.Gas() + usergas := p.msg.GasLimit maxGasCost := arbmath.BigMulByUint(tx.GasFeeCap, usergas) maxFeePerGasTooLow := arbmath.BigLessThan(tx.GasFeeCap, basefee) - if p.msg.RunMode() == types.MessageGasEstimationMode && tx.GasFeeCap.BitLen() == 0 { + if p.msg.TxRunMode == 
core.MessageGasEstimationMode && tx.GasFeeCap.BitLen() == 0 { // In gas estimation mode, we permit a zero gas fee cap. // This matches behavior with normal tx gas estimation. maxFeePerGasTooLow = false @@ -349,8 +348,8 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r return false, 0, nil, nil } -func GetPosterGas(state *arbosState.ArbosState, baseFee *big.Int, runMode types.MessageRunMode, posterCost *big.Int) uint64 { - if runMode == types.MessageGasEstimationMode { +func GetPosterGas(state *arbosState.ArbosState, baseFee *big.Int, runMode core.MessageRunMode, posterCost *big.Int) uint64 { + if runMode == core.MessageGasEstimationMode { // Suggest the amount of gas needed for a given amount of ETH is higher in case of congestion. // This will help the user pad the total they'll pay in case the price rises a bit. // Note, reducing the poster cost will increase share the network fee gets, not reduce the total. @@ -380,7 +379,7 @@ func (p *TxProcessor) GasChargingHook(gasRemaining *uint64) (common.Address, err basefee := p.evm.Context.BaseFee var poster common.Address - if p.msg.RunMode() != types.MessageCommitMode { + if p.msg.TxRunMode != core.MessageCommitMode { poster = l1pricing.BatchPosterAddress } else { poster = p.evm.Context.Coinbase @@ -394,7 +393,7 @@ func (p *TxProcessor) GasChargingHook(gasRemaining *uint64) (common.Address, err if calldataUnits > 0 { p.state.Restrict(p.state.L1PricingState().AddToUnitsSinceUpdate(calldataUnits)) } - p.posterGas = GetPosterGas(p.state, basefee, p.msg.RunMode(), posterCost) + p.posterGas = GetPosterGas(p.state, basefee, p.msg.TxRunMode, posterCost) p.PosterFee = arbmath.BigMulByUint(basefee, p.posterGas) // round down gasNeededToStartEVM = p.posterGas } @@ -405,7 +404,7 @@ func (p *TxProcessor) GasChargingHook(gasRemaining *uint64) (common.Address, err } *gasRemaining -= gasNeededToStartEVM - if p.msg.RunMode() != types.MessageEthcallMode { + if p.msg.TxRunMode != core.MessageEthcallMode { // If this is a real tx, limit the amount of computed based on the gas pool. // We do this by charging extra gas, and then refunding it later. 
gasAvailable, _ := p.state.L2PricingState().PerBlockGasLimit() @@ -430,15 +429,15 @@ func (p *TxProcessor) ForceRefundGas() uint64 { func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { - underlyingTx := p.msg.UnderlyingTransaction() + underlyingTx := p.msg.Tx networkFeeAccount, _ := p.state.NetworkFeeAccount() basefee := p.evm.Context.BaseFee scenario := util.TracingAfterEVM - if gasLeft > p.msg.Gas() { + if gasLeft > p.msg.GasLimit { panic("Tx somehow refunds gas after computation") } - gasUsed := p.msg.Gas() - gasLeft + gasUsed := p.msg.GasLimit - gasLeft if underlyingTx != nil && underlyingTx.Type() == types.ArbitrumRetryTxType { inner, _ := underlyingTx.GetInner().(*types.ArbitrumRetryTx) @@ -485,7 +484,7 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { if success { // we don't want to charge for this - tracingInfo := util.NewTracingInfo(p.evm, arbosAddress, p.msg.From(), scenario) + tracingInfo := util.NewTracingInfo(p.evm, arbosAddress, p.msg.From, scenario) state := arbosState.OpenSystemArbosStateOrPanic(p.evm.StateDB, tracingInfo, false) _, _ = state.RetryableState().DeleteRetryable(inner.TicketId, p.evm, scenario) } else { @@ -544,7 +543,7 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { } } - if p.msg.GasPrice().Sign() > 0 { // in tests, gas price could be 0 + if p.msg.GasPrice.Sign() > 0 { // in tests, gas price could be 0 // ArbOS's gas pool is meant to enforce the computational speed-limit. // We don't want to remove from the pool the poster's L1 costs (as expressed in L2 gas in this func) // Hence, we deduct the previously saved poster L2-gas-equivalent to reveal the compute-only gas @@ -603,7 +602,7 @@ func (p *TxProcessor) L1BlockNumber(blockCtx vm.BlockContext) (uint64, error) { if p.cachedL1BlockNumber != nil { return *p.cachedL1BlockNumber, nil } - tracingInfo := util.NewTracingInfo(p.evm, p.msg.From(), arbosAddress, util.TracingDuringEVM) + tracingInfo := util.NewTracingInfo(p.evm, p.msg.From, arbosAddress, util.TracingDuringEVM) state, err := arbosState.OpenSystemArbosState(p.evm.StateDB, tracingInfo, false) if err != nil { return 0, err @@ -621,7 +620,7 @@ func (p *TxProcessor) L1BlockHash(blockCtx vm.BlockContext, l1BlockNumber uint64 if cached { return hash, nil } - tracingInfo := util.NewTracingInfo(p.evm, p.msg.From(), arbosAddress, util.TracingDuringEVM) + tracingInfo := util.NewTracingInfo(p.evm, p.msg.From, arbosAddress, util.TracingDuringEVM) state, err := arbosState.OpenSystemArbosState(p.evm.StateDB, tracingInfo, false) if err != nil { return common.Hash{}, err @@ -644,7 +643,7 @@ func (p *TxProcessor) GetPaidGasPrice() *big.Int { version := p.state.ArbOSVersion() if version != 9 { gasPrice = p.evm.Context.BaseFee - if p.msg.RunMode() != types.MessageCommitMode && p.msg.GasFeeCap().Sign() == 0 { + if p.msg.TxRunMode != core.MessageCommitMode && p.msg.GasFeeCap.Sign() == 0 { gasPrice.SetInt64(0) // gasprice zero behavior } } @@ -666,6 +665,6 @@ func (p *TxProcessor) MsgIsNonMutating() bool { if p.msg == nil { return false } - mode := p.msg.RunMode() - return mode == types.MessageGasEstimationMode || mode == types.MessageEthcallMode + mode := p.msg.TxRunMode + return mode == core.MessageGasEstimationMode || mode == core.MessageEthcallMode } diff --git a/arbos/util/util.go b/arbos/util/util.go index 1514d6d10d..4c0142aeb9 100644 --- a/arbos/util/util.go +++ b/arbos/util/util.go @@ -108,6 +108,14 @@ func HashFromReader(rd io.Reader) (common.Hash, error) { return common.BytesToHash(buf), nil } +func 
Uint256FromReader(rd io.Reader) (*big.Int, error) { + asHash, err := HashFromReader(rd) + if err != nil { + return nil, err + } + return asHash.Big(), nil +} + func HashToWriter(val common.Hash, wr io.Writer) error { _, err := wr.Write(val.Bytes()) return err diff --git a/arbstate/inbox.go b/arbstate/inbox.go index b0368c56cc..80d40322c9 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -7,11 +7,10 @@ import ( "bytes" "context" "encoding/binary" + "errors" "io" "math/big" - "github.com/pkg/errors" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" diff --git a/arbutil/correspondingl1blocknumber.go b/arbutil/correspondingl1blocknumber.go index 806bffae6e..136eb8e4c9 100644 --- a/arbutil/correspondingl1blocknumber.go +++ b/arbutil/correspondingl1blocknumber.go @@ -11,15 +11,18 @@ import ( "github.com/ethereum/go-ethereum/core/types" ) -func CorrespondingL1BlockNumber(ctx context.Context, client L1Interface, blockNumber uint64) (uint64, error) { - header, err := client.HeaderByNumber(ctx, big.NewInt(int64(blockNumber))) - if err != nil { - return 0, fmt.Errorf("error getting L1 block number %d header : %w", blockNumber, err) - } +func ParentHeaderToL1BlockNumber(header *types.Header) uint64 { headerInfo := types.DeserializeHeaderExtraInformation(header) - if headerInfo.L1BlockNumber != 0 { - return headerInfo.L1BlockNumber, nil - } else { - return blockNumber, nil + if headerInfo.ArbOSFormatVersion > 0 { + return headerInfo.L1BlockNumber + } + return header.Number.Uint64() +} + +func CorrespondingL1BlockNumber(ctx context.Context, client L1Interface, parentBlockNumber uint64) (uint64, error) { + header, err := client.HeaderByNumber(ctx, big.NewInt(int64(parentBlockNumber))) + if err != nil { + return 0, fmt.Errorf("error getting L1 block number %d header : %w", parentBlockNumber, err) } + return ParentHeaderToL1BlockNumber(header), nil } diff --git a/arbutil/transaction_data.go b/arbutil/transaction_data.go index be601a913f..7741af6e9b 100644 --- a/arbutil/transaction_data.go +++ b/arbutil/transaction_data.go @@ -8,14 +8,13 @@ import ( "fmt" "github.com/ethereum/go-ethereum/core/types" - "github.com/pkg/errors" ) // GetLogEmitterTxData requires that the tx's data is at least 4 bytes long func GetLogEmitterTxData(ctx context.Context, client L1Interface, log types.Log) ([]byte, error) { tx, err := client.TransactionInBlock(ctx, log.BlockHash, log.TxIndex) if err != nil { - return nil, errors.WithStack(err) + return nil, err } if tx.Hash() != log.TxHash { return nil, fmt.Errorf("L1 client returned unexpected transaction hash %v when looking up block %v transaction %v with expected hash %v", tx.Hash(), log.BlockHash, log.TxIndex, log.TxHash) diff --git a/blsSignatures/blsSignatures.go b/blsSignatures/blsSignatures.go index f177c4c3ac..b597d6a07e 100644 --- a/blsSignatures/blsSignatures.go +++ b/blsSignatures/blsSignatures.go @@ -194,14 +194,13 @@ func PublicKeyToBytes(pub PublicKey) []byte { g2 := bls12381.NewG2() if pub.validityProof == nil { return append([]byte{0}, g2.ToBytes(pub.key)...) - } else { - keyBytes := g2.ToBytes(pub.key) - sigBytes := SignatureToBytes(pub.validityProof) - if len(sigBytes) > 255 { - panic("validity proof too large to serialize") - } - return append(append([]byte{byte(len(sigBytes))}, sigBytes...), keyBytes...) 
} + keyBytes := g2.ToBytes(pub.key) + sigBytes := SignatureToBytes(pub.validityProof) + if len(sigBytes) > 255 { + panic("validity proof too large to serialize") + } + return append(append([]byte{byte(len(sigBytes))}, sigBytes...), keyBytes...) } func PublicKeyFromBytes(in []byte, trustedSource bool) (PublicKey, error) { diff --git a/broadcastclient/broadcastclient.go b/broadcastclient/broadcastclient.go index fc9b268d10..f78ef2aa9f 100644 --- a/broadcastclient/broadcastclient.go +++ b/broadcastclient/broadcastclient.go @@ -7,6 +7,8 @@ import ( "context" "crypto/tls" "encoding/json" + "errors" + "fmt" "io" "net" "net/http" @@ -19,7 +21,6 @@ import ( "github.com/gobwas/httphead" "github.com/gobwas/ws" "github.com/gobwas/ws/wsflate" - "github.com/pkg/errors" flag "github.com/spf13/pflag" "github.com/ethereum/go-ethereum/log" @@ -287,19 +288,19 @@ func (bc *BroadcastClient) connect(ctx context.Context, nextSeqNum arbutil.Messa return nil, err } if err != nil { - return nil, errors.Wrap(err, "broadcast client unable to connect") + return nil, fmt.Errorf("broadcast client unable to connect: %w", err) } if config.RequireChainId && !foundChainId { err := conn.Close() if err != nil { - return nil, errors.Wrap(err, "error closing connection when missing chain id") + return nil, fmt.Errorf("error closing connection when missing chain id: %w", err) } return nil, ErrMissingChainId } if config.RequireFeedVersion && !foundFeedServerVersion { err := conn.Close() if err != nil { - return nil, errors.Wrap(err, "error closing connection when missing feed server version") + return nil, fmt.Errorf("error closing connection when missing feed server version: %w", err) } return nil, ErrMissingFeedServerVersion } @@ -407,7 +408,7 @@ func (bc *BroadcastClient) startBackgroundReader(earlyFrameData io.Reader) { err := bc.isValidSignature(ctx, message) if err != nil { log.Error("error validating feed signature", "error", err, "sequence number", message.SequenceNumber) - bc.fatalErrChan <- errors.Wrapf(err, "error validating feed signature %v", message.SequenceNumber) + bc.fatalErrChan <- fmt.Errorf("error validating feed signature %v: %w", message.SequenceNumber, err) continue } @@ -485,7 +486,7 @@ func (bc *BroadcastClient) isValidSignature(ctx context.Context, message *broadc } hash, err := message.Hash(bc.chainId) if err != nil { - return errors.Wrapf(err, "error getting message hash for sequence number %v", message.SequenceNumber) + return fmt.Errorf("error getting message hash for sequence number %v: %w", message.SequenceNumber, err) } return bc.sigVerifier.VerifyHash(ctx, message.Signature, hash) } diff --git a/cmd/chaininfo/chain_info.go b/cmd/chaininfo/chain_info.go index 8ef18b9722..13eb928c46 100644 --- a/cmd/chaininfo/chain_info.go +++ b/cmd/chaininfo/chain_info.go @@ -41,9 +41,8 @@ func GetChainConfig(chainId *big.Int, chainName string, genesisBlockNum uint64, } if chainId.Uint64() != 0 { return nil, fmt.Errorf("missing chain config for L2 chain ID %v", chainId) - } else { - return nil, fmt.Errorf("missing chain config for L2 chain name %v", chainName) } + return nil, fmt.Errorf("missing chain config for L2 chain name %v", chainName) } func GetRollupAddressesConfig(chainId uint64, chainName string, l2ChainInfoFiles []string, l2ChainInfoJson string) (RollupAddresses, error) { @@ -56,9 +55,8 @@ func GetRollupAddressesConfig(chainId uint64, chainName string, l2ChainInfoFiles } if chainId != 0 { return RollupAddresses{}, fmt.Errorf("missing rollup addresses for L2 chain ID %v", chainId) - } else { - return 
RollupAddresses{}, fmt.Errorf("missing rollup addresses for L2 chain name %v", chainName) } + return RollupAddresses{}, fmt.Errorf("missing rollup addresses for L2 chain name %v", chainName) } func ProcessChainInfo(chainId uint64, chainName string, l2ChainInfoFiles []string, l2ChainInfoJson string) (*ChainInfo, error) { @@ -85,9 +83,8 @@ func ProcessChainInfo(chainId uint64, chainName string, l2ChainInfoFiles []strin } if chainId != 0 { return nil, fmt.Errorf("unsupported chain ID %v", chainId) - } else { - return nil, fmt.Errorf("unsupported chain name %v", chainName) } + return nil, fmt.Errorf("unsupported chain name %v", chainName) } func findChainInfo(chainId uint64, chainName string, chainsInfoBytes []byte) (*ChainInfo, error) { diff --git a/cmd/genericconf/config.go b/cmd/genericconf/config.go index 88825e9ea9..8e75b61772 100644 --- a/cmd/genericconf/config.go +++ b/cmd/genericconf/config.go @@ -4,11 +4,11 @@ package genericconf import ( + "errors" "time" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" - "github.com/pkg/errors" flag "github.com/spf13/pflag" ) diff --git a/cmd/genericconf/jwt.go b/cmd/genericconf/jwt.go index b48c9a449d..41517aee18 100644 --- a/cmd/genericconf/jwt.go +++ b/cmd/genericconf/jwt.go @@ -21,7 +21,8 @@ func TryCreatingJWTSecret(filename string) error { if errors.Is(err, fs.ErrExist) { log.Info("using existing jwt file", "filename", filename) return nil - } else if err != nil { + } + if err != nil { return fmt.Errorf("couldn't create file: %w", err) } defer func() { diff --git a/cmd/ipfshelper/ipfshelper.go b/cmd/ipfshelper/ipfshelper.go index 08c26b5d68..82e726dbf3 100644 --- a/cmd/ipfshelper/ipfshelper.go +++ b/cmd/ipfshelper/ipfshelper.go @@ -9,7 +9,6 @@ import ( "path/filepath" "strings" "sync" - "time" "github.com/ethereum/go-ethereum/log" "github.com/ipfs/go-libipfs/files" @@ -177,7 +176,6 @@ func (h *IpfsHelper) DownloadFile(ctx context.Context, cidString string, destina } fmt.Printf("\033[2K\rPinned %d / %d subtrees (%.2f%%)", done, all, float32(done)/float32(all)*100) } - rand.Seed(time.Now().UnixNano()) permutation := rand.Perm(len(links)) printProgress(0, len(links)) for i, j := range permutation { diff --git a/cmd/nitro-val/nitro_val.go b/cmd/nitro-val/nitro_val.go index fca7abdb91..40d9fce5b6 100644 --- a/cmd/nitro-val/nitro_val.go +++ b/cmd/nitro-val/nitro_val.go @@ -9,7 +9,6 @@ import ( "path/filepath" "syscall" - "github.com/pkg/errors" flag "github.com/spf13/pflag" _ "github.com/ethereum/go-ethereum/eth/tracers/js" @@ -133,7 +132,7 @@ func mainImpl() int { } err = stack.Start() if err != nil { - fatalErrChan <- errors.Wrap(err, "error starting stack") + fatalErrChan <- fmt.Errorf("error starting stack: %w", err) } defer stack.Close() diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index 31ebb0aa1e..c5faec4dd8 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -8,7 +8,6 @@ import ( "encoding/json" "errors" "fmt" - "github.com/offchainlabs/nitro/cmd/util" "math/big" "os" "regexp" @@ -17,6 +16,8 @@ import ( "sync" "time" + "github.com/offchainlabs/nitro/cmd/util" + "github.com/cavaliergopher/grab/v3" extract "github.com/codeclysm/extract/v3" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -201,7 +202,7 @@ func validateBlockChain(blockChain *core.BlockChain, chainConfig *params.ChainCo if currentBlock == nil { return errors.New("failed to get current block") } - if err := oldConfig.CheckCompatible(chainConfig, currentBlock.NumberU64(), currentBlock.Time()); err != nil { + if err := 
oldConfig.CheckCompatible(chainConfig, currentBlock.Number.Uint64(), currentBlock.Time); err != nil { return fmt.Errorf("invalid chain config, not compatible with previous: %w", err) } } @@ -244,12 +245,11 @@ func (r *importantRoots) addHeader(header *types.Header, overwrite bool) error { } height := header.Number.Uint64() for len(r.heights) > 0 && r.heights[len(r.heights)-1] > height { - if overwrite { - r.roots = r.roots[:len(r.roots)-1] - r.heights = r.heights[:len(r.heights)-1] - } else { + if !overwrite { return nil } + r.roots = r.roots[:len(r.roots)-1] + r.heights = r.heights[:len(r.heights)-1] } if len(r.heights) > 0 && r.heights[len(r.heights)-1]+minRootDistance > height { return nil @@ -575,7 +575,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo if config.Init.ThenQuit { cacheConfig.SnapshotWait = true } - var serializedChainConfig []byte + var parsedInitMessage *arbostypes.ParsedInitMessage if config.Node.L1Reader.Enable { delayedBridge, err := arbnode.NewDelayedBridge(l1Client, rollupAddrs.Bridge, rollupAddrs.DeployedAt) if err != nil { @@ -596,30 +596,34 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo if initMessage == nil { return chainDb, nil, fmt.Errorf("failed to get init message while attempting to get serialized chain config") } - var initChainConfig *params.ChainConfig - var initChainId *big.Int - initChainId, initChainConfig, serializedChainConfig, err = initMessage.ParseInitMessage() + parsedInitMessage, err = initMessage.ParseInitMessage() if err != nil { return chainDb, nil, err } - if initChainId.Cmp(chainId) != 0 { - return chainDb, nil, fmt.Errorf("expected L2 chain ID %v but read L2 chain ID %v from init message in L1 inbox", chainId, initChainId) + if parsedInitMessage.ChainId.Cmp(chainId) != 0 { + return chainDb, nil, fmt.Errorf("expected L2 chain ID %v but read L2 chain ID %v from init message in L1 inbox", chainId, parsedInitMessage.ChainId) } - if initChainConfig != nil { - if err := initChainConfig.CheckCompatible(chainConfig, chainConfig.ArbitrumChainParams.GenesisBlockNum, 0); err != nil { + if parsedInitMessage.ChainConfig != nil { + if err := parsedInitMessage.ChainConfig.CheckCompatible(chainConfig, chainConfig.ArbitrumChainParams.GenesisBlockNum, 0); err != nil { return chainDb, nil, fmt.Errorf("incompatible chain config read from init message in L1 inbox: %w", err) } } - log.Info("Read serialized chain config from init message", "json", string(serializedChainConfig)) + log.Info("Read serialized chain config from init message", "json", string(parsedInitMessage.SerializedChainConfig)) } else { - serializedChainConfig, err = json.Marshal(chainConfig) + serializedChainConfig, err := json.Marshal(chainConfig) if err != nil { return chainDb, nil, err } - log.Warn("Serialized chain config as L1Reader is disabled and serialized chain config from init message is not available", "json", string(serializedChainConfig)) + parsedInitMessage = &arbostypes.ParsedInitMessage{ + ChainId: chainConfig.ChainID, + InitialL1BaseFee: arbostypes.DefaultInitialL1BaseFee, + ChainConfig: chainConfig, + SerializedChainConfig: serializedChainConfig, + } + log.Warn("Created fake init message as L1Reader is disabled and serialized chain config from init message is not available", "json", string(serializedChainConfig)) } - l2BlockChain, err = execution.WriteOrTestBlockChain(chainDb, cacheConfig, initDataReader, chainConfig, serializedChainConfig, config.Node.TxLookupLimit, config.Init.AccountsPerSync) + 
l2BlockChain, err = execution.WriteOrTestBlockChain(chainDb, cacheConfig, initDataReader, chainConfig, parsedInitMessage, config.Node.TxLookupLimit, config.Init.AccountsPerSync) if err != nil { return chainDb, nil, err } diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 25c791729a..0035171078 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -6,6 +6,7 @@ package main import ( "context" "crypto/ecdsa" + "errors" "fmt" "io" "math/big" @@ -17,9 +18,7 @@ import ( "time" "github.com/knadh/koanf" - "github.com/knadh/koanf/providers/confmap" - "github.com/pkg/errors" flag "github.com/spf13/pflag" "github.com/syndtr/goleveldb/leveldb" @@ -652,9 +651,8 @@ func ParseNode(ctx context.Context, args []string) (*NodeConfig, *genericconf.Wa // If persistent-chain not defined, user not creating custom chain if l2ChainId != 0 { return nil, nil, nil, fmt.Errorf("Unknown chain id: %d, L2ChainInfoFiles: %v. update chain id, modify --chain.info-files or provide --persistent.chain\n", l2ChainId, l2ChainInfoFiles) - } else { - return nil, nil, nil, fmt.Errorf("Unknown chain name: %s, L2ChainInfoFiles: %v. update chain name, modify --chain.info-files or provide --persistent.chain\n", l2ChainName, l2ChainInfoFiles) } + return nil, nil, nil, fmt.Errorf("Unknown chain name: %s, L2ChainInfoFiles: %v. update chain name, modify --chain.info-files or provide --persistent.chain\n", l2ChainName, l2ChainInfoFiles) } return nil, nil, nil, errors.New("--persistent.chain not specified") } diff --git a/cmd/replay/main.go b/cmd/replay/main.go index 69e5d37ec5..501562d265 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -30,18 +30,17 @@ import ( "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/gethhook" "github.com/offchainlabs/nitro/wavmio" - "github.com/pkg/errors" ) func getBlockHeaderByHash(hash common.Hash) *types.Header { enc, err := wavmio.ResolvePreImage(hash) if err != nil { - panic(errors.Wrap(err, "Error resolving preimage")) + panic(fmt.Errorf("Error resolving preimage: %w", err)) } header := &types.Header{} err = rlp.DecodeBytes(enc, &header) if err != nil { - panic(errors.Wrap(err, "Error parsing resolved block header")) + panic(fmt.Errorf("Error parsing resolved block header: %w", err)) } return header } @@ -245,19 +244,20 @@ func main() { message := readMessage(false) - chainId, chainConfig, serializedChainConfig, err := message.Message.ParseInitMessage() + initMessage, err := message.Message.ParseInitMessage() if err != nil { panic(err) } + chainConfig := initMessage.ChainConfig if chainConfig == nil { log.Info("No chain config in the init message. 
Falling back to hardcoded chain config.") - chainConfig, err = chaininfo.GetChainConfig(chainId, "", 0, []string{}, "") + chainConfig, err = chaininfo.GetChainConfig(initMessage.ChainId, "", 0, []string{}, "") if err != nil { panic(err) } } - _, err = arbosState.InitializeArbosState(statedb, burn.NewSystemBurner(nil, false), chainConfig, serializedChainConfig) + _, err = arbosState.InitializeArbosState(statedb, burn.NewSystemBurner(nil, false), chainConfig, initMessage) if err != nil { panic(fmt.Sprintf("Error initializing ArbOS: %v", err.Error())) } diff --git a/cmd/util/confighelpers/configuration.go b/cmd/util/confighelpers/configuration.go index b86016505a..4f9a8b2ea1 100644 --- a/cmd/util/confighelpers/configuration.go +++ b/cmd/util/confighelpers/configuration.go @@ -4,6 +4,7 @@ package confighelpers import ( + "errors" "fmt" "os" "strings" @@ -18,7 +19,6 @@ import ( "github.com/knadh/koanf/providers/rawbytes" "github.com/knadh/koanf/providers/s3" "github.com/mitchellh/mapstructure" - "github.com/pkg/errors" flag "github.com/spf13/pflag" "github.com/offchainlabs/nitro/cmd/genericconf" @@ -39,7 +39,7 @@ func ApplyOverrides(f *flag.FlagSet, k *koanf.Koanf) error { // Load configuration file from S3 if setup if len(k.String("conf.s3.secret-key")) != 0 { if err := loadS3Variables(k); err != nil { - return errors.Wrap(err, "error loading S3 settings") + return fmt.Errorf("error loading S3 settings: %w", err) } if err := applyOverrideOverrides(f, k); err != nil { @@ -52,7 +52,7 @@ func ApplyOverrides(f *flag.FlagSet, k *koanf.Koanf) error { for _, configFile := range configFiles { if len(configFile) > 0 { if err := k.Load(file.Provider(configFile), json.Parser()); err != nil { - return errors.Wrap(err, "error loading local config file") + return fmt.Errorf("error loading local config file: %w", err) } if err := applyOverrideOverrides(f, k); err != nil { @@ -68,25 +68,25 @@ func ApplyOverrides(f *flag.FlagSet, k *koanf.Koanf) error { func applyOverrideOverrides(f *flag.FlagSet, k *koanf.Koanf) error { // Command line overrides config file or config string if err := k.Load(posflag.Provider(f, ".", k), nil); err != nil { - return errors.Wrap(err, "error loading command line config") + return fmt.Errorf("error loading command line config: %w", err) } // Config string overrides any config file configString := k.String("conf.string") if len(configString) > 0 { if err := k.Load(rawbytes.Provider([]byte(configString)), json.Parser()); err != nil { - return errors.Wrap(err, "error loading config string config") + return fmt.Errorf("error loading config string config: %w", err) } // Command line overrides config file or config string if err := k.Load(posflag.Provider(f, ".", k), nil); err != nil { - return errors.Wrap(err, "error loading command line config") + return fmt.Errorf("error loading command line config: %w", err) } } // Environment variables overrides config files or command line options if err := loadEnvironmentVariables(k); err != nil { - return errors.Wrap(err, "error loading environment variables") + return fmt.Errorf("error loading environment variables: %w", err) } return nil @@ -192,15 +192,15 @@ func DumpConfig(k *koanf.Koanf, extraOverrideFields map[string]interface{}) erro err := k.Load(confmap.Provider(overrideFields, "."), nil) if err != nil { - return errors.Wrap(err, "error removing extra parameters before dump") + return fmt.Errorf("error removing extra parameters before dump: %w", err) } c, err := k.Marshal(koanfjson.Parser()) if err != nil { - return errors.Wrap(err, 
"unable to marshal config file to JSON") + return fmt.Errorf("unable to marshal config file to JSON: %w", err) } fmt.Println(string(c)) os.Exit(0) - return errors.New("Unreachable") + return fmt.Errorf("Unreachable") } diff --git a/das/db_storage_service.go b/das/db_storage_service.go index 1b0515fee1..fb89b1cf30 100644 --- a/das/db_storage_service.go +++ b/das/db_storage_service.go @@ -138,9 +138,8 @@ func (dbs *DBStorageService) Close(ctx context.Context) error { func (dbs *DBStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { if dbs.discardAfterTimeout { return arbstate.DiscardAfterDataTimeout, nil - } else { - return arbstate.KeepForever, nil } + return arbstate.KeepForever, nil } func (dbs *DBStorageService) String() string { diff --git a/das/ipfs_storage_service.go b/das/ipfs_storage_service.go index 4d92e85c73..4f73242c22 100644 --- a/das/ipfs_storage_service.go +++ b/das/ipfs_storage_service.go @@ -79,11 +79,6 @@ func NewIpfsStorageService(ctx context.Context, config IpfsStorageServiceConfig) } log.Info("IPFS node started up", "hostAddresses", addrs) - if config.PinAfterGet { - if config.PinPercentage != 100.0 { - rand.Seed(time.Now().UnixNano()) - } - } return &IpfsStorageService{ config: config, ipfsHelper: ipfsHelper, diff --git a/das/redis_storage_service.go b/das/redis_storage_service.go index ede9a8dcc0..a005c70a44 100644 --- a/das/redis_storage_service.go +++ b/das/redis_storage_service.go @@ -6,6 +6,7 @@ package das import ( "context" "crypto/hmac" + "errors" "fmt" "time" @@ -16,7 +17,6 @@ import ( "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" "github.com/offchainlabs/nitro/util/redisutil" - "github.com/pkg/errors" flag "github.com/spf13/pflag" "github.com/ethereum/go-ethereum/common" diff --git a/das/s3_storage_service.go b/das/s3_storage_service.go index 0b56cdbddf..18a9ce1475 100644 --- a/das/s3_storage_service.go +++ b/das/s3_storage_service.go @@ -148,9 +148,8 @@ func (s3s *S3StorageService) Close(ctx context.Context) error { func (s3s *S3StorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { if s3s.discardAfterTimeout { return arbstate.DiscardAfterDataTimeout, nil - } else { - return arbstate.KeepForever, nil } + return arbstate.KeepForever, nil } func (s3s *S3StorageService) String() string { diff --git a/das/sign_after_store_das_writer.go b/das/sign_after_store_das_writer.go index 6d62ea42be..1a244ab640 100644 --- a/das/sign_after_store_das_writer.go +++ b/das/sign_after_store_das_writer.go @@ -7,11 +7,11 @@ import ( "bytes" "context" "encoding/hex" + "errors" "fmt" "os" "time" - "github.com/pkg/errors" flag "github.com/spf13/pflag" "github.com/ethereum/go-ethereum/common/hexutil" @@ -49,7 +49,7 @@ func (c *KeyConfig) BLSPrivKey() (blsSignatures.PrivateKey, error) { } privKey, err := DecodeBase64BLSPrivateKey(privKeyBytes) if err != nil { - return nil, errors.Wrap(err, "'priv-key' was invalid") + return nil, fmt.Errorf("'priv-key' was invalid: %w", err) } return privKey, nil } @@ -162,9 +162,8 @@ func NewSignAfterStoreDASWriterWithSeqInboxCaller( extraBpVerifier = func(message []byte, timeout uint64, sig []byte) bool { if len(sig) >= 64 { return crypto.VerifySignature(pubkey, dasStoreHash(message, timeout), sig[:64]) - } else { - return false } + return false } } diff --git a/gethhook/geth-hook.go b/gethhook/geth-hook.go index 3e19fcaba6..dcd1788710 100644 --- a/gethhook/geth-hook.go +++ b/gethhook/geth-hook.go @@ -44,7 +44,7 @@ func (p 
ArbosPrecompileWrapper) RunAdvanced( } func init() { - core.ReadyEVMForL2 = func(evm *vm.EVM, msg core.Message) { + core.ReadyEVMForL2 = func(evm *vm.EVM, msg *core.Message) { if evm.ChainConfig().IsArbitrum() { evm.ProcessingHook = arbos.NewTxProcessor(evm, msg) } diff --git a/go-ethereum b/go-ethereum index a23dc84cfd..f214ae0426 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit a23dc84cfd8a00f1a971edf917563a72fdf4304f +Subproject commit f214ae0426fa0affbc29871a5277f4dc75afb0bb diff --git a/go.mod b/go.mod index 42217858fb..fc52f1f763 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/offchainlabs/nitro -go 1.19 +go 1.20 replace github.com/VictoriaMetrics/fastcache => ./fastcache @@ -28,13 +28,14 @@ require ( github.com/libp2p/go-libp2p v0.26.4 github.com/multiformats/go-multiaddr v0.8.0 github.com/multiformats/go-multihash v0.2.1 - github.com/pkg/errors v0.9.1 github.com/spf13/pflag v1.0.5 github.com/wealdtech/go-merkletree v1.0.0 golang.org/x/term v0.5.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) +require github.com/gofrs/flock v0.8.1 // indirect + require ( bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc // indirect github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect @@ -210,6 +211,7 @@ require ( github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/openzipkin/zipkin-go v0.4.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/polydawn/refmt v0.89.0 // indirect github.com/prometheus/client_golang v1.14.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect @@ -300,7 +302,6 @@ require ( github.com/mitchellh/mapstructure v1.4.2 github.com/mitchellh/pointerstructure v1.2.0 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect - github.com/prometheus/tsdb v0.7.1 // indirect github.com/rs/cors v1.7.0 // indirect github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect github.com/status-im/keycard-go v0.2.0 // indirect diff --git a/go.sum b/go.sum index 2432bec383..f351bb9545 100644 --- a/go.sum +++ b/go.sum @@ -284,7 +284,6 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczC github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo= github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= @@ -368,13 +367,11 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= github.com/go-kit/kit v0.10.0/go.mod 
h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -418,6 +415,8 @@ github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= @@ -923,6 +922,7 @@ github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= @@ -1321,7 +1321,6 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= @@ -1411,7 +1410,6 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= 
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= @@ -1428,8 +1426,6 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= -github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= github.com/quic-go/qtls-go1-19 v0.2.1 h1:aJcKNMkH5ASEJB9FXNeZCyTEIHU1J7MmHyz1Q1TSG1A= diff --git a/nodeInterface/NodeInterface.go b/nodeInterface/NodeInterface.go index 265113e0a5..41475b167d 100644 --- a/nodeInterface/NodeInterface.go +++ b/nodeInterface/NodeInterface.go @@ -39,9 +39,9 @@ type NodeInterface struct { backend core.NodeInterfaceBackendAPI context context.Context header *types.Header - sourceMessage types.Message + sourceMessage *core.Message returnMessage struct { - message *types.Message + message *core.Message changed *bool } } @@ -143,8 +143,8 @@ func (n NodeInterface) EstimateRetryableTicket( From: util.RemapL1Address(sender), L1BaseFee: l1BaseFee, DepositValue: deposit, - GasFeeCap: n.sourceMessage.GasPrice(), - Gas: n.sourceMessage.Gas(), + GasFeeCap: n.sourceMessage.GasPrice, + Gas: n.sourceMessage.GasLimit, RetryTo: pRetryTo, RetryValue: l2CallValue, Beneficiary: callValueRefundAddress, @@ -154,13 +154,13 @@ func (n NodeInterface) EstimateRetryableTicket( } // ArbitrumSubmitRetryableTx is unsigned so the following won't panic - msg, err := types.NewTx(submitTx).AsMessage(types.NewArbitrumSigner(nil), nil) + msg, err := core.TransactionToMessage(types.NewTx(submitTx), types.NewArbitrumSigner(nil), nil) if err != nil { return err } - msg.TxRunMode = types.MessageGasEstimationMode - *n.returnMessage.message = msg + msg.TxRunMode = core.MessageGasEstimationMode + *n.returnMessage.message = *msg *n.returnMessage.changed = true return nil } @@ -170,7 +170,7 @@ func (n NodeInterface) ConstructOutboxProof(c ctx, evm mech, size, leaf uint64) hash0 := bytes32{} currentBlock := n.backend.CurrentBlock() - currentBlockInfo := types.DeserializeHeaderExtraInformation(currentBlock.Header()) + currentBlockInfo := types.DeserializeHeaderExtraInformation(currentBlock) if leaf > currentBlockInfo.SendCount { return hash0, hash0, nil, errors.New("leaf does not exist") } @@ -297,7 +297,7 @@ func (n NodeInterface) ConstructOutboxProof(c ctx, evm mech, size, leaf uint64) } } - search(0, currentBlock.NumberU64(), query) + search(0, currentBlock.Number.Uint64(), query) if searchErr != nil { return hash0, hash0, nil, searchErr @@ -425,11 +425,11 @@ func (n NodeInterface) messageArgs( evm mech, value huge, to addr, contractCreation bool, data []byte, ) arbitrum.TransactionArgs { msg := n.sourceMessage - from := msg.From() - gas := msg.Gas() - nonce := msg.Nonce() - maxFeePerGas := msg.GasFeeCap() - maxPriorityFeePerGas := msg.GasTipCap() + from := msg.From + gas := msg.GasLimit + nonce := msg.Nonce + maxFeePerGas := msg.GasFeeCap + 
maxPriorityFeePerGas := msg.GasTipCap chainid := evm.ChainConfig().ChainID args := arbitrum.TransactionArgs{ @@ -458,7 +458,7 @@ func (n NodeInterface) GasEstimateL1Component( args.Gas = (*hexutil.Uint64)(&randomGas) // We set the run mode to eth_call mode here because we want an exact estimate, not a padded estimate - msg, err := args.ToMessage(randomGas, n.header, evm.StateDB.(*state.StateDB), types.MessageEthcallMode) + msg, err := args.ToMessage(randomGas, n.header, evm.StateDB.(*state.StateDB), core.MessageEthcallMode) if err != nil { return 0, nil, nil, err } @@ -510,7 +510,7 @@ func (n NodeInterface) GasEstimateComponents( // Setting the gas currently doesn't affect the PosterDataCost, // but we do it anyways for accuracy with potential future changes. args.Gas = &totalRaw - msg, err := args.ToMessage(gasCap, n.header, evm.StateDB.(*state.StateDB), types.MessageGasEstimationMode) + msg, err := args.ToMessage(gasCap, n.header, evm.StateDB.(*state.StateDB), core.MessageGasEstimationMode) if err != nil { return 0, 0, nil, nil, err } @@ -526,7 +526,7 @@ func (n NodeInterface) GasEstimateComponents( } // Compute the fee paid for L1 in L2 terms - gasForL1 := arbos.GetPosterGas(c.State, baseFee, types.MessageGasEstimationMode, feeForL1) + gasForL1 := arbos.GetPosterGas(c.State, baseFee, core.MessageGasEstimationMode, feeForL1) return total, gasForL1, baseFee, l1BaseFeeEstimate, nil } diff --git a/nodeInterface/NodeInterfaceDebug.go b/nodeInterface/NodeInterfaceDebug.go index b64f10420c..ae9c157ce4 100644 --- a/nodeInterface/NodeInterfaceDebug.go +++ b/nodeInterface/NodeInterfaceDebug.go @@ -18,9 +18,9 @@ type NodeInterfaceDebug struct { backend core.NodeInterfaceBackendAPI context context.Context header *types.Header - sourceMessage types.Message + sourceMessage *core.Message returnMessage struct { - message *types.Message + message *core.Message changed *bool } } diff --git a/nodeInterface/virtual-contracts.go b/nodeInterface/virtual-contracts.go index dab567a088..29ca3f2b82 100644 --- a/nodeInterface/virtual-contracts.go +++ b/nodeInterface/virtual-contracts.go @@ -33,7 +33,6 @@ type hash = common.Hash type bytes32 = [32]byte type ctx = *precompiles.Context -type Message = types.Message type BackendAPI = core.NodeInterfaceBackendAPI type ExecutionResult = core.ExecutionResult @@ -49,18 +48,18 @@ func init() { _, nodeInterfaceDebug := precompiles.MakePrecompile(nodeInterfaceDebugMeta, nodeInterfaceDebugImpl) core.InterceptRPCMessage = func( - msg Message, + msg *core.Message, ctx context.Context, statedb *state.StateDB, header *types.Header, backend core.NodeInterfaceBackendAPI, - ) (Message, *ExecutionResult, error) { - to := msg.To() + ) (*core.Message, *ExecutionResult, error) { + to := msg.To arbosVersion := arbosState.ArbOSVersion(statedb) // check ArbOS has been installed if to != nil && arbosVersion != 0 { var precompile precompiles.ArbosPrecompile var swapMessages bool - returnMessage := &Message{} + returnMessage := &core.Message{} var address addr switch *to { @@ -99,16 +98,16 @@ func init() { core.ReadyEVMForL2(evm, msg) output, gasLeft, err := precompile.Call( - msg.Data(), address, address, msg.From(), msg.Value(), false, msg.Gas(), evm, + msg.Data, address, address, msg.From, msg.Value, false, msg.GasLimit, evm, ) if err != nil { return msg, nil, err } if swapMessages { - return *returnMessage, nil, nil + return returnMessage, nil, nil } res := &ExecutionResult{ - UsedGas: msg.Gas() - gasLeft, + UsedGas: msg.GasLimit - gasLeft, Err: nil, ReturnData: output, ScheduledTxes: 
nil, @@ -118,7 +117,7 @@ func init() { return msg, nil, nil } - core.InterceptRPCGasCap = func(gascap *uint64, msg Message, header *types.Header, statedb *state.StateDB) { + core.InterceptRPCGasCap = func(gascap *uint64, msg *core.Message, header *types.Header, statedb *state.StateDB) { if *gascap == 0 { // It's already unlimited return @@ -139,7 +138,7 @@ func init() { } posterCost, _ := state.L1PricingState().PosterDataCost(msg, l1pricing.BatchPosterAddress) - posterCostInL2Gas := arbos.GetPosterGas(state, header.BaseFee, msg.RunMode(), posterCost) + posterCostInL2Gas := arbos.GetPosterGas(state, header.BaseFee, msg.TxRunMode, posterCost) *gascap = arbmath.SaturatingUAdd(*gascap, posterCostInL2Gas) } diff --git a/precompiles/ArbAddressTable_test.go b/precompiles/ArbAddressTable_test.go index 5a0831e276..b01a460636 100644 --- a/precompiles/ArbAddressTable_test.go +++ b/precompiles/ArbAddressTable_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" @@ -154,9 +154,9 @@ func newMockEVMForTesting() *vm.EVM { return newMockEVMForTestingWithVersion(nil) } -func newMockEVMForTestingWithVersionAndRunMode(version *uint64, runMode types.MessageRunMode) *vm.EVM { +func newMockEVMForTestingWithVersionAndRunMode(version *uint64, runMode core.MessageRunMode) *vm.EVM { evm := newMockEVMForTestingWithVersion(version) - evm.ProcessingHook = arbos.NewTxProcessor(evm, types.Message{TxRunMode: runMode}) + evm.ProcessingHook = arbos.NewTxProcessor(evm, &core.Message{TxRunMode: runMode}) return evm } diff --git a/precompiles/ArbOwner_test.go b/precompiles/ArbOwner_test.go index 3c20786e2b..b5527e0017 100644 --- a/precompiles/ArbOwner_test.go +++ b/precompiles/ArbOwner_test.go @@ -12,6 +12,7 @@ import ( "github.com/offchainlabs/nitro/arbos/l1pricing" "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/arbosState" @@ -155,7 +156,7 @@ func TestArbOwner(t *testing.T) { } func TestArbOwnerSetChainConfig(t *testing.T) { - evm := newMockEVMForTestingWithVersionAndRunMode(nil, types.MessageGasEstimationMode) + evm := newMockEVMForTestingWithVersionAndRunMode(nil, core.MessageGasEstimationMode) caller := common.BytesToAddress(crypto.Keccak256([]byte{})[:20]) tracer := util.NewTracingInfo(evm, testhelpers.RandomAddress(), types.ArbosAddress, util.TracingDuringEVM) state, err := arbosState.OpenArbosState(evm.StateDB, burn.NewSystemBurner(tracer, false)) diff --git a/precompiles/ArbRetryableTx.go b/precompiles/ArbRetryableTx.go index 96c32bdb46..3cb7510f0b 100644 --- a/precompiles/ArbRetryableTx.go +++ b/precompiles/ArbRetryableTx.go @@ -41,9 +41,8 @@ var ErrSelfModifyingRetryable = errors.New("retryable cannot modify itself") func (con ArbRetryableTx) oldNotFoundError(c ctx) error { if c.State.ArbOSVersion() >= 3 { return con.NoTicketWithIDError() - } else { - return errors.New("ticketId not found") } + return errors.New("ticketId not found") } // Redeem schedules an attempt to redeem the retryable, donating all of the call's gas to the redeem attempt @@ -226,9 +225,8 @@ func (con ArbRetryableTx) Cancel(c ctx, evm mech, ticketId bytes32) error { func (con ArbRetryableTx) GetCurrentRedeemer(c ctx, evm mech) (common.Address, error) { if c.txProcessor.CurrentRefundTo != nil 
{ return *c.txProcessor.CurrentRefundTo, nil - } else { - return common.Address{}, nil } + return common.Address{}, nil } func (con ArbRetryableTx) SubmitRetryable( diff --git a/precompiles/ArbSys.go b/precompiles/ArbSys.go index 13006f4447..dc92baf448 100644 --- a/precompiles/ArbSys.go +++ b/precompiles/ArbSys.go @@ -39,9 +39,8 @@ func (con *ArbSys) ArbBlockHash(c ctx, evm mech, arbBlockNumber *big.Int) (bytes if !arbBlockNumber.IsUint64() { if c.State.ArbOSVersion() >= 11 { return bytes32{}, con.InvalidBlockNumberError(arbBlockNumber, evm.Context.BlockNumber) - } else { - return bytes32{}, errors.New("invalid block number") } + return bytes32{}, errors.New("invalid block number") } requestedBlockNum := arbBlockNumber.Uint64() @@ -49,9 +48,8 @@ func (con *ArbSys) ArbBlockHash(c ctx, evm mech, arbBlockNumber *big.Int) (bytes if requestedBlockNum >= currentNumber || requestedBlockNum+256 < currentNumber { if c.State.ArbOSVersion() >= 11 { return common.Hash{}, con.InvalidBlockNumberError(arbBlockNumber, evm.Context.BlockNumber) - } else { - return common.Hash{}, errors.New("invalid block number for ArbBlockHAsh") } + return common.Hash{}, errors.New("invalid block number for ArbBlockHAsh") } return evm.Context.GetHash(requestedBlockNum), nil diff --git a/precompiles/precompile.go b/precompiles/precompile.go index 022da5732e..55dd1e3e4e 100644 --- a/precompiles/precompile.go +++ b/precompiles/precompile.go @@ -742,10 +742,9 @@ func (p *Precompile) Call( // nolint:errorlint if arbosVersion >= 11 || errRet == vm.ErrExecutionReverted { return nil, callerCtx.gasLeft, vm.ErrExecutionReverted - } else { - // Preserve behavior with old versions which would zero out gas on this type of error - return nil, 0, errRet } + // Preserve behavior with old versions which would zero out gas on this type of error + return nil, 0, errRet } result := make([]interface{}, resultCount) for i := 0; i < resultCount; i++ { diff --git a/precompiles/wrapper.go b/precompiles/wrapper.go index 3214011be9..b9363c40a2 100644 --- a/precompiles/wrapper.go +++ b/precompiles/wrapper.go @@ -41,10 +41,9 @@ func (wrapper *DebugPrecompile) Call( if debugMode { con := wrapper.precompile return con.Call(input, precompileAddress, actingAsAddress, caller, value, readOnly, gasSupplied, evm) - } else { - // take all gas - return nil, 0, errors.New("debug precompiles are disabled") } + // Take all gas. + return nil, 0, errors.New("debug precompiles are disabled") } func (wrapper *DebugPrecompile) Precompile() *Precompile { diff --git a/staker/assertion.go b/staker/assertion.go index 2f683e07c9..19ee65e251 100644 --- a/staker/assertion.go +++ b/staker/assertion.go @@ -64,7 +64,7 @@ type Assertion struct { type NodeInfo struct { NodeNum uint64 - BlockProposed uint64 + L1BlockProposed uint64 Assertion *Assertion InboxMaxCount *big.Int AfterInboxBatchAcc common.Hash diff --git a/staker/block_validator.go b/staker/block_validator.go index d537f6e630..56bb2729c7 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -5,12 +5,12 @@ package staker import ( "context" + "errors" "fmt" "sync" "sync/atomic" "time" - "github.com/pkg/errors" flag "github.com/spf13/pflag" "github.com/ethereum/go-ethereum/common" @@ -365,7 +365,7 @@ func (v *BlockValidator) sendRecord(s *validationStatus, mustDeref bool) error { if mustDeref { v.recordingDatabase.Dereference(prevHeader) } - return errors.Errorf("failed status check for send record. Status: %v", s.getStatus()) + return fmt.Errorf("failed status check for send record. 
Status: %v", s.getStatus()) } v.LaunchThread(func(ctx context.Context) { if mustDeref { @@ -603,15 +603,19 @@ func (v *BlockValidator) sendValidations(ctx context.Context) { defer cancel() validationStatus.Cancel = cancel err := v.ValidationEntryAddSeqMessage(ctx, validationStatus.Entry, startPos, endPos, seqMsg) - if err != nil && validationCtx.Err() == nil { + if err != nil { validationStatus.replaceStatus(Prepared, RecordFailed) - log.Error("error preparing validation", "err", err) + if validationCtx.Err() == nil { + log.Error("error preparing validation", "err", err) + } return } input, err := validationStatus.Entry.ToInput() - if err != nil && validationCtx.Err() == nil { + if err != nil { validationStatus.replaceStatus(Prepared, RecordFailed) - log.Error("error preparing validation", "err", err) + if validationCtx.Err() == nil { + log.Error("error preparing validation", "err", err) + } return } for _, moduleRoot := range wasmRoots { @@ -821,7 +825,7 @@ func (v *BlockValidator) progressValidated() { func (v *BlockValidator) AssumeValid(globalState validator.GoGlobalState) error { if v.Started() { - return errors.Errorf("cannot handle AssumeValid while running") + return fmt.Errorf("cannot handle AssumeValid while running") } v.reorgMutex.Lock() diff --git a/staker/builder_backend.go b/staker/builder_backend.go index 8faebc58d0..1bf15ff027 100644 --- a/staker/builder_backend.go +++ b/staker/builder_backend.go @@ -9,10 +9,10 @@ import ( "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/offchainlabs/nitro/arbutil" - "github.com/pkg/errors" ) // ValidatorTxBuilder combines any transactions sent to it via SendTransaction into one batch, @@ -72,23 +72,32 @@ func (b *ValidatorTxBuilder) SendTransaction(ctx context.Context, tx *types.Tran if err != nil { // Remove the bad tx b.transactions = b.transactions[:len(b.transactions)-1] - return errors.WithStack(err) + return err } return nil } -func (b *ValidatorTxBuilder) AuthWithAmount(ctx context.Context, amount *big.Int) *bind.TransactOpts { +// While this is not currently required, it's recommended not to reuse the returned auth for multiple transactions, +// as for an EOA this has the nonce in it. However, the EOA wwallet currently will only publish the first created tx, +// which is why that doesn't really matter. +func (b *ValidatorTxBuilder) AuthWithAmount(ctx context.Context, amount *big.Int) (*bind.TransactOpts, error) { + nonce, err := b.NonceAt(ctx, b.builderAuth.From, nil) + if err != nil { + return nil, err + } return &bind.TransactOpts{ From: b.builderAuth.From, - Nonce: b.builderAuth.Nonce, + Nonce: new(big.Int).SetUint64(nonce), Signer: b.builderAuth.Signer, Value: amount, GasPrice: b.builderAuth.GasPrice, GasLimit: b.builderAuth.GasLimit, Context: ctx, - } + }, nil } -func (b *ValidatorTxBuilder) Auth(ctx context.Context) *bind.TransactOpts { - return b.AuthWithAmount(ctx, big.NewInt(0)) +// Auth is the same as AuthWithAmount with a 0 amount specified. +// See AuthWithAmount docs for important details. 
+func (b *ValidatorTxBuilder) Auth(ctx context.Context) (*bind.TransactOpts, error) { + return b.AuthWithAmount(ctx, common.Big0) } diff --git a/staker/l1_validator.go b/staker/l1_validator.go index 66dfbf5abd..1d182bd3c1 100644 --- a/staker/l1_validator.go +++ b/staker/l1_validator.go @@ -5,6 +5,7 @@ package staker import ( "context" + "errors" "fmt" "math/big" "time" @@ -20,7 +21,6 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/solgen/go/rollupgen" - "github.com/pkg/errors" ) type ConfirmType uint8 @@ -169,7 +169,11 @@ func (v *L1Validator) resolveNextNode(ctx context.Context, info *StakerInfo, lat return false, nil } log.Warn("rejecting node", "node", unresolvedNodeIndex) - _, err = v.rollup.RejectNextNode(v.builder.Auth(ctx), *addr) + auth, err := v.builder.Auth(ctx) + if err != nil { + return false, err + } + _, err = v.rollup.RejectNextNode(auth, *addr) return true, err case CONFIRM_TYPE_VALID: nodeInfo, err := v.rollup.LookupNode(ctx, unresolvedNodeIndex) @@ -178,7 +182,11 @@ func (v *L1Validator) resolveNextNode(ctx context.Context, info *StakerInfo, lat } afterGs := nodeInfo.AfterState().GlobalState log.Info("confirming node", "node", unresolvedNodeIndex) - _, err = v.rollup.ConfirmNextNode(v.builder.Auth(ctx), afterGs.BlockHash, afterGs.SendRoot) + auth, err := v.builder.Auth(ctx) + if err != nil { + return false, err + } + _, err = v.rollup.ConfirmNextNode(auth, afterGs.BlockHash, afterGs.SendRoot) if err != nil { return false, err } @@ -286,14 +294,13 @@ func (v *L1Validator) generateNodeAction(ctx context.Context, stakerInfo *OurSta log.Error("invalid start global state inbox position", startState.GlobalState.BlockHash, "batch", startState.GlobalState.Batch, "pos", startState.GlobalState.PosInBatch) return nil, false, errors.New("invalid start global state inbox position") } - latestHeader := v.l2Blockchain.CurrentBlock().Header() + latestHeader := v.l2Blockchain.CurrentBlock() if latestHeader.Number.Int64() < expectedBlockHeight { log.Info("catching up to chain blocks", "localBlocks", latestHeader.Number, "target", expectedBlockHeight) return nil, false, nil - } else { - log.Error("unknown start block hash", "hash", startState.GlobalState.BlockHash, "batch", startState.GlobalState.Batch, "pos", startState.GlobalState.PosInBatch) - return nil, false, errors.New("unknown start block hash") } + log.Error("unknown start block hash", "hash", startState.GlobalState.BlockHash, "batch", startState.GlobalState.Batch, "pos", startState.GlobalState.PosInBatch) + return nil, false, errors.New("unknown start block hash") } var lastBlockValidated uint64 @@ -319,7 +326,7 @@ func (v *L1Validator) generateNodeAction(ctx context.Context, stakerInfo *OurSta return nil, false, fmt.Errorf("wasmroot doesn't match rollup : %v, valid: %v", v.lastWasmModuleRoot, validRoots) } } else { - lastBlockValidated = v.l2Blockchain.CurrentBlock().Header().Number.Uint64() + lastBlockValidated = v.l2Blockchain.CurrentBlock().Number.Uint64() if localBatchCount > 0 { messageCount, err := v.inboxTracker.GetBatchMessageCount(localBatchCount - 1) @@ -341,7 +348,7 @@ func (v *L1Validator) generateNodeAction(ctx context.Context, stakerInfo *OurSta return nil, false, fmt.Errorf("error getting latest L1 block number: %w", err) } - parentChainBlockNumber, err := arbutil.CorrespondingL1BlockNumber(ctx, v.client, currentL1BlockNum) + l1BlockNumber, err := arbutil.CorrespondingL1BlockNumber(ctx, v.client, currentL1BlockNum) if err != nil { return nil, 
false, err } @@ -351,7 +358,7 @@ func (v *L1Validator) generateNodeAction(ctx context.Context, stakerInfo *OurSta return nil, false, fmt.Errorf("error getting rollup minimum assertion period: %w", err) } - timeSinceProposed := big.NewInt(int64(parentChainBlockNumber) - int64(startStateProposed)) + timeSinceProposed := big.NewInt(int64(l1BlockNumber) - int64(startStateProposed)) if timeSinceProposed.Cmp(minAssertionPeriod) < 0 { // Too soon to assert return nil, false, nil @@ -596,7 +603,7 @@ func (v *L1Validator) createNewNodeAction( } // Returns (execution state, inbox max count, L1 block proposed, error) -func lookupNodeStartState(ctx context.Context, rollup *RollupWatcher, nodeNum uint64, nodeHash [32]byte) (*validator.ExecutionState, *big.Int, uint64, error) { +func lookupNodeStartState(ctx context.Context, rollup *RollupWatcher, nodeNum uint64, nodeHash common.Hash) (*validator.ExecutionState, *big.Int, uint64, error) { if nodeNum == 0 { creationEvent, err := rollup.LookupCreation(ctx) if err != nil { @@ -618,5 +625,5 @@ func lookupNodeStartState(ctx context.Context, rollup *RollupWatcher, nodeNum ui if node.NodeHash != nodeHash { return nil, nil, 0, errors.New("looked up starting node but found wrong hash") } - return node.AfterState(), node.InboxMaxCount, node.BlockProposed, nil + return node.AfterState(), node.InboxMaxCount, node.L1BlockProposed, nil } diff --git a/staker/rollup_watcher.go b/staker/rollup_watcher.go index 135bbcbd0b..5a2089eca8 100644 --- a/staker/rollup_watcher.go +++ b/staker/rollup_watcher.go @@ -6,6 +6,7 @@ package staker import ( "context" "encoding/binary" + "errors" "fmt" "math/big" @@ -14,7 +15,6 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/solgen/go/rollupgen" - "github.com/pkg/errors" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -52,7 +52,7 @@ type RollupWatcher struct { func NewRollupWatcher(address common.Address, client arbutil.L1Interface, callOpts bind.CallOpts) (*RollupWatcher, error) { con, err := rollupgen.NewRollupUserLogic(address, client) if err != nil { - return nil, errors.WithStack(err) + return nil, err } return &RollupWatcher{ @@ -76,7 +76,7 @@ func (r *RollupWatcher) getNodeCreationBlock(ctx context.Context, nodeNum uint64 log.Trace("failed to call getNodeCreationBlockForLogLookup, falling back on node CreatedAtBlock field", "err", err) node, err := r.GetNode(callOpts, nodeNum) if err != nil { - return nil, errors.WithStack(err) + return nil, err } createdAtBlock = new(big.Int).SetUint64(node.CreatedAtBlock) } @@ -98,7 +98,7 @@ func (r *RollupWatcher) LookupCreation(ctx context.Context) (*rollupgen.RollupUs } logs, err := r.client.FilterLogs(ctx, query) if err != nil { - return nil, errors.WithStack(err) + return nil, err } if len(logs) == 0 { return nil, errors.New("rollup not created") @@ -107,7 +107,7 @@ func (r *RollupWatcher) LookupCreation(ctx context.Context) (*rollupgen.RollupUs return nil, errors.New("rollup created multiple times") } ev, err := r.ParseRollupInitialized(logs[0]) - return ev, errors.WithStack(err) + return ev, err } func (r *RollupWatcher) LookupNode(ctx context.Context, number uint64) (*NodeInfo, error) { @@ -125,22 +125,26 @@ func (r *RollupWatcher) LookupNode(ctx context.Context, number uint64) (*NodeInf } logs, err := r.client.FilterLogs(ctx, query) if err != nil { - return nil, errors.WithStack(err) + return nil, err } if len(logs) == 0 { - return nil, errors.New("Couldn't find requested 
node") + return nil, fmt.Errorf("couldn't find requested node %v", number) } if len(logs) > 1 { - return nil, errors.New("Found multiple instances of requested node") + return nil, fmt.Errorf("found multiple instances of requested node %v", number) } ethLog := logs[0] parsedLog, err := r.ParseNodeCreated(ethLog) if err != nil { - return nil, errors.WithStack(err) + return nil, err + } + l1BlockProposed, err := arbutil.CorrespondingL1BlockNumber(ctx, r.client, ethLog.BlockNumber) + if err != nil { + return nil, err } return &NodeInfo{ NodeNum: parsedLog.NodeNum, - BlockProposed: ethLog.BlockNumber, + L1BlockProposed: l1BlockProposed, Assertion: NewAssertionFromSolidity(parsedLog.Assertion), InboxMaxCount: parsedLog.InboxMaxCount, AfterInboxBatchAcc: parsedLog.AfterInboxBatchAcc, @@ -152,7 +156,7 @@ func (r *RollupWatcher) LookupNode(ctx context.Context, number uint64) (*NodeInf func (r *RollupWatcher) LookupNodeChildren(ctx context.Context, nodeNum uint64, nodeHash common.Hash) ([]*NodeInfo, error) { node, err := r.RollupUserLogic.GetNode(r.getCallOpts(ctx), nodeNum) if err != nil { - return nil, errors.WithStack(err) + return nil, err } if node.LatestChildNumber == 0 { return nil, nil @@ -162,7 +166,7 @@ func (r *RollupWatcher) LookupNodeChildren(ctx context.Context, nodeNum uint64, } latestChild, err := r.RollupUserLogic.GetNode(r.getCallOpts(ctx), node.LatestChildNumber) if err != nil { - return nil, errors.WithStack(err) + return nil, err } var query = ethereum.FilterQuery{ FromBlock: new(big.Int).SetUint64(node.CreatedAtBlock), @@ -172,23 +176,27 @@ func (r *RollupWatcher) LookupNodeChildren(ctx context.Context, nodeNum uint64, } logs, err := r.client.FilterLogs(ctx, query) if err != nil { - return nil, errors.WithStack(err) + return nil, err } infos := make([]*NodeInfo, 0, len(logs)) lastHash := nodeHash for i, ethLog := range logs { parsedLog, err := r.ParseNodeCreated(ethLog) if err != nil { - return nil, errors.WithStack(err) + return nil, err } lastHashIsSibling := [1]byte{0} if i > 0 { lastHashIsSibling[0] = 1 } lastHash = crypto.Keccak256Hash(lastHashIsSibling[:], lastHash[:], parsedLog.ExecutionHash[:], parsedLog.AfterInboxBatchAcc[:], parsedLog.WasmModuleRoot[:]) + l1BlockProposed, err := arbutil.CorrespondingL1BlockNumber(ctx, r.client, ethLog.BlockNumber) + if err != nil { + return nil, err + } infos = append(infos, &NodeInfo{ NodeNum: parsedLog.NodeNum, - BlockProposed: ethLog.BlockNumber, + L1BlockProposed: l1BlockProposed, Assertion: NewAssertionFromSolidity(parsedLog.Assertion), InboxMaxCount: parsedLog.InboxMaxCount, AfterInboxBatchAcc: parsedLog.AfterInboxBatchAcc, @@ -202,7 +210,7 @@ func (r *RollupWatcher) LookupNodeChildren(ctx context.Context, nodeNum uint64, func (r *RollupWatcher) LatestConfirmedCreationBlock(ctx context.Context) (uint64, error) { latestConfirmed, err := r.LatestConfirmed(r.getCallOpts(ctx)) if err != nil { - return 0, errors.WithStack(err) + return 0, err } creation, err := r.getNodeCreationBlock(ctx, latestConfirmed) if err != nil { @@ -236,7 +244,7 @@ func (r *RollupWatcher) LookupChallengedNode(ctx context.Context, address common } logs, err := r.client.FilterLogs(ctx, query) if err != nil { - return 0, errors.WithStack(err) + return 0, err } if len(logs) == 0 { @@ -249,7 +257,7 @@ func (r *RollupWatcher) LookupChallengedNode(ctx context.Context, address common challenge, err := r.ParseRollupChallengeStarted(logs[0]) if err != nil { - return 0, errors.WithStack(err) + return 0, err } return challenge.ChallengedNode, nil @@ -258,7 +266,7 @@ 
func (r *RollupWatcher) LookupChallengedNode(ctx context.Context, address common func (r *RollupWatcher) StakerInfo(ctx context.Context, staker common.Address) (*StakerInfo, error) { info, err := r.StakerMap(r.getCallOpts(ctx), staker) if err != nil { - return nil, errors.WithStack(err) + return nil, err } if !info.IsStaked { return nil, nil diff --git a/staker/staker.go b/staker/staker.go index f544ec6ea5..baaf81f9a2 100644 --- a/staker/staker.go +++ b/staker/staker.go @@ -5,6 +5,7 @@ package staker import ( "context" + "errors" "fmt" "math/big" "runtime/debug" @@ -16,7 +17,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" - "github.com/pkg/errors" flag "github.com/spf13/pflag" "github.com/offchainlabs/nitro/cmd/genericconf" @@ -85,17 +85,18 @@ type L1ValidatorConfig struct { } func (c *L1ValidatorConfig) ParseStrategy() (StakerStrategy, error) { - if strings.ToLower(c.Strategy) == "watchtower" { + switch strings.ToLower(c.Strategy) { + case "watchtower": return WatchtowerStrategy, nil - } else if strings.ToLower(c.Strategy) == "defensive" { + case "defensive": return DefensiveStrategy, nil - } else if strings.ToLower(c.Strategy) == "stakelatest" { + case "stakelatest": return StakeLatestStrategy, nil - } else if strings.ToLower(c.Strategy) == "resolvenodes" { + case "resolvenodes": return ResolveNodesStrategy, nil - } else if strings.ToLower(c.Strategy) == "makenodes" { + case "makenodes": return MakeNodesStrategy, nil - } else { + default: return WatchtowerStrategy, fmt.Errorf("unknown staker strategy \"%v\"", c.Strategy) } } @@ -209,8 +210,8 @@ func NewStaker( statelessBlockValidator *StatelessBlockValidator, validatorUtilsAddress common.Address, ) (*Staker, error) { - err := config.Validate() - if err != nil { + + if err := config.Validate(); err != nil { return nil, err } client := l1Reader.Client() @@ -290,9 +291,10 @@ func (s *Staker) Start(ctxIn context.Context) { arbTx, err := s.Act(ctx) if err == nil && arbTx != nil { _, err = s.l1Reader.WaitForTxApproval(ctx, arbTx) - err = errors.Wrap(err, "error waiting for tx receipt") if err == nil { log.Info("successfully executed staker transaction", "hash", arbTx.Hash()) + } else { + err = fmt.Errorf("error waiting for tx receipt: %w", err) } } if err == nil { @@ -377,9 +379,8 @@ func (s *Staker) shouldAct(ctx context.Context) bool { "highGasBuffer", s.highGasBlocksBuffer, ) return false - } else { - return true } + return true } func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) { @@ -513,11 +514,19 @@ func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) { stakeIsUnwanted := effectiveStrategy < StakeLatestStrategy if stakeIsTooOutdated || stakeIsUnwanted { // Note: we must have an address if rawInfo != nil - _, err = s.rollup.ReturnOldDeposit(s.builder.Auth(ctx), walletAddressOrZero) + auth, err := s.builder.Auth(ctx) + if err != nil { + return nil, err + } + _, err = s.rollup.ReturnOldDeposit(auth, walletAddressOrZero) if err != nil { return nil, fmt.Errorf("error returning old deposit (from our staker %v): %w", walletAddressOrZero, err) } - _, err = s.rollup.WithdrawStakerFunds(s.builder.Auth(ctx)) + auth, err = s.builder.Auth(ctx) + if err != nil { + return nil, err + } + _, err = s.rollup.WithdrawStakerFunds(auth) if err != nil { return nil, fmt.Errorf("error withdrawing staker funds from our staker %v: %w", walletAddressOrZero, err) } @@ -532,7 +541,11 @@ func (s *Staker) Act(ctx context.Context) 
(*types.Transaction, error) { return nil, fmt.Errorf("error checking withdrawable funds of our staker %v: %w", walletAddressOrZero, err) } if withdrawable.Sign() > 0 { - _, err = s.rollup.WithdrawStakerFunds(s.builder.Auth(ctx)) + auth, err := s.builder.Auth(ctx) + if err != nil { + return nil, err + } + _, err = s.rollup.WithdrawStakerFunds(auth) if err != nil { return nil, fmt.Errorf("error withdrawing our staker %v funds: %w", walletAddressOrZero, err) } @@ -650,7 +663,11 @@ func (s *Staker) advanceStake(ctx context.Context, info *OurStakerInfo, effectiv // We'll return early if we already have a stake if info.StakeExists { - _, err = s.rollup.StakeOnNewNode(s.builder.Auth(ctx), action.assertion.AsSolidityStruct(), action.hash, action.prevInboxMaxCount) + auth, err := s.builder.Auth(ctx) + if err != nil { + return err + } + _, err = s.rollup.StakeOnNewNode(auth, action.assertion.AsSolidityStruct(), action.hash, action.prevInboxMaxCount) if err != nil { return fmt.Errorf("error staking on new node: %w", err) } @@ -662,8 +679,12 @@ func (s *Staker) advanceStake(ctx context.Context, info *OurStakerInfo, effectiv if err != nil { return fmt.Errorf("error getting current required stake: %w", err) } + auth, err := s.builder.AuthWithAmount(ctx, stakeAmount) + if err != nil { + return err + } _, err = s.rollup.NewStakeOnNewNode( - s.builder.AuthWithAmount(ctx, stakeAmount), + auth, action.assertion.AsSolidityStruct(), action.hash, action.prevInboxMaxCount, @@ -692,7 +713,11 @@ func (s *Staker) advanceStake(ctx context.Context, info *OurStakerInfo, effectiv log.Info("staking on existing node", "node", action.number) // We'll return early if we already havea stake if info.StakeExists { - _, err = s.rollup.StakeOnExistingNode(s.builder.Auth(ctx), action.number, action.hash) + auth, err := s.builder.Auth(ctx) + if err != nil { + return err + } + _, err = s.rollup.StakeOnExistingNode(auth, action.number, action.hash) if err != nil { return fmt.Errorf("error staking on existing node: %w", err) } @@ -704,8 +729,12 @@ func (s *Staker) advanceStake(ctx context.Context, info *OurStakerInfo, effectiv if err != nil { return fmt.Errorf("error getting current required stake: %w", err) } + auth, err := s.builder.AuthWithAmount(ctx, stakeAmount) + if err != nil { + return err + } _, err = s.rollup.NewStakeOnExistingNode( - s.builder.AuthWithAmount(ctx, stakeAmount), + auth, action.number, action.hash, ) @@ -781,15 +810,19 @@ func (s *Staker) createConflict(ctx context.Context, info *StakerInfo) error { return fmt.Errorf("error looking up node %v: %w", conflictInfo.Node2, err) } log.Warn("creating challenge", "node1", conflictInfo.Node1, "node2", conflictInfo.Node2, "otherStaker", staker) + auth, err := s.builder.Auth(ctx) + if err != nil { + return err + } _, err = s.rollup.CreateChallenge( - s.builder.Auth(ctx), + auth, [2]common.Address{staker1, staker2}, [2]uint64{conflictInfo.Node1, conflictInfo.Node2}, node1Info.MachineStatuses(), node1Info.GlobalStates(), node1Info.Assertion.NumBlocks, node2Info.Assertion.ExecutionHash(), - [2]*big.Int{new(big.Int).SetUint64(node1Info.BlockProposed), new(big.Int).SetUint64(node2Info.BlockProposed)}, + [2]*big.Int{new(big.Int).SetUint64(node1Info.L1BlockProposed), new(big.Int).SetUint64(node2Info.L1BlockProposed)}, [2][32]byte{node1Info.WasmModuleRoot, node2Info.WasmModuleRoot}, ) if err != nil { @@ -804,6 +837,10 @@ func (s *Staker) Strategy() StakerStrategy { return s.config.strategy } +func (s *Staker) Rollup() *RollupWatcher { + return s.rollup +} + func (s *Staker) 
updateStakerBalanceMetric(ctx context.Context) { txSenderAddress := s.wallet.TxSenderAddress() if txSenderAddress == nil { diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index af80e29680..c56eb3e9a7 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -5,6 +5,7 @@ package staker import ( "context" + "errors" "fmt" "sync" @@ -25,7 +26,6 @@ import ( "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbstate" - "github.com/pkg/errors" ) type StatelessBlockValidator struct { @@ -418,7 +418,7 @@ func (v *StatelessBlockValidator) RecordBlockCreation( func (v *StatelessBlockValidator) ValidationEntryRecord(ctx context.Context, e *validationEntry, keepReference bool) error { if e.Stage != ReadyForRecord { - return errors.Errorf("validation entry should be ReadyForRecord, is: %v", e.Stage) + return fmt.Errorf("validation entry should be ReadyForRecord, is: %v", e.Stage) } if e.PrevBlockHeader == nil { e.Stage = Recorded diff --git a/staker/validator_wallet.go b/staker/validator_wallet.go index e30ab3e7f8..c36efa7b61 100644 --- a/staker/validator_wallet.go +++ b/staker/validator_wallet.go @@ -5,6 +5,7 @@ package staker import ( "context" + "errors" "math/big" "strings" @@ -16,7 +17,6 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/solgen/go/rollupgen" - "github.com/pkg/errors" ) var validatorABI abi.ABI @@ -152,12 +152,25 @@ func (v *ContractValidatorWallet) From() common.Address { return v.auth.From } -func (v *ContractValidatorWallet) executeTransaction(ctx context.Context, tx *types.Transaction, gasRefunder common.Address) (*types.Transaction, error) { - oldAuthValue := v.auth.Value - v.auth.Value = tx.Value() - defer (func() { v.auth.Value = oldAuthValue })() +// nil value == 0 value +func (v *ContractValidatorWallet) getAuth(ctx context.Context, value *big.Int) (*bind.TransactOpts, error) { + newAuth := *v.auth + newAuth.Context = ctx + newAuth.Value = value + nonce, err := v.L1Client().NonceAt(ctx, v.auth.From, nil) + if err != nil { + return nil, err + } + newAuth.Nonce = new(big.Int).SetUint64(nonce) + return &newAuth, nil +} - return v.con.ExecuteTransactionWithGasRefunder(v.auth, gasRefunder, tx.Data(), *tx.To(), tx.Value()) +func (v *ContractValidatorWallet) executeTransaction(ctx context.Context, tx *types.Transaction, gasRefunder common.Address) (*types.Transaction, error) { + auth, err := v.getAuth(ctx, tx.Value()) + if err != nil { + return nil, err + } + return v.con.ExecuteTransactionWithGasRefunder(auth, gasRefunder, tx.Data(), *tx.To(), tx.Value()) } func (v *ContractValidatorWallet) populateWallet(ctx context.Context, createIfMissing bool) error { @@ -171,7 +184,11 @@ func (v *ContractValidatorWallet) populateWallet(ctx context.Context, createIfMi return nil } if v.address == nil { - addr, err := GetValidatorWalletContract(ctx, v.walletFactoryAddr, v.rollupFromBlock, v.auth, v.l1Reader, createIfMissing) + auth, err := v.getAuth(ctx, nil) + if err != nil { + return err + } + addr, err := GetValidatorWalletContract(ctx, v.walletFactoryAddr, v.rollupFromBlock, auth, v.l1Reader, createIfMissing) if err != nil { return err } @@ -248,14 +265,15 @@ func (v *ContractValidatorWallet) ExecuteTransactions(ctx context.Context, build return nil, err } - oldAuthValue := v.auth.Value - v.auth.Value = new(big.Int).Sub(totalAmount, balanceInContract) - if 
v.auth.Value.Sign() < 0 { - v.auth.Value.SetInt64(0) + callValue := new(big.Int).Sub(totalAmount, balanceInContract) + if callValue.Sign() < 0 { + callValue.SetInt64(0) } - defer (func() { v.auth.Value = oldAuthValue })() - - arbTx, err := v.con.ExecuteTransactionsWithGasRefunder(v.auth, gasRefunder, data, dest, amount) + auth, err := v.getAuth(ctx, callValue) + if err != nil { + return nil, err + } + arbTx, err := v.con.ExecuteTransactionsWithGasRefunder(auth, gasRefunder, data, dest, amount) if err != nil { return nil, err } @@ -264,7 +282,11 @@ func (v *ContractValidatorWallet) ExecuteTransactions(ctx context.Context, build } func (v *ContractValidatorWallet) TimeoutChallenges(ctx context.Context, challenges []uint64) (*types.Transaction, error) { - return v.con.TimeoutChallenges(v.auth, v.challengeManagerAddress, challenges) + auth, err := v.getAuth(ctx, nil) + if err != nil { + return nil, err + } + return v.con.TimeoutChallenges(auth, v.challengeManagerAddress, challenges) } func (v *ContractValidatorWallet) L1Client() arbutil.L1Interface { @@ -319,7 +341,7 @@ func GetValidatorWalletContract( // TODO: If we just save a mapping in the wallet creator we won't need log search walletCreator, err := rollupgen.NewValidatorWalletCreator(validatorWalletFactoryAddr, client) if err != nil { - return nil, errors.WithStack(err) + return nil, err } query := ethereum.FilterQuery{ BlockHash: nil, @@ -330,11 +352,12 @@ func GetValidatorWalletContract( } logs, err := client.FilterLogs(ctx, query) if err != nil { - return nil, errors.WithStack(err) + return nil, err } if len(logs) > 1 { return nil, errors.New("more than one validator wallet created for address") - } else if len(logs) == 1 { + } + if len(logs) == 1 { rawLog := logs[0] parsed, err := walletCreator.ParseWalletCreated(rawLog) if err != nil { @@ -360,7 +383,7 @@ func GetValidatorWalletContract( } ev, err := walletCreator.ParseWalletCreated(*receipt.Logs[len(receipt.Logs)-1]) if err != nil { - return nil, errors.WithStack(err) + return nil, err } log.Info("created validator smart contract wallet", "address", ev.WalletAddress) return &ev.WalletAddress, nil diff --git a/system_tests/aliasing_test.go b/system_tests/aliasing_test.go index 716cb6fef8..5e4e65a2ca 100644 --- a/system_tests/aliasing_test.go +++ b/system_tests/aliasing_test.go @@ -44,10 +44,10 @@ func TestAliasing(t *testing.T) { alias, err := arbsys.MyCallersAddressWithoutAliasing(nil) Require(t, err) if !top { - Fail(t, "direct call is not top level") + Fatal(t, "direct call is not top level") } if was || alias != (common.Address{}) { - Fail(t, "direct call has an alias", was, alias) + Fatal(t, "direct call has an alias", was, alias) } testL2Signed := func(top, direct, static, delegate, callcode, call bool) { diff --git a/system_tests/batch_poster_test.go b/system_tests/batch_poster_test.go index fcf04c98f0..3785c47c00 100644 --- a/system_tests/batch_poster_test.go +++ b/system_tests/batch_poster_test.go @@ -125,7 +125,7 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { } if !foundMultipleInBlock { - Fail(t, "only found one batch per block") + Fatal(t, "only found one batch per block") } } @@ -133,7 +133,7 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { Require(t, err) if l2balance.Sign() == 0 { - Fail(t, "Unexpected zero balance") + Fatal(t, "Unexpected zero balance") } } @@ -164,7 +164,7 @@ func TestBatchPosterLargeTx(t *testing.T) { receiptB, err := EnsureTxSucceededWithTimeout(ctx, l2clientB, tx, time.Second*30) Require(t, err) if receiptA.BlockHash 
!= receiptB.BlockHash { - Fail(t, "receipt A block hash", receiptA.BlockHash, "does not equal receipt B block hash", receiptB.BlockHash) + Fatal(t, "receipt A block hash", receiptA.BlockHash, "does not equal receipt B block hash", receiptB.BlockHash) } } diff --git a/system_tests/block_validator_test.go b/system_tests/block_validator_test.go index c5279a03ed..b3fd8ddb6c 100644 --- a/system_tests/block_validator_test.go +++ b/system_tests/block_validator_test.go @@ -17,8 +17,10 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos/l2pricing" + "github.com/offchainlabs/nitro/solgen/go/precompilesgen" ) type workloadType uint @@ -27,6 +29,7 @@ const ( ethSend workloadType = iota smallContract depleteGas + upgradeArbOs ) func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops int, workload workloadType, arbitrator bool) { @@ -36,6 +39,9 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops chainConfig, l1NodeConfigA, lifecycleManager, _, dasSignerKey := setupConfigWithDAS(t, ctx, dasModeString) defer lifecycleManager.StopAndWaitUntil(time.Second) + if workload == upgradeArbOs { + chainConfig.ArbitrumChainParams.InitialArbOSVersion = 10 + } l2info, nodeA, l2client, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, l1NodeConfigA, chainConfig, nil) defer requireClose(t, l1stack) @@ -54,50 +60,73 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops perTransfer := big.NewInt(1e12) - for i := 0; i < workloadLoops; i++ { - var tx *types.Transaction - - if workload == ethSend { - tx = l2info.PrepareTx("Owner", "User2", l2info.TransferGas, perTransfer, nil) - } else { - var contractCode []byte - var gas uint64 - - if workload == smallContract { - contractCode = []byte{byte(vm.PUSH0)} - contractCode = append(contractCode, byte(vm.PUSH0)) - contractCode = append(contractCode, byte(vm.PUSH1)) - contractCode = append(contractCode, 8) // the prelude length - contractCode = append(contractCode, byte(vm.PUSH0)) - contractCode = append(contractCode, byte(vm.CODECOPY)) - contractCode = append(contractCode, byte(vm.PUSH0)) - contractCode = append(contractCode, byte(vm.RETURN)) - basefee := GetBaseFee(t, l2client, ctx) - var err error - gas, err = l2client.EstimateGas(ctx, ethereum.CallMsg{ - From: l2info.GetAddress("Owner"), - GasPrice: basefee, - Value: big.NewInt(0), - Data: contractCode, - }) - Require(t, err) + if workload != upgradeArbOs { + for i := 0; i < workloadLoops; i++ { + var tx *types.Transaction + + if workload == ethSend { + tx = l2info.PrepareTx("Owner", "User2", l2info.TransferGas, perTransfer, nil) } else { - contractCode = []byte{0x5b} // JUMPDEST - for i := 0; i < 20; i++ { - contractCode = append(contractCode, 0x60, 0x00, 0x60, 0x00, 0x52) // PUSH1 0 MSTORE + var contractCode []byte + var gas uint64 + + if workload == smallContract { + contractCode = []byte{byte(vm.PUSH0)} + contractCode = append(contractCode, byte(vm.PUSH0)) + contractCode = append(contractCode, byte(vm.PUSH1)) + contractCode = append(contractCode, 8) // the prelude length + contractCode = append(contractCode, byte(vm.PUSH0)) + contractCode = append(contractCode, byte(vm.CODECOPY)) + contractCode = append(contractCode, byte(vm.PUSH0)) + contractCode = append(contractCode, byte(vm.RETURN)) + basefee := GetBaseFee(t, l2client, ctx) + var err error + gas, 
err = l2client.EstimateGas(ctx, ethereum.CallMsg{ + From: l2info.GetAddress("Owner"), + GasPrice: basefee, + Value: big.NewInt(0), + Data: contractCode, + }) + Require(t, err) + } else { + contractCode = []byte{0x5b} // JUMPDEST + for i := 0; i < 20; i++ { + contractCode = append(contractCode, 0x60, 0x00, 0x60, 0x00, 0x52) // PUSH1 0 MSTORE + } + contractCode = append(contractCode, 0x60, 0x00, 0x56) // JUMP + gas = l2info.TransferGas*2 + l2pricing.InitialPerBlockGasLimitV6 } - contractCode = append(contractCode, 0x60, 0x00, 0x56) // JUMP - gas = l2info.TransferGas*2 + l2pricing.InitialPerBlockGasLimitV6 + tx = l2info.PrepareTxTo("Owner", nil, gas, common.Big0, contractCode) + } + + err := l2client.SendTransaction(ctx, tx) + Require(t, err) + _, err = EnsureTxSucceededWithTimeout(ctx, l2client, tx, time.Second*5) + if workload != depleteGas { + Require(t, err) } - tx = l2info.PrepareTxTo("Owner", nil, gas, common.Big0, contractCode) } + } else { + auth := l2info.GetDefaultTransactOpts("Owner", ctx) + // make auth a chain owner + arbDebug, err := precompilesgen.NewArbDebug(common.HexToAddress("0xff"), l2client) + Require(t, err) + tx, err := arbDebug.BecomeChainOwner(&auth) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, l2client, tx) + Require(t, err) + arbOwner, err := precompilesgen.NewArbOwner(common.HexToAddress("0x70"), l2client) + Require(t, err) + tx, err = arbOwner.ScheduleArbOSUpgrade(&auth, 11, 0) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, l2client, tx) + Require(t, err) - err := l2client.SendTransaction(ctx, tx) + tx = l2info.PrepareTxTo("Owner", nil, l2info.TransferGas, perTransfer, []byte{byte(vm.PUSH0)}) + err = l2client.SendTransaction(ctx, tx) Require(t, err) _, err = EnsureTxSucceededWithTimeout(ctx, l2client, tx, time.Second*5) - if workload != depleteGas { - Require(t, err) - } + Require(t, err) } if workload != depleteGas { @@ -125,7 +154,7 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops expectedBalance := new(big.Int).Mul(perTransfer, big.NewInt(int64(workloadLoops+1))) if l2balance.Cmp(expectedBalance) != 0 { - Fail(t, "Unexpected balance:", l2balance) + Fatal(t, "Unexpected balance:", l2balance) } } @@ -148,7 +177,7 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops t.Log("waiting for block: ", lastBlock.NumberU64()) timeout := getDeadlineTimeout(t, time.Minute*10) if !nodeB.BlockValidator.WaitForBlock(ctx, lastBlock.NumberU64(), timeout) { - Fail(t, "did not validate all blocks") + Fatal(t, "did not validate all blocks") } finalRefCount := nodeB.BlockValidator.RecordDBReferenceCount() lastBlockNow, err := l2clientB.BlockByNumber(ctx, nil) @@ -156,10 +185,14 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops // up to 3 extra references: awaiting validation, recently valid, lastValidatedHeader largestRefCount := lastBlockNow.NumberU64() - lastBlock.NumberU64() + 3 if finalRefCount < 0 || finalRefCount > int64(largestRefCount) { - Fail(t, "unexpected refcount:", finalRefCount) + Fatal(t, "unexpected refcount:", finalRefCount) } } +func TestBlockValidatorSimpleOnchainUpgradeArbOs(t *testing.T) { + testBlockValidatorSimple(t, "onchain", 1, upgradeArbOs, true) +} + func TestBlockValidatorSimpleOnchain(t *testing.T) { testBlockValidatorSimple(t, "onchain", 1, ethSend, true) } diff --git a/system_tests/bloom_test.go b/system_tests/bloom_test.go index c932e0f185..9ad3253d4a 100644 --- a/system_tests/bloom_test.go +++ b/system_tests/bloom_test.go @@ -84,7 +84,7 @@ 
func TestBloom(t *testing.T) { for { sectionSize, sectionNum := node.Execution.Backend.APIBackend().BloomStatus() if sectionSize != 256 { - Fail(t, "unexpected section size: ", sectionSize) + Fatal(t, "unexpected section size: ", sectionSize) } t.Log("sections: ", sectionNum, "/", uint64(countsNum)/sectionSize) if sectionSize*(sectionNum+1) > uint64(countsNum) && sectionNum > 1 { @@ -102,7 +102,7 @@ func TestBloom(t *testing.T) { logs, err := client.FilterLogs(ctx, nullEventQuery) Require(t, err) if len(logs) != len(nullEventCounts) { - Fail(t, "expected ", len(nullEventCounts), " logs, got ", len(logs)) + Fatal(t, "expected ", len(nullEventCounts), " logs, got ", len(logs)) } incrementEventQuery := ethereum.FilterQuery{ Topics: [][]common.Hash{{simpleABI.Events["CounterEvent"].ID}}, @@ -110,14 +110,14 @@ func TestBloom(t *testing.T) { logs, err = client.FilterLogs(ctx, incrementEventQuery) Require(t, err) if len(logs) != len(eventCounts) { - Fail(t, "expected ", len(eventCounts), " logs, got ", len(logs)) + Fatal(t, "expected ", len(eventCounts), " logs, got ", len(logs)) } for _, log := range logs { parsedLog, err := simple.ParseCounterEvent(log) Require(t, err) _, expected := eventCounts[parsedLog.Count-1] if !expected { - Fail(t, "unxpected count in logs: ", parsedLog.Count) + Fatal(t, "unxpected count in logs: ", parsedLog.Count) } } } diff --git a/system_tests/common_test.go b/system_tests/common_test.go index ffff7d33e6..0aa6aa29d7 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -18,6 +18,7 @@ import ( "github.com/offchainlabs/nitro/validator/server_api" "github.com/offchainlabs/nitro/validator/valnode" + "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/blsSignatures" @@ -105,7 +106,7 @@ func BridgeBalance( l2acct := l2info.GetInfoWithPrivKey(account) if l2acct.PrivateKey.X.Cmp(l1acct.PrivateKey.X) != 0 || l2acct.PrivateKey.Y.Cmp(l1acct.PrivateKey.Y) != 0 { - Fail(t, "l2 account already exists and not compatible to l1") + Fatal(t, "l2 account already exists and not compatible to l1") } } @@ -137,7 +138,7 @@ func BridgeBalance( } TransferBalance(t, "Faucet", "User", big.NewInt(1), l1info, l1client, ctx) if i > 20 { - Fail(t, "bridging failed") + Fatal(t, "bridging failed") } <-time.After(time.Millisecond * 100) } @@ -281,7 +282,7 @@ func createTestL1BlockChain(t *testing.T, l1info info) (info, *ethclient.Client, return createTestL1BlockChainWithConfig(t, l1info, nil) } -func getTestStackConfig(t *testing.T) *node.Config { +func stackConfigForTest(t *testing.T) *node.Config { stackConfig := node.DefaultConfig stackConfig.HTTPPort = 0 stackConfig.WSPort = 0 @@ -345,16 +346,17 @@ type validated interface { Validate() error } -func StaticFetcherFrom[T any](t *testing.T, config T) func() T { +func StaticFetcherFrom[T any](t *testing.T, config *T) func() *T { t.Helper() - asEmptyIf := interface{}(config) + tCopy := *config + asEmptyIf := interface{}(&tCopy) if asValidtedIf, ok := asEmptyIf.(validated); ok { err := asValidtedIf.Validate() if err != nil { - Fail(t, err) + Fatal(t, err) } } - return func() T { return config } + return func() *T { return &tCopy } } func configByValidationNode(t *testing.T, clientConfig *arbnode.Config, valStack *node.Node) { @@ -380,7 +382,7 @@ func createTestL1BlockChainWithConfig(t *testing.T, l1info info, stackConfig *no l1info = NewL1TestInfo(t) } if stackConfig == nil { - stackConfig = getTestStackConfig(t) + 
stackConfig = stackConfigForTest(t) } l1info.GenerateAccount("Faucet") @@ -431,9 +433,24 @@ func createTestL1BlockChainWithConfig(t *testing.T, l1info info, stackConfig *no return l1info, l1Client, l1backend, stack } +func getInitMessage(ctx context.Context, t *testing.T, l1client client, addresses *chaininfo.RollupAddresses) *arbostypes.ParsedInitMessage { + bridge, err := arbnode.NewDelayedBridge(l1client, addresses.Bridge, addresses.DeployedAt) + Require(t, err) + deployedAtBig := arbmath.UintToBig(addresses.DeployedAt) + messages, err := bridge.LookupMessagesInRange(ctx, deployedAtBig, deployedAtBig, nil) + Require(t, err) + if len(messages) == 0 { + Fatal(t, "No delayed messages found at rollup creation block") + } + initMessage, err := messages[0].Message.ParseInitMessage() + Require(t, err, "Failed to parse rollup init message") + + return initMessage +} + func DeployOnTestL1( t *testing.T, ctx context.Context, l1info info, l1client client, chainConfig *params.ChainConfig, -) *chaininfo.RollupAddresses { +) (*chaininfo.RollupAddresses, *arbostypes.ParsedInitMessage) { l1info.GenerateAccount("RollupOwner") l1info.GenerateAccount("Sequencer") l1info.GenerateAccount("User") @@ -462,17 +479,18 @@ func DeployOnTestL1( l1info.SetContract("Bridge", addresses.Bridge) l1info.SetContract("SequencerInbox", addresses.SequencerInbox) l1info.SetContract("Inbox", addresses.Inbox) - return addresses + initMessage := getInitMessage(ctx, t, l1client, addresses) + return addresses, initMessage } func createL2BlockChain( t *testing.T, l2info *BlockchainTestInfo, dataDir string, chainConfig *params.ChainConfig, ) (*BlockchainTestInfo, *node.Node, ethdb.Database, ethdb.Database, *core.BlockChain) { - return createL2BlockChainWithStackConfig(t, l2info, dataDir, chainConfig, nil) + return createL2BlockChainWithStackConfig(t, l2info, dataDir, chainConfig, nil, nil) } func createL2BlockChainWithStackConfig( - t *testing.T, l2info *BlockchainTestInfo, dataDir string, chainConfig *params.ChainConfig, stackConfig *node.Config, + t *testing.T, l2info *BlockchainTestInfo, dataDir string, chainConfig *params.ChainConfig, initMessage *arbostypes.ParsedInitMessage, stackConfig *node.Config, ) (*BlockchainTestInfo, *node.Node, ethdb.Database, ethdb.Database, *core.BlockChain) { if l2info == nil { l2info = NewArbTestInfo(t, chainConfig.ChainID) @@ -493,9 +511,17 @@ func createL2BlockChainWithStackConfig( Require(t, err) initReader := statetransfer.NewMemoryInitDataReader(&l2info.ArbInitData) - serializedChainConfig, err := json.Marshal(chainConfig) - Require(t, err) - blockchain, err := execution.WriteOrTestBlockChain(chainDb, nil, initReader, chainConfig, serializedChainConfig, arbnode.ConfigDefaultL2Test().TxLookupLimit, 0) + if initMessage == nil { + serializedChainConfig, err := json.Marshal(chainConfig) + Require(t, err) + initMessage = &arbostypes.ParsedInitMessage{ + ChainId: chainConfig.ChainID, + InitialL1BaseFee: arbostypes.DefaultInitialL1BaseFee, + ChainConfig: chainConfig, + SerializedChainConfig: serializedChainConfig, + } + } + blockchain, err := execution.WriteOrTestBlockChain(chainDb, nil, initReader, chainConfig, initMessage, arbnode.ConfigDefaultL2Test().TxLookupLimit, 0) Require(t, err) return l2info, stack, chainDb, arbDb, blockchain @@ -561,8 +587,8 @@ func createTestNodeOnL1WithConfigImpl( if l2info == nil { l2info = NewArbTestInfo(t, chainConfig.ChainID) } - _, l2stack, l2chainDb, l2arbDb, l2blockchain = createL2BlockChainWithStackConfig(t, l2info, "", chainConfig, stackConfig) - addresses := 
DeployOnTestL1(t, ctx, l1info, l1client, chainConfig) + addresses, initMessage := DeployOnTestL1(t, ctx, l1info, l1client, chainConfig) + _, l2stack, l2chainDb, l2arbDb, l2blockchain = createL2BlockChainWithStackConfig(t, l2info, "", chainConfig, initMessage, stackConfig) var sequencerTxOptsPtr *bind.TransactOpts var dataSigner signature.DataSignerFunc if isSequencer { @@ -657,7 +683,7 @@ func Require(t *testing.T, err error, text ...interface{}) { testhelpers.RequireImpl(t, err, text...) } -func Fail(t *testing.T, printables ...interface{}) { +func Fatal(t *testing.T, printables ...interface{}) { t.Helper() testhelpers.FailImpl(t, printables...) } @@ -693,12 +719,12 @@ func Create2ndNodeWithConfig( feedErrChan := make(chan error, 10) l1rpcClient, err := l1stack.Attach() if err != nil { - Fail(t, err) + Fatal(t, err) } l1client := ethclient.NewClient(l1rpcClient) if stackConfig == nil { - stackConfig = getTestStackConfig(t) + stackConfig = stackConfigForTest(t) } l2stack, err := node.New(stackConfig) Require(t, err) @@ -712,11 +738,8 @@ func Create2ndNodeWithConfig( dataSigner := signature.DataSignerFromPrivateKey(l1info.GetInfoWithPrivKey("Sequencer").PrivateKey) txOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) chainConfig := first.Execution.ArbInterface.BlockChain().Config() - serializedChainConfig, err := json.Marshal(chainConfig) - if err != nil { - Fail(t, err) - } - l2blockchain, err := execution.WriteOrTestBlockChain(l2chainDb, nil, initReader, chainConfig, serializedChainConfig, arbnode.ConfigDefaultL2Test().TxLookupLimit, 0) + initMessage := getInitMessage(ctx, t, l1client, first.DeployInfo) + l2blockchain, err := execution.WriteOrTestBlockChain(l2chainDb, nil, initReader, chainConfig, initMessage, arbnode.ConfigDefaultL2Test().TxLookupLimit, 0) Require(t, err) AddDefaultValNode(t, ctx, nodeConfig, true) @@ -791,7 +814,7 @@ func setupConfigWithDAS( case "onchain": enableDas = false default: - Fail(t, "unknown storage type") + Fatal(t, "unknown storage type") } dbPath = t.TempDir() dasSignerKey, _, err := das.GenerateAndStoreKeys(dbPath) diff --git a/system_tests/conditionaltx_test.go b/system_tests/conditionaltx_test.go index 950938a756..c65103694a 100644 --- a/system_tests/conditionaltx_test.go +++ b/system_tests/conditionaltx_test.go @@ -53,7 +53,7 @@ func testConditionalTxThatShouldSucceed(t *testing.T, ctx context.Context, idx i tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, big.NewInt(1e12), nil) err := arbitrum.SendConditionalTransactionRPC(ctx, rpcClient, tx, options) if err != nil { - Fail(t, "SendConditionalTransactionRPC failed, idx:", idx, "err:", err) + Fatal(t, "SendConditionalTransactionRPC failed, idx:", idx, "err:", err) } } @@ -65,18 +65,18 @@ func testConditionalTxThatShouldFail(t *testing.T, ctx context.Context, idx int, err := arbitrum.SendConditionalTransactionRPC(ctx, rpcClient, tx, options) if err == nil { if options == nil { - Fail(t, "SendConditionalTransactionRPC didn't fail as expected, idx:", idx, "options:", options) + Fatal(t, "SendConditionalTransactionRPC didn't fail as expected, idx:", idx, "options:", options) } else { - Fail(t, "SendConditionalTransactionRPC didn't fail as expected, idx:", idx, "options:", *options) + Fatal(t, "SendConditionalTransactionRPC didn't fail as expected, idx:", idx, "options:", *options) } } else { var rErr rpc.Error if errors.As(err, &rErr) { if rErr.ErrorCode() != expectedErrorCode { - Fail(t, "unexpected error code, have:", rErr.ErrorCode(), "want:", expectedErrorCode) + Fatal(t, "unexpected 
error code, have:", rErr.ErrorCode(), "want:", expectedErrorCode) } } else { - Fail(t, "unexpected error type, err:", err) + Fatal(t, "unexpected error type, err:", err) } } accountInfo.Nonce = nonce // revert nonce as the tx failed @@ -264,14 +264,14 @@ func TestSendRawTransactionConditionalBasic(t *testing.T) { previousStorageRootHash1 := currentRootHash1 currentRootHash1 = getStorageRootHash(t, node, contractAddress1) if bytes.Equal(previousStorageRootHash1.Bytes(), currentRootHash1.Bytes()) { - Fail(t, "storage root hash didn't change as expected") + Fatal(t, "storage root hash didn't change as expected") } currentSlotValueMap1 = getStorageSlotValue(t, node, contractAddress1) previousStorageRootHash2 := currentRootHash2 currentRootHash2 = getStorageRootHash(t, node, contractAddress2) if bytes.Equal(previousStorageRootHash2.Bytes(), currentRootHash2.Bytes()) { - Fail(t, "storage root hash didn't change as expected") + Fatal(t, "storage root hash didn't change as expected") } currentSlotValueMap2 = getStorageSlotValue(t, node, contractAddress2) @@ -362,7 +362,7 @@ func TestSendRawTransactionConditionalMultiRoutine(t *testing.T) { select { case <-success: case <-ctxWithTimeout.Done(): - Fail(t, "test timeouted") + Fatal(t, "test timeouted") } } cancelCtxWithTimeout() @@ -375,7 +375,7 @@ func TestSendRawTransactionConditionalMultiRoutine(t *testing.T) { for i := genesis + 1; header != nil; i++ { blockReceipts := bc.GetReceiptsByHash(header.Hash()) if blockReceipts == nil { - Fail(t, "Failed to get block receipts, block number:", header.Number) + Fatal(t, "Failed to get block receipts, block number:", header.Number) } receipts = append(receipts, blockReceipts...) header = bc.GetHeaderByNumber(i) @@ -387,14 +387,14 @@ func TestSendRawTransactionConditionalMultiRoutine(t *testing.T) { parsed, err := simple.ParseLogAndIncrementCalled(*receipt.Logs[0]) Require(t, err) if parsed.Expected.Int64() != parsed.Have.Int64() { - Fail(t, "Got invalid log, log.Expected:", parsed.Expected, "log.Have:", parsed.Have) + Fatal(t, "Got invalid log, log.Expected:", parsed.Expected, "log.Have:", parsed.Have) } else { succeeded++ } } } if succeeded != expectedSuccesses { - Fail(t, "Unexpected number of successful txes, want:", numTxes, "have:", succeeded) + Fatal(t, "Unexpected number of successful txes, want:", numTxes, "have:", succeeded) } } diff --git a/system_tests/contract_tx_test.go b/system_tests/contract_tx_test.go index 9214884c26..e671dcc6ac 100644 --- a/system_tests/contract_tx_test.go +++ b/system_tests/contract_tx_test.go @@ -94,12 +94,12 @@ func TestContractTxDeploy(t *testing.T) { receipt, err := WaitForTx(ctx, client, txHash, time.Second*10) Require(t, err) if receipt.Status != types.ReceiptStatusSuccessful { - Fail(t, "Receipt has non-successful status", receipt.Status) + Fatal(t, "Receipt has non-successful status", receipt.Status) } expectedAddr := crypto.CreateAddress(from, stateNonce) if receipt.ContractAddress != expectedAddr { - Fail(t, "expected address", from, "nonce", stateNonce, "to deploy to", expectedAddr, "but got", receipt.ContractAddress) + Fatal(t, "expected address", from, "nonce", stateNonce, "to deploy to", expectedAddr, "but got", receipt.ContractAddress) } t.Log("deployed contract", receipt.ContractAddress, "from address", from, "with nonce", stateNonce) stateNonce++ @@ -107,7 +107,7 @@ func TestContractTxDeploy(t *testing.T) { code, err := client.CodeAt(ctx, receipt.ContractAddress, nil) Require(t, err) if !bytes.Equal(code, []byte{0xFE}) { - Fail(t, "expected contract", 
receipt.ContractAddress, "code of 0xFE but got", hex.EncodeToString(code)) + Fatal(t, "expected contract", receipt.ContractAddress, "code of 0xFE but got", hex.EncodeToString(code)) } } } diff --git a/system_tests/das_test.go b/system_tests/das_test.go index bce4da1f34..d813253670 100644 --- a/system_tests/das_test.go +++ b/system_tests/das_test.go @@ -110,7 +110,7 @@ func TestDASRekey(t *testing.T) { l1info, l1client, _, l1stack := createTestL1BlockChain(t, nil) defer requireClose(t, l1stack) feedErrChan := make(chan error, 10) - addresses := DeployOnTestL1(t, ctx, l1info, l1client, chainConfig) + addresses, initMessage := DeployOnTestL1(t, ctx, l1info, l1client, chainConfig) // Setup DAS servers dasDataDir := t.TempDir() @@ -126,7 +126,7 @@ func TestDASRekey(t *testing.T) { authorizeDASKeyset(t, ctx, pubkeyA, l1info, l1client) // Setup L2 chain - _, l2stackA, l2chainDb, l2arbDb, l2blockchain := createL2BlockChain(t, l2info, nodeDir, chainConfig) + _, l2stackA, l2chainDb, l2arbDb, l2blockchain := createL2BlockChainWithStackConfig(t, l2info, nodeDir, chainConfig, initMessage, nil) l2info.GenerateAccount("User2") // Setup DAS config @@ -218,7 +218,7 @@ func checkBatchPosting(t *testing.T, ctx context.Context, l1client, l2clientA *e Require(t, err) if l2balance.Cmp(expectedBalance) != 0 { - Fail(t, "Unexpected balance:", l2balance) + Fatal(t, "Unexpected balance:", l2balance) } } @@ -238,7 +238,7 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { l1Reader.Start(ctx) defer l1Reader.StopAndWait() feedErrChan := make(chan error, 10) - addresses := DeployOnTestL1(t, ctx, l1info, l1client, chainConfig) + addresses, initMessage := DeployOnTestL1(t, ctx, l1info, l1client, chainConfig) keyDir, fileDataDir, dbDataDir := t.TempDir(), t.TempDir(), t.TempDir() pubkey, _, err := das.GenerateAndStoreKeys(keyDir) @@ -304,7 +304,7 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { Require(t, err) // Setup L2 chain - l2info, l2stackA, l2chainDb, l2arbDb, l2blockchain := createL2BlockChain(t, nil, "", chainConfig) + l2info, l2stackA, l2chainDb, l2arbDb, l2blockchain := createL2BlockChainWithStackConfig(t, nil, "", chainConfig, initMessage, nil) l2info.GenerateAccount("User2") sequencerTxOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) diff --git a/system_tests/delayedinbox_test.go b/system_tests/delayedinbox_test.go index f83ddab184..e48cb37028 100644 --- a/system_tests/delayedinbox_test.go +++ b/system_tests/delayedinbox_test.go @@ -50,6 +50,6 @@ func TestDelayInboxSimple(t *testing.T) { l2balance, err := l2client.BalanceAt(ctx, l2info.GetAddress("User2"), nil) Require(t, err) if l2balance.Cmp(big.NewInt(1e6)) != 0 { - Fail(t, "Unexpected balance:", l2balance) + Fatal(t, "Unexpected balance:", l2balance) } } diff --git a/system_tests/delayedinboxlong_test.go b/system_tests/delayedinboxlong_test.go index 9424660406..b1c8ea361b 100644 --- a/system_tests/delayedinboxlong_test.go +++ b/system_tests/delayedinboxlong_test.go @@ -63,7 +63,7 @@ func TestDelayInboxLong(t *testing.T) { t.Log("Done sending", delayedMessages, "delayedMessages") if delayedMessages == 0 { - Fail(t, "No delayed messages sent!") + Fatal(t, "No delayed messages sent!") } // sending l1 messages creates l1 blocks.. 
make enough to get that delayed inbox message in @@ -78,6 +78,6 @@ func TestDelayInboxLong(t *testing.T) { l2balance, err := l2client.BalanceAt(ctx, l2info.GetAddress("User2"), nil) Require(t, err) if l2balance.Cmp(big.NewInt(fundsPerDelayed*delayedMessages)) != 0 { - Fail(t, "Unexpected balance:", "balance", l2balance, "expected", fundsPerDelayed*delayedMessages) + Fatal(t, "Unexpected balance:", "balance", l2balance, "expected", fundsPerDelayed*delayedMessages) } } diff --git a/system_tests/estimation_test.go b/system_tests/estimation_test.go index c80492326c..2a416ad179 100644 --- a/system_tests/estimation_test.go +++ b/system_tests/estimation_test.go @@ -13,7 +13,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" - "github.com/offchainlabs/nitro/arbos/l1pricing" + "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/solgen/go/mocksgen" "github.com/offchainlabs/nitro/solgen/go/node_interfacegen" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" @@ -43,7 +43,7 @@ func TestDeploy(t *testing.T) { Require(t, err, "failed to get counter") if counter != 1 { - Fail(t, "Unexpected counter value", counter) + Fatal(t, "Unexpected counter value", counter) } } @@ -88,7 +88,7 @@ func TestEstimate(t *testing.T) { numTriesLeft-- } if !equilibrated { - Fail(t, "L2 gas price did not converge", gasPrice) + Fatal(t, "L2 gas price did not converge", gasPrice) } initialBalance, err := client.BalanceAt(ctx, auth.From, nil) @@ -103,7 +103,7 @@ func TestEstimate(t *testing.T) { header, err := client.HeaderByNumber(ctx, receipt.BlockNumber) Require(t, err, "could not get header") if header.BaseFee.Cmp(gasPrice) != 0 { - Fail(t, "Header has wrong basefee", header.BaseFee, gasPrice) + Fatal(t, "Header has wrong basefee", header.BaseFee, gasPrice) } balance, err := client.BalanceAt(ctx, auth.From, nil) @@ -111,7 +111,7 @@ func TestEstimate(t *testing.T) { expectedCost := receipt.GasUsed * gasPrice.Uint64() observedCost := initialBalance.Uint64() - balance.Uint64() if expectedCost != observedCost { - Fail(t, "Expected deployment to cost", expectedCost, "instead of", observedCost) + Fatal(t, "Expected deployment to cost", expectedCost, "instead of", observedCost) } tx, err = simple.Increment(&auth) @@ -123,7 +123,7 @@ func TestEstimate(t *testing.T) { Require(t, err, "failed to get counter") if counter != 1 { - Fail(t, "Unexpected counter value", counter) + Fatal(t, "Unexpected counter value", counter) } } @@ -134,7 +134,7 @@ func TestComponentEstimate(t *testing.T) { l2info, node, client := CreateTestL2(t, ctx) defer node.StopAndWait() - l1BaseFee := big.NewInt(l1pricing.InitialPricePerUnitWei) + l1BaseFee := new(big.Int).Set(arbostypes.DefaultInitialL1BaseFee) l2BaseFee := GetBaseFee(t, client, ctx) colors.PrintGrey("l1 basefee ", l1BaseFee) @@ -177,7 +177,7 @@ func TestComponentEstimate(t *testing.T) { outputs, err := nodeMethod.Outputs.Unpack(returnData) Require(t, err) if len(outputs) != 4 { - Fail(t, "expected 4 outputs from gasEstimateComponents, got", len(outputs)) + Fatal(t, "expected 4 outputs from gasEstimateComponents, got", len(outputs)) } gasEstimate, _ := outputs[0].(uint64) @@ -201,10 +201,10 @@ func TestComponentEstimate(t *testing.T) { colors.PrintBlue("Est. 
", gasEstimate, " - ", gasEstimateForL1, " = ", l2Estimate) if !arbmath.BigEquals(l1BaseFeeEstimate, l1BaseFee) { - Fail(t, l1BaseFeeEstimate, l1BaseFee) + Fatal(t, l1BaseFeeEstimate, l1BaseFee) } if !arbmath.BigEquals(baseFee, l2BaseFee) { - Fail(t, baseFee, l2BaseFee.Uint64()) + Fatal(t, baseFee, l2BaseFee.Uint64()) } Require(t, client.SendTransaction(ctx, tx)) @@ -215,6 +215,6 @@ func TestComponentEstimate(t *testing.T) { colors.PrintMint("True ", receipt.GasUsed, " - ", receipt.GasUsedForL1, " = ", l2Used) if l2Estimate != l2Used { - Fail(t, l2Estimate, l2Used) + Fatal(t, l2Estimate, l2Used) } } diff --git a/system_tests/fees_test.go b/system_tests/fees_test.go index 734ccc7529..bdd998357e 100644 --- a/system_tests/fees_test.go +++ b/system_tests/fees_test.go @@ -82,20 +82,22 @@ func TestSequencerFeePaid(t *testing.T) { tipPaidToNet := arbmath.BigMulByUint(tipCap, receipt.GasUsedForL1) gotTip := arbmath.BigEquals(networkRevenue, arbmath.BigAdd(feePaidForL2, tipPaidToNet)) if !gotTip && version == 9 { - Fail(t, "network didn't receive expected payment", networkRevenue, feePaidForL2, tipPaidToNet) + Fatal(t, "network didn't receive expected payment", networkRevenue, feePaidForL2, tipPaidToNet) } if gotTip && version != 9 { - Fail(t, "tips are somehow enabled") + Fatal(t, "tips are somehow enabled") } txSize := compressedTxSize(t, tx) l1GasBought := arbmath.BigDiv(l1Charge, l1Estimate).Uint64() - l1GasActual := txSize * params.TxDataNonZeroGasEIP2028 + l1ChargeExpected := arbmath.BigMulByUint(l1Estimate, txSize*params.TxDataNonZeroGasEIP2028) + // L1 gas can only be charged in terms of L2 gas, so subtract off any rounding error from the expected value + l1ChargeExpected.Sub(l1ChargeExpected, new(big.Int).Mod(l1ChargeExpected, l2info.GasPrice)) colors.PrintBlue("bytes ", l1GasBought/params.TxDataNonZeroGasEIP2028, txSize) - if l1GasBought != l1GasActual { - Fail(t, "the sequencer's future revenue does not match its costs", l1GasBought, l1GasActual) + if !arbmath.BigEquals(l1Charge, l1ChargeExpected) { + Fatal(t, "the sequencer's future revenue does not match its costs", l1Charge, l1ChargeExpected) } return networkRevenue, tipPaidToNet } @@ -109,10 +111,10 @@ func TestSequencerFeePaid(t *testing.T) { net2, tip2 := testFees(2) if tip0.Sign() != 0 { - Fail(t, "nonzero tip") + Fatal(t, "nonzero tip") } if arbmath.BigEquals(arbmath.BigSub(net2, tip2), net0) { - Fail(t, "a tip of 2 should yield a total of 3") + Fatal(t, "a tip of 2 should yield a total of 3") } } @@ -211,7 +213,7 @@ func testSequencerPriceAdjustsFrom(t *testing.T, initialEstimate uint64) { if numRetrogradeMoves > 1 { colors.PrintRed(timesPriceAdjusted, newDiff, oldDiff, lastEstimate, surplus) colors.PrintRed(estimatedL1FeePerUnit, l1Header.BaseFee, actualL1FeePerUnit) - Fail(t, "L1 gas price estimate should tend toward the basefee") + Fatal(t, "L1 gas price estimate should tend toward the basefee") } } else { numRetrogradeMoves = 0 @@ -219,10 +221,10 @@ func testSequencerPriceAdjustsFrom(t *testing.T, initialEstimate uint64) { diff := arbmath.BigAbs(arbmath.BigSub(actualL1FeePerUnit, estimatedL1FeePerUnit)) maxDiffToAllow := arbmath.BigDivByUint(actualL1FeePerUnit, 100) if arbmath.BigLessThan(maxDiffToAllow, diff) { // verify that estimates is within 1% of actual - Fail(t, "New L1 estimate differs too much from receipt") + Fatal(t, "New L1 estimate differs too much from receipt") } if arbmath.BigEquals(actualL1FeePerUnit, common.Big0) { - Fail(t, "Estimate is zero", i) + Fatal(t, "Estimate is zero", i) } lastEstimate = 
actualL1FeePerUnit timesPriceAdjusted++ @@ -240,7 +242,7 @@ func testSequencerPriceAdjustsFrom(t *testing.T, initialEstimate uint64) { break } if j == 1 { - Fail(t, "batch count didn't update in time") + Fatal(t, "batch count didn't update in time") } time.Sleep(time.Millisecond * 100) } @@ -252,10 +254,10 @@ func testSequencerPriceAdjustsFrom(t *testing.T, initialEstimate uint64) { colors.PrintMint("price changes ", timesPriceAdjusted) if timesPriceAdjusted == 0 { - Fail(t, "L1 gas price estimate never adjusted") + Fatal(t, "L1 gas price estimate never adjusted") } if !arbmath.BigGreaterThan(rewardRecipientBalanceAfter, rewardRecipientBalanceBefore) { - Fail(t, "reward recipient didn't get paid") + Fatal(t, "reward recipient didn't get paid") } arbAggregator, err := precompilesgen.NewArbAggregator(common.HexToAddress("0x6d"), l2client) @@ -269,12 +271,12 @@ func testSequencerPriceAdjustsFrom(t *testing.T, initialEstimate uint64) { bal, err := l1client.BalanceAt(ctx, bpAddr, nil) Require(t, err) if bal.Sign() == 0 { - Fail(t, "Batch poster balance is zero for", bpAddr) + Fatal(t, "Batch poster balance is zero for", bpAddr) } } } if numReimbursed != 1 { - Fail(t, "Wrong number of batch posters were reimbursed", numReimbursed) + Fatal(t, "Wrong number of batch posters were reimbursed", numReimbursed) } } diff --git a/system_tests/forwarder_test.go b/system_tests/forwarder_test.go index 5ec897604e..3691caf5d2 100644 --- a/system_tests/forwarder_test.go +++ b/system_tests/forwarder_test.go @@ -8,6 +8,7 @@ import ( "fmt" "math/big" "path/filepath" + "strings" "sync" "testing" "time" @@ -21,16 +22,19 @@ import ( "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/statetransfer" "github.com/offchainlabs/nitro/util/redisutil" - "github.com/offchainlabs/nitro/util/testhelpers" ) +var transferAmount = big.NewInt(1e12) // amount of ether to use for transactions in tests + +const nodesCount = 5 // number of testnodes to create in tests + func TestStaticForwarder(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ipcPath := filepath.Join(t.TempDir(), "test.ipc") ipcConfig := genericconf.IPCConfigDefault ipcConfig.Path = ipcPath - stackConfig := getTestStackConfig(t) + stackConfig := stackConfigForTest(t) ipcConfig.Apply(stackConfig) nodeConfigA := arbnode.ConfigDefaultL1Test() nodeConfigA.BatchPoster.Enable = false @@ -50,54 +54,61 @@ func TestStaticForwarder(t *testing.T) { defer nodeB.StopAndWait() l2info.GenerateAccount("User2") - tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, big.NewInt(1e12), nil) + tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, transferAmount, nil) err := clientB.SendTransaction(ctx, tx) - testhelpers.RequireImpl(t, err) + Require(t, err) _, err = EnsureTxSucceeded(ctx, clientA, tx) - testhelpers.RequireImpl(t, err) + Require(t, err) + l2balance, err := clientA.BalanceAt(ctx, l2info.GetAddress("User2"), nil) - testhelpers.RequireImpl(t, err) - if l2balance.Cmp(big.NewInt(1e12)) != 0 { - testhelpers.FailImpl(t, "Unexpected balance:", l2balance) + Require(t, err) + + if l2balance.Cmp(transferAmount) != 0 { + Fatal(t, "Unexpected balance:", l2balance) } } -func initMiniRedisForTest(t *testing.T, ctx context.Context, nodeNames []string) (*miniredis.Miniredis, string) { - var priorities string +func initRedis(ctx context.Context, t *testing.T, nodeNames []string) (*miniredis.Miniredis, string) { + t.Helper() + redisServer, err := miniredis.Run() - testhelpers.RequireImpl(t, err) + Require(t, 
err) redisUrl := fmt.Sprintf("redis://%s/0", redisServer.Addr()) redisClient, err := redisutil.RedisClientFromURL(redisUrl) - testhelpers.RequireImpl(t, err) + Require(t, err) defer redisClient.Close() - for _, name := range nodeNames { - priorities = priorities + name + "," - } - priorities = priorities[:len(priorities)-1] // remove last "," - testhelpers.RequireImpl(t, redisClient.Set(ctx, redisutil.PRIORITIES_KEY, priorities, time.Duration(0)).Err()) + priorities := strings.Join(nodeNames, ",") + + Require(t, redisClient.Set(ctx, redisutil.PRIORITIES_KEY, priorities, time.Duration(0)).Err()) return redisServer, redisUrl } -func createFallbackSequencer( - t *testing.T, ctx context.Context, ipcPath string, redisUrl string, +type fallbackSequencerOpts struct { + ipcPath string + redisUrl string + enableSecCoordinator bool +} + +func fallbackSequencer( + ctx context.Context, t *testing.T, opts *fallbackSequencerOpts, ) (l2info info, currentNode *arbnode.Node, l2client *ethclient.Client, l1info info, l1backend *eth.Ethereum, l1client *ethclient.Client, l1stack *node.Node) { - stackConfig := getTestStackConfig(t) + stackConfig := stackConfigForTest(t) ipcConfig := genericconf.IPCConfigDefault - ipcConfig.Path = ipcPath + ipcConfig.Path = opts.ipcPath ipcConfig.Apply(stackConfig) nodeConfig := arbnode.ConfigDefaultL1Test() - nodeConfig.SeqCoordinator.Enable = false - nodeConfig.SeqCoordinator.RedisUrl = redisUrl - nodeConfig.SeqCoordinator.MyUrlImpl = ipcPath + nodeConfig.SeqCoordinator.Enable = opts.enableSecCoordinator + nodeConfig.SeqCoordinator.RedisUrl = opts.redisUrl + nodeConfig.SeqCoordinator.MyUrlImpl = opts.ipcPath return createTestNodeOnL1WithConfig(t, ctx, true, nodeConfig, nil, stackConfig) } func createForwardingNode( - t *testing.T, ctx context.Context, + ctx context.Context, t *testing.T, first *arbnode.Node, l1stack *node.Node, l1info *BlockchainTestInfo, @@ -106,7 +117,7 @@ func createForwardingNode( redisUrl string, fallbackPath string, ) (*ethclient.Client, *arbnode.Node) { - stackConfig := getTestStackConfig(t) + stackConfig := stackConfigForTest(t) if ipcPath != "" { ipcConfig := genericconf.IPCConfigDefault ipcConfig.Path = ipcPath @@ -123,7 +134,7 @@ func createForwardingNode( } func createSequencer( - t *testing.T, ctx context.Context, + ctx context.Context, t *testing.T, first *arbnode.Node, l1stack *node.Node, l1info *BlockchainTestInfo, @@ -131,7 +142,7 @@ func createSequencer( ipcPath string, redisUrl string, ) (*ethclient.Client, *arbnode.Node) { - stackConfig := getTestStackConfig(t) + stackConfig := stackConfigForTest(t) ipcConfig := genericconf.IPCConfigDefault ipcConfig.Path = ipcPath ipcConfig.Apply(stackConfig) @@ -144,126 +155,180 @@ func createSequencer( return Create2ndNodeWithConfig(t, ctx, first, l1stack, l1info, l2InitData, nodeConfig, stackConfig) } -func TestRedisForwarder(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() +// tmpPath returns file path with specified filename from temporary directory of the test. +func tmpPath(t *testing.T, filename string) string { + return filepath.Join(t.TempDir(), filename) +} + +// testNodes creates specified number of paths for ipc from temporary directory of the test. +// e.g. /tmp/TestRedisForwarder689063006/003/0.ipc, /tmp/TestRedisForwarder689063006/007/1.ipc and so on. 
+func testNodes(t *testing.T, n int) []string { + var paths []string + for i := 0; i < n; i++ { + paths = append(paths, tmpPath(t, fmt.Sprintf("%d.ipc", i))) + } + return paths +} + +// waitForSequencerLockout blocks and waits until there is some sequencer chosen for specified duration. +// Errors out after timeout. +func waitForSequencerLockout(ctx context.Context, node *arbnode.Node, duration time.Duration) error { + if node == nil { + return fmt.Errorf("node is nil") + } + if node.SeqCoordinator == nil { + return fmt.Errorf("sequence coordinator in the node is nil") + } + // TODO: implement exponential backoff retry mechanism and use it instead. + for { + select { + case <-time.After(duration): + return fmt.Errorf("no sequencer was chosen") + default: + if c, err := node.SeqCoordinator.CurrentChosenSequencer(ctx); err == nil && c != "" { + return nil + } + time.Sleep(100 * time.Millisecond) + } + } +} - fallbackIpcPath := filepath.Join(t.TempDir(), "fallback.ipc") - nodePaths := []string{} - for i := 0; i < 5; i++ { - nodePaths = append(nodePaths, filepath.Join(t.TempDir(), fmt.Sprintf("%d.ipc", i))) +// stopNodes blocks and waits until all nodes are stopped. +func stopNodes(nodes []*arbnode.Node) { + var wg sync.WaitGroup + for _, node := range nodes { + if node != nil { + wg.Add(1) + n := node + go func() { + n.StopAndWait() + wg.Done() + }() + } } - redisServer, redisUrl := initMiniRedisForTest(t, ctx, nodePaths) + wg.Wait() +} + +func user(suffix string, idx int) string { + return fmt.Sprintf("User%s_%d", suffix, idx) +} + +// tryWithTimeout calls function f() repeatedly foruntil it succeeds. +func tryWithTimeout(ctx context.Context, f func() error, duration time.Duration) error { + for { + select { + case <-time.After(duration): + return fmt.Errorf("timeout expired") + default: + if err := f(); err == nil { + return nil + } + } + } +} + +func TestRedisForwarder(t *testing.T) { + ctx := context.Background() + + nodePaths := testNodes(t, nodesCount) + fbNodePath := tmpPath(t, "fallback.ipc") // fallback node path + redisServer, redisUrl := initRedis(ctx, t, append(nodePaths, fbNodePath)) defer redisServer.Close() - l2info, fallbackNode, fallbackClient, l1info, _, _, l1stack := createFallbackSequencer(t, ctx, fallbackIpcPath, redisUrl) + l2info, fallbackNode, fallbackClient, l1info, _, _, l1stack := fallbackSequencer(ctx, t, + &fallbackSequencerOpts{ + ipcPath: fbNodePath, + redisUrl: redisUrl, + enableSecCoordinator: true, + }) defer requireClose(t, l1stack) defer fallbackNode.StopAndWait() - forwardingClient, forwardingNode := createForwardingNode(t, ctx, fallbackNode, l1stack, l1info, &l2info.ArbInitData, "", redisUrl, fallbackIpcPath) + forwardingClient, forwardingNode := createForwardingNode(ctx, t, fallbackNode, l1stack, l1info, &l2info.ArbInitData, "", redisUrl, fbNodePath) defer forwardingNode.StopAndWait() - var sequencers []*arbnode.Node + var seqNodes []*arbnode.Node var seqClients []*ethclient.Client for _, path := range nodePaths { - client, node := createSequencer(t, ctx, fallbackNode, l1stack, l1info, &l2info.ArbInitData, path, redisUrl) - sequencers = append(sequencers, node) + client, node := createSequencer(ctx, t, fallbackNode, l1stack, l1info, &l2info.ArbInitData, path, redisUrl) + seqNodes = append(seqNodes, node) seqClients = append(seqClients, client) } - clients := seqClients - clients = append(clients, fallbackClient) - nodes := sequencers - nodes = append(nodes, fallbackNode) - defer func() { - var wg sync.WaitGroup - for _, node := range nodes { - if node != 
nil && node != fallbackNode { - wg.Add(1) - n := node - go func() { - n.StopAndWait() - wg.Done() - }() - } - } - wg.Wait() - }() + defer stopNodes(seqNodes) - for i := range clients { - userA := fmt.Sprintf("UserA%d", i) + for i := range seqClients { + userA := user("A", i) l2info.GenerateAccount(userA) tx := l2info.PrepareTx("Owner", userA, l2info.TransferGas, big.NewInt(1e12+int64(l2info.TransferGas)*l2info.GasPrice.Int64()), nil) err := fallbackClient.SendTransaction(ctx, tx) - testhelpers.RequireImpl(t, err) + Require(t, err) _, err = EnsureTxSucceeded(ctx, fallbackClient, tx) - testhelpers.RequireImpl(t, err) + Require(t, err) } - for i := range clients { - userA := fmt.Sprintf("UserA%d", i) - userB := fmt.Sprintf("UserB%d", i) + for i := range seqClients { + if err := waitForSequencerLockout(ctx, fallbackNode, 2*time.Second); err != nil { + t.Fatalf("Error waiting for lockout: %v", err) + } + userA := user("A", i) + userB := user("B", i) l2info.GenerateAccount(userB) - tx := l2info.PrepareTx(userA, userB, l2info.TransferGas, big.NewInt(1e12), nil) - var err error - for j := 0; j < 20; j++ { - err = forwardingClient.SendTransaction(ctx, tx) - if err == nil { - break - } - time.Sleep(execution.DefaultTestForwarderConfig.UpdateInterval / 2) + tx := l2info.PrepareTx(userA, userB, l2info.TransferGas, transferAmount, nil) + + sendFunc := func() error { return forwardingClient.SendTransaction(ctx, tx) } + if err := tryWithTimeout(ctx, sendFunc, execution.DefaultTestForwarderConfig.UpdateInterval*10); err != nil { + t.Fatalf("Client: %v, error sending transaction: %v", i, err) } - testhelpers.RequireImpl(t, err) - _, err = EnsureTxSucceeded(ctx, clients[i], tx) - testhelpers.RequireImpl(t, err) - l2balance, err := clients[i].BalanceAt(ctx, l2info.GetAddress(userB), nil) - testhelpers.RequireImpl(t, err) - if l2balance.Cmp(big.NewInt(1e12)) != 0 { - testhelpers.FailImpl(t, "Unexpected balance:", l2balance) + _, err := EnsureTxSucceeded(ctx, seqClients[i], tx) + Require(t, err) + + l2balance, err := seqClients[i].BalanceAt(ctx, l2info.GetAddress(userB), nil) + Require(t, err) + + if l2balance.Cmp(transferAmount) != 0 { + Fatal(t, "Unexpected balance:", l2balance) } - if i < len(nodes)-1 { - time.Sleep(100 * time.Millisecond) - nodes[i].StopAndWait() - nodes[i] = nil + if i < len(seqNodes) { + seqNodes[i].StopAndWait() + seqNodes[i] = nil } } } func TestRedisForwarderFallbackNoRedis(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := context.Background() - fallbackIpcPath := filepath.Join(t.TempDir(), "fallback.ipc") - nodePaths := []string{} - for i := 0; i < 10; i++ { - nodePaths = append(nodePaths, filepath.Join(t.TempDir(), fmt.Sprintf("%d.ipc", i))) - } - redisServer, redisUrl := initMiniRedisForTest(t, ctx, nodePaths) + fallbackIpcPath := tmpPath(t, "fallback.ipc") + nodePaths := testNodes(t, nodesCount) + redisServer, redisUrl := initRedis(ctx, t, nodePaths) redisServer.Close() - l2info, fallbackNode, fallbackClient, l1info, _, _, l1stack := createFallbackSequencer(t, ctx, fallbackIpcPath, redisUrl) + l2info, fallbackNode, fallbackClient, l1info, _, _, l1stack := fallbackSequencer(ctx, t, + &fallbackSequencerOpts{ + ipcPath: fallbackIpcPath, + redisUrl: redisUrl, + enableSecCoordinator: false, + }) defer requireClose(t, l1stack) defer fallbackNode.StopAndWait() - forwardingClient, forwardingNode := createForwardingNode(t, ctx, fallbackNode, l1stack, l1info, &l2info.ArbInitData, "", redisUrl, fallbackIpcPath) + forwardingClient, 
forwardingNode := createForwardingNode(ctx, t, fallbackNode, l1stack, l1info, &l2info.ArbInitData, "", redisUrl, fallbackIpcPath) defer forwardingNode.StopAndWait() - l2info.GenerateAccount("User2") - var err error - tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, big.NewInt(1e12), nil) - for j := 0; j < 20; j++ { - err = forwardingClient.SendTransaction(ctx, tx) - if err == nil { - break - } - time.Sleep(100 * time.Millisecond) - } - testhelpers.RequireImpl(t, err) + user := "User2" + l2info.GenerateAccount(user) + tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, transferAmount, nil) + sendFunc := func() error { return forwardingClient.SendTransaction(ctx, tx) } + err := tryWithTimeout(ctx, sendFunc, execution.DefaultTestForwarderConfig.UpdateInterval*10) + Require(t, err) _, err = EnsureTxSucceeded(ctx, fallbackClient, tx) - testhelpers.RequireImpl(t, err) - l2balance, err := fallbackClient.BalanceAt(ctx, l2info.GetAddress("User2"), nil) - testhelpers.RequireImpl(t, err) - if l2balance.Cmp(big.NewInt(1e12)) != 0 { - t.Fatal("Unexpected balance:", l2balance) + Require(t, err) + + l2balance, err := fallbackClient.BalanceAt(ctx, l2info.GetAddress(user), nil) + Require(t, err) + + if l2balance.Cmp(transferAmount) != 0 { + t.Errorf("Got balance: %v, want: %v", l2balance, transferAmount) } } diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index 87d6c9b729..26e2d4a64e 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -38,27 +38,27 @@ import ( func DeployOneStepProofEntry(t *testing.T, ctx context.Context, auth *bind.TransactOpts, client *ethclient.Client) common.Address { osp0, _, _, err := ospgen.DeployOneStepProver0(auth, client) if err != nil { - Fail(t, err) + Fatal(t, err) } ospMem, _, _, err := ospgen.DeployOneStepProverMemory(auth, client) if err != nil { - Fail(t, err) + Fatal(t, err) } ospMath, _, _, err := ospgen.DeployOneStepProverMath(auth, client) if err != nil { - Fail(t, err) + Fatal(t, err) } ospHostIo, _, _, err := ospgen.DeployOneStepProverHostIo(auth, client) if err != nil { - Fail(t, err) + Fatal(t, err) } ospEntry, tx, _, err := ospgen.DeployOneStepProofEntry(auth, client, osp0, ospMem, ospMath, ospHostIo) if err != nil { - Fail(t, err) + Fatal(t, err) } _, err = EnsureTxSucceeded(ctx, client, tx) if err != nil { - Fail(t, err) + Fatal(t, err) } return ospEntry } @@ -165,7 +165,7 @@ func makeBatch(t *testing.T, l2Node *arbnode.Node, l2Info *BlockchainTestInfo, b batches, err := nodeSeqInbox.LookupBatchesInRange(ctx, receipt.BlockNumber, receipt.BlockNumber) Require(t, err) if len(batches) == 0 { - Fail(t, "batch not found after AddSequencerL2BatchFromOrigin") + Fatal(t, "batch not found after AddSequencerL2BatchFromOrigin") } err = l2Node.InboxTracker.AddSequencerBatches(ctx, backend, batches) Require(t, err) @@ -245,7 +245,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool) { configByValidationNode(t, conf, valStack) fatalErrChan := make(chan error, 10) - asserterRollupAddresses := DeployOnTestL1(t, ctx, l1Info, l1Backend, chainConfig) + asserterRollupAddresses, initMessage := DeployOnTestL1(t, ctx, l1Info, l1Backend, chainConfig) deployerTxOpts := l1Info.GetDefaultTransactOpts("deployer", ctx) sequencerTxOpts := l1Info.GetDefaultTransactOpts("sequencer", ctx) @@ -255,7 +255,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool) { asserterBridgeAddr, asserterSeqInbox, asserterSeqInboxAddr := setupSequencerInboxStub(ctx, t, 
l1Info, l1Backend, chainConfig) challengerBridgeAddr, challengerSeqInbox, challengerSeqInboxAddr := setupSequencerInboxStub(ctx, t, l1Info, l1Backend, chainConfig) - asserterL2Info, asserterL2Stack, asserterL2ChainDb, asserterL2ArbDb, asserterL2Blockchain := createL2BlockChain(t, nil, "", chainConfig) + asserterL2Info, asserterL2Stack, asserterL2ChainDb, asserterL2ArbDb, asserterL2Blockchain := createL2BlockChainWithStackConfig(t, nil, "", chainConfig, initMessage, nil) asserterRollupAddresses.Bridge = asserterBridgeAddr asserterRollupAddresses.SequencerInbox = asserterSeqInboxAddr asserterL2, err := arbnode.CreateNode(ctx, asserterL2Stack, asserterL2ChainDb, asserterL2ArbDb, NewFetcherFromConfig(conf), asserterL2Blockchain, l1Backend, asserterRollupAddresses, nil, nil, nil, fatalErrChan) @@ -263,7 +263,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool) { err = asserterL2.Start(ctx) Require(t, err) - challengerL2Info, challengerL2Stack, challengerL2ChainDb, challengerL2ArbDb, challengerL2Blockchain := createL2BlockChain(t, nil, "", chainConfig) + challengerL2Info, challengerL2Stack, challengerL2ChainDb, challengerL2ArbDb, challengerL2Blockchain := createL2BlockChainWithStackConfig(t, nil, "", chainConfig, initMessage, nil) challengerRollupAddresses := *asserterRollupAddresses challengerRollupAddresses.Bridge = challengerBridgeAddr challengerRollupAddresses.SequencerInbox = challengerSeqInboxAddr @@ -289,22 +289,22 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool) { locator, err := server_common.NewMachineLocator("") if err != nil { - Fail(t, err) + Fatal(t, err) } wasmModuleRoot := locator.LatestWasmModuleRoot() if (wasmModuleRoot == common.Hash{}) { - Fail(t, "latest machine not found") + Fatal(t, "latest machine not found") } asserterGenesis := asserterL2.Execution.ArbInterface.BlockChain().Genesis() challengerGenesis := challengerL2.Execution.ArbInterface.BlockChain().Genesis() if asserterGenesis.Hash() != challengerGenesis.Hash() { - Fail(t, "asserter and challenger have different genesis hashes") + Fatal(t, "asserter and challenger have different genesis hashes") } asserterLatestBlock := asserterL2.Execution.ArbInterface.BlockChain().CurrentBlock() challengerLatestBlock := challengerL2.Execution.ArbInterface.BlockChain().CurrentBlock() if asserterLatestBlock.Hash() == challengerLatestBlock.Hash() { - Fail(t, "asserter and challenger have the same end block") + Fatal(t, "asserter and challenger have the same end block") } asserterStartGlobalState := validator.GoGlobalState{ @@ -317,7 +317,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool) { Batch: 2, PosInBatch: 0, } - numBlocks := asserterLatestBlock.NumberU64() - asserterGenesis.NumberU64() + numBlocks := asserterLatestBlock.Number.Uint64() - asserterGenesis.NumberU64() resultReceiver, challengeManagerAddr := CreateChallenge( t, @@ -339,29 +339,29 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool) { asserterValidator, err := staker.NewStatelessBlockValidator(asserterL2.InboxReader, asserterL2.InboxTracker, asserterL2.TxStreamer, asserterL2Blockchain, asserterL2ChainDb, asserterL2ArbDb, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) if err != nil { - Fail(t, err) + Fatal(t, err) } err = asserterValidator.Start(ctx) if err != nil { - Fail(t, err) + Fatal(t, err) } defer asserterValidator.Stop() asserterManager, err := staker.NewChallengeManager(ctx, l1Backend, &asserterTxOpts, asserterTxOpts.From, challengeManagerAddr, 1, asserterL2Blockchain, asserterL2.InboxTracker, 
asserterValidator, 0, 0) if err != nil { - Fail(t, err) + Fatal(t, err) } challengerValidator, err := staker.NewStatelessBlockValidator(challengerL2.InboxReader, challengerL2.InboxTracker, challengerL2.TxStreamer, challengerL2Blockchain, challengerL2ChainDb, challengerL2ArbDb, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) if err != nil { - Fail(t, err) + Fatal(t, err) } err = challengerValidator.Start(ctx) if err != nil { - Fail(t, err) + Fatal(t, err) } defer challengerValidator.Stop() challengerManager, err := staker.NewChallengeManager(ctx, l1Backend, &challengerTxOpts, challengerTxOpts.From, challengeManagerAddr, 1, challengerL2Blockchain, challengerL2.InboxTracker, challengerValidator, 0, 0) if err != nil { - Fail(t, err) + Fatal(t, err) } for i := 0; i < 100; i++ { @@ -389,10 +389,10 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool) { t.Log("challenge completed! asserter hit expected error:", err) return } - Fail(t, "challenge step", i, "hit error:", err) + Fatal(t, "challenge step", i, "hit error:", err) } if tx == nil { - Fail(t, "no move") + Fatal(t, "no move") } _, err = EnsureTxSucceeded(ctx, l1Backend, tx) if err != nil { @@ -400,22 +400,22 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool) { t.Log("challenge complete! Tx failed as expected:", err) return } - Fail(t, err) + Fatal(t, err) } confirmLatestBlock(ctx, t, l1Info, l1Backend) winner, err := resultReceiver.Winner(&bind.CallOpts{}) if err != nil { - Fail(t, err) + Fatal(t, err) } if winner == (common.Address{}) { continue } if winner != expectedWinner { - Fail(t, "wrong party won challenge") + Fatal(t, "wrong party won challenge") } } - Fail(t, "challenge timed out without winner") + Fatal(t, "challenge timed out without winner") } diff --git a/system_tests/infra_fee_test.go b/system_tests/infra_fee_test.go index fd10badd4e..89f869576d 100644 --- a/system_tests/infra_fee_test.go +++ b/system_tests/infra_fee_test.go @@ -67,9 +67,9 @@ func TestInfraFee(t *testing.T) { Require(t, err) if !arbmath.BigEquals(netFeeBalanceBefore, netFeeBalanceAfter) { - Fail(t, netFeeBalanceBefore, netFeeBalanceAfter) + Fatal(t, netFeeBalanceBefore, netFeeBalanceAfter) } if !arbmath.BigEquals(infraFeeBalanceAfter, expectedBalanceAfter) { - Fail(t, infraFeeBalanceBefore, expectedFunds, infraFeeBalanceAfter, expectedBalanceAfter) + Fatal(t, infraFeeBalanceBefore, expectedFunds, infraFeeBalanceAfter, expectedBalanceAfter) } } diff --git a/system_tests/ipc_test.go b/system_tests/ipc_test.go index ad5a8fbc64..01ecf859d8 100644 --- a/system_tests/ipc_test.go +++ b/system_tests/ipc_test.go @@ -18,7 +18,7 @@ func TestIpcRpc(t *testing.T) { ipcConfig := genericconf.IPCConfigDefault ipcConfig.Path = ipcPath - stackConf := getTestStackConfig(t) + stackConf := stackConfigForTest(t) ipcConfig.Apply(stackConf) ctx, cancel := context.WithCancel(context.Background()) diff --git a/system_tests/log_subscription_test.go b/system_tests/log_subscription_test.go index d0896c0ea7..5ee1732fb0 100644 --- a/system_tests/log_subscription_test.go +++ b/system_tests/log_subscription_test.go @@ -37,7 +37,7 @@ func TestLogSubscription(t *testing.T) { Require(t, err) if len(receipt.Logs) != 1 { - Fail(t, "Unexpected number of logs", len(receipt.Logs)) + Fatal(t, "Unexpected number of logs", len(receipt.Logs)) } var receiptLog types.Log = *receipt.Logs[0] @@ -46,11 +46,11 @@ func TestLogSubscription(t *testing.T) { defer timer.Stop() select { case <-timer.C: - Fail(t, "Hit timeout waiting for log from subscription") + Fatal(t, "Hit 
timeout waiting for log from subscription") case subscriptionLog = <-logChan: } if !reflect.DeepEqual(receiptLog, subscriptionLog) { - Fail(t, "Receipt log", receiptLog, "is different than subscription log", subscriptionLog) + Fatal(t, "Receipt log", receiptLog, "is different than subscription log", subscriptionLog) } _, err = client.BlockByHash(ctx, subscriptionLog.BlockHash) Require(t, err) diff --git a/system_tests/meaningless_reorg_test.go b/system_tests/meaningless_reorg_test.go index f9e9f6e57f..31e20a67e2 100644 --- a/system_tests/meaningless_reorg_test.go +++ b/system_tests/meaningless_reorg_test.go @@ -35,14 +35,14 @@ func TestMeaninglessBatchReorg(t *testing.T) { for i := 0; ; i++ { if i >= 500 { - Fail(t, "Failed to read batch from L1") + Fatal(t, "Failed to read batch from L1") } msgNum, err := arbNode.Execution.ExecEngine.HeadMessageNumber() Require(t, err) if msgNum == 1 { break } else if msgNum > 1 { - Fail(t, "More than two batches in test?") + Fatal(t, "More than two batches in test?") } time.Sleep(10 * time.Millisecond) } @@ -50,7 +50,7 @@ func TestMeaninglessBatchReorg(t *testing.T) { Require(t, err) originalBatchBlock := batchReceipt.BlockNumber.Uint64() if metadata.L1Block != originalBatchBlock { - Fail(t, "Posted batch in block", originalBatchBlock, "but metadata says L1 block was", metadata.L1Block) + Fatal(t, "Posted batch in block", originalBatchBlock, "but metadata says L1 block was", metadata.L1Block) } _, l2Receipt := TransferBalance(t, "Owner", "Owner", common.Big1, l2Info, l2Client, ctx) @@ -77,21 +77,21 @@ func TestMeaninglessBatchReorg(t *testing.T) { newBatchBlock := newBatchReceipt.BlockNumber.Uint64() if newBatchBlock == originalBatchBlock { - Fail(t, "Attempted to change L1 block number in batch reorg, but it ended up in the same block", newBatchBlock) + Fatal(t, "Attempted to change L1 block number in batch reorg, but it ended up in the same block", newBatchBlock) } else { t.Log("Batch successfully moved in reorg from L1 block", originalBatchBlock, "to L1 block", newBatchBlock) } for i := 0; ; i++ { if i >= 500 { - Fail(t, "Failed to read batch reorg from L1") + Fatal(t, "Failed to read batch reorg from L1") } metadata, err = arbNode.InboxTracker.GetBatchMetadata(1) Require(t, err) if metadata.L1Block == newBatchBlock { break } else if metadata.L1Block != originalBatchBlock { - Fail(t, "Batch L1 block changed from", originalBatchBlock, "to", metadata.L1Block, "instead of expected", metadata.L1Block) + Fatal(t, "Batch L1 block changed from", originalBatchBlock, "to", metadata.L1Block, "instead of expected", metadata.L1Block) } time.Sleep(10 * time.Millisecond) } @@ -103,6 +103,6 @@ func TestMeaninglessBatchReorg(t *testing.T) { Require(t, err) if l2Header.Hash() != l2Receipt.BlockHash { - Fail(t, "L2 block hash changed") + Fatal(t, "L2 block hash changed") } } diff --git a/system_tests/outbox_test.go b/system_tests/outbox_test.go index f72d3486bc..6b43cc83b0 100644 --- a/system_tests/outbox_test.go +++ b/system_tests/outbox_test.go @@ -90,10 +90,10 @@ func TestOutboxProofs(t *testing.T) { Require(t, err, "No receipt for txn") if receipt.Status != types.ReceiptStatusSuccessful { - Fail(t, "Tx failed with status code:", receipt) + Fatal(t, "Tx failed with status code:", receipt) } if len(receipt.Logs) == 0 { - Fail(t, "Tx didn't emit any logs") + Fatal(t, "Tx didn't emit any logs") } for _, log := range receipt.Logs { @@ -230,7 +230,7 @@ func TestOutboxProofs(t *testing.T) { if zero, ok := partials[place]; ok { if zero != (common.Hash{}) { - Fail(t, 
"Somehow got 2 partials for the same level\n\t1st:", zero, "\n\t2nd:", hash) + Fatal(t, "Somehow got 2 partials for the same level\n\t1st:", zero, "\n\t2nd:", hash) } partials[place] = hash partialsByLevel[level] = hash @@ -264,7 +264,7 @@ func TestOutboxProofs(t *testing.T) { curr, ok := known[step] if !ok { - Fail(t, "We should know the current node's value") + Fatal(t, "We should know the current node's value") } left := curr @@ -276,7 +276,7 @@ func TestOutboxProofs(t *testing.T) { step.Leaf -= 1 << step.Level partial, ok := known[step] if !ok { - Fail(t, "There should be a partial here") + Fatal(t, "There should be a partial here") } left = partial } else { @@ -309,7 +309,7 @@ func TestOutboxProofs(t *testing.T) { for i, place := range nodes { hash, ok := known[place] if !ok { - Fail(t, "We're missing data for the node at position", place) + Fatal(t, "We're missing data for the node at position", place) } hashes[i] = hash t.Log("node", place, hash) @@ -323,7 +323,7 @@ func TestOutboxProofs(t *testing.T) { } if !proof.IsCorrect() { - Fail(t, "Proof is wrong") + Fatal(t, "Proof is wrong") } // Check NodeInterface.sol produces equivalent proofs @@ -336,10 +336,10 @@ func TestOutboxProofs(t *testing.T) { nodeSend := outboxProof.Send if nodeRoot != rootHash { - Fail(t, "NodeInterface root differs\n", nodeRoot, "\n", rootHash) + Fatal(t, "NodeInterface root differs\n", nodeRoot, "\n", rootHash) } if len(hashes) != len(nodeProof) { - Fail(t, "NodeInterface proof is the wrong size", len(nodeProof), len(hashes)) + Fatal(t, "NodeInterface proof is the wrong size", len(nodeProof), len(hashes)) } for i, correct := range hashes { if nodeProof[i] != correct { @@ -347,7 +347,7 @@ func TestOutboxProofs(t *testing.T) { } } if nodeSend != provable.hash { - Fail(t, "NodeInterface send differs\n", nodeSend, "\n", provable.hash) + Fatal(t, "NodeInterface send differs\n", nodeSend, "\n", provable.hash) } } } diff --git a/system_tests/precompile_fuzz_test.go b/system_tests/precompile_fuzz_test.go index 7a2438f79c..8ab133cf58 100644 --- a/system_tests/precompile_fuzz_test.go +++ b/system_tests/precompile_fuzz_test.go @@ -4,7 +4,6 @@ package arbtest import ( - "encoding/json" "math/big" "testing" @@ -12,11 +11,10 @@ import ( "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" - "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/arbosState" + "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/burn" "github.com/offchainlabs/nitro/gethhook" "github.com/offchainlabs/nitro/precompiles" @@ -35,11 +33,7 @@ func FuzzPrecompiles(f *testing.F) { } burner := burn.NewSystemBurner(nil, false) chainConfig := params.ArbitrumDevTestChainConfig() - serializedChainConfig, err := json.Marshal(chainConfig) - if err != nil { - log.Crit("failed to serialize chain config", "error", err) - } - _, err = arbosState.InitializeArbosState(sdb, burner, chainConfig, serializedChainConfig) + _, err = arbosState.InitializeArbosState(sdb, burner, chainConfig, arbostypes.TestInitMessage) if err != nil { panic(err) } @@ -60,7 +54,7 @@ func FuzzPrecompiles(f *testing.F) { GasLimit: fuzzGas, BaseFee: common.Big1, } - evm := vm.NewEVM(blockContext, txContext, sdb, params.ArbitrumDevTestChainConfig(), vm.Config{}) + evm := vm.NewEVM(blockContext, txContext, sdb, chainConfig, vm.Config{}) 
// Pick a precompile address based on the first byte of the input var addr common.Address @@ -78,19 +72,18 @@ func FuzzPrecompiles(f *testing.F) { } // Create and apply a message - msg := types.NewMessage( - common.Address{}, - &addr, - 0, - new(big.Int), - fuzzGas, - new(big.Int), - new(big.Int), - new(big.Int), - input, - nil, - true, - ) + msg := &core.Message{ + From: common.Address{}, + To: &addr, + Nonce: 0, + Value: new(big.Int), + GasLimit: fuzzGas, + GasPrice: new(big.Int), + GasFeeCap: new(big.Int), + GasTipCap: new(big.Int), + Data: input, + AccessList: nil, + } _, _ = core.ApplyMessage(evm, msg, &gp) }) } diff --git a/system_tests/precompile_test.go b/system_tests/precompile_test.go index 21675eba39..ad08ff7471 100644 --- a/system_tests/precompile_test.go +++ b/system_tests/precompile_test.go @@ -29,7 +29,7 @@ func TestPurePrecompileMethodCalls(t *testing.T) { chainId, err := arbSys.ArbChainID(&bind.CallOpts{}) Require(t, err, "failed to get the ChainID") if chainId.Uint64() != params.ArbitrumDevTestChainConfig().ChainID.Uint64() { - Fail(t, "Wrong ChainID", chainId.Uint64()) + Fatal(t, "Wrong ChainID", chainId.Uint64()) } } @@ -45,7 +45,7 @@ func TestViewLogReverts(t *testing.T) { err = arbDebug.EventsView(nil) if err == nil { - Fail(t, "unexpected success") + Fatal(t, "unexpected success") } } @@ -61,24 +61,24 @@ func TestCustomSolidityErrors(t *testing.T) { Require(t, err, "could not bind ArbDebug contract") customError := arbDebug.CustomRevert(callOpts, 1024) if customError == nil { - Fail(t, "customRevert call should have errored") + Fatal(t, "customRevert call should have errored") } observedMessage := customError.Error() expectedMessage := "execution reverted: error Custom(1024, This spider family wards off bugs: /\\oo/\\ //\\(oo)/\\ /\\oo/\\, true)" if observedMessage != expectedMessage { - Fail(t, observedMessage) + Fatal(t, observedMessage) } arbSys, err := precompilesgen.NewArbSys(arbos.ArbSysAddress, client) Require(t, err, "could not bind ArbSys contract") _, customError = arbSys.ArbBlockHash(callOpts, big.NewInt(1e9)) if customError == nil { - Fail(t, "out of range ArbBlockHash call should have errored") + Fatal(t, "out of range ArbBlockHash call should have errored") } observedMessage = customError.Error() expectedMessage = "execution reverted: error InvalidBlockNumber(1000000000, 1)" if observedMessage != expectedMessage { - Fail(t, observedMessage) + Fatal(t, observedMessage) } } @@ -98,7 +98,7 @@ func TestPrecompileErrorGasLeft(t *testing.T) { Require(t, err, "Failed to call CheckGasUsed to precompile", to) maxGas := big.NewInt(100_000) if arbmath.BigGreaterThan(gas, maxGas) { - Fail(t, "Precompile", to, "used", gas, "gas reverting, greater than max expected", maxGas) + Fatal(t, "Precompile", to, "used", gas, "gas reverting, greater than max expected", maxGas) } } diff --git a/system_tests/reorg_resequencing_test.go b/system_tests/reorg_resequencing_test.go index c56f919403..f132d46487 100644 --- a/system_tests/reorg_resequencing_test.go +++ b/system_tests/reorg_resequencing_test.go @@ -42,7 +42,7 @@ func TestReorgResequencing(t *testing.T) { balance, err := client.BalanceAt(ctx, l2info.GetAddress(account), nil) Require(t, err) if balance.Int64() != params.Ether { - Fail(t, "expected account", account, "to have a balance of 1 ether but instead it has", balance, "wei "+scenario) + Fatal(t, "expected account", account, "to have a balance of 1 ether but instead it has", balance, "wei "+scenario) } } } diff --git a/system_tests/retryable_test.go 
b/system_tests/retryable_test.go index c711ac1021..7b0c3a7563 100644 --- a/system_tests/retryable_test.go +++ b/system_tests/retryable_test.go @@ -54,7 +54,7 @@ func retryableSetup(t *testing.T) ( messages, err := delayedBridge.LookupMessagesInRange(ctx, l1Receipt.BlockNumber, l1Receipt.BlockNumber, nil) Require(t, err) if len(messages) == 0 { - Fail(t, "didn't find message for retryable submission") + Fatal(t, "didn't find message for retryable submission") } var submissionTxs []*types.Transaction for _, message := range messages { @@ -70,7 +70,7 @@ func retryableSetup(t *testing.T) ( } } if len(submissionTxs) != 1 { - Fail(t, "expected 1 tx from retryable submission, found", len(submissionTxs)) + Fatal(t, "expected 1 tx from retryable submission, found", len(submissionTxs)) } return submissionTxs[0].Hash() @@ -89,7 +89,7 @@ func retryableSetup(t *testing.T) ( block, err := l2client.BlockByNumber(ctx, arbmath.UintToBig(number)) Require(t, err, "failed to get L2 block", number, "of", blockNum) if block.Number().Uint64() != number { - Fail(t, "block number mismatch", number, block.Number().Uint64()) + Fatal(t, "block number mismatch", number, block.Number().Uint64()) } } @@ -111,7 +111,7 @@ func TestRetryableNoExist(t *testing.T) { Require(t, err) _, err = arbRetryableTx.GetTimeout(&bind.CallOpts{}, common.Hash{}) if err.Error() != "execution reverted: error NoTicketWithID()" { - Fail(t, "didn't get expected NoTicketWithID error") + Fatal(t, "didn't get expected NoTicketWithID error") } } @@ -166,7 +166,7 @@ func TestSubmitRetryableImmediateSuccess(t *testing.T) { l1receipt, err := EnsureTxSucceeded(ctx, l1client, l1tx) Require(t, err) if l1receipt.Status != types.ReceiptStatusSuccessful { - Fail(t, "l1receipt indicated failure") + Fatal(t, "l1receipt indicated failure") } waitForL1DelayBlocks(t, ctx, l1client, l1info) @@ -174,14 +174,14 @@ func TestSubmitRetryableImmediateSuccess(t *testing.T) { receipt, err := WaitForTx(ctx, l2client, lookupSubmitRetryableL2TxHash(l1receipt), time.Second*5) Require(t, err) if receipt.Status != types.ReceiptStatusSuccessful { - Fail(t) + Fatal(t) } l2balance, err := l2client.BalanceAt(ctx, l2info.GetAddress("User2"), nil) Require(t, err) if !arbmath.BigEquals(l2balance, big.NewInt(1e6)) { - Fail(t, "Unexpected balance:", l2balance) + Fatal(t, "Unexpected balance:", l2balance) } } @@ -216,7 +216,7 @@ func TestSubmitRetryableFailThenRetry(t *testing.T) { l1receipt, err := EnsureTxSucceeded(ctx, l1client, l1tx) Require(t, err) if l1receipt.Status != types.ReceiptStatusSuccessful { - Fail(t, "l1receipt indicated failure") + Fatal(t, "l1receipt indicated failure") } waitForL1DelayBlocks(t, ctx, l1client, l1info) @@ -224,10 +224,10 @@ func TestSubmitRetryableFailThenRetry(t *testing.T) { receipt, err := WaitForTx(ctx, l2client, lookupSubmitRetryableL2TxHash(l1receipt), time.Second*5) Require(t, err) if receipt.Status != types.ReceiptStatusSuccessful { - Fail(t) + Fatal(t) } if len(receipt.Logs) != 2 { - Fail(t, len(receipt.Logs)) + Fatal(t, len(receipt.Logs)) } ticketId := receipt.Logs[0].Topics[1] firstRetryTxId := receipt.Logs[1].Topics[2] @@ -236,7 +236,7 @@ func TestSubmitRetryableFailThenRetry(t *testing.T) { receipt, err = WaitForTx(ctx, l2client, firstRetryTxId, time.Second*5) Require(t, err) if receipt.Status != types.ReceiptStatusFailed { - Fail(t, receipt.GasUsed) + Fatal(t, receipt.GasUsed) } arbRetryableTx, err := precompilesgen.NewArbRetryableTx(common.HexToAddress("6e"), l2client) @@ -252,7 +252,7 @@ func TestSubmitRetryableFailThenRetry(t 
*testing.T) { receipt, err = WaitForTx(ctx, l2client, retryTxId, time.Second*1) Require(t, err) if receipt.Status != 1 { - Fail(t, receipt.Status) + Fatal(t, receipt.Status) } // verify that the increment happened, so we know the retry succeeded @@ -260,20 +260,20 @@ func TestSubmitRetryableFailThenRetry(t *testing.T) { Require(t, err) if counter != 1 { - Fail(t, "Unexpected counter:", counter) + Fatal(t, "Unexpected counter:", counter) } if len(receipt.Logs) != 1 { - Fail(t, "Unexpected log count:", len(receipt.Logs)) + Fatal(t, "Unexpected log count:", len(receipt.Logs)) } parsed, err := simple.ParseRedeemedEvent(*receipt.Logs[0]) Require(t, err) aliasedSender := util.RemapL1Address(usertxopts.From) if parsed.Caller != aliasedSender { - Fail(t, "Unexpected caller", parsed.Caller, "expected", aliasedSender) + Fatal(t, "Unexpected caller", parsed.Caller, "expected", aliasedSender) } if parsed.Redeemer != ownerTxOpts.From { - Fail(t, "Unexpected redeemer", parsed.Redeemer, "expected", ownerTxOpts.From) + Fatal(t, "Unexpected redeemer", parsed.Redeemer, "expected", ownerTxOpts.From) } } @@ -324,7 +324,7 @@ func TestSubmissionGasCosts(t *testing.T) { l1receipt, err := EnsureTxSucceeded(ctx, l1client, l1tx) Require(t, err) if l1receipt.Status != types.ReceiptStatusSuccessful { - Fail(t, "l1receipt indicated failure") + Fatal(t, "l1receipt indicated failure") } waitForL1DelayBlocks(t, ctx, l1client, l1info) @@ -353,13 +353,13 @@ func TestSubmissionGasCosts(t *testing.T) { colors.PrintMint("Receive ", receiveFunds) colors.PrintBlue("L2 Call Value ", retryableL2CallValue) if !arbmath.BigEquals(receiveFunds, retryableL2CallValue) { - Fail(t, "Recipient didn't receive the right funds") + Fatal(t, "Recipient didn't receive the right funds") } // the beneficiary should receive nothing colors.PrintMint("Beneficiary ", beneficiaryFunds) if beneficiaryFunds.Sign() != 0 { - Fail(t, "The beneficiary shouldn't have received funds") + Fatal(t, "The beneficiary shouldn't have received funds") } // the fee refund address should recieve the excess gas @@ -369,7 +369,7 @@ func TestSubmissionGasCosts(t *testing.T) { colors.PrintBlue("Excess Wei ", excessWei) colors.PrintMint("Fee Refund ", refundFunds) if !arbmath.BigEquals(refundFunds, arbmath.BigAdd(excessWei, maxSubmissionFee)) { - Fail(t, "The Fee Refund Address didn't receive the right funds") + Fatal(t, "The Fee Refund Address didn't receive the right funds") } // the faucet must pay for both the gas used and the call value supplied @@ -383,7 +383,7 @@ func TestSubmissionGasCosts(t *testing.T) { colors.PrintRed("Expected ", expectedGasChange) colors.PrintRed("Observed ", diff) colors.PrintRed("Off by ", arbmath.BigSub(expectedGasChange, diff)) - Fail(t, "Supplied gas was improperly deducted\n", fundsBeforeSubmit, "\n", fundsAfterSubmit) + Fatal(t, "Supplied gas was improperly deducted\n", fundsBeforeSubmit, "\n", fundsAfterSubmit) } } diff --git a/system_tests/seq_coordinator_test.go b/system_tests/seq_coordinator_test.go index 84636e1549..2209e82d93 100644 --- a/system_tests/seq_coordinator_test.go +++ b/system_tests/seq_coordinator_test.go @@ -125,7 +125,7 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { break } if attempts > 10 { - Fail(t, "timeout waiting for msg ", msgNum, " debug: ", currentNode.SeqCoordinator.DebugPrint()) + Fatal(t, "timeout waiting for msg ", msgNum, " debug: ", currentNode.SeqCoordinator.DebugPrint()) } <-time.After(nodeConfig.SeqCoordinator.UpdateInterval / 3) } @@ -198,7 +198,7 @@ func 
TestRedisSeqCoordinatorPriorities(t *testing.T) { // sequencing suceeds only on the leder for i := arbutil.MessageIndex(0); i < messagesPerRound; i++ { if sequencer := trySequencingEverywhere(); sequencer != currentSequencer { - Fail(t, "unexpected sequencer. expected: ", currentSequencer, " got ", sequencer) + Fatal(t, "unexpected sequencer. expected: ", currentSequencer, " got ", sequencer) } sequencedMesssages++ } @@ -223,7 +223,7 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { for attempts := 0; ; attempts++ { sequencer := trySequencingEverywhere() if sequencer == -1 && attempts > 15 { - Fail(t, "failed to sequence") + Fatal(t, "failed to sequence") } if sequencer != -1 { sequencedMesssages++ @@ -236,7 +236,7 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { if sequencer == currentSequencer { break } - Fail(t, "unexpected sequencer", "expected", currentSequencer, "got", sequencer, "messages", sequencedMesssages) + Fatal(t, "unexpected sequencer", "expected", currentSequencer, "got", sequencer, "messages", sequencedMesssages) } // all nodes get messages @@ -246,7 +246,7 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { for i := arbutil.MessageIndex(0); i < messagesPerRound; i++ { sequencer := trySequencingEverywhere() if sequencer != currentSequencer { - Fail(t, "unexpected sequencer", "expected", currentSequencer, "got", sequencer, "messages", sequencedMesssages) + Fatal(t, "unexpected sequencer", "expected", currentSequencer, "got", sequencer, "messages", sequencedMesssages) } sequencedMesssages++ } @@ -329,7 +329,7 @@ func testCoordinatorMessageSync(t *testing.T, successCase bool) { } else { _, err = WaitForTx(ctx, clientB, tx.Hash(), time.Second) if err == nil { - Fail(t, "tx received by node with different seq coordinator signing key") + Fatal(t, "tx received by node with different seq coordinator signing key") } } } diff --git a/system_tests/seq_nonce_test.go b/system_tests/seq_nonce_test.go index 80de4cfa0a..968f141364 100644 --- a/system_tests/seq_nonce_test.go +++ b/system_tests/seq_nonce_test.go @@ -53,7 +53,7 @@ func TestSequencerParallelNonces(t *testing.T) { balance, err := client.BalanceAt(ctx, addr, nil) Require(t, err) if !arbmath.BigEquals(balance, big.NewInt(100)) { - Fail(t, "Unexpected user balance", balance) + Fatal(t, "Unexpected user balance", balance) } } @@ -72,14 +72,14 @@ func TestSequencerNonceTooHigh(t *testing.T) { tx := l2info.PrepareTx("Owner", "Owner", l2info.TransferGas, common.Big0, nil) err := client.SendTransaction(ctx, tx) if err == nil { - Fail(t, "No error when nonce was too high") + Fatal(t, "No error when nonce was too high") } if !strings.Contains(err.Error(), core.ErrNonceTooHigh.Error()) { - Fail(t, "Unexpected transaction error", err) + Fatal(t, "Unexpected transaction error", err) } elapsed := time.Since(before) if elapsed > 2*config.Sequencer.NonceFailureCacheExpiry { - Fail(t, "Sequencer took too long to respond with nonce too high") + Fatal(t, "Sequencer took too long to respond with nonce too high") } } @@ -102,7 +102,7 @@ func TestSequencerNonceTooHighQueueFull(t *testing.T) { go func() { err := client.SendTransaction(ctx, tx) if err == nil { - Fail(t, "No error when nonce was too high") + Fatal(t, "No error when nonce was too high") } atomic.AddUint64(&completed, 1) }() @@ -115,7 +115,7 @@ func TestSequencerNonceTooHighQueueFull(t *testing.T) { break } if wait == 0 || got > expected { - Fail(t, "Wrong number of transaction responses; got", got, "but expected", expected) + Fatal(t, "Wrong number of transaction 
responses; got", got, "but expected", expected) } time.Sleep(time.Millisecond * 100) } diff --git a/system_tests/seq_reject_test.go b/system_tests/seq_reject_test.go index 1e26c0182d..19c06c4bc3 100644 --- a/system_tests/seq_reject_test.go +++ b/system_tests/seq_reject_test.go @@ -108,11 +108,11 @@ func TestSequencerRejection(t *testing.T) { break } if i == 0 { - Fail(t, "failed to reach block 200, only reached block", block) + Fatal(t, "failed to reach block 200, only reached block", block) } select { case err := <-feedErrChan: - Fail(t, "error: ", err) + Fatal(t, "error: ", err) case <-time.After(time.Millisecond * 100): } } @@ -128,12 +128,12 @@ func TestSequencerRejection(t *testing.T) { if err != nil { select { case err := <-feedErrChan: - Fail(t, "error: ", err) + Fatal(t, "error: ", err) case <-time.After(time.Millisecond * 100): } if i == 0 { client2Block, _ := client2.BlockNumber(ctx) - Fail(t, "client2 failed to reach client1 block ", header1.Number, ", only reached block", client2Block) + Fatal(t, "client2 failed to reach client1 block ", header1.Number, ", only reached block", client2Block) } continue } @@ -143,7 +143,7 @@ func TestSequencerRejection(t *testing.T) { } else { colors.PrintBlue("header 1:", header1) colors.PrintBlue("header 2:", header2) - Fail(t, "header 1 and header 2 have different hashes") + Fatal(t, "header 1 and header 2 have different hashes") } } } diff --git a/system_tests/seq_whitelist_test.go b/system_tests/seq_whitelist_test.go index f24ce79c9b..2d671dcdd6 100644 --- a/system_tests/seq_whitelist_test.go +++ b/system_tests/seq_whitelist_test.go @@ -35,6 +35,6 @@ func TestSequencerWhitelist(t *testing.T) { tx := l2info.PrepareTx("User2", "User", l2info.TransferGas, big.NewInt(params.Ether/10), nil) err := client.SendTransaction(ctx, tx) if err == nil { - Fail(t, "transaction from user not on whitelist accepted") + Fatal(t, "transaction from user not on whitelist accepted") } } diff --git a/system_tests/seqcompensation_test.go b/system_tests/seqcompensation_test.go index d56854ac13..362acf6a30 100644 --- a/system_tests/seqcompensation_test.go +++ b/system_tests/seqcompensation_test.go @@ -50,12 +50,12 @@ func TestSequencerCompensation(t *testing.T) { l2balance, err := l2clientB.BalanceAt(ctx, l2info.GetAddress("User2"), nil) Require(t, err) if l2balance.Cmp(big.NewInt(1e12)) != 0 { - Fail(t, "Unexpected balance:", l2balance) + Fatal(t, "Unexpected balance:", l2balance) } initialSeqBalance, err := l2clientB.BalanceAt(ctx, l1pricing.BatchPosterAddress, big.NewInt(0)) Require(t, err) if initialSeqBalance.Sign() != 0 { - Fail(t, "Unexpected initial sequencer balance:", initialSeqBalance) + Fatal(t, "Unexpected initial sequencer balance:", initialSeqBalance) } } diff --git a/system_tests/seqinbox_test.go b/system_tests/seqinbox_test.go index fb0652bc6b..56d727dd26 100644 --- a/system_tests/seqinbox_test.go +++ b/system_tests/seqinbox_test.go @@ -81,9 +81,8 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { accountName := func(x int) string { if x == 0 { return "Owner" - } else { - return fmt.Sprintf("Account%v", x) } + return fmt.Sprintf("Account%v", x) } accounts := []string{"ReorgPadding"} @@ -124,7 +123,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { currentHeader, err := l1Client.HeaderByNumber(ctx, nil) Require(t, err) if currentHeader.Number.Int64()-int64(reorgTargetNumber) < 65 { - Fail(t, "Less than 65 blocks of difference between current block", currentHeader.Number, "and target", reorgTargetNumber) + Fatal(t, 
"Less than 65 blocks of difference between current block", currentHeader.Number, "and target", reorgTargetNumber) } t.Logf("Reorganizing to L1 block %v", reorgTargetNumber) reorgTarget := l1BlockChain.GetBlockByNumber(reorgTargetNumber) @@ -245,12 +244,12 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { for i := 0; ; i++ { batchCount, err := seqInbox.BatchCount(&bind.CallOpts{}) if err != nil { - Fail(t, err) + Fatal(t, err) } if batchCount.Cmp(big.NewInt(int64(len(blockStates)))) == 0 { break } else if i >= 100 { - Fail(t, "timed out waiting for l1 batch count update; have", batchCount, "want", len(blockStates)-1) + Fatal(t, "timed out waiting for l1 batch count update; have", batchCount, "want", len(blockStates)-1) } time.Sleep(10 * time.Millisecond) } @@ -261,7 +260,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { if blockNumber == expectedBlockNumber { break } else if i >= 1000 { - Fail(t, "timed out waiting for l2 block update; have", blockNumber, "want", expectedBlockNumber) + Fatal(t, "timed out waiting for l2 block update; have", blockNumber, "want", expectedBlockNumber) } time.Sleep(10 * time.Millisecond) } @@ -272,7 +271,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { if lastValidated == expectedBlockNumber { break } else if i >= 1000 { - Fail(t, "timed out waiting for block validator; have", lastValidated, "want", expectedBlockNumber) + Fatal(t, "timed out waiting for block validator; have", lastValidated, "want", expectedBlockNumber) } time.Sleep(time.Second) } @@ -282,14 +281,14 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { block, err := l2Backend.APIBackend().BlockByNumber(ctx, rpc.BlockNumber(state.l2BlockNumber)) Require(t, err) if block == nil { - Fail(t, "missing state block", state.l2BlockNumber) + Fatal(t, "missing state block", state.l2BlockNumber) } stateDb, _, err := l2Backend.APIBackend().StateAndHeaderByNumber(ctx, rpc.BlockNumber(state.l2BlockNumber)) Require(t, err) for acct, expectedBalance := range state.balances { haveBalance := stateDb.GetBalance(acct) if expectedBalance.Cmp(haveBalance) < 0 { - Fail(t, "unexpected balance for account", acct, "; expected", expectedBalance, "got", haveBalance) + Fatal(t, "unexpected balance for account", acct, "; expected", expectedBalance, "got", haveBalance) } } } diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index fe30ef7cb8..e5ed3879e9 100644 --- a/system_tests/staker_test.go +++ b/system_tests/staker_test.go @@ -89,11 +89,11 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) nodeBGenesis := l2nodeB.Execution.Backend.APIBackend().CurrentHeader().Hash() if faultyStaker { if nodeAGenesis == nodeBGenesis { - Fail(t, "node A L2 genesis hash", nodeAGenesis, "== node B L2 genesis hash", nodeBGenesis) + Fatal(t, "node A L2 genesis hash", nodeAGenesis, "== node B L2 genesis hash", nodeBGenesis) } } else { if nodeAGenesis != nodeBGenesis { - Fail(t, "node A L2 genesis hash", nodeAGenesis, "!= node B L2 genesis hash", nodeBGenesis) + Fatal(t, "node A L2 genesis hash", nodeAGenesis, "!= node B L2 genesis hash", nodeBGenesis) } } @@ -304,7 +304,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) Require(t, err) proxyAdminAddr := common.BytesToAddress(proxyAdminBytes) if proxyAdminAddr == (common.Address{}) { - Fail(t, "failed to get challenge manager proxy admin") + Fatal(t, "failed to get challenge manager proxy admin") } proxyAdmin, err := 
mocksgen.NewProxyAdminForBinding(proxyAdminAddr, l1client) @@ -348,14 +348,14 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) isHonestZombie, err := rollup.IsZombie(&bind.CallOpts{}, valWalletAddrA) Require(t, err) if isHonestZombie { - Fail(t, "staker A became a zombie") + Fatal(t, "staker A became a zombie") } watchTx, err := stakerC.Act(ctx) if err != nil && !strings.Contains(err.Error(), "catch up") { Require(t, err, "watchtower staker failed to act") } if watchTx != nil { - Fail(t, "watchtower staker made a transaction") + Fatal(t, "watchtower staker made a transaction") } if !stakerAWasStaked { stakerAWasStaked, err = rollup.IsStaked(&bind.CallOpts{}, valWalletAddrA) @@ -371,7 +371,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) } if stakerATxs == 0 || stakerBTxs == 0 { - Fail(t, "staker didn't make txs: staker A made", stakerATxs, "staker B made", stakerBTxs) + Fatal(t, "staker didn't make txs: staker A made", stakerATxs, "staker B made", stakerBTxs) } latestConfirmedNode, err := rollup.LatestConfirmed(&bind.CallOpts{}) @@ -380,18 +380,18 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) if latestConfirmedNode <= 1 && !honestStakerInactive { latestCreatedNode, err := rollup.LatestNodeCreated(&bind.CallOpts{}) Require(t, err) - Fail(t, "latest confirmed node didn't advance:", latestConfirmedNode, latestCreatedNode) + Fatal(t, "latest confirmed node didn't advance:", latestConfirmedNode, latestCreatedNode) } if faultyStaker && !sawStakerZombie { - Fail(t, "staker B didn't become a zombie despite being faulty") + Fatal(t, "staker B didn't become a zombie despite being faulty") } if !stakerAWasStaked { - Fail(t, "staker A was never staked") + Fatal(t, "staker A was never staked") } if !stakerBWasStaked { - Fail(t, "staker B was never staked") + Fatal(t, "staker B was never staked") } } diff --git a/system_tests/state_fuzz_test.go b/system_tests/state_fuzz_test.go index 4d46ffc930..a8209499df 100644 --- a/system_tests/state_fuzz_test.go +++ b/system_tests/state_fuzz_test.go @@ -127,11 +127,17 @@ func FuzzStateTransition(f *testing.F) { if err != nil { panic(err) } + initMessage := &arbostypes.ParsedInitMessage{ + ChainId: chainConfig.ChainID, + InitialL1BaseFee: arbostypes.DefaultInitialL1BaseFee, + ChainConfig: chainConfig, + SerializedChainConfig: serializedChainConfig, + } stateRoot, err := arbosState.InitializeArbosInDatabase( chainDb, statetransfer.NewMemoryInitDataReader(&statetransfer.ArbosInitializationInfo{}), chainConfig, - serializedChainConfig, + initMessage, 0, 0, ) diff --git a/system_tests/transfer_test.go b/system_tests/transfer_test.go index 217c61df00..2e3317907b 100644 --- a/system_tests/transfer_test.go +++ b/system_tests/transfer_test.go @@ -32,6 +32,6 @@ func TestTransfer(t *testing.T) { bal2, err := client.BalanceAt(ctx, l2info.GetAddress("User2"), nil) Require(t, err) if bal2.Cmp(big.NewInt(1e12)) != 0 { - Fail(t, "Unexpected recipient balance: ", bal2) + Fatal(t, "Unexpected recipient balance: ", bal2) } } diff --git a/system_tests/twonodes_test.go b/system_tests/twonodes_test.go index 6fef8ce484..165b01b35a 100644 --- a/system_tests/twonodes_test.go +++ b/system_tests/twonodes_test.go @@ -57,7 +57,7 @@ func testTwoNodesSimple(t *testing.T, dasModeStr string) { Require(t, err) if l2balance.Cmp(big.NewInt(1e12)) != 0 { - Fail(t, "Unexpected balance:", l2balance) + Fatal(t, "Unexpected balance:", l2balance) } } diff --git a/system_tests/twonodeslong_test.go 
b/system_tests/twonodeslong_test.go index e9bf3e8937..c2a5979c8d 100644 --- a/system_tests/twonodeslong_test.go +++ b/system_tests/twonodeslong_test.go @@ -75,7 +75,7 @@ func testTwoNodesLong(t *testing.T, dasModeStr string) { t.Logf("DelayedFaucet has %v, per delayd: %v, baseprice: %v", delayedFaucetBalance, fundsPerDelayed, l2pricing.InitialBaseFeeWei) if avgTotalL1MessagesPerLoop < avgDelayedMessagesPerLoop { - Fail(t, "bad params, avgTotalL1MessagesPerLoop should include avgDelayedMessagesPerLoop") + Fatal(t, "bad params, avgTotalL1MessagesPerLoop should include avgDelayedMessagesPerLoop") } for i := 0; i < largeLoops; i++ { l1TxsThisTime := rand.Int() % (avgTotalL1MessagesPerLoop * 2) @@ -97,7 +97,7 @@ func testTwoNodesLong(t *testing.T, dasModeStr string) { errs := l1backend.TxPool().AddLocals(l1Txs) for _, err := range errs { if err != nil { - Fail(t, err) + Fatal(t, err) } } l2TxsThisTime := rand.Int() % (avgL2MsgsPerLoop * 2) @@ -110,7 +110,7 @@ func testTwoNodesLong(t *testing.T, dasModeStr string) { if len(l1Txs) > 0 { _, err := EnsureTxSucceeded(ctx, l1client, l1Txs[len(l1Txs)-1]) if err != nil { - Fail(t, err) + Fatal(t, err) } } // create bad tx on delayed inbox @@ -129,7 +129,7 @@ func testTwoNodesLong(t *testing.T, dasModeStr string) { t.Log("Done sending", delayedTransfers, "delayed transfers", directTransfers, "direct transfers") if (delayedTransfers + directTransfers) == 0 { - Fail(t, "No transfers sent!") + Fatal(t, "No transfers sent!") } // sending l1 messages creates l1 blocks.. make enough to get that delayed inbox message in @@ -139,11 +139,11 @@ func testTwoNodesLong(t *testing.T, dasModeStr string) { tx = l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil) err := l1client.SendTransaction(ctx, tx) if err != nil { - Fail(t, err) + Fatal(t, err) } _, err = EnsureTxSucceeded(ctx, l1client, tx) if err != nil { - Fail(t, err) + Fatal(t, err) } } } @@ -164,7 +164,7 @@ func testTwoNodesLong(t *testing.T, dasModeStr string) { ownerBalance, _ := l2clientB.BalanceAt(ctx, l2info.GetAddress("Owner"), nil) delayedFaucetBalance, _ := l2clientB.BalanceAt(ctx, l2info.GetAddress("DelayedFaucet"), nil) t.Error("owner balance", ownerBalance, "delayed faucet", delayedFaucetBalance) - Fail(t, "Unexpected balance") + Fatal(t, "Unexpected balance") } nodeA.StopAndWait() @@ -174,7 +174,7 @@ func testTwoNodesLong(t *testing.T, dasModeStr string) { Require(t, err) timeout := getDeadlineTimeout(t, time.Minute*30) if !nodeB.BlockValidator.WaitForBlock(ctx, lastBlockHeader.Number.Uint64(), timeout) { - Fail(t, "did not validate all blocks") + Fatal(t, "did not validate all blocks") } } } diff --git a/util/headerreader/header_reader.go b/util/headerreader/header_reader.go index 1f501ed1c8..9fc563c194 100644 --- a/util/headerreader/header_reader.go +++ b/util/headerreader/header_reader.go @@ -5,6 +5,7 @@ package headerreader import ( "context" + "errors" "fmt" "math/big" "sync" @@ -19,7 +20,6 @@ import ( "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/util/stopwaiter" - "github.com/pkg/errors" flag "github.com/spf13/pflag" ) @@ -311,7 +311,7 @@ func (s *HeaderReader) logIfHeaderIsOld() { l1Timetamp := time.Unix(int64(storedHeader.Time), 0) headerTime := time.Since(l1Timetamp) if headerTime >= s.config().OldHeaderTimeout { - s.setError(errors.Errorf("latest header is at least %v old", headerTime)) + s.setError(fmt.Errorf("latest header is at least %v old", headerTime)) log.Warn( "latest L1 block is old", 
"l1Block", storedHeader.Number, "l1Timestamp", l1Timetamp, "age", headerTime, diff --git a/util/rpcclient/rpcclient.go b/util/rpcclient/rpcclient.go index 77683b6af3..2d43ded0d7 100644 --- a/util/rpcclient/rpcclient.go +++ b/util/rpcclient/rpcclient.go @@ -33,6 +33,10 @@ type ClientConfig struct { } func (c *ClientConfig) Validate() error { + if c.RetryErrors == "" { + c.retryErrors = nil + return nil + } var err error c.retryErrors, err = regexp.Compile(c.RetryErrors) return err @@ -143,7 +147,8 @@ func (c *RpcClient) CallContext(ctx_in context.Context, result interface{}, meth if errors.Is(err, context.DeadlineExceeded) { continue } - if c.config().retryErrors.MatchString(err.Error()) { + retryErrs := c.config().retryErrors + if retryErrs != nil && retryErrs.MatchString(err.Error()) { continue } return err diff --git a/util/rpcclient/rpcclient_test.go b/util/rpcclient/rpcclient_test.go index 2eba61f95e..1b44b9479e 100644 --- a/util/rpcclient/rpcclient_test.go +++ b/util/rpcclient/rpcclient_test.go @@ -96,7 +96,7 @@ func TestRpcClientRetry(t *testing.T) { URL: "self", Timeout: time.Second * 5, Retries: 2, - RetryErrors: "b.*", + RetryErrors: "", } Require(t, config.Validate()) configFetcher := func() *ClientConfig { return config } @@ -157,6 +157,23 @@ func TestRpcClientRetry(t *testing.T) { if err == nil { Fail(t, "no error for failAtFirst") } + + noMatchconfig := &ClientConfig{ + URL: "self", + Timeout: time.Second * 5, + Retries: 2, + RetryErrors: "b.*", + } + Require(t, config.Validate()) + noMatchFetcher := func() *ClientConfig { return noMatchconfig } + serverWorkWithRetry2 := createTestNode(t, ctx, 1) + clientNoMatch := NewRpcClient(noMatchFetcher, serverWorkWithRetry2) + err = clientNoMatch.Start(ctx) + Require(t, err) + err = clientNoMatch.CallContext(ctx, nil, "test_failAtFirst") + if err == nil { + Fail(t, "no error for failAtFirst") + } } func Require(t *testing.T, err error, printables ...interface{}) { diff --git a/util/testhelpers/testhelpers.go b/util/testhelpers/testhelpers.go index 6bc40e0b5a..bccc269171 100644 --- a/util/testhelpers/testhelpers.go +++ b/util/testhelpers/testhelpers.go @@ -4,7 +4,7 @@ package testhelpers import ( - "math/rand" + "crypto/rand" "os" "regexp" "sync" diff --git a/validator/server_api/valiation_api.go b/validator/server_api/valiation_api.go index c8a276536b..9e5191ec81 100644 --- a/validator/server_api/valiation_api.go +++ b/validator/server_api/valiation_api.go @@ -60,7 +60,6 @@ type ExecServerAPI struct { } func NewExecutionServerAPI(valSpawner validator.ValidationSpawner, execution validator.ExecutionSpawner, config server_arb.ArbitratorSpawnerConfigFecher) *ExecServerAPI { - rand.Seed(time.Now().UnixNano()) return &ExecServerAPI{ ValidationServerAPI: *NewValidationServerAPI(valSpawner), execSpawner: execution, diff --git a/validator/server_arb/machine.go b/validator/server_arb/machine.go index 8dccfa9cab..3101362782 100644 --- a/validator/server_arb/machine.go +++ b/validator/server_arb/machine.go @@ -12,6 +12,8 @@ ResolvedPreimage preimageResolverC(size_t context, const uint8_t* hash); import "C" import ( "context" + "errors" + "fmt" "runtime" "sync" "sync/atomic" @@ -20,7 +22,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/validator" - "github.com/pkg/errors" ) type MachineInterface interface { @@ -85,7 +86,7 @@ func LoadSimpleMachine(wasm string, libraries []string) (*ArbitratorMachine, err C.free(unsafe.Pointer(cWasm)) FreeCStringList(cLibraries, 
len(libraries)) if mach == nil { - return nil, errors.Errorf("failed to load simple machine at path %v", wasm) + return nil, fmt.Errorf("failed to load simple machine at path %v", wasm) } return machineFromPointer(mach), nil } diff --git a/validator/server_arb/machine_cache.go b/validator/server_arb/machine_cache.go index e16874e250..23fcdef6d6 100644 --- a/validator/server_arb/machine_cache.go +++ b/validator/server_arb/machine_cache.go @@ -5,10 +5,10 @@ package server_arb import ( "context" + "errors" "fmt" "sync" - "github.com/pkg/errors" flag "github.com/spf13/pflag" ) @@ -282,7 +282,7 @@ func (c *MachineCache) GetMachineAt(ctx context.Context, stepCount uint64) (Mach return nil, err } if !closestMachine.ValidForStep(stepCount) { - return nil, errors.Errorf("internal error: got machine with wrong step count %v looking for step count %v", closestMachine.GetStepCount(), stepCount) + return nil, fmt.Errorf("internal error: got machine with wrong step count %v looking for step count %v", closestMachine.GetStepCount(), stepCount) } c.setLastMachine(closestMachine) return closestMachine, nil diff --git a/validator/server_jit/jit_machine.go b/validator/server_jit/jit_machine.go index 5d3200d7c9..394dae9c9d 100644 --- a/validator/server_jit/jit_machine.go +++ b/validator/server_jit/jit_machine.go @@ -6,6 +6,7 @@ package server_jit import ( "context" "encoding/binary" + "errors" "fmt" "io" "net" @@ -17,7 +18,6 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/validator" - "github.com/pkg/errors" ) type JitMachine struct { diff --git a/wsbroadcastserver/clientmanager.go b/wsbroadcastserver/clientmanager.go index e9fe920363..f140e6254f 100644 --- a/wsbroadcastserver/clientmanager.go +++ b/wsbroadcastserver/clientmanager.go @@ -20,7 +20,6 @@ import ( "github.com/gobwas/ws/wsflate" "github.com/gobwas/ws/wsutil" "github.com/mailru/easygo/netpoll" - "github.com/pkg/errors" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" @@ -270,7 +269,7 @@ func serializeMessage(cm *ClientManager, bm interface{}, enableNonCompressedOutp var err error cm.flateWriter, err = flate.NewWriterDict(nil, DeflateCompressionLevel, GetStaticCompressorDictionary()) if err != nil { - return bytes.Buffer{}, bytes.Buffer{}, errors.Wrap(err, "unable to create flate writer") + return bytes.Buffer{}, bytes.Buffer{}, fmt.Errorf("unable to create flate writer: %w", err) } } compressedWriter = wsutil.NewWriter(&compressed, ws.StateServerSide|ws.StateExtended, ws.OpText) @@ -284,19 +283,19 @@ func serializeMessage(cm *ClientManager, bm interface{}, enableNonCompressedOutp multiWriter := io.MultiWriter(writers...) 
encoder := json.NewEncoder(multiWriter) if err := encoder.Encode(bm); err != nil { - return bytes.Buffer{}, bytes.Buffer{}, errors.Wrap(err, "unable to encode message") + return bytes.Buffer{}, bytes.Buffer{}, fmt.Errorf("unable to encode message: %w", err) } if notCompressedWriter != nil { if err := notCompressedWriter.Flush(); err != nil { - return bytes.Buffer{}, bytes.Buffer{}, errors.Wrap(err, "unable to flush message") + return bytes.Buffer{}, bytes.Buffer{}, fmt.Errorf("unable to flush message: %w", err) } } if compressedWriter != nil { if err := cm.flateWriter.Close(); err != nil { - return bytes.Buffer{}, bytes.Buffer{}, errors.Wrap(err, "unable to close flate writer") + return bytes.Buffer{}, bytes.Buffer{}, fmt.Errorf("unable to close flate writer: %w", err) } if err := compressedWriter.Flush(); err != nil { - return bytes.Buffer{}, bytes.Buffer{}, errors.Wrap(err, "unable to flush message") + return bytes.Buffer{}, bytes.Buffer{}, fmt.Errorf("unable to flush message: %w", err) } } return notCompressed, compressed, nil diff --git a/wsbroadcastserver/utils.go b/wsbroadcastserver/utils.go index 7983f58f57..9df1d7d9ca 100644 --- a/wsbroadcastserver/utils.go +++ b/wsbroadcastserver/utils.go @@ -52,9 +52,8 @@ func (cr *chainedReader) Read(b []byte) (n int, err error) { // If this isn't the last reader, return the data without the EOF since this // may not be the end of all the readers. return n, nil - } else { - return } + return } } break diff --git a/wsbroadcastserver/wsbroadcastserver.go b/wsbroadcastserver/wsbroadcastserver.go index daaad730a1..913eae81f3 100644 --- a/wsbroadcastserver/wsbroadcastserver.go +++ b/wsbroadcastserver/wsbroadcastserver.go @@ -5,6 +5,7 @@ package wsbroadcastserver import ( "context" + "errors" "fmt" "net" "net/http" @@ -19,7 +20,6 @@ import ( "github.com/gobwas/ws-examples/src/gopool" "github.com/gobwas/ws/wsflate" "github.com/mailru/easygo/netpoll" - "github.com/pkg/errors" flag "github.com/spf13/pflag" "github.com/ethereum/go-ethereum/log" @@ -468,7 +468,7 @@ func (s *WSBroadcastServer) StartWithHeader(ctx context.Context, header ws.Hands s.acceptDescMutex.Unlock() if err != nil { log.Warn("error in poller.Resume", "err", err) - s.fatalErrChan <- errors.Wrap(err, "error in poller.Resume") + s.fatalErrChan <- fmt.Errorf("error in poller.Resume: %w", err) return } }) diff --git a/zeroheavy/zeroheavy.go b/zeroheavy/zeroheavy.go index 447b79bd8c..0b0867f636 100644 --- a/zeroheavy/zeroheavy.go +++ b/zeroheavy/zeroheavy.go @@ -70,45 +70,43 @@ func (enc *ZeroheavyEncoder) readOneImpl() (byte, error) { } if !secondBit { return 0, nil - } else { - ret := byte(1) - for i := 0; i < 6; i++ { - nextBit, err := enc.nextInputBit() - if err != nil { - return 0, err - } - ret <<= 1 - if nextBit { - ret++ - } - } - if ret == 64 { - return 1, nil - } - ret = (ret << 1) & 0x7f + } + ret := byte(1) + for i := 0; i < 6; i++ { nextBit, err := enc.nextInputBit() if err != nil { return 0, err } + ret <<= 1 if nextBit { ret++ } - return ret, nil } - } else { - ret := byte(1) // first bit is 1 - for i := 0; i < 7; i++ { - ret <<= 1 - nextBit, err := enc.nextInputBit() - if err != nil { - return 0, err - } - if nextBit { - ret += 1 - } + if ret == 64 { + return 1, nil + } + ret = (ret << 1) & 0x7f + nextBit, err := enc.nextInputBit() + if err != nil { + return 0, err + } + if nextBit { + ret++ } return ret, nil } + ret := byte(1) // first bit is 1 + for i := 0; i < 7; i++ { + ret <<= 1 + nextBit, err := enc.nextInputBit() + if err != nil { + return 0, err + } + if nextBit { + 
ret += 1 + } + } + return ret, nil } func (enc *ZeroheavyEncoder) Read(p []byte) (int, error) {