diff --git a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml index e11f5511a1..ae25171859 100644 --- a/.github/workflows/arbitrator-ci.yml +++ b/.github/workflows/arbitrator-ci.yml @@ -24,7 +24,7 @@ jobs: runs-on: ubuntu-8 steps: - name: Checkout - uses: actions/checkout@v1 + uses: actions/checkout@v3 with: submodules: recursive @@ -36,12 +36,12 @@ jobs: sudo ln -s /usr/bin/wasm-ld-14 /usr/local/bin/wasm-ld - name: Install go - uses: actions/setup-go@v2 + uses: actions/setup-go@v4 with: go-version: 1.20.x - name: Setup nodejs - uses: actions/setup-node@v2 + uses: actions/setup-node@v3 with: node-version: '16' cache: 'yarn' @@ -119,7 +119,7 @@ jobs: - name: Setup emsdk if: steps.cache-cbrotli.outputs.cache-hit != 'true' - uses: mymindstorm/setup-emsdk@v11 + uses: mymindstorm/setup-emsdk@v12 with: # Make sure to set a version number! version: 3.1.6 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c77ef3b770..d2f5765d72 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -28,7 +28,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: submodules: true @@ -36,14 +36,14 @@ jobs: run: sudo apt update && sudo apt install -y wabt gotestsum - name: Setup nodejs - uses: actions/setup-node@v2 + uses: actions/setup-node@v3 with: node-version: '16' cache: 'yarn' cache-dependency-path: '**/yarn.lock' - name: Install go - uses: actions/setup-go@v2 + uses: actions/setup-go@v4 with: go-version: 1.20.x diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 92411d17e2..1b2e92754a 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -59,14 +59,14 @@ jobs: config-file: ./.github/codeql/codeql-config.yml - name: Setup nodejs - uses: actions/setup-node@v2 + uses: actions/setup-node@v3 with: node-version: '16' cache: 'yarn' cache-dependency-path: '**/yarn.lock' - name: Install go - uses: 
actions/setup-go@v2 + uses: actions/setup-go@v4 with: go-version: 1.20.x diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index cf5fdd5ca9..d391dd4096 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -22,7 +22,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: submodules: recursive @@ -65,7 +65,7 @@ jobs: # We work around this by piping a tarball through stdout docker run --rm --entrypoint tar localhost:5000/nitro-node-dev:latest -cf - target/machines/latest | tar xf - module_root="$(cat "target/machines/latest/module-root.txt")" - echo "::set-output name=module-root::$module_root" + echo "module-root=$module_root" >> "$GITHUB_OUTPUT" echo -e "\x1b[1;34mWAVM module root:\x1b[0m $module_root" - name: Upload WAVM machine as artifact diff --git a/Makefile b/Makefile index 4b6928151f..96f15c20ce 100644 --- a/Makefile +++ b/Makefile @@ -5,6 +5,7 @@ # have to update an existing file. So - for docker, convert all dependencies # to order-only dependencies (timestamps ignored). 
# WARNING: when using this trick, you cannot use the $< automatic variable + ifeq ($(origin NITRO_BUILD_IGNORE_TIMESTAMPS),undefined) DEP_PREDICATE:= ORDER_ONLY_PREDICATE:=| diff --git a/arbitrator/jit/src/runtime.rs b/arbitrator/jit/src/runtime.rs index 4d83fbbe6d..d547a06553 100644 --- a/arbitrator/jit/src/runtime.rs +++ b/arbitrator/jit/src/runtime.rs @@ -39,20 +39,20 @@ pub fn wasm_write(mut env: WasmEnvMut, sp: u32) { } pub fn nanotime1(mut env: WasmEnvMut, sp: u32) { - let (sp, mut env) = GoStack::new(sp, &mut env); + let (sp, env) = GoStack::new(sp, &mut env); env.go_state.time += env.go_state.time_interval; sp.write_u64(0, env.go_state.time); } pub fn walltime(mut env: WasmEnvMut, sp: u32) { - let (sp, mut env) = GoStack::new(sp, &mut env); + let (sp, env) = GoStack::new(sp, &mut env); env.go_state.time += env.go_state.time_interval; sp.write_u64(0, env.go_state.time / 1_000_000_000); sp.write_u32(1, (env.go_state.time % 1_000_000_000) as u32); } pub fn walltime1(mut env: WasmEnvMut, sp: u32) { - let (sp, mut env) = GoStack::new(sp, &mut env); + let (sp, env) = GoStack::new(sp, &mut env); env.go_state.time += env.go_state.time_interval; sp.write_u64(0, env.go_state.time / 1_000_000_000); sp.write_u64(1, env.go_state.time % 1_000_000_000); diff --git a/arbitrator/wasm-libraries/go-stub/src/lib.rs b/arbitrator/wasm-libraries/go-stub/src/lib.rs index 8be1ff48e6..df77893fcb 100644 --- a/arbitrator/wasm-libraries/go-stub/src/lib.rs +++ b/arbitrator/wasm-libraries/go-stub/src/lib.rs @@ -587,7 +587,10 @@ pub unsafe extern "C" fn wavm__go_after_run() { while let Some(info) = state.times.pop() { while state.pending_ids.contains(&info.id) { TIME = std::cmp::max(TIME, info.time); - drop(state); + // Important: the current reference to state shouldn't be used after this resume call, + // as the reference might be invalidated during the resume call. 
+ // That's why immediately after this resume call, we replace the reference + // with a new reference to TIMEOUT_STATE. wavm_guest_call__resume(); state = TIMEOUT_STATE.get_or_insert_with(Default::default); } diff --git a/cmd/chaininfo/arbitrum_chain_info.json b/cmd/chaininfo/arbitrum_chain_info.json index 01b60e9c05..5352f9760f 100644 --- a/cmd/chaininfo/arbitrum_chain_info.json +++ b/cmd/chaininfo/arbitrum_chain_info.json @@ -1,6 +1,5 @@ [ { - "chain-id": 42161, "chain-name": "arb1", "parent-chain-id": 1, "sequencer-url": "https://arb1-sequencer.arbitrum.io/rpc", @@ -50,7 +49,6 @@ } }, { - "chain-id": 42170, "chain-name": "nova", "parent-chain-id": 1, "sequencer-url": "https://nova.arbitrum.io/rpc", @@ -100,7 +98,6 @@ } }, { - "chain-id": 421613, "chain-name": "goerli-rollup", "parent-chain-id": 5, "sequencer-url": "https://goerli-rollup.arbitrum.io/rpc", @@ -149,7 +146,6 @@ } }, { - "chain-id": 412346, "chain-name": "arb-dev-test", "chain-config": { @@ -185,7 +181,6 @@ } }, { - "chain-id": 412347, "chain-name": "anytrust-dev-test", "chain-config": { diff --git a/cmd/chaininfo/chain_info.go b/cmd/chaininfo/chain_info.go index 46e7ada966..c9ffca9830 100644 --- a/cmd/chaininfo/chain_info.go +++ b/cmd/chaininfo/chain_info.go @@ -18,7 +18,6 @@ import ( var DefaultChainInfo []byte type ChainInfo struct { - ChainId uint64 `json:"chain-id"` ChainName string `json:"chain-name"` ParentChainId uint64 `json:"parent-chain-id"` // This is the forwarding target to submit transactions to, called the sequencer URL for clarity @@ -94,7 +93,7 @@ func findChainInfo(chainId uint64, chainName string, chainsInfoBytes []byte) (*C return nil, err } for _, chainInfo := range chainsInfo { - if (chainId == 0 || chainInfo.ChainId == chainId) && (chainName == "" || chainInfo.ChainName == chainName) { + if (chainId == 0 || chainInfo.ChainConfig.ChainID.Uint64() == chainId) && (chainName == "" || chainInfo.ChainName == chainName) { return &chainInfo, nil } } diff --git a/cmd/datool/datool.go 
b/cmd/datool/datool.go index 522e021ee1..6f975ec712 100644 --- a/cmd/datool/datool.go +++ b/cmd/datool/datool.go @@ -16,6 +16,7 @@ import ( "strings" "time" + koanfjson "github.com/knadh/koanf/parsers/json" flag "github.com/spf13/pflag" "github.com/ethereum/go-ethereum/common" @@ -34,7 +35,7 @@ import ( func main() { args := os.Args if len(args) < 2 { - panic("Usage: datool [client|keygen|generatehash] ...") + panic("Usage: datool [client|keygen|generatehash|dumpkeyset] ...") } var err error @@ -45,6 +46,8 @@ func main() { err = startKeyGen(args[2:]) case "generatehash": err = generateHash(args[2]) + case "dumpkeyset": + err = dumpKeyset(args[2:]) default: panic(fmt.Sprintf("Unknown tool '%s' specified, valid tools are 'client', 'keygen', 'generatehash'", args[1])) } @@ -313,3 +316,68 @@ func generateHash(message string) error { fmt.Printf("Hex Encoded Data Hash: %s\n", hexutil.Encode(dastree.HashBytes([]byte(message)))) return nil } + +func parseDumpKeyset(args []string) (*DumpKeysetConfig, error) { + f := flag.NewFlagSet("dump keyset", flag.ContinueOnError) + + das.AggregatorConfigAddOptions("keyset", f) + genericconf.ConfConfigAddOptions("conf", f) + + k, err := confighelpers.BeginCommonParse(f, args) + if err != nil { + return nil, err + } + + var config DumpKeysetConfig + if err := confighelpers.EndCommonParse(k, &config); err != nil { + return nil, err + } + + if config.ConfConfig.Dump { + c, err := k.Marshal(koanfjson.Parser()) + if err != nil { + return nil, fmt.Errorf("unable to marshal config file to JSON: %w", err) + } + + fmt.Println(string(c)) + os.Exit(0) + } + + if config.KeysetConfig.AssumedHonest == 0 { + return nil, errors.New("--keyset.assumed-honest must be set") + } + if config.KeysetConfig.Backends == "" { + return nil, errors.New("--keyset.backends must be set") + } + + return &config, nil +} + +// das dumpkeyset + +type DumpKeysetConfig struct { + KeysetConfig das.AggregatorConfig `koanf:"keyset"` + ConfConfig genericconf.ConfConfig 
`koanf:"conf"` +} + +func dumpKeyset(args []string) error { + config, err := parseDumpKeyset(args) + if err != nil { + return err + } + + services, err := das.ParseServices(config.KeysetConfig) + if err != nil { + return err + } + + keysetHash, keysetBytes, err := das.KeysetHashFromServices(services, uint64(config.KeysetConfig.AssumedHonest)) + if err != nil { + return err + } + + fmt.Printf("Keyset: %s\n", hexutil.Encode(keysetBytes)) + fmt.Printf("KeysetHash: %s\n", hexutil.Encode(keysetHash[:])) + + return err +} diff --git a/cmd/deploy/deploy.go b/cmd/deploy/deploy.go index 91775ced25..5cecb7d2ad 100644 --- a/cmd/deploy/deploy.go +++ b/cmd/deploy/deploy.go @@ -148,7 +148,6 @@ func main() { } chainsInfo := []chaininfo.ChainInfo{ { - ChainId: chainConfig.ChainID.Uint64(), ChainName: *l2ChainName, ParentChainId: l1ChainId.Uint64(), ChainConfig: &chainConfig, diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index f1af1388cf..ae46ff6678 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -374,12 +374,18 @@ func mainImpl() int { return 1 } + var deferFuncs []func() + defer func() { + for i := range deferFuncs { + deferFuncs[i]() + } + }() + chainDb, l2BlockChain, err := openInitializeChainDb(ctx, stack, nodeConfig, new(big.Int).SetUint64(nodeConfig.L2.ChainID), execution.DefaultCacheConfigFor(stack, &nodeConfig.Node.Caching), l1Client, rollupAddrs) - defer closeDb(chainDb, "chainDb") if l2BlockChain != nil { - // Calling Stop on the blockchain multiple times does nothing - defer l2BlockChain.Stop() + deferFuncs = append(deferFuncs, func() { l2BlockChain.Stop() }) } + deferFuncs = append(deferFuncs, func() { closeDb(chainDb, "chainDb") }) if err != nil { flag.Usage() log.Error("error initializing database", "err", err) @@ -387,7 +393,7 @@ func mainImpl() int { } arbDb, err := stack.OpenDatabase("arbitrumdata", 0, 0, "", false) - defer closeDb(arbDb, "arbDb") + deferFuncs = append(deferFuncs, func() { closeDb(arbDb, "arbDb") }) if err != nil { 
log.Error("failed to open database", "err", err) return 1 @@ -479,7 +485,8 @@ func mainImpl() int { if err != nil { fatalErrChan <- fmt.Errorf("error starting node: %w", err) } - defer currentNode.StopAndWait() + // remove previous deferFuncs, StopAndWait closes database and blockchain. + deferFuncs = []func(){func() { currentNode.StopAndWait() }} } sigint := make(chan os.Signal, 1) @@ -731,7 +738,7 @@ func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, c } chainDefaults := map[string]interface{}{ "persistent.chain": chainInfo.ChainName, - "chain.id": chainInfo.ChainId, + "chain.id": chainInfo.ChainConfig.ChainID.Uint64(), "parent-chain.id": chainInfo.ParentChainId, } if chainInfo.SequencerUrl != "" { diff --git a/das/aggregator.go b/das/aggregator.go index 7c1504d6f1..33ce5ad489 100644 --- a/das/aggregator.go +++ b/das/aggregator.go @@ -9,13 +9,11 @@ import ( "errors" "fmt" "math/bits" - "os" "time" flag "github.com/spf13/pflag" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" @@ -32,13 +30,11 @@ type AggregatorConfig struct { Enable bool `koanf:"enable"` AssumedHonest int `koanf:"assumed-honest"` Backends string `koanf:"backends"` - DumpKeyset bool `koanf:"dump-keyset"` } var DefaultAggregatorConfig = AggregatorConfig{ AssumedHonest: 0, Backends: "", - DumpKeyset: false, } var BatchToDasFailed = errors.New("unable to batch to DAS") @@ -47,7 +43,6 @@ func AggregatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultAggregatorConfig.Enable, "enable storage/retrieval of sequencer batch data from a list of RPC endpoints; this should only be used by the batch poster and not in combination with other DAS storage types") f.Int(prefix+".assumed-honest", DefaultAggregatorConfig.AssumedHonest, "Number of assumed honest backends (H). 
If there are N backends, K=N+1-H valid responses are required to consider an Store request to be successful.") f.String(prefix+".backends", DefaultAggregatorConfig.Backends, "JSON RPC backend configuration") - f.Bool(prefix+".dump-keyset", DefaultAggregatorConfig.DumpKeyset, "Dump the keyset encoded in hexadecimal for the backends string") } type Aggregator struct { @@ -122,36 +117,11 @@ func NewAggregatorWithSeqInboxCaller( services []ServiceDetails, seqInboxCaller *bridgegen.SequencerInboxCaller, ) (*Aggregator, error) { - var aggSignersMask uint64 - pubKeys := []blsSignatures.PublicKey{} - for _, d := range services { - if bits.OnesCount64(d.signersMask) != 1 { - return nil, fmt.Errorf("tried to configure backend DAS %v with invalid signersMask %X", d.service, d.signersMask) - } - aggSignersMask |= d.signersMask - pubKeys = append(pubKeys, d.pubKey) - } - if bits.OnesCount64(aggSignersMask) != len(services) { - return nil, errors.New("at least two signers share a mask") - } - keyset := &arbstate.DataAvailabilityKeyset{ - AssumedHonest: uint64(config.AggregatorConfig.AssumedHonest), - PubKeys: pubKeys, - } - ksBuf := bytes.NewBuffer([]byte{}) - if err := keyset.Serialize(ksBuf); err != nil { - return nil, err - } - keysetHash, err := keyset.Hash() + keysetHash, keysetBytes, err := KeysetHashFromServices(services, uint64(config.AggregatorConfig.AssumedHonest)) if err != nil { return nil, err } - if config.AggregatorConfig.DumpKeyset { - fmt.Printf("Keyset: %s\n", hexutil.Encode(ksBuf.Bytes())) - fmt.Printf("KeysetHash: %s\n", hexutil.Encode(keysetHash[:])) - os.Exit(0) - } var bpVerifier *contracts.BatchPosterVerifier if seqInboxCaller != nil { @@ -165,7 +135,7 @@ func NewAggregatorWithSeqInboxCaller( requiredServicesForStore: len(services) + 1 - config.AggregatorConfig.AssumedHonest, maxAllowedServiceStoreFailures: config.AggregatorConfig.AssumedHonest - 1, keysetHash: keysetHash, - keysetBytes: ksBuf.Bytes(), + keysetBytes: keysetBytes, bpVerifier: bpVerifier, 
}, nil } diff --git a/das/rpc_aggregator.go b/das/rpc_aggregator.go index ff5f4aedb8..cc455250d3 100644 --- a/das/rpc_aggregator.go +++ b/das/rpc_aggregator.go @@ -4,10 +4,16 @@ package das import ( + "bytes" "context" "encoding/json" + "errors" + "fmt" + "math/bits" "net/url" + "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/util/metricsutil" @@ -22,7 +28,7 @@ type BackendConfig struct { } func NewRPCAggregator(ctx context.Context, config DataAvailabilityConfig) (*Aggregator, error) { - services, err := setUpServices(config) + services, err := ParseServices(config.AggregatorConfig) if err != nil { return nil, err } @@ -30,7 +36,7 @@ func NewRPCAggregator(ctx context.Context, config DataAvailabilityConfig) (*Aggr } func NewRPCAggregatorWithL1Info(config DataAvailabilityConfig, l1client arbutil.L1Interface, seqInboxAddress common.Address) (*Aggregator, error) { - services, err := setUpServices(config) + services, err := ParseServices(config.AggregatorConfig) if err != nil { return nil, err } @@ -38,16 +44,16 @@ func NewRPCAggregatorWithL1Info(config DataAvailabilityConfig, l1client arbutil. 
} func NewRPCAggregatorWithSeqInboxCaller(config DataAvailabilityConfig, seqInboxCaller *bridgegen.SequencerInboxCaller) (*Aggregator, error) { - services, err := setUpServices(config) + services, err := ParseServices(config.AggregatorConfig) if err != nil { return nil, err } return NewAggregatorWithSeqInboxCaller(config, services, seqInboxCaller) } -func setUpServices(config DataAvailabilityConfig) ([]ServiceDetails, error) { +func ParseServices(config AggregatorConfig) ([]ServiceDetails, error) { var cs []BackendConfig - err := json.Unmarshal([]byte(config.AggregatorConfig.Backends), &cs) + err := json.Unmarshal([]byte(config.Backends), &cs) if err != nil { return nil, err } @@ -81,3 +87,33 @@ func setUpServices(config DataAvailabilityConfig) ([]ServiceDetails, error) { return services, nil } + +func KeysetHashFromServices(services []ServiceDetails, assumedHonest uint64) ([32]byte, []byte, error) { + var aggSignersMask uint64 + pubKeys := []blsSignatures.PublicKey{} + for _, d := range services { + if bits.OnesCount64(d.signersMask) != 1 { + return [32]byte{}, nil, fmt.Errorf("tried to configure backend DAS %v with invalid signersMask %X", d.service, d.signersMask) + } + aggSignersMask |= d.signersMask + pubKeys = append(pubKeys, d.pubKey) + } + if bits.OnesCount64(aggSignersMask) != len(services) { + return [32]byte{}, nil, errors.New("at least two signers share a mask") + } + + keyset := &arbstate.DataAvailabilityKeyset{ + AssumedHonest: uint64(assumedHonest), + PubKeys: pubKeys, + } + ksBuf := bytes.NewBuffer([]byte{}) + if err := keyset.Serialize(ksBuf); err != nil { + return [32]byte{}, nil, err + } + keysetHash, err := keyset.Hash() + if err != nil { + return [32]byte{}, nil, err + } + + return keysetHash, ksBuf.Bytes(), nil +} diff --git a/system_tests/arbtrace_test.go b/system_tests/arbtrace_test.go index d36b9b2950..78907aa622 100644 --- a/system_tests/arbtrace_test.go +++ b/system_tests/arbtrace_test.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" 
"errors" - "path/filepath" "testing" "time" @@ -132,7 +131,7 @@ func (s *ArbTraceAPIStub) Filter(ctx context.Context, filter *filterRequest) ([] func TestArbTraceForwarding(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - ipcPath := filepath.Join(t.TempDir(), "redirect.ipc") + ipcPath := tmpPath(t, "redirect.ipc") var apis []rpc.API apis = append(apis, rpc.API{ Namespace: "arbtrace", diff --git a/system_tests/forwarder_test.go b/system_tests/forwarder_test.go index d4cf0d8eb7..0e5cca319a 100644 --- a/system_tests/forwarder_test.go +++ b/system_tests/forwarder_test.go @@ -32,7 +32,7 @@ const nodesCount = 5 // number of testnodes to create in tests func TestStaticForwarder(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - ipcPath := filepath.Join(t.TempDir(), "test.ipc") + ipcPath := tmpPath(t, "test.ipc") ipcConfig := genericconf.IPCConfigDefault ipcConfig.Path = ipcPath stackConfig := stackConfigForTest(t) diff --git a/system_tests/retryable_test.go b/system_tests/retryable_test.go index 5e4bca1a64..fa049e4d98 100644 --- a/system_tests/retryable_test.go +++ b/system_tests/retryable_test.go @@ -52,30 +52,38 @@ func retryableSetup(t *testing.T) ( delayedBridge, err := arbnode.NewDelayedBridge(l1client, l1info.GetAddress("Bridge"), 0) Require(t, err) - lookupSubmitRetryableL2TxHash := func(l1Receipt *types.Receipt) common.Hash { + lookupL2Hash := func(l1Receipt *types.Receipt) common.Hash { messages, err := delayedBridge.LookupMessagesInRange(ctx, l1Receipt.BlockNumber, l1Receipt.BlockNumber, nil) Require(t, err) if len(messages) == 0 { - Fatal(t, "didn't find message for retryable submission") + Fatal(t, "didn't find message for submission") } var submissionTxs []*types.Transaction + msgTypes := map[uint8]bool{ + arbostypes.L1MessageType_SubmitRetryable: true, + arbostypes.L1MessageType_EthDeposit: true, + arbostypes.L1MessageType_L2Message: true, + } + txTypes := map[uint8]bool{ + 
types.ArbitrumSubmitRetryableTxType: true, + types.ArbitrumDepositTxType: true, + types.ArbitrumContractTxType: true, + } for _, message := range messages { - k := message.Message.Header.Kind - if k != arbostypes.L1MessageType_SubmitRetryable && k != arbostypes.L1MessageType_EthDeposit { + if !msgTypes[message.Message.Header.Kind] { continue } txs, err := arbos.ParseL2Transactions(message.Message, params.ArbitrumDevTestChainConfig().ChainID, nil) Require(t, err) for _, tx := range txs { - if tx.Type() == types.ArbitrumSubmitRetryableTxType || tx.Type() == types.ArbitrumDepositTxType { + if txTypes[tx.Type()] { submissionTxs = append(submissionTxs, tx) } } } if len(submissionTxs) != 1 { - Fatal(t, "expected 1 tx from retryable submission, found", len(submissionTxs)) + Fatal(t, "expected 1 tx from submission, found", len(submissionTxs)) } - return submissionTxs[0].Hash() } @@ -101,7 +109,7 @@ func retryableSetup(t *testing.T) ( l2node.StopAndWait() requireClose(t, l1stack) } - return l2info, l1info, l2client, l1client, delayedInbox, lookupSubmitRetryableL2TxHash, ctx, teardown + return l2info, l1info, l2client, l1client, delayedInbox, lookupL2Hash, ctx, teardown } func TestRetryableNoExist(t *testing.T) { @@ -445,6 +453,61 @@ func TestDepositETH(t *testing.T) { } } +func TestArbitrumContractTx(t *testing.T) { + l2Info, l1Info, l2Client, l1Client, delayedInbox, lookupL2Hash, ctx, teardown := retryableSetup(t) + defer teardown() + faucetL2Addr := util.RemapL1Address(l1Info.GetAddress("Faucet")) + TransferBalanceTo(t, "Faucet", faucetL2Addr, big.NewInt(1e18), l2Info, l2Client, ctx) + + l2TxOpts := l2Info.GetDefaultTransactOpts("Faucet", ctx) + l2ContractAddr, _ := deploySimple(t, ctx, l2TxOpts, l2Client) + l2ContractABI, err := abi.JSON(strings.NewReader(mocksgen.SimpleABI)) + if err != nil { + t.Fatalf("Error parsing contract ABI: %v", err) + } + data, err := l2ContractABI.Pack("checkCalls", true, true, false, false, false, false) + if err != nil { + t.Fatalf("Error 
packing method's call data: %v", err) + } + unsignedTx := types.NewTx(&types.ArbitrumContractTx{ + ChainId: l2Info.Signer.ChainID(), + From: faucetL2Addr, + GasFeeCap: l2Info.GasPrice.Mul(l2Info.GasPrice, big.NewInt(2)), + Gas: 1e6, + To: &l2ContractAddr, + Value: common.Big0, + Data: data, + }) + txOpts := l1Info.GetDefaultTransactOpts("Faucet", ctx) + l1tx, err := delayedInbox.SendContractTransaction( + &txOpts, + arbmath.UintToBig(unsignedTx.Gas()), + unsignedTx.GasFeeCap(), + *unsignedTx.To(), + unsignedTx.Value(), + unsignedTx.Data(), + ) + if err != nil { + t.Fatalf("Error sending unsigned transaction: %v", err) + } + receipt, err := EnsureTxSucceeded(ctx, l1Client, l1tx) + if err != nil { + t.Fatalf("EnsureTxSucceeded(%v) unexpected error: %v", l1tx.Hash(), err) + } + if receipt.Status != types.ReceiptStatusSuccessful { + t.Errorf("L1 transaction: %v has failed", l1tx.Hash()) + } + waitForL1DelayBlocks(t, ctx, l1Client, l1Info) + txHash := lookupL2Hash(receipt) + receipt, err = WaitForTx(ctx, l2Client, txHash, time.Second*5) + if err != nil { + t.Fatalf("EnsureTxSucceeded(%v) unexpected error: %v", unsignedTx.Hash(), err) + } + if receipt.Status != types.ReceiptStatusSuccessful { + t.Errorf("L2 transaction: %v has failed", receipt.TxHash) + } +} + func TestL1FundedUnsignedTransaction(t *testing.T) { t.Parallel() ctx := context.Background()