From a03711d61f051b096b5359d63ea3a13e4d604bce Mon Sep 17 00:00:00 2001
From: Tristan Wilson
Date: Wed, 14 Jun 2023 10:57:38 -0700
Subject: [PATCH 01/20] Add ability to dump das keyset bytes to datool

---
cmd/datool/datool.go | 67 ++++++++++++++++++++++++++++++++++++++++++-
das/aggregator.go | 34 ++--------------------
das/rpc_aggregator.go | 46 +++++++++++++++++++++++++----
3 files changed, 109 insertions(+), 38 deletions(-)

diff --git a/cmd/datool/datool.go b/cmd/datool/datool.go
index 522e021ee1..8f35369209 100644
--- a/cmd/datool/datool.go
+++ b/cmd/datool/datool.go
@@ -16,6 +16,7 @@ import (
"strings"
"time"
+ koanfjson "github.com/knadh/koanf/parsers/json"
flag "github.com/spf13/pflag"
"github.com/ethereum/go-ethereum/common"
@@ -34,7 +35,7 @@ import (
func main() {
args := os.Args
if len(args) < 2 {
- panic("Usage: datool [client|keygen|generatehash] ...")
+ panic("Usage: datool [client|keygen|generatehash|dumpkeyset] ...")
}
var err error
@@ -45,6 +46,8 @@ func main() {
err = startKeyGen(args[2:])
case "generatehash":
err = generateHash(args[2])
+ case "dumpkeyset":
+ err = dumpKeyset(args[2:])
default:
panic(fmt.Sprintf("Unknown tool '%s' specified, valid tools are 'client', 'keygen', 'generatehash'", args[1]))
}
@@ -313,3 +316,65 @@ func generateHash(message string) error {
fmt.Printf("Hex Encoded Data Hash: %s\n", hexutil.Encode(dastree.HashBytes([]byte(message))))
return nil
}
+
+func parseDumpKeyset(args []string) (*DumpKeysetConfig, error) {
+ f := flag.NewFlagSet("dump keyset", flag.ContinueOnError)
+
+ das.AggregatorConfigAddOptions("keyset", f)
+ genericconf.ConfConfigAddOptions("conf", f)
+
+ k, err := confighelpers.BeginCommonParse(f, args)
+ if err != nil {
+ return nil, err
+ }
+
+ var config DumpKeysetConfig
+ if err := confighelpers.EndCommonParse(k, &config); err != nil {
+ return nil, err
+ }
+
+ if config.ConfConfig.Dump {
+ c, err := k.Marshal(koanfjson.Parser())
+ if err != nil {
+ return nil, fmt.Errorf("unable to marshal config file to JSON: %w", err)
+ }
+
+ fmt.Println(string(c))
+ os.Exit(0)
+ }
+
+ if config.KeysetConfig.AssumedHonest == 0 {
+ return nil, errors.New("--keyset.assumed-honest must be set")
+ }
+ if config.KeysetConfig.Backends == "" {
+ return nil, errors.New("--keyset.backends must be set")
+ }
+
+ return &config, nil
+}
+
+// das dumpkeyset
+
+type DumpKeysetConfig struct {
+ KeysetConfig das.AggregatorConfig `koanf:"keyset"`
+ ConfConfig genericconf.ConfConfig `koanf:"conf"`
+}
+
+func dumpKeyset(args []string) error {
+ config, err := parseDumpKeyset(args)
+
+ services, err := das.ParseServices(config.KeysetConfig)
+ if err != nil {
+ return err
+ }
+
+ keysetHash, keysetBytes, err := das.KeysetHashFromServices(services, uint64(config.KeysetConfig.AssumedHonest))
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("Keyset: %s\n", hexutil.Encode(keysetBytes))
+ fmt.Printf("KeysetHash: %s\n", hexutil.Encode(keysetHash[:]))
+
+ return err
+}
diff --git a/das/aggregator.go b/das/aggregator.go
index 7c1504d6f1..33ce5ad489 100644
--- a/das/aggregator.go
+++ b/das/aggregator.go
@@ -9,13 +9,11 @@ import (
"errors"
"fmt"
"math/bits"
- "os"
"time"
flag "github.com/spf13/pflag"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
@@ -32,13 +30,11 @@ type AggregatorConfig struct {
Enable bool `koanf:"enable"`
AssumedHonest int `koanf:"assumed-honest"`
Backends string `koanf:"backends"`
- DumpKeyset bool `koanf:"dump-keyset"`
}
var
DefaultAggregatorConfig = AggregatorConfig{ AssumedHonest: 0, Backends: "", - DumpKeyset: false, } var BatchToDasFailed = errors.New("unable to batch to DAS") @@ -47,7 +43,6 @@ func AggregatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultAggregatorConfig.Enable, "enable storage/retrieval of sequencer batch data from a list of RPC endpoints; this should only be used by the batch poster and not in combination with other DAS storage types") f.Int(prefix+".assumed-honest", DefaultAggregatorConfig.AssumedHonest, "Number of assumed honest backends (H). If there are N backends, K=N+1-H valid responses are required to consider an Store request to be successful.") f.String(prefix+".backends", DefaultAggregatorConfig.Backends, "JSON RPC backend configuration") - f.Bool(prefix+".dump-keyset", DefaultAggregatorConfig.DumpKeyset, "Dump the keyset encoded in hexadecimal for the backends string") } type Aggregator struct { @@ -122,36 +117,11 @@ func NewAggregatorWithSeqInboxCaller( services []ServiceDetails, seqInboxCaller *bridgegen.SequencerInboxCaller, ) (*Aggregator, error) { - var aggSignersMask uint64 - pubKeys := []blsSignatures.PublicKey{} - for _, d := range services { - if bits.OnesCount64(d.signersMask) != 1 { - return nil, fmt.Errorf("tried to configure backend DAS %v with invalid signersMask %X", d.service, d.signersMask) - } - aggSignersMask |= d.signersMask - pubKeys = append(pubKeys, d.pubKey) - } - if bits.OnesCount64(aggSignersMask) != len(services) { - return nil, errors.New("at least two signers share a mask") - } - keyset := &arbstate.DataAvailabilityKeyset{ - AssumedHonest: uint64(config.AggregatorConfig.AssumedHonest), - PubKeys: pubKeys, - } - ksBuf := bytes.NewBuffer([]byte{}) - if err := keyset.Serialize(ksBuf); err != nil { - return nil, err - } - keysetHash, err := keyset.Hash() + keysetHash, keysetBytes, err := KeysetHashFromServices(services, uint64(config.AggregatorConfig.AssumedHonest)) if err != nil { return nil, err } - if config.AggregatorConfig.DumpKeyset { - fmt.Printf("Keyset: %s\n", hexutil.Encode(ksBuf.Bytes())) - fmt.Printf("KeysetHash: %s\n", hexutil.Encode(keysetHash[:])) - os.Exit(0) - } var bpVerifier *contracts.BatchPosterVerifier if seqInboxCaller != nil { @@ -165,7 +135,7 @@ func NewAggregatorWithSeqInboxCaller( requiredServicesForStore: len(services) + 1 - config.AggregatorConfig.AssumedHonest, maxAllowedServiceStoreFailures: config.AggregatorConfig.AssumedHonest - 1, keysetHash: keysetHash, - keysetBytes: ksBuf.Bytes(), + keysetBytes: keysetBytes, bpVerifier: bpVerifier, }, nil } diff --git a/das/rpc_aggregator.go b/das/rpc_aggregator.go index ff5f4aedb8..cc455250d3 100644 --- a/das/rpc_aggregator.go +++ b/das/rpc_aggregator.go @@ -4,10 +4,16 @@ package das import ( + "bytes" "context" "encoding/json" + "errors" + "fmt" + "math/bits" "net/url" + "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/util/metricsutil" @@ -22,7 +28,7 @@ type BackendConfig struct { } func NewRPCAggregator(ctx context.Context, config DataAvailabilityConfig) (*Aggregator, error) { - services, err := setUpServices(config) + services, err := ParseServices(config.AggregatorConfig) if err != nil { return nil, err } @@ -30,7 +36,7 @@ func NewRPCAggregator(ctx context.Context, config DataAvailabilityConfig) (*Aggr } func NewRPCAggregatorWithL1Info(config DataAvailabilityConfig, l1client arbutil.L1Interface, seqInboxAddress 
common.Address) (*Aggregator, error) { - services, err := setUpServices(config) + services, err := ParseServices(config.AggregatorConfig) if err != nil { return nil, err } @@ -38,16 +44,16 @@ func NewRPCAggregatorWithL1Info(config DataAvailabilityConfig, l1client arbutil. } func NewRPCAggregatorWithSeqInboxCaller(config DataAvailabilityConfig, seqInboxCaller *bridgegen.SequencerInboxCaller) (*Aggregator, error) { - services, err := setUpServices(config) + services, err := ParseServices(config.AggregatorConfig) if err != nil { return nil, err } return NewAggregatorWithSeqInboxCaller(config, services, seqInboxCaller) } -func setUpServices(config DataAvailabilityConfig) ([]ServiceDetails, error) { +func ParseServices(config AggregatorConfig) ([]ServiceDetails, error) { var cs []BackendConfig - err := json.Unmarshal([]byte(config.AggregatorConfig.Backends), &cs) + err := json.Unmarshal([]byte(config.Backends), &cs) if err != nil { return nil, err } @@ -81,3 +87,33 @@ func setUpServices(config DataAvailabilityConfig) ([]ServiceDetails, error) { return services, nil } + +func KeysetHashFromServices(services []ServiceDetails, assumedHonest uint64) ([32]byte, []byte, error) { + var aggSignersMask uint64 + pubKeys := []blsSignatures.PublicKey{} + for _, d := range services { + if bits.OnesCount64(d.signersMask) != 1 { + return [32]byte{}, nil, fmt.Errorf("tried to configure backend DAS %v with invalid signersMask %X", d.service, d.signersMask) + } + aggSignersMask |= d.signersMask + pubKeys = append(pubKeys, d.pubKey) + } + if bits.OnesCount64(aggSignersMask) != len(services) { + return [32]byte{}, nil, errors.New("at least two signers share a mask") + } + + keyset := &arbstate.DataAvailabilityKeyset{ + AssumedHonest: uint64(assumedHonest), + PubKeys: pubKeys, + } + ksBuf := bytes.NewBuffer([]byte{}) + if err := keyset.Serialize(ksBuf); err != nil { + return [32]byte{}, nil, err + } + keysetHash, err := keyset.Hash() + if err != nil { + return [32]byte{}, nil, err + } + + return keysetHash, ksBuf.Bytes(), nil +} From ced9fc5579a3c61562872574e39392e92283c023 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Wed, 14 Jun 2023 12:38:32 -0700 Subject: [PATCH 02/20] Fix unhandled err --- cmd/datool/datool.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cmd/datool/datool.go b/cmd/datool/datool.go index 8f35369209..6f975ec712 100644 --- a/cmd/datool/datool.go +++ b/cmd/datool/datool.go @@ -362,6 +362,9 @@ type DumpKeysetConfig struct { func dumpKeyset(args []string) error { config, err := parseDumpKeyset(args) + if err != nil { + return err + } services, err := das.ParseServices(config.KeysetConfig) if err != nil { From d3cdb0cf769e88bf4d2297d4a6aadff2412ee8a9 Mon Sep 17 00:00:00 2001 From: Nodar Date: Fri, 30 Jun 2023 18:07:29 +0200 Subject: [PATCH 03/20] tmp --- system_tests/retryable_test.go | 70 ++++++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) diff --git a/system_tests/retryable_test.go b/system_tests/retryable_test.go index 7b0c3a7563..14ad55699a 100644 --- a/system_tests/retryable_test.go +++ b/system_tests/retryable_test.go @@ -6,13 +6,17 @@ package arbtest import ( "context" "math/big" + "os" + "strings" "testing" "time" + "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbnode" 
"github.com/offchainlabs/nitro/arbos" @@ -395,3 +399,69 @@ func waitForL1DelayBlocks(t *testing.T, ctx context.Context, l1client *ethclient }) } } + +func TestL1FundedUnsignedTransaction(t *testing.T) { + glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) + glogger.Verbosity(log.LvlTrace) + log.Root().SetHandler(glogger) + t.Parallel() + ctx := context.Background() + l2Info, node, l2Client, l1Info, _, l1Client, l1Stack := createTestNodeOnL1(t, ctx, true) + defer requireClose(t, l1Stack) + defer node.StopAndWait() + + faucetL2Addr := util.RemapL1Address(l1Info.GetAddress("Faucet")) + TransferBalanceTo(t, "Faucet", faucetL2Addr, big.NewInt(1e18), l2Info, l2Client, ctx) + + l2TxOpts := l2Info.GetDefaultTransactOpts("Faucet", ctx) + l2ContractAddr, _ := deploySimple(t, ctx, l2TxOpts, l2Client) + l2ContractABI, err := abi.JSON(strings.NewReader(mocksgen.SimpleABI)) + if err != nil { + t.Fatalf("Error parsing contract ABI: %v", err) + } + data, err := l2ContractABI.Pack("checkCalls", true, true, false, false, false, false) + if err != nil { + t.Fatalf("Error packing method's call data: %v", err) + } + unsignedTx := types.NewTx(&types.ArbitrumContractTx{ + ChainId: l2Info.Signer.ChainID(), + GasFeeCap: l2Info.GasPrice, + Gas: 1e6, + To: &l2ContractAddr, + Value: common.Big0, + Data: data, + }) + + delayedInbox, err := bridgegen.NewInbox(l1Info.GetAddress("Inbox"), l1Client) + if err != nil { + t.Fatalf("Error getting Go binding of L1 Inbox contract: %v", err) + } + + txOpts := l1Info.GetDefaultTransactOpts("Faucet", ctx) + l1tx, err := delayedInbox.SendContractTransaction( + &txOpts, + arbmath.UintToBig(unsignedTx.Gas()), + unsignedTx.GasFeeCap(), + *unsignedTx.To(), + unsignedTx.Value(), + unsignedTx.Data(), + ) + if err != nil { + t.Fatalf("Error sending unsigned transaction: %v", err) + } + receipt, err := EnsureTxSucceeded(ctx, l1Client, l1tx) + if err != nil { + t.Fatalf("EnsureTxSucceeded(%v) unexpected error: %v", l1tx.Hash(), err) + } + if receipt.Status != types.ReceiptStatusSuccessful { + t.Errorf("L1 transaction: %v has failed", l1tx.Hash()) + } + waitForL1DelayBlocks(t, ctx, l1Client, l1Info) + receipt, err = EnsureTxSucceeded(ctx, l2Client, unsignedTx) + if err != nil { + t.Fatalf("EnsureTxSucceeded(%v) unexpected error: %v", unsignedTx.Hash(), err) + } + if receipt.Status != types.ReceiptStatusSuccessful { + t.Errorf("L2 transaction: %v has failed", receipt.TxHash) + } +} From a47d2ba27e699c4c41a819ace51f5f6fd0c9dec3 Mon Sep 17 00:00:00 2001 From: Nodar Date: Fri, 30 Jun 2023 20:29:18 +0200 Subject: [PATCH 04/20] Add test for ArbitrumContractTx transaction type --- system_tests/retryable_test.go | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/system_tests/retryable_test.go b/system_tests/retryable_test.go index 14ad55699a..cf03e717c4 100644 --- a/system_tests/retryable_test.go +++ b/system_tests/retryable_test.go @@ -6,7 +6,6 @@ package arbtest import ( "context" "math/big" - "os" "strings" "testing" "time" @@ -16,7 +15,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos" @@ -393,17 +391,14 @@ func TestSubmissionGasCosts(t *testing.T) { func waitForL1DelayBlocks(t *testing.T, ctx context.Context, l1client *ethclient.Client, l1info *BlockchainTestInfo) { // sending l1 
messages creates l1 blocks.. make enough to get that delayed inbox message in - for i := 0; i < 30; i++ { + for i := 0; i < 100; i++ { SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), }) } } -func TestL1FundedUnsignedTransaction(t *testing.T) { - glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) - glogger.Verbosity(log.LvlTrace) - log.Root().SetHandler(glogger) +func TestArbitrumContractTx(t *testing.T) { t.Parallel() ctx := context.Background() l2Info, node, l2Client, l1Info, _, l1Client, l1Stack := createTestNodeOnL1(t, ctx, true) @@ -425,8 +420,10 @@ func TestL1FundedUnsignedTransaction(t *testing.T) { } unsignedTx := types.NewTx(&types.ArbitrumContractTx{ ChainId: l2Info.Signer.ChainID(), - GasFeeCap: l2Info.GasPrice, + GasFeeCap: l2Info.GasPrice.Mul(l2Info.GasPrice, big.NewInt(2)), + RequestId: common.BigToHash(common.Big1), Gas: 1e6, + From: faucetL2Addr, To: &l2ContractAddr, Value: common.Big0, Data: data, From 79ee0ad9ec721e30f0381ecb491629e58efb82e4 Mon Sep 17 00:00:00 2001 From: Nodar Date: Fri, 30 Jun 2023 20:31:04 +0200 Subject: [PATCH 05/20] Revert number of messages in waitForL1DelayBlocks --- system_tests/retryable_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system_tests/retryable_test.go b/system_tests/retryable_test.go index cf03e717c4..e84b15b16d 100644 --- a/system_tests/retryable_test.go +++ b/system_tests/retryable_test.go @@ -391,7 +391,7 @@ func TestSubmissionGasCosts(t *testing.T) { func waitForL1DelayBlocks(t *testing.T, ctx context.Context, l1client *ethclient.Client, l1info *BlockchainTestInfo) { // sending l1 messages creates l1 blocks.. make enough to get that delayed inbox message in - for i := 0; i < 100; i++ { + for i := 0; i < 30; i++ { SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), }) From 577715e5adaba77d8248e1c66b8e476848311ed4 Mon Sep 17 00:00:00 2001 From: Nodar Date: Wed, 5 Jul 2023 14:07:49 +0200 Subject: [PATCH 06/20] Don't run contract tx test in parallel --- system_tests/retryable_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/system_tests/retryable_test.go b/system_tests/retryable_test.go index e84b15b16d..ca40f235bc 100644 --- a/system_tests/retryable_test.go +++ b/system_tests/retryable_test.go @@ -399,7 +399,6 @@ func waitForL1DelayBlocks(t *testing.T, ctx context.Context, l1client *ethclient } func TestArbitrumContractTx(t *testing.T) { - t.Parallel() ctx := context.Background() l2Info, node, l2Client, l1Info, _, l1Client, l1Stack := createTestNodeOnL1(t, ctx, true) defer requireClose(t, l1Stack) From 522eb88a9df74d3746ce4275a869d1b6af5c8b56 Mon Sep 17 00:00:00 2001 From: Nodar Date: Mon, 17 Jul 2023 22:13:16 +0200 Subject: [PATCH 07/20] Use lookupL2Hash instead of looking up by tx hash --- system_tests/retryable_test.go | 39 +++++++++++++++++----------------- 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/system_tests/retryable_test.go b/system_tests/retryable_test.go index 16436cf47a..a8691c17a4 100644 --- a/system_tests/retryable_test.go +++ b/system_tests/retryable_test.go @@ -52,22 +52,31 @@ func retryableSetup(t *testing.T) ( delayedBridge, err := arbnode.NewDelayedBridge(l1client, l1info.GetAddress("Bridge"), 0) Require(t, err) - lookupSubmitRetryableL2TxHash := func(l1Receipt *types.Receipt) common.Hash { + lookupL2Hash := func(l1Receipt *types.Receipt) common.Hash { messages, err 
:= delayedBridge.LookupMessagesInRange(ctx, l1Receipt.BlockNumber, l1Receipt.BlockNumber, nil) Require(t, err) if len(messages) == 0 { Fatal(t, "didn't find message for retryable submission") } var submissionTxs []*types.Transaction + msgTypes := map[uint8]bool{ + arbostypes.L1MessageType_SubmitRetryable: true, + arbostypes.L1MessageType_EthDeposit: true, + arbostypes.L1MessageType_L2Message: true, + } + txTypes := map[uint8]bool{ + types.ArbitrumSubmitRetryableTxType: true, + types.ArbitrumDepositTxType: true, + types.ArbitrumContractTxType: true, + } for _, message := range messages { - k := message.Message.Header.Kind - if k != arbostypes.L1MessageType_SubmitRetryable && k != arbostypes.L1MessageType_EthDeposit { + if !msgTypes[message.Message.Header.Kind] { continue } txs, err := arbos.ParseL2Transactions(message.Message, params.ArbitrumDevTestChainConfig().ChainID, nil) Require(t, err) for _, tx := range txs { - if tx.Type() == types.ArbitrumSubmitRetryableTxType || tx.Type() == types.ArbitrumDepositTxType { + if txTypes[tx.Type()] { submissionTxs = append(submissionTxs, tx) } } @@ -75,7 +84,6 @@ func retryableSetup(t *testing.T) ( if len(submissionTxs) != 1 { Fatal(t, "expected 1 tx from retryable submission, found", len(submissionTxs)) } - return submissionTxs[0].Hash() } @@ -101,7 +109,7 @@ func retryableSetup(t *testing.T) ( l2node.StopAndWait() requireClose(t, l1stack) } - return l2info, l1info, l2client, l1client, delayedInbox, lookupSubmitRetryableL2TxHash, ctx, teardown + return l2info, l1info, l2client, l1client, delayedInbox, lookupL2Hash, ctx, teardown } func TestRetryableNoExist(t *testing.T) { @@ -446,11 +454,8 @@ func TestDepositETH(t *testing.T) { } func TestArbitrumContractTx(t *testing.T) { - ctx := context.Background() - l2Info, node, l2Client, l1Info, _, l1Client, l1Stack := createTestNodeOnL1(t, ctx, true) - defer requireClose(t, l1Stack) - defer node.StopAndWait() - + l2Info, l1Info, l2Client, l1Client, delayedInbox, lookupL2Hash, ctx, teardown := retryableSetup(t) + defer teardown() faucetL2Addr := util.RemapL1Address(l1Info.GetAddress("Faucet")) TransferBalanceTo(t, "Faucet", faucetL2Addr, big.NewInt(1e18), l2Info, l2Client, ctx) @@ -466,20 +471,13 @@ func TestArbitrumContractTx(t *testing.T) { } unsignedTx := types.NewTx(&types.ArbitrumContractTx{ ChainId: l2Info.Signer.ChainID(), + From: faucetL2Addr, GasFeeCap: l2Info.GasPrice.Mul(l2Info.GasPrice, big.NewInt(2)), - RequestId: common.BigToHash(common.Big1), Gas: 1e6, - From: faucetL2Addr, To: &l2ContractAddr, Value: common.Big0, Data: data, }) - - delayedInbox, err := bridgegen.NewInbox(l1Info.GetAddress("Inbox"), l1Client) - if err != nil { - t.Fatalf("Error getting Go binding of L1 Inbox contract: %v", err) - } - txOpts := l1Info.GetDefaultTransactOpts("Faucet", ctx) l1tx, err := delayedInbox.SendContractTransaction( &txOpts, @@ -500,7 +498,8 @@ func TestArbitrumContractTx(t *testing.T) { t.Errorf("L1 transaction: %v has failed", l1tx.Hash()) } waitForL1DelayBlocks(t, ctx, l1Client, l1Info) - receipt, err = EnsureTxSucceeded(ctx, l2Client, unsignedTx) + txHash := lookupL2Hash(receipt) + receipt, err = WaitForTx(ctx, l2Client, txHash, time.Second*5) if err != nil { t.Fatalf("EnsureTxSucceeded(%v) unexpected error: %v", unsignedTx.Hash(), err) } From 9b2fa71a63247d231e2d084624d56959158525ff Mon Sep 17 00:00:00 2001 From: Nodar Date: Thu, 20 Jul 2023 16:30:58 +0200 Subject: [PATCH 08/20] Remove redundant chain-id field from ChainInfo struct, chainConfig contains it already --- 
cmd/chaininfo/arbitrum_chain_info.json | 5 ----- cmd/chaininfo/chain_info.go | 3 +-- cmd/deploy/deploy.go | 1 - cmd/nitro/nitro.go | 2 +- 4 files changed, 2 insertions(+), 9 deletions(-) diff --git a/cmd/chaininfo/arbitrum_chain_info.json b/cmd/chaininfo/arbitrum_chain_info.json index 01b60e9c05..5352f9760f 100644 --- a/cmd/chaininfo/arbitrum_chain_info.json +++ b/cmd/chaininfo/arbitrum_chain_info.json @@ -1,6 +1,5 @@ [ { - "chain-id": 42161, "chain-name": "arb1", "parent-chain-id": 1, "sequencer-url": "https://arb1-sequencer.arbitrum.io/rpc", @@ -50,7 +49,6 @@ } }, { - "chain-id": 42170, "chain-name": "nova", "parent-chain-id": 1, "sequencer-url": "https://nova.arbitrum.io/rpc", @@ -100,7 +98,6 @@ } }, { - "chain-id": 421613, "chain-name": "goerli-rollup", "parent-chain-id": 5, "sequencer-url": "https://goerli-rollup.arbitrum.io/rpc", @@ -149,7 +146,6 @@ } }, { - "chain-id": 412346, "chain-name": "arb-dev-test", "chain-config": { @@ -185,7 +181,6 @@ } }, { - "chain-id": 412347, "chain-name": "anytrust-dev-test", "chain-config": { diff --git a/cmd/chaininfo/chain_info.go b/cmd/chaininfo/chain_info.go index 46e7ada966..c9ffca9830 100644 --- a/cmd/chaininfo/chain_info.go +++ b/cmd/chaininfo/chain_info.go @@ -18,7 +18,6 @@ import ( var DefaultChainInfo []byte type ChainInfo struct { - ChainId uint64 `json:"chain-id"` ChainName string `json:"chain-name"` ParentChainId uint64 `json:"parent-chain-id"` // This is the forwarding target to submit transactions to, called the sequencer URL for clarity @@ -94,7 +93,7 @@ func findChainInfo(chainId uint64, chainName string, chainsInfoBytes []byte) (*C return nil, err } for _, chainInfo := range chainsInfo { - if (chainId == 0 || chainInfo.ChainId == chainId) && (chainName == "" || chainInfo.ChainName == chainName) { + if (chainId == 0 || chainInfo.ChainConfig.ChainID.Uint64() == chainId) && (chainName == "" || chainInfo.ChainName == chainName) { return &chainInfo, nil } } diff --git a/cmd/deploy/deploy.go b/cmd/deploy/deploy.go index 91775ced25..5cecb7d2ad 100644 --- a/cmd/deploy/deploy.go +++ b/cmd/deploy/deploy.go @@ -148,7 +148,6 @@ func main() { } chainsInfo := []chaininfo.ChainInfo{ { - ChainId: chainConfig.ChainID.Uint64(), ChainName: *l2ChainName, ParentChainId: l1ChainId.Uint64(), ChainConfig: &chainConfig, diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index f1af1388cf..0e1a4c58a4 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -731,7 +731,7 @@ func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, c } chainDefaults := map[string]interface{}{ "persistent.chain": chainInfo.ChainName, - "chain.id": chainInfo.ChainId, + "chain.id": chainInfo.ChainConfig.ChainID, "parent-chain.id": chainInfo.ParentChainId, } if chainInfo.SequencerUrl != "" { From b5d986301cdd6d7745401029b247e209696e93ef Mon Sep 17 00:00:00 2001 From: Joshua Colvin Date: Thu, 20 Jul 2023 13:43:51 -0700 Subject: [PATCH 09/20] stop using deprecated github actions `::set-output` refactor as suggested in https://github.blog/changelog/2022-10-11-github-actions-deprecating-save-state-and-set-output-commands/ --- .github/workflows/docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index cf5fdd5ca9..10259292b3 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -65,7 +65,7 @@ jobs: # We work around this by piping a tarball through stdout docker run --rm --entrypoint tar localhost:5000/nitro-node-dev:latest -cf - target/machines/latest | 
tar xf - module_root="$(cat "target/machines/latest/module-root.txt")" - echo "::set-output name=module-root::$module_root" + echo "name=module-root=$module_root" >> $GITHUB_STATE echo -e "\x1b[1;34mWAVM module root:\x1b[0m $module_root" - name: Upload WAVM machine as artifact From ba5be94f8ea6bf2a4b2880d8ac2403f9f900e2b1 Mon Sep 17 00:00:00 2001 From: Joshua Colvin Date: Thu, 20 Jul 2023 17:05:26 -0700 Subject: [PATCH 10/20] Force arbitrator CI to run --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index 4b6928151f..96f15c20ce 100644 --- a/Makefile +++ b/Makefile @@ -5,6 +5,7 @@ # have to update an existing file. So - for docker, convert all dependencies # to order-only dependencies (timestamps ignored). # WARNING: when using this trick, you cannot use the $< automatic variable + ifeq ($(origin NITRO_BUILD_IGNORE_TIMESTAMPS),undefined) DEP_PREDICATE:= ORDER_ONLY_PREDICATE:=| From 34dfb97dc12a5d65adfbbf589e7d6a478ba08f5c Mon Sep 17 00:00:00 2001 From: Joshua Colvin Date: Thu, 20 Jul 2023 17:45:51 -0700 Subject: [PATCH 11/20] update github action versions github deprecated node12, so update all actions to supported versions --- .github/workflows/arbitrator-ci.yml | 6 +++--- .github/workflows/ci.yml | 6 +++--- .github/workflows/codeql-analysis.yml | 4 ++-- .github/workflows/docker.yml | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml index e11f5511a1..a1e912a3fc 100644 --- a/.github/workflows/arbitrator-ci.yml +++ b/.github/workflows/arbitrator-ci.yml @@ -24,7 +24,7 @@ jobs: runs-on: ubuntu-8 steps: - name: Checkout - uses: actions/checkout@v1 + uses: actions/checkout@v3 with: submodules: recursive @@ -36,12 +36,12 @@ jobs: sudo ln -s /usr/bin/wasm-ld-14 /usr/local/bin/wasm-ld - name: Install go - uses: actions/setup-go@v2 + uses: actions/setup-go@v4 with: go-version: 1.20.x - name: Setup nodejs - uses: actions/setup-node@v2 + uses: actions/setup-node@v3 with: node-version: '16' cache: 'yarn' diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c77ef3b770..d2f5765d72 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -28,7 +28,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: submodules: true @@ -36,14 +36,14 @@ jobs: run: sudo apt update && sudo apt install -y wabt gotestsum - name: Setup nodejs - uses: actions/setup-node@v2 + uses: actions/setup-node@v3 with: node-version: '16' cache: 'yarn' cache-dependency-path: '**/yarn.lock' - name: Install go - uses: actions/setup-go@v2 + uses: actions/setup-go@v4 with: go-version: 1.20.x diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 92411d17e2..1b2e92754a 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -59,14 +59,14 @@ jobs: config-file: ./.github/codeql/codeql-config.yml - name: Setup nodejs - uses: actions/setup-node@v2 + uses: actions/setup-node@v3 with: node-version: '16' cache: 'yarn' cache-dependency-path: '**/yarn.lock' - name: Install go - uses: actions/setup-go@v2 + uses: actions/setup-go@v4 with: go-version: 1.20.x diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 10259292b3..d391dd4096 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -22,7 +22,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: submodules: recursive From 
3c98e5229934f1bedfefbe4a1bc525098cfa90a9 Mon Sep 17 00:00:00 2001 From: Joshua Colvin Date: Thu, 20 Jul 2023 19:15:23 -0700 Subject: [PATCH 12/20] update emsdk version to support node16 --- .github/workflows/arbitrator-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml index a1e912a3fc..ae25171859 100644 --- a/.github/workflows/arbitrator-ci.yml +++ b/.github/workflows/arbitrator-ci.yml @@ -119,7 +119,7 @@ jobs: - name: Setup emsdk if: steps.cache-cbrotli.outputs.cache-hit != 'true' - uses: mymindstorm/setup-emsdk@v11 + uses: mymindstorm/setup-emsdk@v12 with: # Make sure to set a version number! version: 3.1.6 From 9ceaa38f8ad214d0273aa042623f40dca0cfba90 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Wed, 12 Jul 2023 21:55:34 +0000 Subject: [PATCH 13/20] nitro: fix defers on main process --- cmd/nitro/nitro.go | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index f1af1388cf..6f49a67572 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -374,12 +374,18 @@ func mainImpl() int { return 1 } + var deferFuncs []func() + defer func() { + for i := range deferFuncs { + deferFuncs[i]() + } + }() + chainDb, l2BlockChain, err := openInitializeChainDb(ctx, stack, nodeConfig, new(big.Int).SetUint64(nodeConfig.L2.ChainID), execution.DefaultCacheConfigFor(stack, &nodeConfig.Node.Caching), l1Client, rollupAddrs) - defer closeDb(chainDb, "chainDb") if l2BlockChain != nil { - // Calling Stop on the blockchain multiple times does nothing - defer l2BlockChain.Stop() + deferFuncs = append(deferFuncs, func() { l2BlockChain.Stop() }) } + deferFuncs = append(deferFuncs, func() { closeDb(chainDb, "chainDb") }) if err != nil { flag.Usage() log.Error("error initializing database", "err", err) @@ -387,7 +393,7 @@ func mainImpl() int { } arbDb, err := stack.OpenDatabase("arbitrumdata", 0, 0, "", false) - defer closeDb(arbDb, "arbDb") + deferFuncs = append(deferFuncs, func() { closeDb(arbDb, "arbDb") }) if err != nil { log.Error("failed to open database", "err", err) return 1 @@ -479,7 +485,8 @@ func mainImpl() int { if err != nil { fatalErrChan <- fmt.Errorf("error starting node: %w", err) } - defer currentNode.StopAndWait() + // remove previous deferFuncs, StopAndWait closes dtabase and blockchain. + deferFuncs = []func(){func() { currentNode.StopAndWait() }} } sigint := make(chan os.Signal, 1) From 6dbaa5168221c3e42266dd275929727d6ea47982 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Fri, 21 Jul 2023 13:32:37 -0600 Subject: [PATCH 14/20] fix typo in comment --- cmd/nitro/nitro.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 6f49a67572..55e83d2456 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -485,7 +485,7 @@ func mainImpl() int { if err != nil { fatalErrChan <- fmt.Errorf("error starting node: %w", err) } - // remove previous deferFuncs, StopAndWait closes dtabase and blockchain. + // remove previous deferFuncs, StopAndWait closes database and blockchain. 
deferFuncs = []func(){func() { currentNode.StopAndWait() }} } From 8aa6670fd4e3a8846cf32b720ccb05a66c9fe27a Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Fri, 21 Jul 2023 16:45:28 -0400 Subject: [PATCH 15/20] use tmpPath for creating test directory --- system_tests/arbtrace_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/system_tests/arbtrace_test.go b/system_tests/arbtrace_test.go index d36b9b2950..78907aa622 100644 --- a/system_tests/arbtrace_test.go +++ b/system_tests/arbtrace_test.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "errors" - "path/filepath" "testing" "time" @@ -132,7 +131,7 @@ func (s *ArbTraceAPIStub) Filter(ctx context.Context, filter *filterRequest) ([] func TestArbTraceForwarding(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - ipcPath := filepath.Join(t.TempDir(), "redirect.ipc") + ipcPath := tmpPath(t, "redirect.ipc") var apis []rpc.API apis = append(apis, rpc.API{ Namespace: "arbtrace", From 05c3efefd94145001926022dddf45656509d678c Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Fri, 21 Jul 2023 16:55:12 -0400 Subject: [PATCH 16/20] fix another test --- system_tests/forwarder_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system_tests/forwarder_test.go b/system_tests/forwarder_test.go index d4cf0d8eb7..0e5cca319a 100644 --- a/system_tests/forwarder_test.go +++ b/system_tests/forwarder_test.go @@ -32,7 +32,7 @@ const nodesCount = 5 // number of testnodes to create in tests func TestStaticForwarder(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - ipcPath := filepath.Join(t.TempDir(), "test.ipc") + ipcPath := tmpPath(t, "test.ipc") ipcConfig := genericconf.IPCConfigDefault ipcConfig.Path = ipcPath stackConfig := stackConfigForTest(t) From ca690bfc484ddcd24d0df0305dc8d02a57c1ac07 Mon Sep 17 00:00:00 2001 From: Nodar Date: Mon, 24 Jul 2023 14:34:05 +0200 Subject: [PATCH 17/20] drop from error message since it doesn't just refer to retryables --- system_tests/retryable_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system_tests/retryable_test.go b/system_tests/retryable_test.go index a8691c17a4..fa049e4d98 100644 --- a/system_tests/retryable_test.go +++ b/system_tests/retryable_test.go @@ -56,7 +56,7 @@ func retryableSetup(t *testing.T) ( messages, err := delayedBridge.LookupMessagesInRange(ctx, l1Receipt.BlockNumber, l1Receipt.BlockNumber, nil) Require(t, err) if len(messages) == 0 { - Fatal(t, "didn't find message for retryable submission") + Fatal(t, "didn't find message for submission") } var submissionTxs []*types.Transaction msgTypes := map[uint8]bool{ @@ -82,7 +82,7 @@ func retryableSetup(t *testing.T) ( } } if len(submissionTxs) != 1 { - Fatal(t, "expected 1 tx from retryable submission, found", len(submissionTxs)) + Fatal(t, "expected 1 tx from submission, found", len(submissionTxs)) } return submissionTxs[0].Hash() } From c8990c49ff185d413dc9621eaf6f98b322c43ff6 Mon Sep 17 00:00:00 2001 From: Nodar Date: Mon, 24 Jul 2023 17:20:47 +0200 Subject: [PATCH 18/20] Convert chain-id to uint64 --- cmd/nitro/nitro.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 0e1a4c58a4..734a713b63 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -731,7 +731,7 @@ func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, c } chainDefaults := map[string]interface{}{ "persistent.chain": chainInfo.ChainName, - 
"chain.id": chainInfo.ChainConfig.ChainID, + "chain.id": chainInfo.ChainConfig.ChainID.Uint64(), "parent-chain.id": chainInfo.ParentChainId, } if chainInfo.SequencerUrl != "" { From b86620e602fcc84aa39e9a61dbc6bda1883ec642 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Mon, 24 Jul 2023 13:50:42 -0600 Subject: [PATCH 19/20] Fix clippy lint about useless drop --- arbitrator/wasm-libraries/go-stub/src/lib.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arbitrator/wasm-libraries/go-stub/src/lib.rs b/arbitrator/wasm-libraries/go-stub/src/lib.rs index 8be1ff48e6..df77893fcb 100644 --- a/arbitrator/wasm-libraries/go-stub/src/lib.rs +++ b/arbitrator/wasm-libraries/go-stub/src/lib.rs @@ -587,7 +587,10 @@ pub unsafe extern "C" fn wavm__go_after_run() { while let Some(info) = state.times.pop() { while state.pending_ids.contains(&info.id) { TIME = std::cmp::max(TIME, info.time); - drop(state); + // Important: the current reference to state shouldn't be used after this resume call, + // as it might during the resume call the reference might be invalidated. + // That's why immediately after this resume call, we replace the reference + // with a new reference to TIMEOUT_STATE. wavm_guest_call__resume(); state = TIMEOUT_STATE.get_or_insert_with(Default::default); } From 6a00aa188f7b7b0b1dbae519e483edd421490e24 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Mon, 24 Jul 2023 15:36:06 -0600 Subject: [PATCH 20/20] Fix unnecessary mutable annotations --- arbitrator/jit/src/runtime.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arbitrator/jit/src/runtime.rs b/arbitrator/jit/src/runtime.rs index 4d83fbbe6d..d547a06553 100644 --- a/arbitrator/jit/src/runtime.rs +++ b/arbitrator/jit/src/runtime.rs @@ -39,20 +39,20 @@ pub fn wasm_write(mut env: WasmEnvMut, sp: u32) { } pub fn nanotime1(mut env: WasmEnvMut, sp: u32) { - let (sp, mut env) = GoStack::new(sp, &mut env); + let (sp, env) = GoStack::new(sp, &mut env); env.go_state.time += env.go_state.time_interval; sp.write_u64(0, env.go_state.time); } pub fn walltime(mut env: WasmEnvMut, sp: u32) { - let (sp, mut env) = GoStack::new(sp, &mut env); + let (sp, env) = GoStack::new(sp, &mut env); env.go_state.time += env.go_state.time_interval; sp.write_u64(0, env.go_state.time / 1_000_000_000); sp.write_u32(1, (env.go_state.time % 1_000_000_000) as u32); } pub fn walltime1(mut env: WasmEnvMut, sp: u32) { - let (sp, mut env) = GoStack::new(sp, &mut env); + let (sp, env) = GoStack::new(sp, &mut env); env.go_state.time += env.go_state.time_interval; sp.write_u64(0, env.go_state.time / 1_000_000_000); sp.write_u64(1, env.go_state.time % 1_000_000_000);