[NIT-2554][Config Change] Test manual batch-poster fallback for DAS #2665

Open

wants to merge 11 commits into base: master
97 changes: 85 additions & 12 deletions system_tests/das_test.go
@@ -6,6 +6,7 @@ package arbtest
import (
"context"
"encoding/base64"
"errors"
"io"
"math/big"
"net"
@@ -44,18 +45,13 @@ func startLocalDASServer(
pubkey, _, err := das.GenerateAndStoreKeys(keyDir)
Require(t, err)

config := das.DataAvailabilityConfig{
Enable: true,
Key: das.KeyConfig{
KeyDir: keyDir,
},
LocalFileStorage: das.LocalFileStorageConfig{
Enable: true,
DataDir: dataDir,
},
ParentChainNodeURL: "none",
RequestTimeout: 5 * time.Second,
}
config := das.DefaultDataAvailabilityConfig
Member
I know this is something where it would be nice if we could develop an internally-accepted style guide, but:
I really find the inline declaration of the struct more readable. The structure of the data is easier to visualize.

Contributor Author

In this case, I'm not using a composite literal because I want to initialize all fields based on the default config. The default config had changed, and the DAS tests were not using the correct config because of that.
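A minimal, self-contained sketch of the trade-off (Config, DefaultConfig, and the fields below are hypothetical stand-ins for das.DataAvailabilityConfig and its defaults, not the real types): a composite literal silently zeroes any field it doesn't repeat, while copying the default and overriding picks up later changes to the defaults.

package main

import "fmt"

// Config stands in for das.DataAvailabilityConfig in this sketch.
type Config struct {
	Enable         bool
	RequestTimeout int // seconds; simplified for the illustration
	DataDir        string
}

// DefaultConfig plays the role of das.DefaultDataAvailabilityConfig.
var DefaultConfig = Config{RequestTimeout: 5}

func main() {
	// Composite literal: readable, but any default not repeated here
	// (e.g. RequestTimeout) silently becomes the zero value.
	literal := Config{
		Enable:  true,
		DataDir: "/tmp/das",
	}

	// Copy the package default and override only what the test cares
	// about, so future changes to the defaults are picked up automatically.
	fromDefault := DefaultConfig
	fromDefault.Enable = true
	fromDefault.DataDir = "/tmp/das"

	fmt.Println(literal.RequestTimeout, fromDefault.RequestTimeout) // prints "0 5"
}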

config.Enable = true
config.Key = das.KeyConfig{KeyDir: keyDir}
config.ParentChainNodeURL = "none"
config.LocalFileStorage = das.DefaultLocalFileStorageConfig
config.LocalFileStorage.Enable = true
config.LocalFileStorage.DataDir = dataDir

storageService, lifecycleManager, err := das.CreatePersistentStorageService(ctx, &config)
defer lifecycleManager.StopAndWaitUntil(time.Second)
@@ -327,3 +323,80 @@ func initTest(t *testing.T) {
enableLogging(logLvl)
}
}

func TestDASBatchPosterFallback(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

// Setup L1
builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
builder.chainConfig = params.ArbitrumDevTestDASChainConfig()
builder.BuildL1(t)
l1client := builder.L1.Client
l1info := builder.L1Info

// Setup DAS server
dasDataDir := t.TempDir()
dasRpcServer, pubkey, backendConfig, _, restServerUrl := startLocalDASServer(
t, ctx, dasDataDir, l1client, builder.addresses.SequencerInbox)
authorizeDASKeyset(t, ctx, pubkey, l1info, l1client)

// Setup sequencer/batch-poster L2 node
builder.nodeConfig.DataAvailability.Enable = true
builder.nodeConfig.DataAvailability.RPCAggregator = aggConfigForBackend(backendConfig)
builder.nodeConfig.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig
builder.nodeConfig.DataAvailability.RestAggregator.Enable = true
builder.nodeConfig.DataAvailability.RestAggregator.Urls = []string{restServerUrl}
builder.nodeConfig.DataAvailability.ParentChainNodeURL = "none"
builder.nodeConfig.BatchPoster.DisableDapFallbackStoreDataOnChain = true // Disable the fallback to posting data on chain
builder.nodeConfig.BatchPoster.ErrorDelay = time.Millisecond * 250 // Use a short error delay because we expect batch-posting errors
builder.L2Info = NewArbTestInfo(t, builder.chainConfig.ChainID)
builder.L2Info.GenerateAccount("User2")
cleanup := builder.BuildL2OnL1(t)
defer cleanup()
l2client := builder.L2.Client
l2info := builder.L2Info

// Setup secondary L2 node
nodeConfigB := arbnode.ConfigDefaultL1NonSequencerTest()
nodeConfigB.BlockValidator.Enable = false
nodeConfigB.DataAvailability.Enable = true
nodeConfigB.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig
nodeConfigB.DataAvailability.RestAggregator.Enable = true
nodeConfigB.DataAvailability.RestAggregator.Urls = []string{restServerUrl}
nodeConfigB.DataAvailability.ParentChainNodeURL = "none"
nodeBParams := SecondNodeParams{
nodeConfig: nodeConfigB,
initData: &l2info.ArbInitData,
}
l2B, cleanupB := builder.Build2ndNode(t, &nodeBParams)
defer cleanupB()

// Check batch posting using the DAS
checkBatchPosting(t, ctx, l1client, l2client, l1info, l2info, big.NewInt(1e12), l2B.Client)

// Shutdown the DAS
err := dasRpcServer.Shutdown(ctx)
Require(t, err)

// Send a second transaction and check that it doesn't arrive on the second node
tx, _ := TransferBalanceTo(t, "Owner", l2info.GetAddress("User2"), big.NewInt(1e12), l2info, l2client, ctx)
_, err = WaitForTx(ctx, l2B.Client, tx.Hash(), time.Second*3)
if err == nil || !errors.Is(err, context.DeadlineExceeded) {
Fatal(t, "expected context-deadline exceeded error, but got:", err)
}

// Enable the DAP fallback and check the transaction on the second node.
// (We don't need to restart the node because of the hot-reload.)
builder.nodeConfig.BatchPoster.DisableDapFallbackStoreDataOnChain = false
_, err = WaitForTx(ctx, l2B.Client, tx.Hash(), time.Second*3)
Require(t, err)
l2balance, err := l2B.Client.BalanceAt(ctx, l2info.GetAddress("User2"), nil)
Require(t, err)
if l2balance.Cmp(big.NewInt(2e12)) != 0 {
Fatal(t, "Unexpected balance:", l2balance)
}

// Send another transaction with fallback on
checkBatchPosting(t, ctx, l1client, l2client, l1info, l2info, big.NewInt(3e12), l2B.Client)
}