Skip to content

Commit

Permalink
Merge branch 'master' into fix-arbblockhash-pendingblock
Browse files Browse the repository at this point in the history
  • Loading branch information
ganeshvanahalli committed Mar 18, 2024
2 parents f965fb8 + 5d67b59 commit 86e2ae7
Show file tree
Hide file tree
Showing 7 changed files with 85 additions and 44 deletions.
10 changes: 1 addition & 9 deletions cmd/nitro/init.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,6 @@ import (
"github.com/offchainlabs/nitro/cmd/ipfshelper"
"github.com/offchainlabs/nitro/cmd/pruning"
"github.com/offchainlabs/nitro/cmd/staterecovery"
"github.com/offchainlabs/nitro/cmd/util"
"github.com/offchainlabs/nitro/execution/gethexec"
"github.com/offchainlabs/nitro/statetransfer"
"github.com/offchainlabs/nitro/util/arbmath"
Expand Down Expand Up @@ -284,14 +283,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo
if err != nil {
return chainDb, nil, err
}
combinedL2ChainInfoFiles := config.Chain.InfoFiles
if config.Chain.InfoIpfsUrl != "" {
l2ChainInfoIpfsFile, err := util.GetL2ChainInfoIpfsFile(ctx, config.Chain.InfoIpfsUrl, config.Chain.InfoIpfsDownloadPath)
if err != nil {
log.Error("error getting l2 chain info file from ipfs", "err", err)
}
combinedL2ChainInfoFiles = append(combinedL2ChainInfoFiles, l2ChainInfoIpfsFile)
}
combinedL2ChainInfoFiles := aggregateL2ChainInfoFiles(ctx, config.Chain.InfoFiles, config.Chain.InfoIpfsUrl, config.Chain.InfoIpfsDownloadPath)
chainConfig, err = chaininfo.GetChainConfig(new(big.Int).SetUint64(config.Chain.ID), config.Chain.Name, genesisBlockNr, combinedL2ChainInfoFiles, config.Chain.InfoJson)
if err != nil {
return chainDb, nil, err
Expand Down
33 changes: 20 additions & 13 deletions cmd/nitro/nitro.go
Original file line number Diff line number Diff line change
Expand Up @@ -293,14 +293,7 @@ func mainImpl() int {
}
}

combinedL2ChainInfoFile := nodeConfig.Chain.InfoFiles
if nodeConfig.Chain.InfoIpfsUrl != "" {
l2ChainInfoIpfsFile, err := util.GetL2ChainInfoIpfsFile(ctx, nodeConfig.Chain.InfoIpfsUrl, nodeConfig.Chain.InfoIpfsDownloadPath)
if err != nil {
log.Error("error getting chain info file from ipfs", "err", err)
}
combinedL2ChainInfoFile = append(combinedL2ChainInfoFile, l2ChainInfoIpfsFile)
}
combinedL2ChainInfoFile := aggregateL2ChainInfoFiles(ctx, nodeConfig.Chain.InfoFiles, nodeConfig.Chain.InfoIpfsUrl, nodeConfig.Chain.InfoIpfsDownloadPath)

if nodeConfig.Node.Staker.Enable {
if !nodeConfig.Node.ParentChainReader.Enable {
Expand Down Expand Up @@ -505,9 +498,19 @@ func mainImpl() int {
return 0
}

if l2BlockChain.Config().ArbitrumChainParams.DataAvailabilityCommittee && !nodeConfig.Node.DataAvailability.Enable {
chainInfo, err := chaininfo.ProcessChainInfo(nodeConfig.Chain.ID, nodeConfig.Chain.Name, combinedL2ChainInfoFile, nodeConfig.Chain.InfoJson)
if err != nil {
log.Error("error processing l2 chain info", "err", err)
return 1
}
if err := validateBlockChain(l2BlockChain, chainInfo.ChainConfig); err != nil {
log.Error("user provided chain config is not compatible with onchain chain config", "err", err)
return 1
}

if l2BlockChain.Config().ArbitrumChainParams.DataAvailabilityCommittee != nodeConfig.Node.DataAvailability.Enable {
flag.Usage()
log.Error("a data availability service must be configured for this chain (see the --node.data-availability family of options)")
log.Error(fmt.Sprintf("data availability service usage for this chain is set to %v but --node.data-availability.enable is set to %v", l2BlockChain.Config().ArbitrumChainParams.DataAvailabilityCommittee, nodeConfig.Node.DataAvailability.Enable))
return 1
}

Expand Down Expand Up @@ -903,15 +906,19 @@ func ParseNode(ctx context.Context, args []string) (*NodeConfig, *genericconf.Wa
return &nodeConfig, &l1Wallet, &l2DevWallet, nil
}

func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, chainName string, l2ChainInfoFiles []string, l2ChainInfoJson string, l2ChainInfoIpfsUrl string, l2ChainInfoIpfsDownloadPath string) error {
combinedL2ChainInfoFiles := l2ChainInfoFiles
// aggregateL2ChainInfoFiles returns l2ChainInfoFiles, optionally extended with
// a chain-info file fetched from IPFS when l2ChainInfoIpfsUrl is non-empty.
// A failed IPFS download is logged and treated as non-fatal; note the returned
// (possibly empty) file path is still appended in that case, preserving the
// pre-refactor behavior of the callers this helper was extracted from.
func aggregateL2ChainInfoFiles(ctx context.Context, l2ChainInfoFiles []string, l2ChainInfoIpfsUrl string, l2ChainInfoIpfsDownloadPath string) []string {
	if l2ChainInfoIpfsUrl != "" {
		l2ChainInfoIpfsFile, err := util.GetL2ChainInfoIpfsFile(ctx, l2ChainInfoIpfsUrl, l2ChainInfoIpfsDownloadPath)
		if err != nil {
			log.Error("error getting l2 chain info file from ipfs", "err", err)
		}
		l2ChainInfoFiles = append(l2ChainInfoFiles, l2ChainInfoIpfsFile)
	}
	return l2ChainInfoFiles
}

func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, chainName string, l2ChainInfoFiles []string, l2ChainInfoJson string, l2ChainInfoIpfsUrl string, l2ChainInfoIpfsDownloadPath string) error {
combinedL2ChainInfoFiles := aggregateL2ChainInfoFiles(ctx, l2ChainInfoFiles, l2ChainInfoIpfsUrl, l2ChainInfoIpfsDownloadPath)
chainInfo, err := chaininfo.ProcessChainInfo(chainId, chainName, combinedL2ChainInfoFiles, l2ChainInfoJson)
if err != nil {
return err
Expand Down
18 changes: 10 additions & 8 deletions das/das_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,10 @@ func testDASStoreRetrieveMultipleInstances(t *testing.T, storageType string) {
Fail(t, "unknown storage type")
}

dbConfig := DefaultLocalDBStorageConfig
dbConfig.Enable = enableDbStorage
dbConfig.DataDir = dbPath

config := DataAvailabilityConfig{
Enable: true,
Key: KeyConfig{
Expand All @@ -39,10 +43,7 @@ func testDASStoreRetrieveMultipleInstances(t *testing.T, storageType string) {
Enable: enableFileStorage,
DataDir: dbPath,
},
LocalDBStorage: LocalDBStorageConfig{
Enable: enableDbStorage,
DataDir: dbPath,
},
LocalDBStorage: dbConfig,
ParentChainNodeURL: "none",
}

Expand Down Expand Up @@ -122,6 +123,10 @@ func testDASMissingMessage(t *testing.T, storageType string) {
Fail(t, "unknown storage type")
}

dbConfig := DefaultLocalDBStorageConfig
dbConfig.Enable = enableDbStorage
dbConfig.DataDir = dbPath

config := DataAvailabilityConfig{
Enable: true,
Key: KeyConfig{
Expand All @@ -131,10 +136,7 @@ func testDASMissingMessage(t *testing.T, storageType string) {
Enable: enableFileStorage,
DataDir: dbPath,
},
LocalDBStorage: LocalDBStorageConfig{
Enable: enableDbStorage,
DataDir: dbPath,
},
LocalDBStorage: dbConfig,
ParentChainNodeURL: "none",
}

Expand Down
48 changes: 43 additions & 5 deletions das/db_storage_service.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,16 +25,47 @@ type LocalDBStorageConfig struct {
DiscardAfterTimeout bool `koanf:"discard-after-timeout"`
SyncFromStorageService bool `koanf:"sync-from-storage-service"`
SyncToStorageService bool `koanf:"sync-to-storage-service"`

// BadgerDB options
NumMemtables int `koanf:"num-memtables"`
NumLevelZeroTables int `koanf:"num-level-zero-tables"`
NumLevelZeroTablesStall int `koanf:"num-level-zero-tables-stall"`
NumCompactors int `koanf:"num-compactors"`
BaseTableSize int64 `koanf:"base-table-size"`
ValueLogFileSize int64 `koanf:"value-log-file-size"`
}

var DefaultLocalDBStorageConfig = LocalDBStorageConfig{}
var badgerDefaultOptions = badger.DefaultOptions("")

var DefaultLocalDBStorageConfig = LocalDBStorageConfig{
Enable: false,
DataDir: "",
DiscardAfterTimeout: false,
SyncFromStorageService: false,
SyncToStorageService: false,

NumMemtables: badgerDefaultOptions.NumMemtables,
NumLevelZeroTables: badgerDefaultOptions.NumLevelZeroTables,
NumLevelZeroTablesStall: badgerDefaultOptions.NumLevelZeroTablesStall,
NumCompactors: badgerDefaultOptions.NumCompactors,
BaseTableSize: badgerDefaultOptions.BaseTableSize,
ValueLogFileSize: badgerDefaultOptions.ValueLogFileSize,
}

// LocalDBStorageConfigAddOptions registers the local-DB storage service flags
// on f under the given prefix, defaulting every flag to the corresponding
// field of DefaultLocalDBStorageConfig (which mirrors BadgerDB's own defaults
// for the tuning knobs).
func LocalDBStorageConfigAddOptions(prefix string, f *flag.FlagSet) {
	f.Bool(prefix+".enable", DefaultLocalDBStorageConfig.Enable, "enable storage/retrieval of sequencer batch data from a database on the local filesystem")
	f.String(prefix+".data-dir", DefaultLocalDBStorageConfig.DataDir, "directory in which to store the database")
	f.Bool(prefix+".discard-after-timeout", DefaultLocalDBStorageConfig.DiscardAfterTimeout, "discard data after its expiry timeout")
	f.Bool(prefix+".sync-from-storage-service", DefaultLocalDBStorageConfig.SyncFromStorageService, "enable db storage to be used as a source for regular sync storage")
	f.Bool(prefix+".sync-to-storage-service", DefaultLocalDBStorageConfig.SyncToStorageService, "enable db storage to be used as a sink for regular sync storage")

	// BadgerDB tuning options; passed through to badger.Options in
	// NewDBStorageService.
	f.Int(prefix+".num-memtables", DefaultLocalDBStorageConfig.NumMemtables, "BadgerDB option: sets the maximum number of tables to keep in memory before stalling")
	f.Int(prefix+".num-level-zero-tables", DefaultLocalDBStorageConfig.NumLevelZeroTables, "BadgerDB option: sets the maximum number of Level 0 tables before compaction starts")
	f.Int(prefix+".num-level-zero-tables-stall", DefaultLocalDBStorageConfig.NumLevelZeroTablesStall, "BadgerDB option: sets the number of Level 0 tables that once reached causes the DB to stall until compaction succeeds")
	f.Int(prefix+".num-compactors", DefaultLocalDBStorageConfig.NumCompactors, "BadgerDB option: Sets the number of compaction workers to run concurrently")
	f.Int64(prefix+".base-table-size", DefaultLocalDBStorageConfig.BaseTableSize, "BadgerDB option: sets the maximum size in bytes for LSM table or file in the base level")
	f.Int64(prefix+".value-log-file-size", DefaultLocalDBStorageConfig.ValueLogFileSize, "BadgerDB option: sets the maximum size of a single log file")

}

type DBStorageService struct {
Expand All @@ -44,16 +75,23 @@ type DBStorageService struct {
stopWaiter stopwaiter.StopWaiterSafe
}

func NewDBStorageService(ctx context.Context, dirPath string, discardAfterTimeout bool) (StorageService, error) {
db, err := badger.Open(badger.DefaultOptions(dirPath))
func NewDBStorageService(ctx context.Context, config *LocalDBStorageConfig) (StorageService, error) {
options := badger.DefaultOptions(config.DataDir).
WithNumMemtables(config.NumMemtables).
WithNumLevelZeroTables(config.NumLevelZeroTables).
WithNumLevelZeroTablesStall(config.NumLevelZeroTablesStall).
WithNumCompactors(config.NumCompactors).
WithBaseTableSize(config.BaseTableSize).
WithValueLogFileSize(config.ValueLogFileSize)
db, err := badger.Open(options)
if err != nil {
return nil, err
}

ret := &DBStorageService{
db: db,
discardAfterTimeout: discardAfterTimeout,
dirPath: dirPath,
discardAfterTimeout: config.DiscardAfterTimeout,
dirPath: config.DataDir,
}
if err := ret.stopWaiter.Start(ctx, ret); err != nil {
return nil, err
Expand Down
2 changes: 1 addition & 1 deletion das/factory.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ func CreatePersistentStorageService(
storageServices := make([]StorageService, 0, 10)
var lifecycleManager LifecycleManager
if config.LocalDBStorage.Enable {
s, err := NewDBStorageService(ctx, config.LocalDBStorage.DataDir, config.LocalDBStorage.DiscardAfterTimeout)
s, err := NewDBStorageService(ctx, &config.LocalDBStorage)
if err != nil {
return nil, nil, err
}
Expand Down
9 changes: 5 additions & 4 deletions system_tests/common_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1031,6 +1031,10 @@ func setupConfigWithDAS(
dasSignerKey, _, err := das.GenerateAndStoreKeys(dbPath)
Require(t, err)

dbConfig := das.DefaultLocalDBStorageConfig
dbConfig.Enable = enableDbStorage
dbConfig.DataDir = dbPath

dasConfig := &das.DataAvailabilityConfig{
Enable: enableDas,
Key: das.KeyConfig{
Expand All @@ -1040,10 +1044,7 @@ func setupConfigWithDAS(
Enable: enableFileStorage,
DataDir: dbPath,
},
LocalDBStorage: das.LocalDBStorageConfig{
Enable: enableDbStorage,
DataDir: dbPath,
},
LocalDBStorage: dbConfig,
RequestTimeout: 5 * time.Second,
ParentChainNodeURL: "none",
SequencerInboxAddress: "none",
Expand Down
9 changes: 5 additions & 4 deletions system_tests/das_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -253,6 +253,10 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) {
pubkey, _, err := das.GenerateAndStoreKeys(keyDir)
Require(t, err)

dbConfig := das.DefaultLocalDBStorageConfig
dbConfig.Enable = true
dbConfig.DataDir = dbDataDir

serverConfig := das.DataAvailabilityConfig{
Enable: true,

Expand All @@ -262,10 +266,7 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) {
Enable: true,
DataDir: fileDataDir,
},
LocalDBStorage: das.LocalDBStorageConfig{
Enable: true,
DataDir: dbDataDir,
},
LocalDBStorage: dbConfig,

Key: das.KeyConfig{
KeyDir: keyDir,
Expand Down

0 comments on commit 86e2ae7

Please sign in to comment.