Merge branch 'master' into inputs-wiring
eljobe committed Aug 29, 2024
2 parents 1a353b2 + e7e9d1b commit 1056e22
Showing 12 changed files with 547 additions and 17 deletions.
14 changes: 11 additions & 3 deletions cmd/conf/init.go
@@ -30,7 +30,7 @@ type InitConfig struct {
PruneThreads int `koanf:"prune-threads"`
PruneTrieCleanCache int `koanf:"prune-trie-clean-cache"`
RecreateMissingStateFrom uint64 `koanf:"recreate-missing-state-from"`
RebuildLocalWasm bool `koanf:"rebuild-local-wasm"`
RebuildLocalWasm string `koanf:"rebuild-local-wasm"`
ReorgToBatch int64 `koanf:"reorg-to-batch"`
ReorgToMessageBatch int64 `koanf:"reorg-to-message-batch"`
ReorgToBlockBatch int64 `koanf:"reorg-to-block-batch"`
@@ -56,7 +56,7 @@ var InitConfigDefault = InitConfig{
PruneThreads: runtime.NumCPU(),
PruneTrieCleanCache: 600,
RecreateMissingStateFrom: 0, // 0 = disabled
RebuildLocalWasm: true,
RebuildLocalWasm: "auto",
ReorgToBatch: -1,
ReorgToMessageBatch: -1,
ReorgToBlockBatch: -1,
@@ -82,10 +82,14 @@ func InitConfigAddOptions(prefix string, f *pflag.FlagSet) {
f.Int(prefix+".prune-threads", InitConfigDefault.PruneThreads, "the number of threads to use when pruning")
f.Int(prefix+".prune-trie-clean-cache", InitConfigDefault.PruneTrieCleanCache, "amount of memory in megabytes to cache unchanged state trie nodes with when traversing state database during pruning")
f.Uint64(prefix+".recreate-missing-state-from", InitConfigDefault.RecreateMissingStateFrom, "block number to start recreating missing states from (0 = disabled)")
f.Bool(prefix+".rebuild-local-wasm", InitConfigDefault.RebuildLocalWasm, "rebuild local wasm database on boot if needed (otherwise will be done lazily)")
f.Int64(prefix+".reorg-to-batch", InitConfigDefault.ReorgToBatch, "rolls back the blockchain to a specified batch number")
f.Int64(prefix+".reorg-to-message-batch", InitConfigDefault.ReorgToMessageBatch, "rolls back the blockchain to the first batch at or before a given message index")
f.Int64(prefix+".reorg-to-block-batch", InitConfigDefault.ReorgToBlockBatch, "rolls back the blockchain to the first batch at or before a given block number")
f.String(prefix+".rebuild-local-wasm", InitConfigDefault.RebuildLocalWasm, "rebuild local wasm database on boot if needed (otherwise will be done lazily). Three modes are supported:\n"+
"\"auto\"- (enabled by default) if any previous rebuilding attempt was successful then rebuilding is disabled, otherwise rebuilding continues,\n"+
"\"force\"- commence rebuilding regardless of the status of previous attempts,\n"+
"\"false\"- do not rebuild on startup",
)
}

func (c *InitConfig) Validate() error {
@@ -110,6 +114,10 @@ func (c *InitConfig) Validate() error {
}
}
}
c.RebuildLocalWasm = strings.ToLower(c.RebuildLocalWasm)
if c.RebuildLocalWasm != "auto" && c.RebuildLocalWasm != "force" && c.RebuildLocalWasm != "false" {
return fmt.Errorf("invalid value of rebuild-local-wasm, want: auto or force or false, got: %s", c.RebuildLocalWasm)
}
return nil
}

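For illustration only (not part of this commit), a minimal sketch of how the new string-valued option parses, using the same pflag registration shown above; the "init" prefix is an assumption matching nitro's usual config wiring, and "auto" is the default.

package main

import (
	"fmt"

	"github.com/spf13/pflag"

	"github.com/offchainlabs/nitro/cmd/conf"
)

func main() {
	f := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	// Registers init.rebuild-local-wasm (among the other init flags) with default "auto".
	conf.InitConfigAddOptions("init", f)
	if err := f.Parse([]string{"--init.rebuild-local-wasm=force"}); err != nil {
		panic(err)
	}
	mode, _ := f.GetString("init.rebuild-local-wasm")
	fmt.Println(mode) // prints: force
}

Validate (above) then lower-cases the value and rejects anything other than "auto", "force", or "false".
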
18 changes: 13 additions & 5 deletions cmd/nitro/init.go
@@ -532,13 +532,21 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo
if err = gethexec.WriteToKeyValueStore(wasmDb, gethexec.RebuildingPositionKey, gethexec.RebuildingDone); err != nil {
return nil, nil, fmt.Errorf("unable to set rebuilding status of wasm store to done: %w", err)
}
} else if config.Init.RebuildLocalWasm {
position, err := gethexec.ReadFromKeyValueStore[common.Hash](wasmDb, gethexec.RebuildingPositionKey)
if err != nil {
log.Info("Unable to get codehash position in rebuilding of wasm store, its possible it isnt initialized yet, so initializing it and starting rebuilding", "err", err)
} else if config.Init.RebuildLocalWasm != "false" {
var position common.Hash
if config.Init.RebuildLocalWasm == "force" {
log.Info("Commencing force rebuilding of wasm store by setting codehash position in rebuilding to beginning")
if err := gethexec.WriteToKeyValueStore(wasmDb, gethexec.RebuildingPositionKey, common.Hash{}); err != nil {
return nil, nil, fmt.Errorf("unable to initialize codehash position in rebuilding of wasm store to beginning: %w", err)
}
} else {
position, err = gethexec.ReadFromKeyValueStore[common.Hash](wasmDb, gethexec.RebuildingPositionKey)
if err != nil {
log.Info("Unable to get codehash position in rebuilding of wasm store, its possible it isnt initialized yet, so initializing it and starting rebuilding", "err", err)
if err := gethexec.WriteToKeyValueStore(wasmDb, gethexec.RebuildingPositionKey, common.Hash{}); err != nil {
return nil, nil, fmt.Errorf("unable to initialize codehash position in rebuilding of wasm store to beginning: %w", err)
}
}
}
if position != gethexec.RebuildingDone {
startBlockHash, err := gethexec.ReadFromKeyValueStore[common.Hash](wasmDb, gethexec.RebuildingStartBlockHashKey)
@@ -550,7 +558,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo
startBlockHash = latestBlock.Hash()
}
log.Info("Starting or continuing rebuilding of wasm store", "codeHash", position, "startBlockHash", startBlockHash)
if err := gethexec.RebuildWasmStore(ctx, wasmDb, chainDb, config.Execution.RPC.MaxRecreateStateDepth, l2BlockChain, position, startBlockHash); err != nil {
if err := gethexec.RebuildWasmStore(ctx, wasmDb, chainDb, config.Execution.RPC.MaxRecreateStateDepth, &config.Execution.StylusTarget, l2BlockChain, position, startBlockHash); err != nil {
return nil, nil, fmt.Errorf("error rebuilding of wasm store: %w", err)
}
}
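Taken together with the config change in cmd/conf/init.go, the boot-time dispatch reduces to roughly the following simplified sketch (not the repository's code; logging, error handling, and the start-block-hash bookkeeping are elided):

switch config.Init.RebuildLocalWasm {
case "false":
	// Never rebuild on startup.
case "force":
	// Overwrite RebuildingPositionKey with common.Hash{} so rebuilding restarts
	// from the beginning, regardless of any previous attempt.
case "auto":
	// Read RebuildingPositionKey; if it is missing, initialize it to common.Hash{}.
	// If the stored position is not RebuildingDone, resume (or start) rebuilding.
}
// In the "force" and "auto" cases, RebuildWasmStore then runs from the stored
// position unless that position already equals RebuildingDone.
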
15 changes: 11 additions & 4 deletions execution/gethexec/executionengine.go
@@ -150,10 +150,7 @@ func (s *ExecutionEngine) MarkFeedStart(to arbutil.MessageIndex) {
}
}

func (s *ExecutionEngine) Initialize(rustCacheSize uint32, targetConfig *StylusTargetConfig) error {
if rustCacheSize != 0 {
programs.ResizeWasmLruCache(rustCacheSize)
}
func populateStylusTargetCache(targetConfig *StylusTargetConfig) error {
var effectiveStylusTarget string
target := rawdb.LocalTarget()
switch target {
@@ -171,6 +168,16 @@ return nil
return nil
}

func (s *ExecutionEngine) Initialize(rustCacheSize uint32, targetConfig *StylusTargetConfig) error {
if rustCacheSize != 0 {
programs.ResizeWasmLruCache(rustCacheSize)
}
if err := populateStylusTargetCache(targetConfig); err != nil {
return fmt.Errorf("error populating stylus target cache: %w", err)
}
return nil
}

func (s *ExecutionEngine) SetRecorder(recorder *BlockRecorder) {
if s.Started() {
panic("trying to set recorder after start")
199 changes: 199 additions & 0 deletions execution/gethexec/stylus_tracer.go
@@ -0,0 +1,199 @@
// Copyright 2024, Offchain Labs, Inc.
// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE

package gethexec

import (
"encoding/json"
"errors"
"fmt"
"math/big"
"strings"
"sync/atomic"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/log"
"github.com/offchainlabs/nitro/util/containers"
)

func init() {
tracers.DefaultDirectory.Register("stylusTracer", newStylusTracer, false)
}

// stylusTracer captures Stylus HostIOs and returns them in a structured format to be used in Cargo
// Stylus Replay.
type stylusTracer struct {
open *containers.Stack[HostioTraceInfo]
stack *containers.Stack[*containers.Stack[HostioTraceInfo]]
interrupt atomic.Bool
reason error
}

// HostioTraceInfo contains the captured HostIO log returned by stylusTracer.
type HostioTraceInfo struct {
// Name of the HostIO.
Name string `json:"name"`

// Arguments of the HostIO encoded as binary.
// For details about the encoding, check the HostIO implementation in
// arbitrator/wasm-libraries/user-host-trait.
Args hexutil.Bytes `json:"args"`

// Outputs of the HostIO encoded as binary.
// For details about the encoding, check the HostIO implementation in
// arbitrator/wasm-libraries/user-host-trait.
Outs hexutil.Bytes `json:"outs"`

// Amount of Ink before executing the HostIO.
StartInk uint64 `json:"startInk"`

// Amount of Ink after executing the HostIO.
EndInk uint64 `json:"endInk"`

// For *call HostIOs, the address of the called contract.
Address *common.Address `json:"address,omitempty"`

// For *call HostIOs, the steps performed by the called contract.
Steps *containers.Stack[HostioTraceInfo] `json:"steps,omitempty"`
}

// nestsHostios contains the hostios with nested calls.
var nestsHostios = map[string]bool{
"call_contract": true,
"delegate_call_contract": true,
"static_call_contract": true,
}

func newStylusTracer(ctx *tracers.Context, _ json.RawMessage) (tracers.Tracer, error) {
return &stylusTracer{
open: containers.NewStack[HostioTraceInfo](),
stack: containers.NewStack[*containers.Stack[HostioTraceInfo]](),
}, nil
}

func (t *stylusTracer) CaptureStylusHostio(name string, args, outs []byte, startInk, endInk uint64) {
if t.interrupt.Load() {
return
}
info := HostioTraceInfo{
Name: name,
Args: args,
Outs: outs,
StartInk: startInk,
EndInk: endInk,
}
if nestsHostios[name] {
last, err := t.open.Pop()
if err != nil {
t.Stop(err)
return
}
if !strings.HasPrefix(last.Name, "evm_") || last.Name[4:] != info.Name {
t.Stop(fmt.Errorf("trace inconsistency for %v: last opcode is %v", info.Name, last.Name))
return
}
if last.Steps == nil {
t.Stop(fmt.Errorf("trace inconsistency for %v: nil steps", info.Name))
return
}
info.Address = last.Address
info.Steps = last.Steps
}
t.open.Push(info)
}

func (t *stylusTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
if t.interrupt.Load() {
return
}

// This function adds the prefix evm_ because it assumes the opcode came from the EVM.
// If the opcode comes from WASM, the CaptureStylusHostio function will remove the evm prefix.
var name string
switch typ {
case vm.CALL:
name = "evm_call_contract"
case vm.DELEGATECALL:
name = "evm_delegate_call_contract"
case vm.STATICCALL:
name = "evm_static_call_contract"
case vm.CREATE:
name = "evm_create1"
case vm.CREATE2:
name = "evm_create2"
case vm.SELFDESTRUCT:
name = "evm_self_destruct"
}

inner := containers.NewStack[HostioTraceInfo]()
info := HostioTraceInfo{
Name: name,
Address: &to,
Steps: inner,
}
t.open.Push(info)
t.stack.Push(t.open)
t.open = inner
}

func (t *stylusTracer) CaptureExit(output []byte, gasUsed uint64, _ error) {
if t.interrupt.Load() {
return
}
var err error
t.open, err = t.stack.Pop()
if err != nil {
t.Stop(err)
}
}

func (t *stylusTracer) GetResult() (json.RawMessage, error) {
if t.reason != nil {
return nil, t.reason
}

var internalErr error
if t.open == nil {
internalErr = errors.Join(internalErr, fmt.Errorf("tracer.open is nil"))
}
if t.stack == nil {
internalErr = errors.Join(internalErr, fmt.Errorf("tracer.stack is nil"))
}
if !t.stack.Empty() {
internalErr = errors.Join(internalErr, fmt.Errorf("tracer.stack should be empty, but has %d values", t.stack.Len()))
}
if internalErr != nil {
log.Error("stylusTracer: internal error when generating a trace", "error", internalErr)
return nil, fmt.Errorf("internal error: %w", internalErr)
}

msg, err := json.Marshal(t.open)
if err != nil {
return nil, err
}
return msg, nil
}

func (t *stylusTracer) Stop(err error) {
t.reason = err
t.interrupt.Store(true)
}

// Unimplemented EVMLogger interface methods

func (t *stylusTracer) CaptureArbitrumTransfer(env *vm.EVM, from, to *common.Address, value *big.Int, before bool, purpose string) {
}
func (t *stylusTracer) CaptureArbitrumStorageGet(key common.Hash, depth int, before bool) {}
func (t *stylusTracer) CaptureArbitrumStorageSet(key, value common.Hash, depth int, before bool) {}
func (t *stylusTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
}
func (t *stylusTracer) CaptureEnd(output []byte, gasUsed uint64, err error) {}
func (t *stylusTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
}
func (t *stylusTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, _ *vm.ScopeContext, depth int, err error) {
}
func (t *stylusTracer) CaptureTxStart(gasLimit uint64) {}
func (t *stylusTracer) CaptureTxEnd(restGas uint64) {}
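
For context (not part of this commit), a hedged sketch of how a client might request the new tracer over JSON-RPC once it is registered as "stylusTracer"; the endpoint URL, port, and transaction hash below are placeholders, and the node is assumed to expose the debug namespace.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Placeholder endpoint for a local node with the debug API enabled.
	client, err := rpc.Dial("http://localhost:8547")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Hypothetical transaction hash.
	txHash := "0x0000000000000000000000000000000000000000000000000000000000000000"
	var trace json.RawMessage
	err = client.CallContext(context.Background(), &trace, "debug_traceTransaction",
		txHash, map[string]any{"tracer": "stylusTracer"})
	if err != nil {
		log.Fatal(err)
	}
	// The result is a JSON array of HostioTraceInfo entries: name, args, outs,
	// startInk, endInk, and, for call hostios, address plus nested steps.
	fmt.Println(string(trace))
}

This matches the tracer's stated purpose of producing a structured trace for cargo stylus replay.
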
7 changes: 6 additions & 1 deletion execution/gethexec/wasmstorerebuilder.go
@@ -59,9 +59,14 @@ func WriteToKeyValueStore[T any](store ethdb.KeyValueStore, key []byte, val T) e
// It also stores a special value in RebuildingStartBlockHashKey, set only once when rebuilding commences, recording the
// hash of the latest block at that time. This is used to avoid recomputing the assembly and module of contracts that
// were created after rebuilding commenced, since those are added during sync anyway.
func RebuildWasmStore(ctx context.Context, wasmStore ethdb.KeyValueStore, chainDb ethdb.Database, maxRecreateStateDepth int64, l2Blockchain *core.BlockChain, position, rebuildingStartBlockHash common.Hash) error {
func RebuildWasmStore(ctx context.Context, wasmStore ethdb.KeyValueStore, chainDb ethdb.Database, maxRecreateStateDepth int64, targetConfig *StylusTargetConfig, l2Blockchain *core.BlockChain, position, rebuildingStartBlockHash common.Hash) error {
var err error
var stateDb *state.StateDB

if err := populateStylusTargetCache(targetConfig); err != nil {
return fmt.Errorf("error populating stylus target cache: %w", err)
}

latestHeader := l2Blockchain.CurrentBlock()
// Attempt to get state at the start block when rebuilding commenced; if not available (in the case of non-archival nodes) use the latest state
rebuildingStartHeader := l2Blockchain.GetHeaderByHash(rebuildingStartBlockHash)
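As a small illustration of the bookkeeping described in the comment above (not part of this commit), a sketch that inspects a wasm store's rebuilding status with the same exported helpers; how wasmDb is obtained is elided, and rebuildStatus is a hypothetical name.

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"

	"github.com/offchainlabs/nitro/execution/gethexec"
)

// rebuildStatus reports whether rebuilding never started, finished, or was interrupted.
func rebuildStatus(wasmDb ethdb.KeyValueStore) string {
	position, err := gethexec.ReadFromKeyValueStore[common.Hash](wasmDb, gethexec.RebuildingPositionKey)
	switch {
	case err != nil:
		return "never started (or read error)"
	case position == gethexec.RebuildingDone:
		return "done" // with rebuild-local-wasm=auto, nothing more will run on boot
	default:
		return fmt.Sprintf("interrupted at codehash %v", position)
	}
}
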
1 change: 1 addition & 0 deletions system_tests/contract_tx_test.go
@@ -51,6 +51,7 @@ func TestContractTxDeploy(t *testing.T) {
0xF3, // RETURN
}
var requestId common.Hash
// #nosec G115
requestId[0] = uint8(stateNonce)
contractTx := &types.ArbitrumContractTx{
ChainId: params.ArbitrumDevTestChainConfig().ChainID,
3 changes: 2 additions & 1 deletion system_tests/program_test.go
@@ -1903,7 +1903,8 @@ func TestWasmStoreRebuilding(t *testing.T) {

// Start rebuilding and wait for it to finish
log.Info("starting rebuilding of wasm store")
Require(t, gethexec.RebuildWasmStore(ctx, wasmDbAfterDelete, nodeB.ExecNode.ChainDB, nodeB.ExecNode.ConfigFetcher().RPC.MaxRecreateStateDepth, bc, common.Hash{}, bc.CurrentBlock().Hash()))
execConfig := nodeB.ExecNode.ConfigFetcher()
Require(t, gethexec.RebuildWasmStore(ctx, wasmDbAfterDelete, nodeB.ExecNode.ChainDB, execConfig.RPC.MaxRecreateStateDepth, &execConfig.StylusTarget, bc, common.Hash{}, bc.CurrentBlock().Hash()))

wasmDbAfterRebuild := nodeB.ExecNode.Backend.ArbInterface().BlockChain().StateCache().WasmStore()

