Legacy Executor Checks (#131)
* feat(exec): legacy executor check contrived tests

wip: ibs resolve smt retain list

async verification of batches: skeleton in place and working

noticed hermez db creation was returning an error when that can't happen any more, so updated the API there as well

move verification request sending to the verification stage

some tidying around the new verifier stage

fixing some hermez db uses that still expected an error

commenting out the verifier test temporarily
removing some dead code for now

expect something similar in the future when we tackle witness performance, but it's cleaner without this for now

feat(l1): oldAccInputHash checker (and in hack) (#154)

wire up the verifier with the services it needs

handling a context issue in mapmutation for witness generation and db commit timing

getting oldAccInputHash for executor

sending payload to the executor for verification

making the executor run but ignoring errors for now

also sending full witness whilst we debug the problems with it

ensure we can run without executors in config and just ignore the checks

some tidying for the verification stages

sequencer execute stage: log consumed counters and better block sealing

allow some quieter logging when unwinding for witness

nice output of counters for logging

correct logger for verifier

nice output of consumed counters from executor in logs

* adding strict executor mode flag

---------

Co-authored-by: Scott Fairclough <[email protected]>
revitteth and hexoscott authored Feb 26, 2024
1 parent a0df5d2 commit 61f0b69
Showing 30 changed files with 8,032 additions and 28 deletions.
2 changes: 2 additions & 0 deletions cmd/hack/hack.go
@@ -1513,6 +1513,8 @@ func main() {
err = rlptest()
case "readAccountAtVersion":
err = readAccountAtVersion(*chaindata, *account, uint64(*block))
case "getOldAccInputHash":
err = getOldAccInputHash(uint64(*block))
}

if err != nil {
65 changes: 65 additions & 0 deletions cmd/hack/hack_zkevm.go
@@ -4,9 +4,13 @@ import (
"context"
"encoding/binary"
"fmt"
ethereum "github.com/ledgerwatch/erigon"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/mdbx"
"github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/eth/ethconfig"
"github.com/ledgerwatch/erigon/zkevm/etherman"
"sort"
"strings"
)
@@ -62,3 +66,64 @@ func countAccounts(chaindata string) error {

return nil
}

func getOldAccInputHash(batchNum uint64) error {
sig := "0x25280169" // hardcoded ABI function selector
rollupID := "0000000000000000000000000000000000000000000000000000000000000001"
batchNumber := fmt.Sprintf("%064x", batchNum)
addr := libcommon.HexToAddress("0x9fB0B4A5d4d60aaCfa8DC20B8DF5528Ab26848d3")

cfg := &ethconfig.Zk{
L1ChainId: 11155111,
L1RpcUrl: "https://rpc.sepolia.org",
}
etherMan := newEtherMan(cfg)

resp, err := etherMan.EthClient.CallContract(context.Background(), ethereum.CallMsg{
To: &addr,
Data: common.FromHex(sig + rollupID + batchNumber),
}, nil)

if err != nil {
return err
}
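// the result is ABI-encoded as three 32-byte words: word 1 holds the hash, and the
// low-order 8 bytes of words 2 and 3 hold the timestamp and last batch number
// (hence the slice offsets used below)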

if len(resp) < 32 {
return fmt.Errorf("response too short to contain hash data")
}
h := libcommon.BytesToHash(resp[:32])
fmt.Printf("hash: %s\n", h.String())

if len(resp) < 64 {
return fmt.Errorf("response too short to contain timestamp data")
}
ts := binary.BigEndian.Uint64(resp[56:64])

if len(resp) < 96 {
return fmt.Errorf("response too short to contain last batch number data")
}
lastBatchNumber := binary.BigEndian.Uint64(resp[88:96])

fmt.Println("timestamp: ", ts)
fmt.Println("last batch number: ", lastBatchNumber)

return nil
}

func newEtherMan(cfg *ethconfig.Zk) *etherman.Client {
ethmanConf := etherman.Config{
URL: cfg.L1RpcUrl,
L1ChainID: cfg.L1ChainId,
L2ChainID: cfg.L2ChainId,
PoEAddr: cfg.L1PolygonRollupManager,
MaticAddr: cfg.L1MaticContractAddress,
GlobalExitRootManagerAddr: cfg.L1GERManagerContractAddress,
}

em, err := etherman.NewClient(ethmanConf)
// panic on error
if err != nil {
panic(err)
}
return em
}
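With the dispatch added to cmd/hack/hack.go above, the check can be run from the hack tool, e.g. go run ./cmd/hack --action getOldAccInputHash --block <batch-number> (flag names assumed from the existing hack tool; this action only reads the block/batch number plus the L1 settings hardcoded above).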
15 changes: 15 additions & 0 deletions cmd/utils/flags.go
@@ -422,6 +422,21 @@ var (
Usage: "Rebuild the state tree after this many blocks behind",
Value: 10000,
}
SequencerAddressFlag = cli.StringFlag{
Name: "zkevm.sequencer-address",
Usage: "The sequencer address to use if running as a sequencer",
Value: "",
}
ExecutorUrls = cli.StringFlag{
Name: "zkevm.executor-urls",
Usage: "A comma separated list of grpc addresses that host executors",
Value: "",
}
ExecutorStrictMode = cli.BoolFlag{
Name: "zkevm.executor-strict",
Usage: "Defaulted to true to ensure you must set some executor URLs, bypass this restriction by setting to false",
Value: true,
}
RpcRateLimitsFlag = cli.IntFlag{
Name: "zkevm.rpc-ratelimit",
Usage: "RPC rate limit in requests per second.",
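Taken together, a sequencer can be started with something like --zkevm.sequencer-address=<address> --zkevm.executor-urls=executor-a:50071,executor-b:50071 --zkevm.executor-strict=true (the address and hosts here are placeholders). The URL list is split on commas in turbo/cli/flags_zkevm.go further down, and strict mode refuses to start unless at least one URL is supplied.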
13 changes: 13 additions & 0 deletions core/vm/zk_counters.go
@@ -1,6 +1,7 @@
package vm

import (
"fmt"
"github.com/holiman/uint256"
"github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/zk/hermez_db"
@@ -26,6 +27,18 @@ type Counter struct {

type Counters map[CounterKey]*Counter

func (c Counters) UsedAsString() string {
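// build a single-line summary of used counters for log output,
// e.g. "[SHA: 12][A: 3][B: 0][K: 1][M: 0][P: 2][S: 4][D: 0]"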
res := fmt.Sprintf("[SHA: %v]", c[SHA].used)
res += fmt.Sprintf("[A: %v]", c[A].used)
res += fmt.Sprintf("[B: %v]", c[B].used)
res += fmt.Sprintf("[K: %v]", c[K].used)
res += fmt.Sprintf("[M: %v]", c[M].used)
res += fmt.Sprintf("[P: %v]", c[P].used)
res += fmt.Sprintf("[S: %v]", c[S].used)
res += fmt.Sprintf("[D: %v]", c[D].used)
return res
}

type CounterKey string

var (
35 changes: 35 additions & 0 deletions eth/backend.go
@@ -113,9 +113,11 @@ import (
"github.com/ledgerwatch/erigon/zk/contracts"
"github.com/ledgerwatch/erigon/zk/datastream/client"
"github.com/ledgerwatch/erigon/zk/hermez_db"
"github.com/ledgerwatch/erigon/zk/legacy_executor_verifier"
zkStages "github.com/ledgerwatch/erigon/zk/stages"
"github.com/ledgerwatch/erigon/zk/syncer"
txpool2 "github.com/ledgerwatch/erigon/zk/txpool"
"github.com/ledgerwatch/erigon/zk/witness"
"github.com/ledgerwatch/erigon/zkevm/etherman"
)

@@ -737,6 +739,38 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
cfg.L1QueryDelay,
)

witnessGenerator := witness.NewGenerator(
config.Dirs,
config.HistoryV3,
backend.agg,
backend.blockReader,
backend.chainConfig,
backend.engine,
)

var legacyExecutors []legacy_executor_verifier.ILegacyExecutor
if len(cfg.ExecutorUrls) > 0 && cfg.ExecutorUrls[0] != "" {
levCfg := legacy_executor_verifier.Config{
GrpcUrls: cfg.ExecutorUrls,
Timeout: time.Second * 5,
}
executors := legacy_executor_verifier.NewExecutors(levCfg)
for _, e := range executors {
legacyExecutors = append(legacyExecutors, e)
}
}

verifier := legacy_executor_verifier.NewLegacyExecutorVerifier(
*cfg,
legacyExecutors,
backend.chainConfig,
backend.chainDB,
witnessGenerator,
zkL1Syncer,
)

// start async verification work; verification requests are sent to the verifier from the sequencer executor-verify stage
verifier.StartWork()

backend.syncStages = stages2.NewSequencerZkStages(
backend.sentryCtx,
backend.chainDB,
@@ -752,6 +786,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
zkL1Syncer,
backend.txPool2,
backend.txPool2DB,
verifier,
)

backend.syncUnwindOrder = zkStages.ZkSequencerUnwindOrder
3 changes: 3 additions & 0 deletions eth/ethconfig/config_zkevm.go
@@ -19,6 +19,9 @@ type Zk struct {
L1FirstBlock uint64
RpcRateLimits int
DatastreamVersion int
SequencerAddress common.Address
ExecutorUrls []string
ExecutorStrictMode bool

RebuildTreeAfter uint64
}
17 changes: 12 additions & 5 deletions eth/stagedsync/stage_hashstate.go
@@ -38,6 +38,11 @@ type HashStateCfg struct {

historyV3 bool
agg *state.AggregatorV3
quiet bool
}

func (h *HashStateCfg) SetQuiet(quiet bool) {
h.quiet = quiet
}

func StageHashStateCfg(db kv.RwDB, dirs datadir.Dirs, historyV3 bool, agg *state.AggregatorV3) HashStateCfg {
@@ -139,13 +144,13 @@ func unwindHashStateStageImpl(logPrefix string, u *UnwindState, s *StageState, t
prom := NewPromoter(tx, cfg.dirs, ctx)
if cfg.historyV3 {
cfg.agg.SetTx(tx)
if err := prom.UnwindOnHistoryV3(logPrefix, cfg.agg, s.BlockNumber, u.UnwindPoint, false, true); err != nil {
if err := prom.UnwindOnHistoryV3(logPrefix, cfg.agg, s.BlockNumber, u.UnwindPoint, false, true, cfg.quiet); err != nil {
return err
}
if err := prom.UnwindOnHistoryV3(logPrefix, cfg.agg, s.BlockNumber, u.UnwindPoint, false, false); err != nil {
if err := prom.UnwindOnHistoryV3(logPrefix, cfg.agg, s.BlockNumber, u.UnwindPoint, false, false, cfg.quiet); err != nil {
return err
}
if err := prom.UnwindOnHistoryV3(logPrefix, cfg.agg, s.BlockNumber, u.UnwindPoint, true, false); err != nil {
if err := prom.UnwindOnHistoryV3(logPrefix, cfg.agg, s.BlockNumber, u.UnwindPoint, true, false, cfg.quiet); err != nil {
return err
}
return nil
@@ -723,8 +728,10 @@ func (p *Promoter) Promote(logPrefix string, from, to uint64, storage, codes boo
return nil
}

func (p *Promoter) UnwindOnHistoryV3(logPrefix string, agg *state.AggregatorV3, unwindFrom, unwindTo uint64, storage, codes bool) error {
log.Info(fmt.Sprintf("[%s] Unwinding started", logPrefix), "from", unwindFrom, "to", unwindTo, "storage", storage, "codes", codes)
func (p *Promoter) UnwindOnHistoryV3(logPrefix string, agg *state.AggregatorV3, unwindFrom, unwindTo uint64, storage, codes bool, quiet bool) error {
if !quiet {
log.Info(fmt.Sprintf("[%s] Unwinding started", logPrefix), "from", unwindFrom, "to", unwindTo, "storage", storage, "codes", codes)
}

txnFrom, err := rawdbv3.TxNums.Min(p.tx, unwindTo+1)
if err != nil {
1 change: 1 addition & 0 deletions eth/stagedsync/stages/stages_zk.go
@@ -27,4 +27,5 @@ var (
ForkId SyncStage = "ForkId"
L1InfoTree SyncStage = "L1InfoTree"
HighestUsedL1InfoIndex SyncStage = "HighestUsedL1InfoTree"
SequenceExecutorVerify SyncStage = "SequenceExecutorVerify"
)
3 changes: 3 additions & 0 deletions hermezconfig-dev.yaml.example
@@ -20,6 +20,9 @@ zkevm.rpc-ratelimit: 250
zkevm.data-stream-port: 6900
zkevm.datastream-version: 2
zkevm.data-stream-host: "localhost"
zkevm.sequencer-address: "0xfa3b44587990f97ba8b6ba7e230a5f0e95d14b3d"
zkevm.executor-strict: true
zkevm.executor-urls: "51.210.116.237:50071"

externalcl: true
http.api : ["eth","debug","net","trace","web3","erigon","txpool","zkevm"]
3 changes: 3 additions & 0 deletions turbo/cli/default_flags.go
@@ -181,6 +181,9 @@ var DefaultFlags = []cli.Flag{
&utils.RpcRateLimitsFlag,
&utils.DatastreamVersionFlag,
&utils.RebuildTreeAfterFlag,
&utils.SequencerAddressFlag,
&utils.ExecutorUrls,
&utils.ExecutorStrictMode,
&utils.DataStreamHost,
&utils.DataStreamPort,
}
14 changes: 14 additions & 0 deletions turbo/cli/flags_zkevm.go
@@ -8,6 +8,7 @@ import (
"github.com/ledgerwatch/erigon/eth/ethconfig"
"github.com/ledgerwatch/erigon/zk/sequencer"
"github.com/urfave/cli/v2"
"strings"
)

func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) {
@@ -42,12 +43,25 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) {
RpcRateLimits: ctx.Int(utils.RpcRateLimitsFlag.Name),
DatastreamVersion: ctx.Int(utils.DatastreamVersionFlag.Name),
RebuildTreeAfter: ctx.Uint64(utils.RebuildTreeAfterFlag.Name),
SequencerAddress: libcommon.HexToAddress(ctx.String(utils.SequencerAddressFlag.Name)),
ExecutorUrls: strings.Split(ctx.String(utils.ExecutorUrls.Name), ","),
ExecutorStrictMode: ctx.Bool(utils.ExecutorStrictMode.Name),
}

checkFlag(utils.L2ChainIdFlag.Name, cfg.Zk.L2ChainId)
if !sequencer.IsSequencer() {
checkFlag(utils.L2RpcUrlFlag.Name, cfg.Zk.L2RpcUrl)
checkFlag(utils.L2DataStreamerUrlFlag.Name, cfg.Zk.L2DataStreamerUrl)
} else {
checkFlag(utils.SequencerAddressFlag.Name, cfg.Zk.SequencerAddress)
checkFlag(utils.ExecutorUrls.Name, cfg.Zk.ExecutorUrls)
checkFlag(utils.ExecutorStrictMode.Name, cfg.Zk.ExecutorStrictMode)

// if we are running in strict mode (the default) and have no executor URLs then we panic
if cfg.Zk.ExecutorStrictMode && (len(cfg.Zk.ExecutorUrls) == 0 || cfg.Zk.ExecutorUrls[0] == "") {
panic("You must set executor urls when running in executor strict mode (zkevm.executor-strict)")
}

}
checkFlag(utils.L1ChainIdFlag.Name, cfg.Zk.L1ChainId)
checkFlag(utils.L1RpcUrlFlag.Name, cfg.Zk.L1RpcUrl)
3 changes: 3 additions & 0 deletions turbo/stages/zk_stages.go
@@ -16,6 +16,7 @@ import (
"github.com/ledgerwatch/erigon/turbo/shards"
"github.com/ledgerwatch/erigon/turbo/snapshotsync"
"github.com/ledgerwatch/erigon/zk/datastream/client"
"github.com/ledgerwatch/erigon/zk/legacy_executor_verifier"
zkStages "github.com/ledgerwatch/erigon/zk/stages"
"github.com/ledgerwatch/erigon/zk/syncer"
"github.com/ledgerwatch/erigon/zk/txpool"
@@ -96,6 +97,7 @@ func NewSequencerZkStages(ctx context.Context,
l1Syncer *syncer.L1Syncer,
txPool *txpool.TxPool,
txPoolDb kv.RwDB,
verifier *legacy_executor_verifier.LegacyExecutorVerifier,
) []*stagedsync.Stage {
dirs := cfg.Dirs
blockReader := snapshotsync.NewBlockReaderWithSnapshots(snapshots, cfg.TransactionsV3)
@@ -132,6 +134,7 @@
),
stagedsync.StageHashStateCfg(db, dirs, cfg.HistoryV3, agg),
zkStages.StageZkInterHashesCfg(db, true, true, false, dirs.Tmp, blockReader, controlServer.Hd, cfg.HistoryV3, agg, cfg.Zk),
zkStages.StageSequencerExecutorVerifyCfg(db, verifier),
stagedsync.StageHistoryCfg(db, cfg.Prune, dirs.Tmp),
stagedsync.StageLogIndexCfg(db, cfg.Prune, dirs.Tmp),
stagedsync.StageCallTracesCfg(db, cfg.Prune, 0, dirs.Tmp),
10 changes: 10 additions & 0 deletions turbo/trie/retain_list.go
@@ -358,3 +358,13 @@ func (rl *RetainList) Rewind() {
func (rl *RetainList) String() string {
return fmt.Sprintf("%x", rl.hexes)
}

// AlwaysTrueRetainDecider retains every trie node and treats all code as touched,
// e.g. when a full witness is required
type AlwaysTrueRetainDecider struct{}

func (a AlwaysTrueRetainDecider) Retain([]byte) bool {
return true
}

func (a AlwaysTrueRetainDecider) IsCodeTouched(libcommon.Hash) bool {
return true
}
14 changes: 12 additions & 2 deletions zk/datastream/server/data_stream_server.go
@@ -15,6 +15,13 @@ type BookmarkType byte

var BlockBookmarkType BookmarkType = 0

type OperationMode int

const (
StandardOperationMode OperationMode = iota
ExecutorOperationMode
)

var entryTypeMappings = map[types.EntryType]datastreamer.EntryType{
types.EntryTypeStartL2Block: datastreamer.EntryType(1),
types.EntryTypeL2Tx: datastreamer.EntryType(2),
@@ -25,17 +32,19 @@ var entryTypeMappings = map[types.EntryType]datastreamer.EntryType{
type DataStreamServer struct {
stream *datastreamer.StreamServer
chainId uint64
mode OperationMode
}

type DataStreamEntry interface {
EntryType() types.EntryType
Bytes(bigEndian bool) []byte
}

func NewDataStreamServer(stream *datastreamer.StreamServer, chainId uint64) *DataStreamServer {
func NewDataStreamServer(stream *datastreamer.StreamServer, chainId uint64, mode OperationMode) *DataStreamServer {
return &DataStreamServer{
stream: stream,
chainId: chainId,
mode: mode,
}
}

@@ -103,7 +112,8 @@ func (srv *DataStreamServer) CreateTransactionEntry(

encoded := writer.Bytes()

if fork >= 5 {
// we only want to append the effective price when not running in an executor context
if fork >= 5 && srv.mode != ExecutorOperationMode {
encoded = append(encoded, effectiveGasPricePercentage)
}

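A minimal sketch of constructing the data stream server in executor mode (the datastreamer.StreamServer instance and chain id are assumed to come from existing setup code, and the package is assumed to be imported as server):

// executor mode causes the effective gas price byte to be omitted from transaction entries
srv := server.NewDataStreamServer(streamServer, chainID, server.ExecutorOperationMode)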